The following commit has been merged in the master branch:

commit e47e0a632612d3cbedbe3570cf825177a10d2821
Merge: 18f9c7f4291197f3f4c1949b9bc5705954cb2574 a9e41a529681b38087c91ebc0bb91e12f510ca2d
Author: Stephen Rothwell <sfr@canb.auug.org.au>
Date:   Wed May 8 11:26:30 2019 +1000
Merge remote-tracking branch 'net-next/master'
diff --combined .mailmap index 0d67c3a7a7d5,a51547ac96f9..07a777f9d687 --- a/.mailmap +++ b/.mailmap @@@ -16,8 -16,9 +16,11 @@@ Alan Cox <alan@lxorguk.ukuu.org.uk Alan Cox root@hraefn.swansea.linux.org.uk Aleksey Gorelov aleksey_gorelov@phoenix.com Aleksandar Markovic aleksandar.markovic@mips.com aleksandar.markovic@imgtec.com +Alex Shi alex.shi@linux.alibaba.com alex.shi@intel.com +Alex Shi alex.shi@linux.alibaba.com alex.shi@linaro.org + Alexei Starovoitov ast@kernel.org ast@plumgrid.com + Alexei Starovoitov ast@kernel.org alexei.starovoitov@gmail.com + Alexei Starovoitov ast@kernel.org ast@fb.com Al Viro viro@ftp.linux.org.uk Al Viro viro@zenIV.linux.org.uk Andi Shyti andi@etezian.org andi.shyti@samsung.com @@@ -48,6 -49,12 +51,12 @@@ Christoph Hellwig <hch@lst.de Christophe Ricard christophe.ricard@gmail.com Corey Minyard minyard@acm.org Damian Hobson-Garcia dhobsong@igel.co.jp + Daniel Borkmann daniel@iogearbox.net dborkman@redhat.com + Daniel Borkmann daniel@iogearbox.net dborkmann@redhat.com + Daniel Borkmann daniel@iogearbox.net danborkmann@iogearbox.net + Daniel Borkmann daniel@iogearbox.net daniel.borkmann@tik.ee.ethz.ch + Daniel Borkmann daniel@iogearbox.net danborkmann@googlemail.com + Daniel Borkmann daniel@iogearbox.net dxchgb@gmail.com David Brownell david-b@pacbell.net David Woodhouse dwmw2@shinybook.infradead.org Dengcheng Zhu dzhu@wavecomp.com dengcheng.zhu@mips.com @@@ -119,8 -126,6 +128,8 @@@ Leonid I Ananiev <leonid.i.ananiev@inte Linas Vepstas linas@austin.ibm.com Linus Lüssing linus.luessing@c0d3.blue linus.luessing@web.de Linus Lüssing linus.luessing@c0d3.blue linus.luessing@ascom.ch +Li Yang leoyang.li@nxp.com leo@zh-kernel.org +Li Yang leoyang.li@nxp.com leoli@freescale.com Maciej W. Rozycki macro@mips.com macro@imgtec.com Marcin Nowakowski marcin.nowakowski@mips.com marcin.nowakowski@imgtec.com Mark Brown broonie@sirena.org.uk @@@ -193,7 -198,6 +202,7 @@@ Santosh Shilimkar <ssantosh@kernel.org Santosh Shilimkar santosh.shilimkar@oracle.org Sascha Hauer s.hauer@pengutronix.de S.Çağlar Onur caglar@pardus.org.tr +Sean Nyekjaer sean@geanix.com sean.nyekjaer@prevas.dk Sebastian Reichel sre@kernel.org sre@debian.org Sebastian Reichel sre@kernel.org sebastian.reichel@collabora.co.uk Shiraz Hashim shiraz.linux.kernel@gmail.com shiraz.hashim@st.com @@@ -212,8 -216,6 +221,8 @@@ Tejun Heo <htejun@gmail.com Thomas Graf tgraf@suug.ch Thomas Pedersen twp@codeaurora.org Tony Luck tony.luck@intel.com +TripleX Chung xxx.phy@gmail.com zhongyu@18mail.cn +TripleX Chung xxx.phy@gmail.com triplex@zh-kernel.org Tsuneo Yoshioka Tsuneo.Yoshioka@f-secure.com Uwe Kleine-König ukleinek@informatik.uni-freiburg.de Uwe Kleine-König ukl@pengutronix.de diff --combined MAINTAINERS index e2bd514ca559,4f6ea0c1d8b5..bfbd5775a144 --- a/MAINTAINERS +++ b/MAINTAINERS @@@ -268,13 -268,12 +268,13 @@@ L: linux-gpio@vger.kernel.or S: Maintained F: drivers/gpio/gpio-104-idio-16.c
-ACCES 104-QUAD-8 IIO DRIVER +ACCES 104-QUAD-8 DRIVER M: William Breathitt Gray vilhelm.gray@gmail.com L: linux-iio@vger.kernel.org S: Maintained +F: Documentation/ABI/testing/sysfs-bus-counter-104-quad-8 F: Documentation/ABI/testing/sysfs-bus-iio-counter-104-quad-8 -F: drivers/iio/counter/104-quad-8.c +F: drivers/counter/104-quad-8.c
ACCES PCI-IDIO-16 GPIO DRIVER M: William Breathitt Gray vilhelm.gray@gmail.com @@@ -469,7 -468,7 +469,7 @@@ ADM1025 HARDWARE MONITOR DRIVE M: Jean Delvare jdelvare@suse.com L: linux-hwmon@vger.kernel.org S: Maintained -F: Documentation/hwmon/adm1025 +F: Documentation/hwmon/adm1025.rst F: drivers/hwmon/adm1025.c
ADM1029 HARDWARE MONITOR DRIVER @@@ -521,7 -520,7 +521,7 @@@ ADS1015 HARDWARE MONITOR DRIVE M: Dirk Eibach eibach@gdsys.de L: linux-hwmon@vger.kernel.org S: Maintained -F: Documentation/hwmon/ads1015 +F: Documentation/hwmon/ads1015.rst F: drivers/hwmon/ads1015.c F: include/linux/platform_data/ads1015.h
@@@ -534,7 -533,7 +534,7 @@@ ADT7475 HARDWARE MONITOR DRIVE M: Jean Delvare jdelvare@suse.com L: linux-hwmon@vger.kernel.org S: Maintained -F: Documentation/hwmon/adt7475 +F: Documentation/hwmon/adt7475.rst F: drivers/hwmon/adt7475.c
ADVANSYS SCSI DRIVER @@@ -765,7 -764,7 +765,7 @@@ AMD FAM15H PROCESSOR POWER MONITORING D M: Huang Rui ray.huang@amd.com L: linux-hwmon@vger.kernel.org S: Supported -F: Documentation/hwmon/fam15h_power +F: Documentation/hwmon/fam15h_power.rst F: drivers/hwmon/fam15h_power.c
AMD FCH GPIO DRIVER @@@ -817,14 -816,6 +817,14 @@@ F: drivers/gpu/drm/amd/include/vi_struc F: drivers/gpu/drm/amd/include/v9_structs.h F: include/uapi/linux/kfd_ioctl.h
+AMD MP2 I2C DRIVER +M: Elie Morisse syniurge@gmail.com +M: Nehal Shah nehal-bakulchandra.shah@amd.com +M: Shyam Sundar S K shyam-sundar.s-k@amd.com +L: linux-i2c@vger.kernel.org +S: Maintained +F: drivers/i2c/busses/i2c-amd-mp2* + AMD POWERPLAY M: Rex Zhu rex.zhu@amd.com M: Evan Quan evan.quan@amd.com @@@ -877,7 -868,7 +877,7 @@@ L: linux-iio@vger.kernel.or W: http://ez.analog.com/community/linux-device-drivers S: Supported F: drivers/iio/adc/ad7606.c -F: Documentation/devicetree/bindings/iio/adc/ad7606.txt +F: Documentation/devicetree/bindings/iio/adc/adi,ad7606.txt
ANALOG DEVICES INC AD7768-1 DRIVER M: Stefan Popa stefan.popa@analog.com @@@ -959,7 -950,6 +959,7 @@@ F: drivers/dma/dma-axi-dmac. ANALOG DEVICES INC IIO DRIVERS M: Lars-Peter Clausen lars@metafoo.de M: Michael Hennerich Michael.Hennerich@analog.com +M: Stefan Popa stefan.popa@analog.com W: http://wiki.analog.com/ W: http://ez.analog.com/community/linux-device-drivers S: Supported @@@ -970,16 -960,10 +970,16 @@@ F: drivers/iio/adc/ltc2497 X: drivers/iio/*/adjd* F: drivers/staging/iio/*/ad*
+ANALOGBITS PLL LIBRARIES +M: Paul Walmsley paul.walmsley@sifive.com +S: Supported +F: drivers/clk/analogbits/* +F: include/linux/clk/analogbits* + ANDES ARCHITECTURE M: Greentime Hu green.hu@gmail.com M: Vincent Chen deanbo422@gmail.com -T: git https://github.com/andestech/linux.git +T: git https://git.kernel.org/pub/scm/linux/kernel/git/greentime/linux.git S: Supported F: arch/nds32/ F: Documentation/devicetree/bindings/interrupt-controller/andestech,ativic32.txt @@@ -1701,21 -1685,11 +1701,21 @@@ L: linux-arm-kernel@lists.infradead.or S: Maintained
ARM/INTEL IXP4XX ARM ARCHITECTURE +M: Linus Walleij linusw@kernel.org M: Imre Kaloz kaloz@openwrt.org M: Krzysztof Halasa khalasa@piap.pl L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) S: Maintained +F: Documentation/devicetree/bindings/arm/intel-ixp4xx.yaml +F: Documentation/devicetree/bindings/gpio/intel,ixp4xx-gpio.txt +F: Documentation/devicetree/bindings/interrupt-controller/intel,ixp4xx-interrupt.yaml +F: Documentation/devicetree/bindings/timer/intel,ixp4xx-timer.yaml F: arch/arm/mach-ixp4xx/ +F: drivers/clocksource/timer-ixp4xx.c +F: drivers/gpio/gpio-ixp4xx.c +F: drivers/irqchip/irq-ixp4xx.c +F: include/linux/irqchip/irq-ixp4xx.h +F: include/linux/platform_data/timer-ixp4xx.h
ARM/INTEL RESEARCH IMOTE/STARGATE 2 MACHINE SUPPORT M: Jonathan Cameron jic23@cam.ac.uk @@@ -2011,7 -1985,7 +2011,7 @@@ W: http://www.armlinux.org.uk S: Maintained
ARM/QUALCOMM SUPPORT -M: Andy Gross andy.gross@linaro.org +M: Andy Gross agross@kernel.org M: David Brown david.brown@linaro.org L: linux-arm-msm@vger.kernel.org S: Maintained @@@ -2216,7 -2190,6 +2216,7 @@@ F: arch/arm/mach-socfpga F: arch/arm/boot/dts/socfpga* F: arch/arm/configs/socfpga_defconfig F: arch/arm64/boot/dts/altera/ +F: arch/arm64/boot/dts/intel/ W: http://www.rocketboards.org T: git git://git.kernel.org/pub/scm/linux/kernel/git/dinguyen/linux.git
@@@ -2540,7 -2513,7 +2540,7 @@@ ASC7621 HARDWARE MONITOR DRIVE M: George Joseph george.joseph@fairview5.com L: linux-hwmon@vger.kernel.org S: Maintained -F: Documentation/hwmon/asc7621 +F: Documentation/hwmon/asc7621.rst F: drivers/hwmon/asc7621.c
ASPEED VIDEO ENGINE DRIVER @@@ -2587,7 -2560,7 +2587,7 @@@ F: include/linux/dmaengine. F: include/linux/async_tx.h
AT24 EEPROM DRIVER -M: Bartosz Golaszewski brgl@bgdev.pl +M: Bartosz Golaszewski bgolaszewski@baylibre.com L: linux-i2c@vger.kernel.org T: git git://git.kernel.org/pub/scm/linux/kernel/git/brgl/linux.git S: Maintained @@@ -2821,10 -2794,13 +2821,13 @@@ M: Simon Wunderlich <sw@simonwunderlich M: Antonio Quartulli a@unstable.cc L: b.a.t.m.a.n@lists.open-mesh.org (moderated for non-subscribers) W: https://www.open-mesh.org/ + B: https://www.open-mesh.org/projects/batman-adv/issues + C: irc://chat.freenode.net/batman Q: https://patchwork.open-mesh.org/project/batman/list/ + T: git https://git.open-mesh.org/linux-merge.git S: Maintained - F: Documentation/ABI/testing/sysfs-class-net-batman-adv - F: Documentation/ABI/testing/sysfs-class-net-mesh + F: Documentation/ABI/obsolete/sysfs-class-net-batman-adv + F: Documentation/ABI/obsolete/sysfs-class-net-mesh F: Documentation/networking/batman-adv.rst F: include/uapi/linux/batadv_packet.h F: include/uapi/linux/batman_adv.h @@@ -3380,7 -3356,7 +3383,7 @@@ F: include/uapi/linux/bsg. BT87X AUDIO DRIVER M: Clemens Ladisch clemens@ladisch.de L: alsa-devel@alsa-project.org (moderated for non-subscribers) -T: git git://git.alsa-project.org/alsa-kernel.git +T: git git://git.kernel.org/pub/scm/linux/kernel/git/tiwai/sound.git S: Maintained F: Documentation/sound/cards/bt87x.rst F: sound/pci/bt87x.c @@@ -3433,7 -3409,7 +3436,7 @@@ F: drivers/scsi/FlashPoint. C-MEDIA CMI8788 DRIVER M: Clemens Ladisch clemens@ladisch.de L: alsa-devel@alsa-project.org (moderated for non-subscribers) -T: git git://git.alsa-project.org/alsa-kernel.git +T: git git://git.kernel.org/pub/scm/linux/kernel/git/tiwai/sound.git S: Maintained F: sound/pci/oxygen/
@@@ -3757,8 -3733,8 +3760,8 @@@ F: scripts/checkpatch.p
CHINESE DOCUMENTATION M: Harry Wei harryxiyou@gmail.com +M: Alex Shi alex.shi@linux.alibaba.com L: xiyoulinuxkernelgroup@googlegroups.com (subscribers-only) -L: linux-kernel@zh-kernel.org (moderated for non-subscribers) S: Maintained F: Documentation/translations/zh_CN/
@@@ -3825,21 -3801,16 +3828,21 @@@ M: Richard Fitzgerald <rf@opensource.ci L: patches@opensource.cirrus.com S: Supported F: drivers/clk/clk-lochnagar.c +F: drivers/hwmon/lochnagar-hwmon.c F: drivers/mfd/lochnagar-i2c.c F: drivers/pinctrl/cirrus/pinctrl-lochnagar.c F: drivers/regulator/lochnagar-regulator.c +F: sound/soc/codecs/lochnagar-sc.c F: include/dt-bindings/clk/lochnagar.h F: include/dt-bindings/pinctrl/lochnagar.h F: include/linux/mfd/lochnagar* F: Documentation/devicetree/bindings/mfd/cirrus,lochnagar.txt F: Documentation/devicetree/bindings/clock/cirrus,lochnagar.txt +F: Documentation/devicetree/bindings/hwmon/cirrus,lochnagar.txt F: Documentation/devicetree/bindings/pinctrl/cirrus,lochnagar.txt F: Documentation/devicetree/bindings/regulator/cirrus,lochnagar.txt +F: Documentation/devicetree/bindings/sound/cirrus,lochnagar.txt +F: Documentation/hwmon/lochnagar
CISCO FCOE HBA DRIVER M: Satish Kharat satishkh@cisco.com @@@ -4077,7 -4048,7 +4080,7 @@@ CORETEMP HARDWARE MONITORING DRIVE M: Fenghua Yu fenghua.yu@intel.com L: linux-hwmon@vger.kernel.org S: Maintained -F: Documentation/hwmon/coretemp +F: Documentation/hwmon/coretemp.rst F: drivers/hwmon/coretemp.c
COSA/SRP SYNC SERIAL DRIVER @@@ -4086,16 -4057,6 +4089,16 @@@ W: http://www.fi.muni.cz/~kas/cosa S: Maintained F: drivers/net/wan/cosa*
+COUNTER SUBSYSTEM +M: William Breathitt Gray vilhelm.gray@gmail.com +L: linux-iio@vger.kernel.org +S: Maintained +F: Documentation/ABI/testing/sysfs-bus-counter* +F: Documentation/driver-api/generic-counter.rst +F: drivers/counter/ +F: include/linux/counter.h +F: include/linux/counter_enum.h + CPMAC ETHERNET DRIVER M: Florian Fainelli f.fainelli@gmail.com L: netdev@vger.kernel.org @@@ -4290,7 -4251,7 +4293,7 @@@ S: Supporte F: drivers/scsi/cxgbi/cxgb3i
CXGB3 IWARP RNIC DRIVER (IW_CXGB3) -M: Steve Wise swise@chelsio.com +M: Potnuri Bharat Teja bharat@chelsio.com L: linux-rdma@vger.kernel.org W: http://www.openfabrics.org S: Supported @@@ -4319,7 -4280,7 +4322,7 @@@ S: Supporte F: drivers/scsi/cxgbi/cxgb4i
CXGB4 IWARP RNIC DRIVER (IW_CXGB4) -M: Steve Wise swise@chelsio.com +M: Potnuri Bharat Teja bharat@chelsio.com L: linux-rdma@vger.kernel.org W: http://www.openfabrics.org S: Supported @@@ -4335,7 -4296,7 +4338,7 @@@ F: drivers/net/ethernet/chelsio/cxgb4vf
CXL (IBM Coherent Accelerator Processor Interface CAPI) DRIVER M: Frederic Barrat fbarrat@linux.ibm.com -M: Andrew Donnellan andrew.donnellan@au1.ibm.com +M: Andrew Donnellan ajd@linux.ibm.com L: linuxppc-dev@lists.ozlabs.org S: Supported F: arch/powerpc/platforms/powernv/pci-cxl.c @@@ -4595,7 -4556,6 +4598,7 @@@ S: Maintaine F: drivers/devfreq/ F: include/linux/devfreq.h F: Documentation/devicetree/bindings/devfreq/ +F: include/trace/events/devfreq.h
DEVICE FREQUENCY EVENT (DEVFREQ-EVENT) M: Chanwoo Choi cw00.choi@samsung.com @@@ -4643,7 -4603,7 +4646,7 @@@ DIALOG SEMICONDUCTOR DRIVER M: Support Opensource support.opensource@diasemi.com W: http://www.dialog-semiconductor.com/products S: Supported -F: Documentation/hwmon/da90?? +F: Documentation/hwmon/da90??.rst F: Documentation/devicetree/bindings/mfd/da90*.txt F: Documentation/devicetree/bindings/input/da90??-onkey.txt F: Documentation/devicetree/bindings/thermal/da90??-thermal.txt @@@ -4794,7 -4754,7 +4797,7 @@@ DME1737 HARDWARE MONITOR DRIVE M: Juerg Haefliger juergh@gmail.com L: linux-hwmon@vger.kernel.org S: Maintained -F: Documentation/hwmon/dme1737 +F: Documentation/hwmon/dme1737.rst F: drivers/hwmon/dme1737.c
DMI/SMBIOS SUPPORT @@@ -5641,12 -5601,6 +5644,12 @@@ L: linux-edac@vger.kernel.or S: Maintained F: drivers/edac/ghes_edac.c
+EDAC-I10NM +M: Tony Luck tony.luck@intel.com +L: linux-edac@vger.kernel.org +S: Maintained +F: drivers/edac/i10nm_base.c + EDAC-I3000 L: linux-edac@vger.kernel.org S: Orphan @@@ -5728,7 -5682,7 +5731,7 @@@ EDAC-SKYLAK M: Tony Luck tony.luck@intel.com L: linux-edac@vger.kernel.org S: Maintained -F: drivers/edac/skx_edac.c +F: drivers/edac/skx_*.c
EDAC-TI M: Tero Kristo t-kristo@ti.com @@@ -5747,7 -5701,7 +5750,7 @@@ F: drivers/edac/qcom_edac. EDIROL UA-101/UA-1000 DRIVER M: Clemens Ladisch clemens@ladisch.de L: alsa-devel@alsa-project.org (moderated for non-subscribers) -T: git git://git.alsa-project.org/alsa-kernel.git +T: git git://git.kernel.org/pub/scm/linux/kernel/git/tiwai/sound.git S: Maintained F: sound/usb/misc/ua101.c
@@@ -5986,7 -5940,7 +5989,7 @@@ F71805F HARDWARE MONITORING DRIVE M: Jean Delvare jdelvare@suse.com L: linux-hwmon@vger.kernel.org S: Maintained -F: Documentation/hwmon/f71805f +F: Documentation/hwmon/f71805f.rst F: drivers/hwmon/f71805f.c
FADDR2LINE @@@ -6087,7 -6041,7 +6090,7 @@@ F: include/linux/f75375s. FIREWIRE AUDIO DRIVERS M: Clemens Ladisch clemens@ladisch.de L: alsa-devel@alsa-project.org (moderated for non-subscribers) -T: git git://git.alsa-project.org/alsa-kernel.git +T: git git://git.kernel.org/pub/scm/linux/kernel/git/tiwai/sound.git S: Maintained F: sound/firewire/
@@@ -6511,7 -6465,7 +6514,7 @@@ S: Maintaine F: drivers/media/radio/radio-gemtek*
GENERIC GPIO I2C DRIVER -M: Haavard Skinnemoen hskinnemoen@gmail.com +M: Wolfram Sang wsa+renesas@sang-engineering.com S: Supported F: drivers/i2c/busses/i2c-gpio.c F: include/linux/platform_data/i2c-gpio.h @@@ -6643,7 -6597,7 +6646,7 @@@ M: Andy Shevchenko <andriy.shevchenko@l L: linux-gpio@vger.kernel.org L: linux-acpi@vger.kernel.org S: Maintained -F: Documentation/acpi/gpio-properties.txt +F: Documentation/firmware-guide/acpi/gpio-properties.rst F: drivers/gpio/gpiolib-acpi.c
GPIO IR Transmitter @@@ -7435,12 -7389,13 +7438,12 @@@ S: Supporte F: drivers/net/ethernet/ibm/ibmvnic.*
IBM Power Virtual Accelerator Switchboard -M: Sukadev Bhattiprolu +M: Sukadev Bhattiprolu sukadev@linux.ibm.com L: linuxppc-dev@lists.ozlabs.org S: Supported F: arch/powerpc/platforms/powernv/vas* F: arch/powerpc/platforms/powernv/copy-paste.h F: arch/powerpc/include/asm/vas.h -F: arch/powerpc/include/uapi/asm/vas.h
IBM Power Virtual Ethernet Device Driver M: Thomas Falcon tlfalcon@linux.ibm.com @@@ -7487,14 -7442,14 +7490,14 @@@ F: drivers/crypto/vmx/ghash F: drivers/crypto/vmx/ppc-xlate.pl
IBM Power PCI Hotplug Driver for RPA-compliant PPC64 platform -M: Tyrel Datwyler tyreld@linux.vnet.ibm.com +M: Tyrel Datwyler tyreld@linux.ibm.com L: linux-pci@vger.kernel.org L: linuxppc-dev@lists.ozlabs.org S: Supported F: drivers/pci/hotplug/rpaphp*
IBM Power IO DLPAR Driver for RPA-compliant PPC64 platform -M: Tyrel Datwyler tyreld@linux.vnet.ibm.com +M: Tyrel Datwyler tyreld@linux.ibm.com L: linux-pci@vger.kernel.org L: linuxppc-dev@lists.ozlabs.org S: Supported @@@ -7666,7 -7621,7 +7669,7 @@@ INA209 HARDWARE MONITOR DRIVE M: Guenter Roeck linux@roeck-us.net L: linux-hwmon@vger.kernel.org S: Maintained -F: Documentation/hwmon/ina209 +F: Documentation/hwmon/ina209.rst F: Documentation/devicetree/bindings/hwmon/ina2xx.txt F: drivers/hwmon/ina209.c
@@@ -7674,7 -7629,7 +7677,7 @@@ INA2XX HARDWARE MONITOR DRIVE M: Guenter Roeck linux@roeck-us.net L: linux-hwmon@vger.kernel.org S: Maintained -F: Documentation/hwmon/ina2xx +F: Documentation/hwmon/ina2xx.rst F: drivers/hwmon/ina2xx.c F: include/linux/platform_data/ina2xx.h
@@@ -7701,10 -7656,6 +7704,10 @@@ F: drivers/infiniband F: include/uapi/linux/if_infiniband.h F: include/uapi/rdma/ F: include/rdma/ +F: include/trace/events/ib_mad.h +F: include/trace/events/ib_umad.h +F: samples/bpf/ibumad_kern.c +F: samples/bpf/ibumad_user.c
INGENIC JZ4780 DMA Driver M: Zubair Lutfullah Kakakhel Zubair.Kakakhel@imgtec.com @@@ -7928,10 -7879,10 +7931,10 @@@ F: Documentation/media/v4l-drivers/ipu3 INTEL IXP4XX QMGR, NPE, ETHERNET and HSS SUPPORT M: Krzysztof Halasa khalasa@piap.pl S: Maintained -F: arch/arm/mach-ixp4xx/include/mach/qmgr.h -F: arch/arm/mach-ixp4xx/include/mach/npe.h -F: arch/arm/mach-ixp4xx/ixp4xx_qmgr.c -F: arch/arm/mach-ixp4xx/ixp4xx_npe.c +F: include/linux/soc/ixp4xx/qmgr.h +F: include/linux/soc/ixp4xx/npe.h +F: drivers/soc/ixp4xx/ixp4xx-qmgr.c +F: drivers/soc/ixp4xx/ixp4xx-npe.c F: drivers/net/ethernet/xscale/ixp4xx_eth.c F: drivers/net/wan/ixp4xx_hss.c
@@@ -8098,7 -8049,6 +8101,7 @@@ F: drivers/gpio/gpio-intel-mid.
INTERCONNECT API M: Georgi Djakov georgi.djakov@linaro.org +L: linux-pm@vger.kernel.org S: Maintained F: Documentation/interconnect/ F: Documentation/devicetree/bindings/interconnect/ @@@ -8307,7 -8257,7 +8310,7 @@@ IT87 HARDWARE MONITORING DRIVE M: Jean Delvare jdelvare@suse.com L: linux-hwmon@vger.kernel.org S: Maintained -F: Documentation/hwmon/it87 +F: Documentation/hwmon/it87.rst F: drivers/hwmon/it87.c
IT913X MEDIA DRIVER @@@ -8351,7 -8301,7 +8354,7 @@@ M: Guenter Roeck <linux@roeck-us.net L: linux-hwmon@vger.kernel.org S: Maintained F: drivers/hwmon/jc42.c -F: Documentation/hwmon/jc42 +F: Documentation/hwmon/jc42.rst
JFS FILESYSTEM M: Dave Kleikamp shaggy@kernel.org @@@ -8399,14 -8349,14 +8402,14 @@@ K10TEMP HARDWARE MONITORING DRIVE M: Clemens Ladisch clemens@ladisch.de L: linux-hwmon@vger.kernel.org S: Maintained -F: Documentation/hwmon/k10temp +F: Documentation/hwmon/k10temp.rst F: drivers/hwmon/k10temp.c
K8TEMP HARDWARE MONITORING DRIVER M: Rudolf Marek r.marek@assembler.cz L: linux-hwmon@vger.kernel.org S: Maintained -F: Documentation/hwmon/k8temp +F: Documentation/hwmon/k8temp.rst F: drivers/hwmon/k8temp.c
KASAN @@@ -9047,7 -8997,7 +9050,7 @@@ R: Daniel Lustig <dlustig@nvidia.com L: linux-kernel@vger.kernel.org L: linux-arch@vger.kernel.org S: Supported -T: git git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-rcu.git +T: git git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-rcu.git dev F: tools/memory-model/ F: Documentation/atomic_bitops.txt F: Documentation/atomic_t.txt @@@ -9098,21 -9048,21 +9101,21 @@@ LM78 HARDWARE MONITOR DRIVE M: Jean Delvare jdelvare@suse.com L: linux-hwmon@vger.kernel.org S: Maintained -F: Documentation/hwmon/lm78 +F: Documentation/hwmon/lm78.rst F: drivers/hwmon/lm78.c
LM83 HARDWARE MONITOR DRIVER M: Jean Delvare jdelvare@suse.com L: linux-hwmon@vger.kernel.org S: Maintained -F: Documentation/hwmon/lm83 +F: Documentation/hwmon/lm83.rst F: drivers/hwmon/lm83.c
LM90 HARDWARE MONITOR DRIVER M: Jean Delvare jdelvare@suse.com L: linux-hwmon@vger.kernel.org S: Maintained -F: Documentation/hwmon/lm90 +F: Documentation/hwmon/lm90.rst F: Documentation/devicetree/bindings/hwmon/lm90.txt F: drivers/hwmon/lm90.c F: include/dt-bindings/thermal/lm90.h @@@ -9121,7 -9071,7 +9124,7 @@@ LM95234 HARDWARE MONITOR DRIVE M: Guenter Roeck linux@roeck-us.net L: linux-hwmon@vger.kernel.org S: Maintained -F: Documentation/hwmon/lm95234 +F: Documentation/hwmon/lm95234.rst F: drivers/hwmon/lm95234.c
LME2510 MEDIA DRIVER @@@ -9153,6 -9103,7 +9156,6 @@@ F: arch/*/include/asm/spinlock*. F: include/linux/rwlock*.h F: include/linux/mutex*.h F: include/linux/rwsem*.h -F: arch/*/include/asm/rwsem.h F: include/linux/seqlock.h F: lib/locking*.[ch] F: kernel/locking/ @@@ -9194,7 -9145,7 +9197,7 @@@ LTC4261 HARDWARE MONITOR DRIVE M: Guenter Roeck linux@roeck-us.net L: linux-hwmon@vger.kernel.org S: Maintained -F: Documentation/hwmon/ltc4261 +F: Documentation/hwmon/ltc4261.rst F: drivers/hwmon/ltc4261.c
LTC4306 I2C MULTIPLEXER DRIVER @@@ -9425,7 -9376,7 +9428,7 @@@ MAX16065 HARDWARE MONITOR DRIVE M: Guenter Roeck linux@roeck-us.net L: linux-hwmon@vger.kernel.org S: Maintained -F: Documentation/hwmon/max16065 +F: Documentation/hwmon/max16065.rst F: drivers/hwmon/max16065.c
MAX2175 SDR TUNER DRIVER @@@ -9441,14 -9392,14 +9444,14 @@@ F: include/uapi/linux/max2175. MAX6650 HARDWARE MONITOR AND FAN CONTROLLER DRIVER L: linux-hwmon@vger.kernel.org S: Orphan -F: Documentation/hwmon/max6650 +F: Documentation/hwmon/max6650.rst F: drivers/hwmon/max6650.c
MAX6697 HARDWARE MONITOR DRIVER M: Guenter Roeck linux@roeck-us.net L: linux-hwmon@vger.kernel.org S: Maintained -F: Documentation/hwmon/max6697 +F: Documentation/hwmon/max6697.rst F: Documentation/devicetree/bindings/hwmon/max6697.txt F: drivers/hwmon/max6697.c F: include/linux/platform_data/max6697.h @@@ -9460,13 -9411,6 +9463,13 @@@ S: Maintaine F: Documentation/devicetree/bindings/sound/max9860.txt F: sound/soc/codecs/max9860.*
+MAXBOTIX ULTRASONIC RANGER IIO DRIVER +M: Andreas Klinger ak@it-klinger.de +L: linux-iio@vger.kernel.org +S: Maintained +F: Documentation/devicetree/bindings/iio/proximity/maxbotix,mb1232.txt +F: drivers/iio/proximity/mb1232.c + MAXIM MAX77802 PMIC REGULATOR DEVICE DRIVER M: Javier Martinez Canillas javier@dowhile0.org L: linux-kernel@vger.kernel.org @@@ -9835,15 -9779,11 +9838,17 @@@ F: drivers/media/platform/mtk-vpu F: Documentation/devicetree/bindings/media/mediatek-vcodec.txt F: Documentation/devicetree/bindings/media/mediatek-vpu.txt
+MEDIATEK MMC/SD/SDIO DRIVER +M: Chaotian Jing chaotian.jing@mediatek.com +S: Maintained +F: drivers/mmc/host/mtk-sd.c +F: Documentation/devicetree/bindings/mmc/mtk-sd.txt + MEDIATEK MT76 WIRELESS LAN DRIVER M: Felix Fietkau nbd@nbd.name M: Lorenzo Bianconi lorenzo.bianconi83@gmail.com + R: Ryder Lee ryder.lee@mediatek.com + R: Roy Luo royluo@google.com L: linux-wireless@vger.kernel.org S: Maintained F: drivers/net/wireless/mediatek/mt76/ @@@ -9942,15 -9882,6 +9947,6 @@@ F: drivers/net/ethernet/mellanox/mlx5/c F: drivers/net/ethernet/mellanox/mlx5/core/fpga/* F: include/linux/mlx5/mlx5_ifc_fpga.h
- MELLANOX ETHERNET INNOVA IPSEC DRIVER - R: Boris Pismenny borisp@mellanox.com - L: netdev@vger.kernel.org - S: Supported - W: http://www.mellanox.com - Q: http://patchwork.ozlabs.org/project/netdev/list/ - F: drivers/net/ethernet/mellanox/mlx5/core/en_ipsec/* - F: drivers/net/ethernet/mellanox/mlx5/core/ipsec* - MELLANOX ETHERNET SWITCH DRIVERS M: Jiri Pirko jiri@mellanox.com M: Ido Schimmel idosch@mellanox.com @@@ -10107,7 -10038,7 +10103,7 @@@ F: drivers/mfd/menf21bmc. F: drivers/watchdog/menf21bmc_wdt.c F: drivers/leds/leds-menf21bmc.c F: drivers/hwmon/menf21bmc_hwmon.c -F: Documentation/hwmon/menf21bmc +F: Documentation/hwmon/menf21bmc.rst
MEN Z069 WATCHDOG DRIVER M: Johannes Thumshirn jth@kernel.org @@@ -10122,7 -10053,6 +10118,7 @@@ L: linux-amlogic@lists.infradead.or W: http://linux-meson.com/ S: Supported F: drivers/media/platform/meson/ao-cec.c +F: drivers/media/platform/meson/ao-cec-g12a.c F: Documentation/devicetree/bindings/media/meson-ao-cec.txt T: git git://linuxtv.org/media_tree.git
@@@ -10179,8 -10109,7 +10175,8 @@@ MICROCHIP I2C DRIVE M: Ludovic Desroches ludovic.desroches@microchip.com L: linux-i2c@vger.kernel.org S: Supported -F: drivers/i2c/busses/i2c-at91.c +F: drivers/i2c/busses/i2c-at91.h +F: drivers/i2c/busses/i2c-at91-*.c
MICROCHIP ISC DRIVER M: Eugen Hristev eugen.hristev@microchip.com @@@ -10456,7 -10385,7 +10452,7 @@@ F: arch/arm/mach-mmp
MMU GATHER AND TLB INVALIDATION M: Will Deacon will.deacon@arm.com -M: "Aneesh Kumar K.V" aneesh.kumar@linux.vnet.ibm.com +M: "Aneesh Kumar K.V" aneesh.kumar@linux.ibm.com M: Andrew Morton akpm@linux-foundation.org M: Nick Piggin npiggin@gmail.com M: Peter Zijlstra peterz@infradead.org @@@ -10737,7 -10666,7 +10733,7 @@@ NCT6775 HARDWARE MONITOR DRIVE M: Guenter Roeck linux@roeck-us.net L: linux-hwmon@vger.kernel.org S: Maintained -F: Documentation/hwmon/nct6775 +F: Documentation/hwmon/nct6775.rst F: drivers/hwmon/nct6775.c
NET_FAILOVER MODULE @@@ -10815,6 -10744,7 +10811,7 @@@ L: linux-block@vger.kernel.or L: nbd@other.debian.org F: Documentation/blockdev/nbd.txt F: drivers/block/nbd.c + F: include/trace/events/nbd.h F: include/uapi/linux/nbd.h
NETWORK DROP MONITOR @@@ -11185,16 -11115,6 +11182,16 @@@ F: Documentation/ABI/stable/sysfs-bus-n F: include/linux/nvmem-consumer.h F: include/linux/nvmem-provider.h
+NXP FXAS21002C DRIVER +M: Rui Miguel Silva rmfrfs@gmail.com +L: linux-iio@vger.kernel.org +S: Maintained +F: Documentation/devicetree/bindings/iio/gyroscope/fxas21002c.txt +F: drivers/iio/gyro/fxas21002c_core.c +F: drivers/iio/gyro/fxas21002c.h +F: drivers/iio/gyro/fxas21002c_i2c.c +F: drivers/iio/gyro/fxas21002c_spi.c + NXP SGTL5000 DRIVER M: Fabio Estevam festevam@gmail.com L: alsa-devel@alsa-project.org (moderated for non-subscribers) @@@ -11202,6 -11122,12 +11199,12 @@@ S: Maintaine F: Documentation/devicetree/bindings/sound/sgtl5000.txt F: sound/soc/codecs/sgtl5000*
+ NXP SJA1105 ETHERNET SWITCH DRIVER + M: Vladimir Oltean olteanv@gmail.com + L: linux-kernel@vger.kernel.org + S: Maintained + F: drivers/net/dsa/sja1105 + NXP TDA998X DRM DRIVER M: Russell King linux@armlinux.org.uk S: Maintained @@@ -11250,7 -11176,7 +11253,7 @@@ F: tools/objtool
OCXL (Open Coherent Accelerator Processor Interface OpenCAPI) DRIVER M: Frederic Barrat fbarrat@linux.ibm.com -M: Andrew Donnellan andrew.donnellan@au1.ibm.com +M: Andrew Donnellan ajd@linux.ibm.com L: linuxppc-dev@lists.ozlabs.org S: Supported F: arch/powerpc/platforms/powernv/ocxl.c @@@ -11681,7 -11607,7 +11684,7 @@@ F: Documentation/devicetree/bindings/op OPL4 DRIVER M: Clemens Ladisch clemens@ladisch.de L: alsa-devel@alsa-project.org (moderated for non-subscribers) -T: git git://git.alsa-project.org/alsa-kernel.git +T: git git://git.kernel.org/pub/scm/linux/kernel/git/tiwai/sound.git S: Maintained F: sound/drivers/opl4/
@@@ -11755,6 -11681,14 +11758,14 @@@ L: linux-i2c@vger.kernel.or S: Orphan F: drivers/i2c/busses/i2c-pasemi.c
+ PACKING + M: Vladimir Oltean olteanv@gmail.com + L: netdev@vger.kernel.org + S: Supported + F: lib/packing.c + F: include/linux/packing.h + F: Documentation/packing.txt + PADATA PARALLEL EXECUTION MECHANISM M: Steffen Klassert steffen.klassert@secunet.com L: linux-crypto@vger.kernel.org @@@ -11841,7 -11775,7 +11852,7 @@@ PC87360 HARDWARE MONITORING DRIVE M: Jim Cromie jim.cromie@gmail.com L: linux-hwmon@vger.kernel.org S: Maintained -F: Documentation/hwmon/pc87360 +F: Documentation/hwmon/pc87360.rst F: drivers/hwmon/pc87360.c
PC8736x GPIO DRIVER @@@ -11853,7 -11787,7 +11864,7 @@@ PC87427 HARDWARE MONITORING DRIVE M: Jean Delvare jdelvare@suse.com L: linux-hwmon@vger.kernel.org S: Maintained -F: Documentation/hwmon/pc87427 +F: Documentation/hwmon/pc87427.rst F: drivers/hwmon/pc87427.c
PCA9532 LED DRIVER @@@ -12102,12 -12036,6 +12113,12 @@@ T: git git://git.kernel.org/pub/scm/lin S: Supported F: drivers/pci/controller/
+PCIE DRIVER FOR ANNAPURNA LABS +M: Jonathan Chocron jonnyc@amazon.com +L: linux-pci@vger.kernel.org +S: Maintained +F: drivers/pci/controller/dwc/pcie-al.c + PCIE DRIVER FOR AMLOGIC MESON M: Yue Wang yue.wang@Amlogic.com L: linux-pci@vger.kernel.org @@@ -12259,7 -12187,6 +12270,7 @@@ F: arch/*/kernel/*/*/perf_event*. F: arch/*/include/asm/perf_event.h F: arch/*/kernel/perf_callchain.c F: arch/*/events/* +F: arch/*/events/*/* F: tools/perf/
PERSONALITY HANDLING @@@ -12428,23 -12355,23 +12439,23 @@@ S: Maintaine F: Documentation/devicetree/bindings/hwmon/ibm,cffps1.txt F: Documentation/devicetree/bindings/hwmon/max31785.txt F: Documentation/devicetree/bindings/hwmon/ltc2978.txt -F: Documentation/hwmon/adm1275 -F: Documentation/hwmon/ibm-cffps -F: Documentation/hwmon/ir35221 -F: Documentation/hwmon/lm25066 -F: Documentation/hwmon/ltc2978 -F: Documentation/hwmon/ltc3815 -F: Documentation/hwmon/max16064 -F: Documentation/hwmon/max20751 -F: Documentation/hwmon/max31785 -F: Documentation/hwmon/max34440 -F: Documentation/hwmon/max8688 -F: Documentation/hwmon/pmbus -F: Documentation/hwmon/pmbus-core -F: Documentation/hwmon/tps40422 -F: Documentation/hwmon/ucd9000 -F: Documentation/hwmon/ucd9200 -F: Documentation/hwmon/zl6100 +F: Documentation/hwmon/adm1275.rst +F: Documentation/hwmon/ibm-cffps.rst +F: Documentation/hwmon/ir35221.rst +F: Documentation/hwmon/lm25066.rst +F: Documentation/hwmon/ltc2978.rst +F: Documentation/hwmon/ltc3815.rst +F: Documentation/hwmon/max16064.rst +F: Documentation/hwmon/max20751.rst +F: Documentation/hwmon/max31785.rst +F: Documentation/hwmon/max34440.rst +F: Documentation/hwmon/max8688.rst +F: Documentation/hwmon/pmbus.rst +F: Documentation/hwmon/pmbus-core.rst +F: Documentation/hwmon/tps40422.rst +F: Documentation/hwmon/ucd9000.rst +F: Documentation/hwmon/ucd9200.rst +F: Documentation/hwmon/zl6100.rst F: drivers/hwmon/pmbus/ F: include/linux/pmbus.h
@@@ -12500,7 -12427,7 +12511,7 @@@ M: Mark Rutland <mark.rutland@arm.com M: Lorenzo Pieralisi lorenzo.pieralisi@arm.com L: linux-arm-kernel@lists.infradead.org S: Maintained -F: drivers/firmware/psci*.c +F: drivers/firmware/psci/ F: include/linux/psci.h F: include/uapi/linux/psci.h
@@@ -12708,7 -12635,7 +12719,7 @@@ M: Bartlomiej Zolnierkiewicz <b.zolnier L: linux-hwmon@vger.kernel.org S: Supported F: Documentation/devicetree/bindings/hwmon/pwm-fan.txt -F: Documentation/hwmon/pwm-fan +F: Documentation/hwmon/pwm-fan.rst F: drivers/hwmon/pwm-fan.c
PWM IR Transmitter @@@ -13126,9 -13053,9 +13137,9 @@@ M: Josh Triplett <josh@joshtriplett.org R: Steven Rostedt rostedt@goodmis.org R: Mathieu Desnoyers mathieu.desnoyers@efficios.com R: Lai Jiangshan jiangshanlai@gmail.com -L: linux-kernel@vger.kernel.org +L: rcu@vger.kernel.org S: Supported -T: git git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-rcu.git +T: git git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-rcu.git dev F: tools/testing/selftests/rcutorture
RDC R-321X SoC @@@ -13174,10 -13101,10 +13185,10 @@@ R: Steven Rostedt <rostedt@goodmis.org R: Mathieu Desnoyers mathieu.desnoyers@efficios.com R: Lai Jiangshan jiangshanlai@gmail.com R: Joel Fernandes joel@joelfernandes.org -L: linux-kernel@vger.kernel.org +L: rcu@vger.kernel.org W: http://www.rdrop.com/users/paulmck/RCU/ S: Supported -T: git git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-rcu.git +T: git git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-rcu.git dev F: Documentation/RCU/ X: Documentation/RCU/torture.txt F: include/linux/rcu* @@@ -13487,6 -13414,12 +13498,12 @@@ T: git git://git.kernel.org/pub/scm/lin S: Maintained F: drivers/net/wireless/realtek/rtlwifi/
+ REALTEK WIRELESS DRIVER (rtw88) + M: Yan-Hsuan Chuang yhchuang@realtek.com + L: linux-wireless@vger.kernel.org + S: Maintained + F: drivers/net/wireless/realtek/rtw88/ + RTL8XXXU WIRELESS DRIVER (rtl8xxxu) M: Jes Sorensen Jes.Sorensen@gmail.com L: linux-wireless@vger.kernel.org @@@ -14329,10 -14262,10 +14346,10 @@@ M: "Paul E. McKenney" <paulmck@linux.ib M: Josh Triplett josh@joshtriplett.org R: Steven Rostedt rostedt@goodmis.org R: Mathieu Desnoyers mathieu.desnoyers@efficios.com -L: linux-kernel@vger.kernel.org +L: rcu@vger.kernel.org W: http://www.rdrop.com/users/paulmck/RCU/ S: Supported -T: git git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-rcu.git +T: git git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-rcu.git dev F: include/linux/srcu*.h F: kernel/rcu/srcu*.c
@@@ -14373,21 -14306,21 +14390,21 @@@ SMM665 HARDWARE MONITOR DRIVE M: Guenter Roeck linux@roeck-us.net L: linux-hwmon@vger.kernel.org S: Maintained -F: Documentation/hwmon/smm665 +F: Documentation/hwmon/smm665.rst F: drivers/hwmon/smm665.c
SMSC EMC2103 HARDWARE MONITOR DRIVER M: Steve Glendinning steve.glendinning@shawell.net L: linux-hwmon@vger.kernel.org S: Maintained -F: Documentation/hwmon/emc2103 +F: Documentation/hwmon/emc2103.rst F: drivers/hwmon/emc2103.c
SMSC SCH5627 HARDWARE MONITOR DRIVER M: Hans de Goede hdegoede@redhat.com L: linux-hwmon@vger.kernel.org S: Supported -F: Documentation/hwmon/sch5627 +F: Documentation/hwmon/sch5627.rst F: drivers/hwmon/sch5627.c
SMSC UFX6000 and UFX7000 USB to VGA DRIVER @@@ -14400,7 -14333,7 +14417,7 @@@ SMSC47B397 HARDWARE MONITOR DRIVE M: Jean Delvare jdelvare@suse.com L: linux-hwmon@vger.kernel.org S: Maintained -F: Documentation/hwmon/smsc47b397 +F: Documentation/hwmon/smsc47b397.rst F: drivers/hwmon/smsc47b397.c
SMSC911x ETHERNET DRIVER @@@ -14420,8 -14353,9 +14437,8 @@@ SOC-CAMERA V4L2 SUBSYSTE L: linux-media@vger.kernel.org T: git git://linuxtv.org/media_tree.git S: Orphan -F: include/media/soc* -F: drivers/media/i2c/soc_camera/ -F: drivers/media/platform/soc_camera/ +F: include/media/soc_camera.h +F: drivers/staging/media/soc_camera/
SOCIONEXT SYNQUACER I2C DRIVER M: Ard Biesheuvel ard.biesheuvel@linaro.org @@@ -14557,15 -14491,16 +14574,15 @@@ T: git git://linuxtv.org/media_tree.gi S: Maintained F: drivers/media/i2c/imx355.c
-SONY MEMORYSTICK CARD SUPPORT -M: Alex Dubov oakad@yahoo.com -W: http://tifmxx.berlios.de/ -S: Maintained -F: drivers/memstick/host/tifm_ms.c - -SONY MEMORYSTICK STANDARD SUPPORT +SONY MEMORYSTICK SUBSYSTEM M: Maxim Levitsky maximlevitsky@gmail.com +M: Alex Dubov oakad@yahoo.com +M: Ulf Hansson ulf.hansson@linaro.org +L: linux-mmc@vger.kernel.org +T: git git://git.kernel.org/pub/scm/linux/kernel/git/ulfh/mmc.git S: Maintained -F: drivers/memstick/core/ms_block.* +F: drivers/memstick/ +F: include/linux/memstick.h
SONY VAIO CONTROL DEVICE DRIVER M: Mattia Dongili malattia@linux.it @@@ -14583,6 -14518,7 +14600,6 @@@ M: Takashi Iwai <tiwai@suse.com L: alsa-devel@alsa-project.org (moderated for non-subscribers) W: http://www.alsa-project.org/ T: git git://git.kernel.org/pub/scm/linux/kernel/git/tiwai/sound.git -T: git git://git.alsa-project.org/alsa-kernel.git Q: http://patchwork.kernel.org/project/alsa-devel/list/ S: Maintained F: Documentation/sound/ @@@ -14760,14 -14696,6 +14777,14 @@@ S: Maintaine F: drivers/iio/imu/st_lsm6dsx/ F: Documentation/devicetree/bindings/iio/imu/st_lsm6dsx.txt
+ST MIPID02 CSI-2 TO PARALLEL BRIDGE DRIVER +M: Mickael Guene mickael.guene@st.com +L: linux-media@vger.kernel.org +T: git git://linuxtv.org/media_tree.git +S: Maintained +F: drivers/media/i2c/st-mipid02.c +F: Documentation/devicetree/bindings/media/i2c/st,st-mipid02.txt + ST STM32 I2C/SMBUS DRIVER M: Pierre-Yves MORDRET pierre-yves.mordret@st.com L: linux-i2c@vger.kernel.org @@@ -15597,11 -15525,9 +15614,11 @@@ S: Maintaine F: drivers/net/ethernet/ti/cpsw* F: drivers/net/ethernet/ti/davinci*
-TI FLASH MEDIA INTERFACE DRIVER +TI FLASH MEDIA MEMORYSTICK/MMC DRIVERS M: Alex Dubov oakad@yahoo.com S: Maintained +W: http://tifmxx.berlios.de/ +F: drivers/memstick/host/tifm_ms.c F: drivers/misc/tifm* F: drivers/mmc/host/tifm_sd.c F: include/linux/tifm.h @@@ -15753,7 -15679,7 +15770,7 @@@ TMP401 HARDWARE MONITOR DRIVE M: Guenter Roeck linux@roeck-us.net L: linux-hwmon@vger.kernel.org S: Maintained -F: Documentation/hwmon/tmp401 +F: Documentation/hwmon/tmp401.rst F: drivers/hwmon/tmp401.c
TMPFS (SHMEM FILESYSTEM) @@@ -15786,7 -15712,7 +15803,7 @@@ M: "Paul E. McKenney" <paulmck@linux.ib M: Josh Triplett josh@joshtriplett.org L: linux-kernel@vger.kernel.org S: Supported -T: git git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-rcu.git +T: git git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-rcu.git dev F: Documentation/RCU/torture.txt F: kernel/torture.c F: kernel/rcu/rcutorture.c @@@ -16028,12 -15954,6 +16045,12 @@@ F: drivers/uwb F: include/linux/uwb.h F: include/linux/uwb/
+UNICODE SUBSYSTEM: +M: Gabriel Krisman Bertazi krisman@collabora.com +L: linux-fsdevel@vger.kernel.org +S: Supported +F: fs/unicode/ + UNICORE32 ARCHITECTURE: M: Guan Xuetao gxt@pku.edu.cn W: http://mprc.pku.edu.cn/~guanxuetao/linux @@@ -16208,7 -16128,7 +16225,7 @@@ F: drivers/usb/storage USB MIDI DRIVER M: Clemens Ladisch clemens@ladisch.de L: alsa-devel@alsa-project.org (moderated for non-subscribers) -T: git git://git.alsa-project.org/alsa-kernel.git +T: git git://git.kernel.org/pub/scm/linux/kernel/git/tiwai/sound.git S: Maintained F: sound/usb/midi.*
@@@ -16797,7 -16717,7 +16814,7 @@@ VT1211 HARDWARE MONITOR DRIVE M: Juerg Haefliger juergh@gmail.com L: linux-hwmon@vger.kernel.org S: Maintained -F: Documentation/hwmon/vt1211 +F: Documentation/hwmon/vt1211.rst F: drivers/hwmon/vt1211.c
VT8231 HARDWARE MONITOR DRIVER @@@ -16825,14 -16745,14 +16842,14 @@@ W83791D HARDWARE MONITORING DRIVE M: Marc Hulsman m.hulsman@tudelft.nl L: linux-hwmon@vger.kernel.org S: Maintained -F: Documentation/hwmon/w83791d +F: Documentation/hwmon/w83791d.rst F: drivers/hwmon/w83791d.c
W83793 HARDWARE MONITORING DRIVER M: Rudolf Marek r.marek@assembler.cz L: linux-hwmon@vger.kernel.org S: Maintained -F: Documentation/hwmon/w83793 +F: Documentation/hwmon/w83793.rst F: drivers/hwmon/w83793.c
W83795 HARDWARE MONITORING DRIVER @@@ -16941,7 -16861,7 +16958,7 @@@ L: patches@opensource.cirrus.co T: git https://github.com/CirrusLogic/linux-drivers.git W: https://github.com/CirrusLogic/linux-drivers/wiki S: Supported -F: Documentation/hwmon/wm83?? +F: Documentation/hwmon/wm83??.rst F: Documentation/devicetree/bindings/extcon/extcon-arizona.txt F: Documentation/devicetree/bindings/regulator/arizona-regulator.txt F: Documentation/devicetree/bindings/mfd/arizona.txt @@@ -17031,7 -16951,7 +17048,7 @@@ M: Tony Luck <tony.luck@intel.com M: Borislav Petkov bp@alien8.de L: linux-edac@vger.kernel.org S: Maintained -F: arch/x86/kernel/cpu/mcheck/* +F: arch/x86/kernel/cpu/mce/*
X86 MICROCODE UPDATE SUPPORT M: Borislav Petkov bp@alien8.de diff --combined Makefile index 381969a5e721,e1bb7345cdd1..28965187c528 --- a/Makefile +++ b/Makefile @@@ -2,7 -2,7 +2,7 @@@ VERSION = 5 PATCHLEVEL = 1 SUBLEVEL = 0 -EXTRAVERSION = -rc7 +EXTRAVERSION = NAME = Shy Crocodile
# *DOCUMENTATION* @@@ -96,65 -96,56 +96,65 @@@ endi
export quiet Q KBUILD_VERBOSE
-# kbuild supports saving output files in a separate directory. -# To locate output files in a separate directory two syntaxes are supported. -# In both cases the working directory must be the root of the kernel src. +# Kbuild will save output files in the current working directory. +# This does not need to match to the root of the kernel source tree. +# +# For example, you can do this: +# +# cd /dir/to/store/output/files; make -f /dir/to/kernel/source/Makefile +# +# If you want to save output files in a different location, there are +# two syntaxes to specify it. +# # 1) O= # Use "make O=dir/to/store/output/files/" # # 2) Set KBUILD_OUTPUT -# Set the environment variable KBUILD_OUTPUT to point to the directory -# where the output files shall be placed. -# export KBUILD_OUTPUT=dir/to/store/output/files/ -# make +# Set the environment variable KBUILD_OUTPUT to point to the output directory. +# export KBUILD_OUTPUT=dir/to/store/output/files/; make # # The O= assignment takes precedence over the KBUILD_OUTPUT environment # variable.
-# KBUILD_SRC is not intended to be used by the regular user (for now), -# it is set on invocation of make with KBUILD_OUTPUT or O= specified. - -# OK, Make called in directory where kernel src resides -# Do we want to locate output files in a separate directory? +# Do we want to change the working directory? ifeq ("$(origin O)", "command line") KBUILD_OUTPUT := $(O) endif
-ifneq ($(words $(subst :, ,$(CURDIR))), 1) - $(error main directory cannot contain spaces nor colons) +ifneq ($(KBUILD_OUTPUT),) +# Make's built-in functions such as $(abspath ...), $(realpath ...) cannot +# expand a shell special character '~'. We use a somewhat tedious way here. +abs_objtree := $(shell mkdir -p $(KBUILD_OUTPUT) && cd $(KBUILD_OUTPUT) && pwd) +$(if $(abs_objtree),, \ + $(error failed to create output directory "$(KBUILD_OUTPUT)")) + +# $(realpath ...) resolves symlinks +abs_objtree := $(realpath $(abs_objtree)) +else +abs_objtree := $(CURDIR) +endif # ifneq ($(KBUILD_OUTPUT),) + +ifeq ($(abs_objtree),$(CURDIR)) +# Suppress "Entering directory ..." unless we are changing the work directory. +MAKEFLAGS += --no-print-directory +else +need-sub-make := 1 endif
-ifneq ($(KBUILD_OUTPUT),) -# check that the output directory actually exists -saved-output := $(KBUILD_OUTPUT) -KBUILD_OUTPUT := $(shell mkdir -p $(KBUILD_OUTPUT) && cd $(KBUILD_OUTPUT) \ - && pwd) -$(if $(KBUILD_OUTPUT),, \ - $(error failed to create output directory "$(saved-output)")) +abs_srctree := $(realpath $(dir $(lastword $(MAKEFILE_LIST))))
+ifneq ($(words $(subst :, ,$(abs_srctree))), 1) +$(error source directory cannot contain spaces or colons) +endif + +ifneq ($(abs_srctree),$(abs_objtree)) # Look for make include files relative to root of kernel src # # This does not become effective immediately because MAKEFLAGS is re-parsed -# once after the Makefile is read. It is OK since we are going to invoke -# 'sub-make' below. -MAKEFLAGS += --include-dir=$(CURDIR) - +# once after the Makefile is read. We need to invoke sub-make. +MAKEFLAGS += --include-dir=$(abs_srctree) need-sub-make := 1 -else - -# Do not print "Entering directory ..." at all for in-tree build. -MAKEFLAGS += --no-print-directory - -endif # ifneq ($(KBUILD_OUTPUT),) +endif
ifneq ($(filter 3.%,$(MAKE_VERSION)),) # 'MAKEFLAGS += -rR' does not immediately become effective for GNU Make 3.x @@@ -164,19 -155,20 +164,19 @@@ need-sub-make := $(lastword $(MAKEFILE_LIST)): ; endif
+export abs_srctree abs_objtree export sub_make_done := 1
ifeq ($(need-sub-make),1)
PHONY += $(MAKECMDGOALS) sub-make
-$(filter-out _all sub-make $(CURDIR)/Makefile, $(MAKECMDGOALS)) _all: sub-make +$(filter-out _all sub-make $(lastword $(MAKEFILE_LIST)), $(MAKECMDGOALS)) _all: sub-make @:
# Invoke a second make in the output directory, passing relevant variables sub-make: - $(Q)$(MAKE) \ - $(if $(KBUILD_OUTPUT),-C $(KBUILD_OUTPUT) KBUILD_SRC=$(CURDIR)) \ - -f $(CURDIR)/Makefile $(filter-out _all sub-make,$(MAKECMDGOALS)) + $(Q)$(MAKE) -C $(abs_objtree) -f $(abs_srctree)/Makefile $(MAKECMDGOALS)
endif # need-sub-make endif # sub_make_done @@@ -221,21 -213,16 +221,21 @@@ ifeq ("$(origin M)", "command line" KBUILD_EXTMOD := $(M) endif
-ifeq ($(KBUILD_SRC),) +ifeq ($(abs_srctree),$(abs_objtree)) # building in the source tree srctree := . else - ifeq ($(KBUILD_SRC)/,$(dir $(CURDIR))) + ifeq ($(abs_srctree)/,$(dir $(abs_objtree))) # building in a subdirectory of the source tree srctree := .. else - srctree := $(KBUILD_SRC) + srctree := $(abs_srctree) endif + + # TODO: + # KBUILD_SRC is only used to distinguish in-tree/out-of-tree build. + # Replace it with $(srctree) or something. + KBUILD_SRC := $(abs_srctree) endif
export KBUILD_CHECKSRC KBUILD_EXTMOD KBUILD_SRC @@@ -414,6 -401,7 +414,7 @@@ NM = $(CROSS_COMPILE)n STRIP = $(CROSS_COMPILE)strip OBJCOPY = $(CROSS_COMPILE)objcopy OBJDUMP = $(CROSS_COMPILE)objdump + PAHOLE = pahole LEX = flex YACC = bison AWK = awk @@@ -448,7 -436,7 +449,7 @@@ USERINCLUDE := LINUXINCLUDE := \ -I$(srctree)/arch/$(SRCARCH)/include \ -I$(objtree)/arch/$(SRCARCH)/include/generated \ - $(if $(KBUILD_SRC), -I$(srctree)/include) \ + $(if $(filter .,$(srctree)),,-I$(srctree)/include) \ -I$(objtree)/include \ $(USERINCLUDE)
@@@ -468,7 -456,7 +469,7 @@@ KBUILD_LDFLAGS : GCC_PLUGINS_CFLAGS :=
export ARCH SRCARCH CONFIG_SHELL HOSTCC KBUILD_HOSTCFLAGS CROSS_COMPILE AS LD CC - export CPP AR NM STRIP OBJCOPY OBJDUMP KBUILD_HOSTLDFLAGS KBUILD_HOSTLDLIBS + export CPP AR NM STRIP OBJCOPY OBJDUMP PAHOLE KBUILD_HOSTLDFLAGS KBUILD_HOSTLDLIBS export MAKE LEX YACC AWK INSTALLKERNEL PERL PYTHON PYTHON2 PYTHON3 UTS_MACHINE export HOSTCXX KBUILD_HOSTCXXFLAGS LDFLAGS_MODULE CHECK CHECKFLAGS
@@@ -509,7 -497,7 +510,7 @@@ PHONY += outputmakefil # At the same time when output Makefile generated, generate .gitignore to # ignore whole output directory outputmakefile: -ifneq ($(KBUILD_SRC),) +ifneq ($(srctree),.) $(Q)ln -fsn $(srctree) source $(Q)$(CONFIG_SHELL) $(srctree)/scripts/mkmakefile $(srctree) $(Q)test -e .gitignore || \ @@@ -532,6 -520,15 +533,6 @@@ KBUILD_AFLAGS += $(CLANG_FLAGS export CLANG_FLAGS endif
-RETPOLINE_CFLAGS_GCC := -mindirect-branch=thunk-extern -mindirect-branch-register -RETPOLINE_VDSO_CFLAGS_GCC := -mindirect-branch=thunk-inline -mindirect-branch-register -RETPOLINE_CFLAGS_CLANG := -mretpoline-external-thunk -RETPOLINE_VDSO_CFLAGS_CLANG := -mretpoline -RETPOLINE_CFLAGS := $(call cc-option,$(RETPOLINE_CFLAGS_GCC),$(call cc-option,$(RETPOLINE_CFLAGS_CLANG))) -RETPOLINE_VDSO_CFLAGS := $(call cc-option,$(RETPOLINE_VDSO_CFLAGS_GCC),$(call cc-option,$(RETPOLINE_VDSO_CFLAGS_CLANG))) -export RETPOLINE_CFLAGS -export RETPOLINE_VDSO_CFLAGS - # The expansion should be delayed until arch/$(SRCARCH)/Makefile is included. # Some architectures define CROSS_COMPILE in arch/$(SRCARCH)/Makefile. # CC_VERSION_TEXT is referenced from Kconfig (so it needs export), @@@ -598,21 -595,20 +599,21 @@@ endi
export KBUILD_MODULES KBUILD_BUILTIN
+ifeq ($(dot-config),1) +include include/config/auto.conf +endif + ifeq ($(KBUILD_EXTMOD),) # Objects we will link into vmlinux / subdirs we need to visit init-y := init/ drivers-y := drivers/ sound/ +drivers-$(CONFIG_SAMPLES) += samples/ net-y := net/ libs-y := lib/ core-y := usr/ virt-y := virt/ endif # KBUILD_EXTMOD
-ifeq ($(dot-config),1) -include include/config/auto.conf -endif - # The all: target is the default when no target is given on the # command line. # This allow a user to issue only 'make' to build a kernel including modules @@@ -629,15 -625,6 +630,15 @@@ ifdef CONFIG_FUNCTION_TRACE CC_FLAGS_FTRACE := -pg endif
+RETPOLINE_CFLAGS_GCC := -mindirect-branch=thunk-extern -mindirect-branch-register +RETPOLINE_VDSO_CFLAGS_GCC := -mindirect-branch=thunk-inline -mindirect-branch-register +RETPOLINE_CFLAGS_CLANG := -mretpoline-external-thunk +RETPOLINE_VDSO_CFLAGS_CLANG := -mretpoline +RETPOLINE_CFLAGS := $(call cc-option,$(RETPOLINE_CFLAGS_GCC),$(call cc-option,$(RETPOLINE_CFLAGS_CLANG))) +RETPOLINE_VDSO_CFLAGS := $(call cc-option,$(RETPOLINE_VDSO_CFLAGS_GCC),$(call cc-option,$(RETPOLINE_VDSO_CFLAGS_CLANG))) +export RETPOLINE_CFLAGS +export RETPOLINE_VDSO_CFLAGS + # The arch Makefile can set ARCH_{CPP,A,C}FLAGS to override the default # values of the respective KBUILD_* variables ARCH_CPPFLAGS := @@@ -691,6 -678,7 +692,6 @@@ KBUILD_CFLAGS += $(call cc-option,-fno- KBUILD_CFLAGS += $(call cc-disable-warning,frame-address,) KBUILD_CFLAGS += $(call cc-disable-warning, format-truncation) KBUILD_CFLAGS += $(call cc-disable-warning, format-overflow) -KBUILD_CFLAGS += $(call cc-disable-warning, int-in-bool-context) KBUILD_CFLAGS += $(call cc-disable-warning, address-of-packed-member)
ifdef CONFIG_CC_OPTIMIZE_FOR_SIZE @@@ -761,11 -749,6 +762,11 @@@ KBUILD_CFLAGS += -fomit-frame-pointe endif endif
+# Initialize all stack variables with a pattern, if desired. +ifdef CONFIG_INIT_STACK_ALL +KBUILD_CFLAGS += -ftrivial-auto-var-init=pattern +endif + DEBUG_CFLAGS := $(call cc-option, -fno-var-tracking-assignments)
ifdef CONFIG_DEBUG_INFO @@@ -829,10 -812,6 +830,10 @@@ KBUILD_CFLAGS_KERNEL += -ffunction-sect LDFLAGS_vmlinux += --gc-sections endif
+ifdef CONFIG_LIVEPATCH +KBUILD_CFLAGS += $(call cc-option, -flive-patching=inline-clone) +endif + # arch Makefile may override CC so keep this after arch Makefile is included NOSTDINC_FLAGS += -nostdinc -isystem $(shell $(CC) -print-file-name=include)
@@@ -997,9 -976,8 +998,9 @@@ vmlinux-dirs := $(patsubst %/,%,$(filte $(core-y) $(core-m) $(drivers-y) $(drivers-m) \ $(net-y) $(net-m) $(libs-y) $(libs-m) $(virt-y)))
-vmlinux-alldirs := $(sort $(vmlinux-dirs) $(patsubst %/,%,$(filter %/, \ - $(init-) $(core-) $(drivers-) $(net-) $(libs-) $(virt-)))) +vmlinux-alldirs := $(sort $(vmlinux-dirs) Documentation \ + $(patsubst %/,%,$(filter %/, $(init-) $(core-) \ + $(drivers-) $(net-) $(libs-) $(virt-))))
init-y := $(patsubst %/, %/built-in.a, $(init-y)) core-y := $(patsubst %/, %/built-in.a, $(core-y)) @@@ -1016,7 -994,7 +1017,7 @@@ export KBUILD_VMLINUX_LIBS := $(libs-y1 export KBUILD_LDS := arch/$(SRCARCH)/kernel/vmlinux.lds export LDFLAGS_vmlinux # used by scripts/package/Makefile -export KBUILD_ALLDIRS := $(sort $(filter-out arch/%,$(vmlinux-alldirs)) arch Documentation include samples scripts tools) +export KBUILD_ALLDIRS := $(sort $(filter-out arch/%,$(vmlinux-alldirs)) arch include scripts tools)
vmlinux-deps := $(KBUILD_LDS) $(KBUILD_VMLINUX_OBJS) $(KBUILD_VMLINUX_LIBS)
@@@ -1053,8 -1031,11 +1054,8 @@@ vmlinux: scripts/link-vmlinux.sh autoks
targets := vmlinux
-# Build samples along the rest of the kernel. This needs headers_install. -ifdef CONFIG_SAMPLES -vmlinux-dirs += samples +# Some samples need headers_install. samples: headers_install -endif
# The actual objects are generated when descending, # make sure no implicit rule kicks in @@@ -1074,7 -1055,7 +1075,7 @@@ filechk_kernel.release = echo "$(KERNELVERSION)$$($(CONFIG_SHELL) $(srctree)/scripts/setlocalversion $(srctree))"
# Store (new) KERNELRELEASE string in include/config/kernel.release -include/config/kernel.release: $(srctree)/Makefile FORCE +include/config/kernel.release: FORCE $(call filechk,kernel.release)
# Additional helpers built in scripts/ @@@ -1096,11 -1077,9 +1097,11 @@@ PHONY += prepare archprepare prepare1 p # and if so do: # 1) Check that make has not been executed in the kernel src $(srctree) prepare3: include/config/kernel.release -ifneq ($(KBUILD_SRC),) +ifneq ($(srctree),.) @$(kecho) ' Using $(srctree) as source for kernel' - $(Q)if [ -f $(srctree)/.config -o -d $(srctree)/include/config ]; then \ + $(Q)if [ -f $(srctree)/.config -o \ + -d $(srctree)/include/config -o \ + -d $(srctree)/arch/$(SRCARCH)/include/generated ]; then \ echo >&2 " $(srctree) is not clean, please run 'make mrproper'"; \ echo >&2 " in the '$(srctree)' directory.";\ /bin/false; \ @@@ -1316,7 -1295,6 +1317,7 @@@ _modinst_ fi @cp -f $(objtree)/modules.order $(MODLIB)/ @cp -f $(objtree)/modules.builtin $(MODLIB)/ + @cp -f $(objtree)/modules.builtin.modinfo $(MODLIB)/ $(Q)$(MAKE) -f $(srctree)/scripts/Makefile.modinst
# This depmod is only for convenience to give the initial @@@ -1357,11 -1335,10 +1358,11 @@@ endif # CONFIG_MODULE
# Directories & files removed with 'make clean' CLEAN_DIRS += $(MODVERDIR) include/ksym +CLEAN_FILES += modules.builtin.modinfo
# Directories & files removed with 'make mrproper' MRPROPER_DIRS += include/config usr/include include/generated \ - arch/*/include/generated .tmp_objdiff + arch/$(SRCARCH)/include/generated .tmp_objdiff MRPROPER_FILES += .config .config.old .version \ Module.symvers tags TAGS cscope* GPATH GTAGS GRTAGS GSYMS \ signing_key.pem signing_key.priv signing_key.x509 \ @@@ -1372,7 -1349,7 +1373,7 @@@ # clean: rm-dirs := $(CLEAN_DIRS) clean: rm-files := $(CLEAN_FILES) -clean-dirs := $(addprefix _clean_, . $(vmlinux-alldirs) Documentation samples) +clean-dirs := $(addprefix _clean_, . $(vmlinux-alldirs))
PHONY += $(clean-dirs) clean archclean vmlinuxclean
$(clean-dirs):

diff --combined arch/arm/mach-mvebu/kirkwood.c
index bf3ff0f580c2,9b5f4d665374..ceaad6d5927e
--- a/arch/arm/mach-mvebu/kirkwood.c
+++ b/arch/arm/mach-mvebu/kirkwood.c
@@@ -92,7 -92,8 +92,8 @@@ static void __init kirkwood_dt_eth_fixu
  			continue;
  
  		/* skip disabled nodes or nodes with valid MAC address*/
- 		if (!of_device_is_available(pnp) || of_get_mac_address(np))
+ 		if (!of_device_is_available(pnp) ||
+ 		    !IS_ERR(of_get_mac_address(np)))
  			goto eth_fixup_skip;
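For context: on the net-next side, of_get_mac_address() now returns an ERR_PTR-encoded error (including -EPROBE_DEFER when the MAC sits in a not-yet-ready NVMEM cell) instead of NULL, which is why the check above becomes !IS_ERR(). A minimal sketch of the new calling convention — the helper name and the random-MAC fallback are illustrative, not taken from this diff:

#include <linux/err.h>
#include <linux/etherdevice.h>
#include <linux/of.h>
#include <linux/of_net.h>

/* Sketch: adopt the ERR_PTR-based of_get_mac_address() contract. */
static int example_set_mac(struct net_device *ndev, struct device_node *np)
{
	const void *mac = of_get_mac_address(np);

	if (IS_ERR(mac)) {
		/* NVMEM-backed addresses may defer until the cell appears. */
		if (PTR_ERR(mac) == -EPROBE_DEFER)
			return -EPROBE_DEFER;
		eth_hw_addr_random(ndev);	/* illustrative fallback */
	} else {
		ether_addr_copy(ndev->dev_addr, mac);
	}
	return 0;
}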
  		clk = of_clk_get(pnp, 0);
@@@ -107,6 -108,8 +108,6 @@@
  		clk_prepare_enable(clk);
  
  		/* store MAC address register contents in local-mac-address */
- 		pr_err(FW_INFO "%pOF: local-mac-address is not set\n", np);
- 
  		pmac = kzalloc(sizeof(*pmac) + 6, GFP_KERNEL);
  		if (!pmac)
  			goto eth_fixup_no_mem;

diff --combined drivers/infiniband/core/addr.c
index 2b791ce7597f,744b6ec0acb0..ba01b90c04e7
--- a/drivers/infiniband/core/addr.c
+++ b/drivers/infiniband/core/addr.c
@@@ -42,10 -42,9 +42,10 @@@
  #include <net/neighbour.h>
  #include <net/route.h>
  #include <net/netevent.h>
- #include <net/addrconf.h>
+ #include <net/ipv6_stubs.h>
  #include <net/ip6_route.h>
  #include <rdma/ib_addr.h>
 +#include <rdma/ib_cache.h>
  #include <rdma/ib_sa.h>
  #include <rdma/ib.h>
  #include <rdma/rdma_netlink.h>
@@@ -87,8 -86,8 +87,8 @@@ static inline bool ib_nl_is_good_ip_res
  	if (nlh->nlmsg_flags & RDMA_NL_LS_F_ERR)
  		return false;
- 	ret = nla_parse(tb, LS_NLA_TYPE_MAX - 1, nlmsg_data(nlh),
- 			nlmsg_len(nlh), ib_nl_addr_policy, NULL);
+ 	ret = nla_parse_deprecated(tb, LS_NLA_TYPE_MAX - 1, nlmsg_data(nlh),
+ 				   nlmsg_len(nlh), ib_nl_addr_policy, NULL);
  	if (ret)
  		return false;
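The rename above comes from the netlink strict-validation series: callers that must keep the old, liberal attribute validation move to nla_parse_deprecated(), which takes the same argument list as the old nla_parse(), while new code opts into strict checking. A hedged sketch of the pattern, with a made-up attribute set:

#include <net/netlink.h>

/* Hypothetical attribute set, for illustration only. */
enum {
	EX_ATTR_UNSPEC,
	EX_ATTR_VALUE,
	__EX_ATTR_MAX,
};
#define EX_ATTR_MAX (__EX_ATTR_MAX - 1)

static const struct nla_policy ex_policy[EX_ATTR_MAX + 1] = {
	[EX_ATTR_VALUE] = { .type = NLA_U32 },
};

/* Sketch: parse attributes with the old, liberal validation semantics. */
static int example_parse(const struct nlmsghdr *nlh, u32 *value)
{
	struct nlattr *tb[EX_ATTR_MAX + 1];
	int ret;

	/* Same argument list as the old nla_parse(). */
	ret = nla_parse_deprecated(tb, EX_ATTR_MAX, nlmsg_data(nlh),
				   nlmsg_len(nlh), ex_policy, NULL);
	if (ret)
		return ret;

	if (!tb[EX_ATTR_VALUE])
		return -EINVAL;

	*value = nla_get_u32(tb[EX_ATTR_VALUE]);
	return 0;
}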
@@@ -352,7 -351,7 +352,7 @@@ static bool has_gateway(const struct ds
  	if (family == AF_INET) {
  		rt = container_of(dst, struct rtable, dst);
- 		return rt->rt_uses_gateway;
+ 		return rt->rt_gw_family == AF_INET;
  	}
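This hunk tracks the net-next routing change that drops the rt_uses_gateway boolean: struct rtable now records the gateway's address family in rt_gw_family (0 when the route has no gateway), paving the way for IPv4 routes with IPv6 gateways. A small sketch of branching on the new field — the printing helper is illustrative, and the rt_gw4/rt_gw6 members are assumed from the same net-next series:

#include <linux/printk.h>
#include <net/route.h>

/* Sketch: inspect the gateway of a route under the new rtable layout. */
static void example_print_gateway(const struct rtable *rt)
{
	if (rt->rt_gw_family == AF_INET)
		pr_info("IPv4 gateway %pI4\n", &rt->rt_gw4);
	else if (rt->rt_gw_family == AF_INET6)
		pr_info("IPv6 gateway %pI6c\n", &rt->rt_gw6);
	else
		pr_info("no gateway\n");	/* rt_gw_family == 0 */
}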
  	rt6 = container_of(dst, struct rt6_info, dst);

diff --combined drivers/infiniband/core/nldev.c
index bced945a456d,85324012bf07..98eadd3089ce
--- a/drivers/infiniband/core/nldev.c
+++ b/drivers/infiniband/core/nldev.c
@@@ -116,10 -116,6 +116,10 @@@ static const struct nla_policy nldev_po
  	[RDMA_NLDEV_ATTR_RES_CTXN] = { .type = NLA_U32 },
  	[RDMA_NLDEV_ATTR_LINK_TYPE] = { .type = NLA_NUL_STRING,
  				.len = RDMA_NLDEV_ATTR_ENTRY_STRLEN },
 +	[RDMA_NLDEV_SYS_ATTR_NETNS_MODE] = { .type = NLA_U8 },
 +	[RDMA_NLDEV_ATTR_DEV_PROTOCOL] = { .type = NLA_NUL_STRING,
 +				.len = RDMA_NLDEV_ATTR_ENTRY_STRLEN },
 +	[RDMA_NLDEV_NET_NS_FD] = { .type = NLA_U32 },
  };
static int put_driver_name_print_type(struct sk_buff *msg, const char *name, @@@ -202,8 -198,6 +202,8 @@@ static int fill_nldev_handle(struct sk_ static int fill_dev_info(struct sk_buff *msg, struct ib_device *device) { char fw[IB_FW_VERSION_NAME_MAX]; + int ret = 0; + u8 port;
if (fill_nldev_handle(msg, device)) return -EMSGSIZE; @@@ -232,25 -226,7 +232,25 @@@ return -EMSGSIZE; if (nla_put_u8(msg, RDMA_NLDEV_ATTR_DEV_NODE_TYPE, device->node_type)) return -EMSGSIZE; - return 0; + + /* + * Link type is determined on first port and mlx4 device + * which can potentially have two different link type for the same + * IB device is considered as better to be avoided in the future, + */ + port = rdma_start_port(device); + if (rdma_cap_opa_mad(device, port)) + ret = nla_put_string(msg, RDMA_NLDEV_ATTR_DEV_PROTOCOL, "opa"); + else if (rdma_protocol_ib(device, port)) + ret = nla_put_string(msg, RDMA_NLDEV_ATTR_DEV_PROTOCOL, "ib"); + else if (rdma_protocol_iwarp(device, port)) + ret = nla_put_string(msg, RDMA_NLDEV_ATTR_DEV_PROTOCOL, "iw"); + else if (rdma_protocol_roce(device, port)) + ret = nla_put_string(msg, RDMA_NLDEV_ATTR_DEV_PROTOCOL, "roce"); + else if (rdma_protocol_usnic(device, port)) + ret = nla_put_string(msg, RDMA_NLDEV_ATTR_DEV_PROTOCOL, + "usnic"); + return ret; }
static int fill_port_info(struct sk_buff *msg, @@@ -316,7 -292,8 +316,8 @@@ static int fill_res_info_entry(struct s { struct nlattr *entry_attr;
- entry_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_RES_SUMMARY_ENTRY); + entry_attr = nla_nest_start_noflag(msg, + RDMA_NLDEV_ATTR_RES_SUMMARY_ENTRY); if (!entry_attr) return -EMSGSIZE;
@@@ -351,7 -328,7 +352,7 @@@ static int fill_res_info(struct sk_buf if (fill_nldev_handle(msg, device)) return -EMSGSIZE;
- table_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_RES_SUMMARY); + table_attr = nla_nest_start_noflag(msg, RDMA_NLDEV_ATTR_RES_SUMMARY); if (!table_attr) return -EMSGSIZE;
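Both fill_res_info_entry() and fill_res_info() pick up the other half of the netlink series: nla_nest_start() now sets NLA_F_NESTED on the container attribute, so callers that must keep the old wire format (no flag) move to nla_nest_start_noflag(). A sketch of the legacy-format nesting pattern, with hypothetical attribute types:

#include <net/netlink.h>

/* Hypothetical attribute types, for illustration only. */
enum {
	EX_NEST_UNSPEC,
	EX_NEST_SUMMARY,
	EX_NEST_CURR,
	EX_NEST_MAX_VAL,
};

/* Sketch: emit a nested attribute without the NLA_F_NESTED flag. */
static int example_fill_nested(struct sk_buff *msg, u32 curr, u32 max)
{
	struct nlattr *nest;

	nest = nla_nest_start_noflag(msg, EX_NEST_SUMMARY);
	if (!nest)
		return -EMSGSIZE;

	if (nla_put_u32(msg, EX_NEST_CURR, curr) ||
	    nla_put_u32(msg, EX_NEST_MAX_VAL, max)) {
		nla_nest_cancel(msg, nest);
		return -EMSGSIZE;
	}

	nla_nest_end(msg, nest);
	return 0;
}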
@@@ -631,14 -608,14 +632,14 @@@ static int nldev_get_doit(struct sk_buf u32 index; int err;
- err = nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1, - nldev_policy, extack); + err = nlmsg_parse_deprecated(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1, + nldev_policy, extack); if (err || !tb[RDMA_NLDEV_ATTR_DEV_INDEX]) return -EINVAL;
index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]);
- device = ib_device_get_by_index(index); + device = ib_device_get_by_index(sock_net(skb->sk), index); if (!device) return -EINVAL;
@@@ -676,13 -653,13 +677,13 @@@ static int nldev_set_doit(struct sk_buf u32 index; int err;
- err = nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1, nldev_policy, - extack); + err = nlmsg_parse_deprecated(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1, + nldev_policy, extack); if (err || !tb[RDMA_NLDEV_ATTR_DEV_INDEX]) return -EINVAL;
index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]); - device = ib_device_get_by_index(index); + device = ib_device_get_by_index(sock_net(skb->sk), index); if (!device) return -EINVAL;
@@@ -692,20 -669,9 +693,20 @@@ nla_strlcpy(name, tb[RDMA_NLDEV_ATTR_DEV_NAME], IB_DEVICE_NAME_MAX); err = ib_device_rename(device, name); + goto done; }
+ if (tb[RDMA_NLDEV_NET_NS_FD]) { + u32 ns_fd; + + ns_fd = nla_get_u32(tb[RDMA_NLDEV_NET_NS_FD]); + err = ib_device_set_netns_put(skb, device, ns_fd); + goto put_done; + } + +done: ib_device_put(device); +put_done: return err; }
@@@ -741,7 -707,7 +742,7 @@@ static int nldev_get_dumpit(struct sk_b { /* * There is no need to take lock, because - * we are relying on ib_core's lists_rwsem + * we are relying on ib_core's locking. */ return ib_enum_all_devs(_nldev_get_dumpit, skb, cb); } @@@ -756,15 -722,15 +757,15 @@@ static int nldev_port_get_doit(struct s u32 port; int err;
- err = nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1, - nldev_policy, extack); + err = nlmsg_parse_deprecated(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1, + nldev_policy, extack); if (err || !tb[RDMA_NLDEV_ATTR_DEV_INDEX] || !tb[RDMA_NLDEV_ATTR_PORT_INDEX]) return -EINVAL;
index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]); - device = ib_device_get_by_index(index); + device = ib_device_get_by_index(sock_net(skb->sk), index); if (!device) return -EINVAL;
@@@ -812,13 -778,13 +813,13 @@@ static int nldev_port_get_dumpit(struc int err; unsigned int p;
- err = nlmsg_parse(cb->nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1, - nldev_policy, NULL); + err = nlmsg_parse_deprecated(cb->nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1, + nldev_policy, NULL); if (err || !tb[RDMA_NLDEV_ATTR_DEV_INDEX]) return -EINVAL;
ifindex = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]); - device = ib_device_get_by_index(ifindex); + device = ib_device_get_by_index(sock_net(skb->sk), ifindex); if (!device) return -EINVAL;
@@@ -867,13 -833,13 +868,13 @@@ static int nldev_res_get_doit(struct sk u32 index; int ret;
- ret = nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1, - nldev_policy, extack); + ret = nlmsg_parse_deprecated(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1, + nldev_policy, extack); if (ret || !tb[RDMA_NLDEV_ATTR_DEV_INDEX]) return -EINVAL;
index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]); - device = ib_device_get_by_index(index); + device = ib_device_get_by_index(sock_net(skb->sk), index); if (!device) return -EINVAL;
@@@ -921,6 -887,7 +922,6 @@@ static int _nldev_res_get_dumpit(struc nlmsg_cancel(skb, nlh); goto out; } - nlmsg_end(skb, nlh);
idx++; @@@ -1015,13 -982,13 +1016,13 @@@ static int res_get_common_doit(struct s struct sk_buff *msg; int ret;
- ret = nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1, - nldev_policy, extack); + ret = nlmsg_parse_deprecated(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1, + nldev_policy, extack); if (ret || !tb[RDMA_NLDEV_ATTR_DEV_INDEX] || !fe->id || !tb[fe->id]) return -EINVAL;
index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]); - device = ib_device_get_by_index(index); + device = ib_device_get_by_index(sock_net(skb->sk), index); if (!device) return -EINVAL;
@@@ -1104,8 -1071,8 +1105,8 @@@ static int res_get_common_dumpit(struc u32 index, port = 0; bool filled = false;
- err = nlmsg_parse(cb->nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1, - nldev_policy, NULL); + err = nlmsg_parse_deprecated(cb->nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1, + nldev_policy, NULL); /* * Right now, we are expecting the device index to get res information, * but it is possible to extend this code to return all devices in @@@ -1118,7 -1085,7 +1119,7 @@@ return -EINVAL;
index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]); - device = ib_device_get_by_index(index); + device = ib_device_get_by_index(sock_net(skb->sk), index); if (!device) return -EINVAL;
@@@ -1142,7 -1109,7 +1143,7 @@@ goto err; }
- table_attr = nla_nest_start(skb, fe->nldev_attr); + table_attr = nla_nest_start_noflag(skb, fe->nldev_attr); if (!table_attr) { ret = -EMSGSIZE; goto err; @@@ -1168,7 -1135,7 +1169,7 @@@
filled = true;
- entry_attr = nla_nest_start(skb, fe->entry); + entry_attr = nla_nest_start_noflag(skb, fe->entry); if (!entry_attr) { ret = -EMSGSIZE; rdma_restrack_put(res); @@@ -1283,8 -1250,8 +1284,8 @@@ static int nldev_newlink(struct sk_buf char type[IFNAMSIZ]; int err;
- err = nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1, - nldev_policy, extack); + err = nlmsg_parse_deprecated(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1, + nldev_policy, extack); if (err || !tb[RDMA_NLDEV_ATTR_DEV_NAME] || !tb[RDMA_NLDEV_ATTR_LINK_TYPE] || !tb[RDMA_NLDEV_ATTR_NDEV_NAME]) return -EINVAL; @@@ -1327,13 -1294,13 +1328,13 @@@ static int nldev_dellink(struct sk_buf u32 index; int err;
- err = nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1, - nldev_policy, extack); + err = nlmsg_parse_deprecated(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1, + nldev_policy, extack); if (err || !tb[RDMA_NLDEV_ATTR_DEV_INDEX]) return -EINVAL;
index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]); - device = ib_device_get_by_index(index); + device = ib_device_get_by_index(sock_net(skb->sk), index); if (!device) return -EINVAL;
@@@ -1346,55 -1313,6 +1347,55 @@@ return 0; }
+static int nldev_get_sys_get_dumpit(struct sk_buff *skb, + struct netlink_callback *cb) +{ + struct nlattr *tb[RDMA_NLDEV_ATTR_MAX]; + struct nlmsghdr *nlh; + int err; + + err = nlmsg_parse(cb->nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1, + nldev_policy, NULL); + if (err) + return err; + + nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq, + RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, + RDMA_NLDEV_CMD_SYS_GET), + 0, 0); + + err = nla_put_u8(skb, RDMA_NLDEV_SYS_ATTR_NETNS_MODE, + (u8)ib_devices_shared_netns); + if (err) { + nlmsg_cancel(skb, nlh); + return err; + } + + nlmsg_end(skb, nlh); + return skb->len; +} + +static int nldev_set_sys_set_doit(struct sk_buff *skb, struct nlmsghdr *nlh, + struct netlink_ext_ack *extack) +{ + struct nlattr *tb[RDMA_NLDEV_ATTR_MAX]; + u8 enable; + int err; + + err = nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1, + nldev_policy, extack); + if (err || !tb[RDMA_NLDEV_SYS_ATTR_NETNS_MODE]) + return -EINVAL; + + enable = nla_get_u8(tb[RDMA_NLDEV_SYS_ATTR_NETNS_MODE]); + /* Only 0 and 1 are supported */ + if (enable > 1) + return -EINVAL; + + err = rdma_compatdev_set(enable); + return err; +} + static const struct rdma_nl_cbs nldev_cb_table[RDMA_NLDEV_NUM_OPS] = { [RDMA_NLDEV_CMD_GET] = { .doit = nldev_get_doit, @@@ -1440,13 -1358,6 +1441,13 @@@ .doit = nldev_res_get_pd_doit, .dump = nldev_res_get_pd_dumpit, }, + [RDMA_NLDEV_CMD_SYS_GET] = { + .dump = nldev_get_sys_get_dumpit, + }, + [RDMA_NLDEV_CMD_SYS_SET] = { + .doit = nldev_set_sys_set_doit, + .flags = RDMA_NL_ADMIN_PERM, + }, };
void __init nldev_init(void) diff --combined drivers/infiniband/core/sa_query.c index 114f890ab425,bb534959abf0..7d8071c7e564 --- a/drivers/infiniband/core/sa_query.c +++ b/drivers/infiniband/core/sa_query.c @@@ -40,7 -40,7 +40,7 @@@ #include <linux/slab.h> #include <linux/dma-mapping.h> #include <linux/kref.h> -#include <linux/idr.h> +#include <linux/xarray.h> #include <linux/workqueue.h> #include <uapi/linux/if_ether.h> #include <rdma/ib_pack.h> @@@ -183,7 -183,8 +183,7 @@@ static struct ib_client sa_client = .remove = ib_sa_remove_one };
-static DEFINE_SPINLOCK(idr_lock); -static DEFINE_IDR(query_idr); +static DEFINE_XARRAY_FLAGS(queries, XA_FLAGS_ALLOC | XA_FLAGS_LOCK_IRQ);
static DEFINE_SPINLOCK(tid_lock); static u32 tid; @@@ -1027,8 -1028,8 +1027,8 @@@ int ib_nl_handle_set_timeout(struct sk_ !(NETLINK_CB(skb).sk)) return -EPERM;
- ret = nla_parse(tb, LS_NLA_TYPE_MAX - 1, nlmsg_data(nlh), - nlmsg_len(nlh), ib_nl_policy, NULL); + ret = nla_parse_deprecated(tb, LS_NLA_TYPE_MAX - 1, nlmsg_data(nlh), + nlmsg_len(nlh), ib_nl_policy, NULL); attr = (const struct nlattr *)tb[LS_NLA_TYPE_TIMEOUT]; if (ret || !attr) goto settimeout_out; @@@ -1079,8 -1080,8 +1079,8 @@@ static inline int ib_nl_is_good_resolve if (nlh->nlmsg_flags & RDMA_NL_LS_F_ERR) return 0;
- ret = nla_parse(tb, LS_NLA_TYPE_MAX - 1, nlmsg_data(nlh), - nlmsg_len(nlh), ib_nl_policy, NULL); + ret = nla_parse_deprecated(tb, LS_NLA_TYPE_MAX - 1, nlmsg_data(nlh), + nlmsg_len(nlh), ib_nl_policy, NULL); if (ret) return 0;
@@@ -1179,14 -1180,14 +1179,14 @@@ void ib_sa_cancel_query(int id, struct struct ib_mad_agent *agent; struct ib_mad_send_buf *mad_buf;
- spin_lock_irqsave(&idr_lock, flags); - if (idr_find(&query_idr, id) != query) { - spin_unlock_irqrestore(&idr_lock, flags); + xa_lock_irqsave(&queries, flags); + if (xa_load(&queries, id) != query) { + xa_unlock_irqrestore(&queries, flags); return; } agent = query->port->agent; mad_buf = query->mad_buf; - spin_unlock_irqrestore(&idr_lock, flags); + xa_unlock_irqrestore(&queries, flags);
/* * If the query is still on the netlink request list, schedule @@@ -1362,14 -1363,21 +1362,14 @@@ static void init_mad(struct ib_sa_quer static int send_mad(struct ib_sa_query *query, unsigned long timeout_ms, gfp_t gfp_mask) { - bool preload = gfpflags_allow_blocking(gfp_mask); unsigned long flags; int ret, id;
- if (preload) - idr_preload(gfp_mask); - spin_lock_irqsave(&idr_lock, flags); - - id = idr_alloc(&query_idr, query, 0, 0, GFP_NOWAIT); - - spin_unlock_irqrestore(&idr_lock, flags); - if (preload) - idr_preload_end(); - if (id < 0) - return id; + xa_lock_irqsave(&queries, flags); + ret = __xa_alloc(&queries, &id, query, xa_limit_32b, gfp_mask); + xa_unlock_irqrestore(&queries, flags); + if (ret < 0) + return ret;
query->mad_buf->timeout_ms = timeout_ms; query->mad_buf->context[0] = query; @@@ -1386,9 -1394,9 +1386,9 @@@
ret = ib_post_send_mad(query->mad_buf, NULL); if (ret) { - spin_lock_irqsave(&idr_lock, flags); - idr_remove(&query_idr, id); - spin_unlock_irqrestore(&idr_lock, flags); + xa_lock_irqsave(&queries, flags); + __xa_erase(&queries, id); + xa_unlock_irqrestore(&queries, flags); }
/* @@@ -2180,9 -2188,9 +2180,9 @@@ static void send_handler(struct ib_mad_ break; }
- spin_lock_irqsave(&idr_lock, flags); - idr_remove(&query_idr, query->id); - spin_unlock_irqrestore(&idr_lock, flags); + xa_lock_irqsave(&queries, flags); + __xa_erase(&queries, query->id); + xa_unlock_irqrestore(&queries, flags);
free_mad(query); if (query->client) @@@ -2467,5 -2475,5 +2467,5 @@@ void ib_sa_cleanup(void destroy_workqueue(ib_nl_wq); mcast_cleanup(); ib_unregister_client(&sa_client); - idr_destroy(&query_idr); + WARN_ON(!xa_empty(&queries)); } diff --combined drivers/infiniband/hw/hfi1/vnic_main.c index 4d5683919b1f,2b07032dbdda..b49e60e8397d --- a/drivers/infiniband/hw/hfi1/vnic_main.c +++ b/drivers/infiniband/hw/hfi1/vnic_main.c @@@ -162,12 -162,12 +162,12 @@@ static void deallocate_vnic_ctxt(struc
void hfi1_vnic_setup(struct hfi1_devdata *dd) { - idr_init(&dd->vnic.vesw_idr); + xa_init(&dd->vnic.vesws); }
void hfi1_vnic_cleanup(struct hfi1_devdata *dd) { - idr_destroy(&dd->vnic.vesw_idr); + WARN_ON(!xa_empty(&dd->vnic.vesws)); }
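The sa_query.c and vnic hunks follow the same IDR-to-XArray conversion pattern used throughout this merge: the external spinlock plus IDR pair becomes one self-locking XArray, idr_alloc() becomes __xa_alloc() under the array's own lock, idr_find() becomes xa_load(), and idr_destroy() is replaced by a WARN_ON(!xa_empty()) check, since an empty XArray needs no teardown. A condensed sketch of the lifecycle, with struct foo and the foo_* helpers as stand-ins:

	static DEFINE_XARRAY_FLAGS(foo_table, XA_FLAGS_ALLOC | XA_FLAGS_LOCK_IRQ);

	/* allocate an id for @f; mirrors the send_mad() conversion above */
	static int foo_store(struct foo *f, u32 *id, gfp_t gfp)
	{
		unsigned long flags;
		int ret;

		xa_lock_irqsave(&foo_table, flags);
		ret = __xa_alloc(&foo_table, id, f, xa_limit_32b, gfp);
		xa_unlock_irqrestore(&foo_table, flags);
		return ret;
	}

	/* lookup is lock-free; the XArray handles RCU internally */
	static struct foo *foo_lookup(u32 id)
	{
		return xa_load(&foo_table, id);
	}

	/* remove the entry, taking the array lock ourselves */
	static void foo_remove(u32 id)
	{
		unsigned long flags;

		xa_lock_irqsave(&foo_table, flags);
		__xa_erase(&foo_table, id);
		xa_unlock_irqrestore(&foo_table, flags);
	}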
#define SUM_GRP_COUNTERS(stats, qstats, x_grp) do { \ @@@ -423,8 -423,7 +423,7 @@@ tx_finish
static u16 hfi1_vnic_select_queue(struct net_device *netdev, struct sk_buff *skb, - struct net_device *sb_dev, - select_queue_fallback_t fallback) + struct net_device *sb_dev) { struct hfi1_vnic_vport_info *vinfo = opa_vnic_dev_priv(netdev); struct opa_vnic_skb_mdata *mdata; @@@ -534,7 -533,7 +533,7 @@@ void hfi1_vnic_bypass_rcv(struct hfi1_p l4_type = hfi1_16B_get_l4(packet->ebuf); if (likely(l4_type == OPA_16B_L4_ETHR)) { vesw_id = HFI1_VNIC_GET_VESWID(packet->ebuf); - vinfo = idr_find(&dd->vnic.vesw_idr, vesw_id); + vinfo = xa_load(&dd->vnic.vesws, vesw_id);
/* * In case of invalid vesw id, count the error on @@@ -542,10 -541,9 +541,10 @@@ */ if (unlikely(!vinfo)) { struct hfi1_vnic_vport_info *vinfo_tmp; - int id_tmp = 0; + unsigned long index = 0;
- vinfo_tmp = idr_get_next(&dd->vnic.vesw_idr, &id_tmp); + vinfo_tmp = xa_find(&dd->vnic.vesws, &index, ULONG_MAX, + XA_PRESENT); if (vinfo_tmp) { spin_lock(&vport_cntr_lock); vinfo_tmp->stats[0].netstats.rx_nohandler++; @@@ -599,7 -597,8 +598,7 @@@ static int hfi1_vnic_up(struct hfi1_vni if (!vinfo->vesw_id) return -EINVAL;
- rc = idr_alloc(&dd->vnic.vesw_idr, vinfo, vinfo->vesw_id, - vinfo->vesw_id + 1, GFP_NOWAIT); + rc = xa_insert(&dd->vnic.vesws, vinfo->vesw_id, vinfo, GFP_KERNEL); if (rc < 0) return rc;
@@@ -625,7 -624,7 +624,7 @@@ static void hfi1_vnic_down(struct hfi1_ clear_bit(HFI1_VNIC_UP, &vinfo->flags); netif_carrier_off(vinfo->netdev); netif_tx_disable(vinfo->netdev); - idr_remove(&dd->vnic.vesw_idr, vinfo->vesw_id); + xa_erase(&dd->vnic.vesws, vinfo->vesw_id);
/* ensure irqs see the change */ msix_vnic_synchronize_irq(dd); diff --combined drivers/infiniband/hw/mlx5/main.c index 5ac24bce6e77,1aaa2056d188..517c8ce165b9 --- a/drivers/infiniband/hw/mlx5/main.c +++ b/drivers/infiniband/hw/mlx5/main.c @@@ -156,34 -156,6 +156,34 @@@ static int get_port_state(struct ib_dev return ret; }
+static struct mlx5_roce *mlx5_get_rep_roce(struct mlx5_ib_dev *dev, + struct net_device *ndev, + u8 *port_num) +{ + struct mlx5_eswitch *esw = dev->mdev->priv.eswitch; + struct net_device *rep_ndev; + struct mlx5_ib_port *port; + int i; + + for (i = 0; i < dev->num_ports; i++) { + port = &dev->port[i]; + if (!port->rep) + continue; + + read_lock(&port->roce.netdev_lock); + rep_ndev = mlx5_ib_get_rep_netdev(esw, + port->rep->vport); + if (rep_ndev == ndev) { + read_unlock(&port->roce.netdev_lock); + *port_num = i + 1; + return &port->roce; + } + read_unlock(&port->roce.netdev_lock); + } + + return NULL; +} + static int mlx5_netdev_event(struct notifier_block *this, unsigned long event, void *ptr) { @@@ -200,17 -172,22 +200,17 @@@
switch (event) { case NETDEV_REGISTER: + /* Should already be registered during the load */ + if (ibdev->is_rep) + break; write_lock(&roce->netdev_lock); - if (ndev->dev.parent == &mdev->pdev->dev) - if (ibdev->rep) { - struct mlx5_eswitch *esw = ibdev->mdev->priv.eswitch; - struct net_device *rep_ndev; - - rep_ndev = mlx5_ib_get_rep_netdev(esw, - ibdev->rep->vport); - if (rep_ndev == ndev) - roce->netdev = ndev; - } else if (ndev->dev.parent == mdev->device) { ++ if (ndev->dev.parent == mdev->device) roce->netdev = ndev; - } write_unlock(&roce->netdev_lock); break;
case NETDEV_UNREGISTER: + /* In case of reps, ib device goes away before the netdevs */ write_lock(&roce->netdev_lock); if (roce->netdev == ndev) roce->netdev = NULL; @@@ -228,10 -205,6 +228,10 @@@ dev_put(lag_ndev); }
+ if (ibdev->is_rep) + roce = mlx5_get_rep_roce(ibdev, ndev, &port_num); + if (!roce) + return NOTIFY_DONE; if ((upper == ndev || (!upper && ndev == roce->netdev)) && ibdev->ib_active) { struct ib_event ibev = { }; @@@ -284,11 -257,11 +284,11 @@@ static struct net_device *mlx5_ib_get_n
/* Ensure ndev does not disappear before we invoke dev_hold() */ - read_lock(&ibdev->roce[port_num - 1].netdev_lock); - ndev = ibdev->roce[port_num - 1].netdev; + read_lock(&ibdev->port[port_num - 1].roce.netdev_lock); + ndev = ibdev->port[port_num - 1].roce.netdev; if (ndev) dev_hold(ndev); - read_unlock(&ibdev->roce[port_num - 1].netdev_lock); + read_unlock(&ibdev->port[port_num - 1].roce.netdev_lock);
out: mlx5_ib_put_native_port_mdev(ibdev, port_num); @@@ -506,14 -479,9 +506,14 @@@ static int mlx5_query_port_roce(struct
/* Possible bad flows are checked before filling out props so in case * of an error it will still be zeroed out. + * Use native port in case of reps */ - err = mlx5_query_port_ptys(mdev, out, sizeof(out), MLX5_PTYS_EN, - mdev_port_num); + if (dev->is_rep) + err = mlx5_query_port_ptys(mdev, out, sizeof(out), MLX5_PTYS_EN, + 1); + else + err = mlx5_query_port_ptys(mdev, out, sizeof(out), MLX5_PTYS_EN, + mdev_port_num); if (err) goto out; ext = MLX5_CAP_PCAM_FEATURE(dev->mdev, ptys_extended_ethernet); @@@ -574,22 -542,52 +574,22 @@@ out return err; }
-struct mlx5_ib_vlan_info { - u16 vlan_id; - bool vlan; -}; - -static int get_lower_dev_vlan(struct net_device *lower_dev, void *data) -{ - struct mlx5_ib_vlan_info *vlan_info = data; - - if (is_vlan_dev(lower_dev)) { - vlan_info->vlan = true; - vlan_info->vlan_id = vlan_dev_vlan_id(lower_dev); - } - /* We are interested only in first level vlan device, so - * always return 1 to stop iterating over next level devices. - */ - return 1; -} - static int set_roce_addr(struct mlx5_ib_dev *dev, u8 port_num, unsigned int index, const union ib_gid *gid, const struct ib_gid_attr *attr) { enum ib_gid_type gid_type = IB_GID_TYPE_IB; - struct mlx5_ib_vlan_info vlan_info = { }; + u16 vlan_id = 0xffff; u8 roce_version = 0; u8 roce_l3_type = 0; u8 mac[ETH_ALEN]; + int ret;
if (gid) { gid_type = attr->gid_type; - ether_addr_copy(mac, attr->ndev->dev_addr); - - if (is_vlan_dev(attr->ndev)) { - vlan_info.vlan = true; - vlan_info.vlan_id = vlan_dev_vlan_id(attr->ndev); - } else { - /* If the netdev is upper device and if it's lower - * lower device is vlan device, consider vlan id of - * the lower vlan device for this gid entry. - */ - rcu_read_lock(); - netdev_walk_all_lower_dev_rcu(attr->ndev, - get_lower_dev_vlan, &vlan_info); - rcu_read_unlock(); - } + ret = rdma_read_gid_l2_fields(attr, &vlan_id, &mac[0]); + if (ret) + return ret; }
switch (gid_type) { @@@ -610,7 -608,7 +610,7 @@@
return mlx5_core_roce_gid_set(dev->mdev, index, roce_version, roce_l3_type, gid->raw, mac, - vlan_info.vlan, vlan_info.vlan_id, + vlan_id < VLAN_CFI_MASK, vlan_id, port_num); }
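The deleted mlx5_ib_vlan_info walker above is replaced by the core helper rdma_read_gid_l2_fields(), which resolves the source MAC and any (possibly nested) VLAN ID from the GID attribute, so drivers no longer walk lower netdevs themselves. A vlan_id of 0xffff means no VLAN, hence the vlan_id < VLAN_CFI_MASK test when programming the GID. A hedged usage sketch (foo_gid_l2 is hypothetical):

	static int foo_gid_l2(const struct ib_gid_attr *attr)
	{
		u16 vlan_id = 0xffff;	/* 0xffff == no vlan present */
		u8 mac[ETH_ALEN];
		int ret;

		ret = rdma_read_gid_l2_fields(attr, &vlan_id, &mac[0]);
		if (ret)
			return ret;

		if (vlan_id < VLAN_CFI_MASK) {
			/* a valid 12-bit VLAN id was found on the GID's netdev */
		}
		return 0;
	}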
@@@ -1409,9 -1407,7 +1409,9 @@@ static int mlx5_ib_rep_query_port(struc { int ret;
- /* Only link layer == ethernet is valid for representors */ + /* Only link layer == ethernet is valid for representors + * and we always use port 1 + */ ret = mlx5_query_port_roce(ibdev, port, props); if (ret || !props) return ret; @@@ -1958,11 -1954,11 +1958,11 @@@ static int mlx5_ib_alloc_ucontext(struc print_lib_caps(dev, context->lib_caps);
if (dev->lag_active) { - u8 port = mlx5_core_native_port_num(dev->mdev); + u8 port = mlx5_core_native_port_num(dev->mdev) - 1;
atomic_set(&context->tx_port_affinity, atomic_add_return( - 1, &dev->roce[port].tx_port_affinity)); + 1, &dev->port[port].roce.tx_port_affinity)); }
return 0; @@@ -2064,22 -2060,21 +2064,22 @@@ static int mlx5_ib_mmap_clock_info_page struct vm_area_struct *vma, struct mlx5_ib_ucontext *context) { - if (vma->vm_end - vma->vm_start != PAGE_SIZE) + if ((vma->vm_end - vma->vm_start != PAGE_SIZE) || + !(vma->vm_flags & VM_SHARED)) return -EINVAL;
if (get_index(vma->vm_pgoff) != MLX5_IB_CLOCK_INFO_V1) return -EOPNOTSUPP;
- if (vma->vm_flags & VM_WRITE) + if (vma->vm_flags & (VM_WRITE | VM_EXEC)) return -EPERM; - vma->vm_flags &= ~VM_MAYWRITE; + vma->vm_flags &= ~(VM_MAYWRITE | VM_MAYEXEC);
- if (!dev->mdev->clock_info_page) + if (!dev->mdev->clock_info) return -EOPNOTSUPP;
- return rdma_user_mmap_page(&context->ibucontext, vma, - dev->mdev->clock_info_page, PAGE_SIZE); + return vm_insert_page(vma, vma->vm_start, + virt_to_page(dev->mdev->clock_info)); }
static int uar_mmap(struct mlx5_ib_dev *dev, enum mlx5_ib_mmap_cmd cmd, @@@ -2321,7 -2316,7 +2321,7 @@@ err_free return ERR_PTR(err); }
-int mlx5_ib_dealloc_dm(struct ib_dm *ibdm) +int mlx5_ib_dealloc_dm(struct ib_dm *ibdm, struct uverbs_attr_bundle *attrs) { struct mlx5_memic *memic = &to_mdev(ibdm->device)->memic; struct mlx5_ib_dm *dm = to_mdm(ibdm); @@@ -2336,10 -2331,7 +2336,10 @@@ page_idx = (dm->dev_addr - memic->dev->bar_addr - MLX5_CAP64_DEV_MEM(memic->dev, memic_bar_start_addr)) >> PAGE_SHIFT; - bitmap_clear(to_mucontext(ibdm->uobject->context)->dm_pages, + bitmap_clear(rdma_udata_to_drv_context( + &attrs->driver_udata, + struct mlx5_ib_ucontext, + ibucontext)->dm_pages, page_idx, DIV_ROUND_UP(act_size, PAGE_SIZE));
@@@ -2348,7 -2340,8 +2348,7 @@@ return 0; }
-static int mlx5_ib_alloc_pd(struct ib_pd *ibpd, struct ib_ucontext *context, - struct ib_udata *udata) +static int mlx5_ib_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata) { struct mlx5_ib_pd *pd = to_mpd(ibpd); struct ib_device *ibdev = ibpd->device; @@@ -2357,10 -2350,8 +2357,10 @@@ u32 out[MLX5_ST_SZ_DW(alloc_pd_out)] = {}; u32 in[MLX5_ST_SZ_DW(alloc_pd_in)] = {}; u16 uid = 0; + struct mlx5_ib_ucontext *context = rdma_udata_to_drv_context( + udata, struct mlx5_ib_ucontext, ibucontext);
- uid = context ? to_mucontext(context)->devx_uid : 0; + uid = context ? context->devx_uid : 0; MLX5_SET(alloc_pd_in, in, opcode, MLX5_CMD_OP_ALLOC_PD); MLX5_SET(alloc_pd_in, in, uid, uid); err = mlx5_cmd_exec(to_mdev(ibdev)->mdev, in, sizeof(in), @@@ -2370,7 -2361,7 +2370,7 @@@
pd->pdn = MLX5_GET(alloc_pd_out, out, pd); pd->uid = uid; - if (context) { + if (udata) { resp.pdn = pd->pdn; if (ib_copy_to_udata(udata, &resp, sizeof(resp))) { mlx5_cmd_dealloc_pd(to_mdev(ibdev)->mdev, pd->pdn, uid); @@@ -2381,7 -2372,7 +2381,7 @@@ return 0; }
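The alloc_pd/dealloc_pd signature changes are part of an RDMA-wide cleanup in which verb handlers no longer receive an ib_ucontext pointer: drivers recover the per-process context from the udata via rdma_udata_to_drv_context(), which yields NULL for in-kernel callers that pass udata == NULL, and they test udata (not context) to decide whether a user response must be copied out. A hedged sketch of the idiom, mirroring the uid lookup above (foo_pd_uid is hypothetical):

	/* Sketch: NULL udata (kernel-side caller) gives a NULL context,
	 * so the DEVX uid falls back to 0.
	 */
	static u16 foo_pd_uid(struct ib_udata *udata)
	{
		struct mlx5_ib_ucontext *ctx =
			rdma_udata_to_drv_context(udata, struct mlx5_ib_ucontext,
						  ibucontext);

		return ctx ? ctx->devx_uid : 0;
	}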
-static void mlx5_ib_dealloc_pd(struct ib_pd *pd) +static void mlx5_ib_dealloc_pd(struct ib_pd *pd, struct ib_udata *udata) { struct mlx5_ib_dev *mdev = to_mdev(pd->device); struct mlx5_ib_pd *mpd = to_mpd(pd); @@@ -3160,10 -3151,10 +3160,10 @@@ static struct mlx5_ib_flow_prio *get_fl if (ft_type == MLX5_IB_FT_RX) { fn_type = MLX5_FLOW_NAMESPACE_BYPASS; prio = &dev->flow_db->prios[priority]; - if (!dev->rep && + if (!dev->is_rep && MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev, decap)) flags |= MLX5_FLOW_TABLE_TUNNEL_EN_DECAP; - if (!dev->rep && + if (!dev->is_rep && MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev, reformat_l3_tunnel_to_l2)) flags |= MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT; @@@ -3173,7 -3164,7 +3173,7 @@@ log_max_ft_size)); fn_type = MLX5_FLOW_NAMESPACE_EGRESS; prio = &dev->flow_db->egress_prios[priority]; - if (!dev->rep && + if (!dev->is_rep && MLX5_CAP_FLOWTABLE_NIC_TX(dev->mdev, reformat)) flags |= MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT; } @@@ -3206,11 -3197,12 +3206,11 @@@ if (!ns) return ERR_PTR(-ENOTSUPP);
- if (num_entries > max_table_size) - return ERR_PTR(-ENOMEM); + max_table_size = min_t(int, num_entries, max_table_size);
ft = prio->flow_table; if (!ft) - return _get_prio(ns, prio, priority, num_entries, num_groups, + return _get_prio(ns, prio, priority, max_table_size, num_groups, flags);
return prio; @@@ -3378,7 -3370,7 +3378,7 @@@ static struct mlx5_ib_flow_handler *_cr if (!is_valid_attr(dev->mdev, flow_attr)) return ERR_PTR(-EINVAL);
- if (dev->rep && is_egress) + if (dev->is_rep && is_egress) return ERR_PTR(-EINVAL);
spec = kvzalloc(sizeof(*spec), GFP_KERNEL); @@@ -3409,17 -3401,13 +3409,17 @@@ if (!flow_is_multicast_only(flow_attr)) set_underlay_qp(dev, spec, underlay_qpn);
- if (dev->rep) { + if (dev->is_rep) { void *misc;
+ if (!dev->port[flow_attr->port - 1].rep) { + err = -EINVAL; + goto free; + } misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters); MLX5_SET(fte_match_set_misc, misc, source_port, - dev->rep->vport); + dev->port[flow_attr->port - 1].rep->vport); misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters); MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port); @@@ -3781,16 -3769,11 +3781,16 @@@ _get_flow_table(struct mlx5_ib_dev *dev bool mcast) { struct mlx5_flow_namespace *ns = NULL; - struct mlx5_ib_flow_prio *prio; - int max_table_size; + struct mlx5_ib_flow_prio *prio = NULL; + int max_table_size = 0; u32 flags = 0; int priority;
+ if (mcast) + priority = MLX5_IB_FLOW_MCAST_PRIO; + else + priority = ib_prio_to_core_prio(fs_matcher->priority, false); + if (fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_BYPASS) { max_table_size = BIT(MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev, log_max_ft_size)); @@@ -3799,18 -3782,20 +3799,18 @@@ if (MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev, reformat_l3_tunnel_to_l2)) flags |= MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT; - } else { /* Can only be MLX5_FLOW_NAMESPACE_EGRESS */ - max_table_size = BIT(MLX5_CAP_FLOWTABLE_NIC_TX(dev->mdev, - log_max_ft_size)); + } else if (fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_EGRESS) { + max_table_size = BIT( + MLX5_CAP_FLOWTABLE_NIC_TX(dev->mdev, log_max_ft_size)); if (MLX5_CAP_FLOWTABLE_NIC_TX(dev->mdev, reformat)) flags |= MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT; + } else if (fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_FDB) { + max_table_size = BIT( + MLX5_CAP_ESW_FLOWTABLE_FDB(dev->mdev, log_max_ft_size)); + priority = FDB_BYPASS_PATH; }
- if (max_table_size < MLX5_FS_MAX_ENTRIES) - return ERR_PTR(-ENOMEM); - - if (mcast) - priority = MLX5_IB_FLOW_MCAST_PRIO; - else - priority = ib_prio_to_core_prio(fs_matcher->priority, false); + max_table_size = min_t(int, max_table_size, MLX5_FS_MAX_ENTRIES);
ns = mlx5_get_flow_namespace(dev->mdev, fs_matcher->ns_type); if (!ns) @@@ -3818,18 -3803,13 +3818,18 @@@
if (fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_BYPASS) prio = &dev->flow_db->prios[priority]; - else + else if (fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_EGRESS) prio = &dev->flow_db->egress_prios[priority]; + else if (fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_FDB) + prio = &dev->flow_db->fdb; + + if (!prio) + return ERR_PTR(-EINVAL);
if (prio->flow_table) return prio;
- return _get_prio(ns, prio, priority, MLX5_FS_MAX_ENTRIES, + return _get_prio(ns, prio, priority, max_table_size, MLX5_FS_MAX_TYPES, flags); }
@@@ -4376,9 -4356,13 +4376,13 @@@ static void delay_drop_handler(struct w static void handle_general_event(struct mlx5_ib_dev *ibdev, struct mlx5_eqe *eqe, struct ib_event *ibev) { + u8 port = (eqe->data.port.port >> 4) & 0xf; + switch (eqe->sub_type) { case MLX5_GENERAL_SUBTYPE_DELAY_DROP_TIMEOUT: - schedule_work(&ibdev->delay_drop.delay_drop_work); + if (mlx5_ib_port_link_layer(&ibdev->ib_dev, port) == + IB_LINK_LAYER_ETHERNET) + schedule_work(&ibdev->delay_drop.delay_drop_work); break; default: /* do nothing */ return; @@@ -4525,7 -4509,7 +4529,7 @@@ static int set_has_smi_cap(struct mlx5_ int err; int port;
- for (port = 1; port <= dev->num_ports; port++) { + for (port = 1; port <= ARRAY_SIZE(dev->mdev->port_caps); port++) { dev->mdev->port_caps[port - 1].has_smi = false; if (MLX5_CAP_GEN(dev->mdev, port_type) == MLX5_CAP_PORT_TYPE_IB) { @@@ -4556,7 -4540,7 +4560,7 @@@ static void get_ext_port_caps(struct ml mlx5_query_ext_port_caps(dev, port); }
-static int get_port_caps(struct mlx5_ib_dev *dev, u8 port) +static int __get_port_caps(struct mlx5_ib_dev *dev, u8 port) { struct ib_device_attr *dprops = NULL; struct ib_port_attr *pprops = NULL; @@@ -4571,6 -4555,10 +4575,6 @@@ if (!dprops) goto out;
- err = set_has_smi_cap(dev); - if (err) - goto out; - err = mlx5_ib_query_device(&dev->ib_dev, dprops, &uhw); if (err) { mlx5_ib_warn(dev, "query_device failed %d\n", err); @@@ -4599,16 -4587,6 +4603,16 @@@ out return err; }
+static int get_port_caps(struct mlx5_ib_dev *dev, u8 port) +{ + /* For representors use port 1, as this is the only native + * port + */ + if (dev->is_rep) + return __get_port_caps(dev, 1); + return __get_port_caps(dev, port); +} + static void destroy_umrc_res(struct mlx5_ib_dev *dev) { int err; @@@ -4618,7 -4596,7 +4622,7 @@@ mlx5_ib_warn(dev, "mr cache cleanup failed\n");
if (dev->umrc.qp) - mlx5_ib_destroy_qp(dev->umrc.qp); + mlx5_ib_destroy_qp(dev->umrc.qp, NULL); if (dev->umrc.cq) ib_free_cq(dev->umrc.cq); if (dev->umrc.pd) @@@ -4723,7 -4701,7 +4727,7 @@@ static int create_umr_res(struct mlx5_i return 0;
error_4: - mlx5_ib_destroy_qp(qp); + mlx5_ib_destroy_qp(qp, NULL); dev->umrc.qp = NULL;
error_3: @@@ -4774,11 -4752,11 +4778,11 @@@ static int create_dev_resources(struct devr->p0->uobject = NULL; atomic_set(&devr->p0->usecnt, 0);
- ret = mlx5_ib_alloc_pd(devr->p0, NULL, NULL); + ret = mlx5_ib_alloc_pd(devr->p0, NULL); if (ret) goto error0;
- devr->c0 = mlx5_ib_create_cq(&dev->ib_dev, &cq_attr, NULL, NULL); + devr->c0 = mlx5_ib_create_cq(&dev->ib_dev, &cq_attr, NULL); if (IS_ERR(devr->c0)) { ret = PTR_ERR(devr->c0); goto error1; @@@ -4790,7 -4768,7 +4794,7 @@@ devr->c0->cq_context = NULL; atomic_set(&devr->c0->usecnt, 0);
- devr->x0 = mlx5_ib_alloc_xrcd(&dev->ib_dev, NULL, NULL); + devr->x0 = mlx5_ib_alloc_xrcd(&dev->ib_dev, NULL); if (IS_ERR(devr->x0)) { ret = PTR_ERR(devr->x0); goto error2; @@@ -4801,7 -4779,7 +4805,7 @@@ mutex_init(&devr->x0->tgt_qp_mutex); INIT_LIST_HEAD(&devr->x0->tgt_qp_list);
- devr->x1 = mlx5_ib_alloc_xrcd(&dev->ib_dev, NULL, NULL); + devr->x1 = mlx5_ib_alloc_xrcd(&dev->ib_dev, NULL); if (IS_ERR(devr->x1)) { ret = PTR_ERR(devr->x1); goto error3; @@@ -4819,21 -4797,19 +4823,21 @@@ attr.ext.cq = devr->c0; attr.ext.xrc.xrcd = devr->x0;
- devr->s0 = mlx5_ib_create_srq(devr->p0, &attr, NULL); - if (IS_ERR(devr->s0)) { - ret = PTR_ERR(devr->s0); + devr->s0 = rdma_zalloc_drv_obj(ibdev, ib_srq); + if (!devr->s0) { + ret = -ENOMEM; goto error4; } + devr->s0->device = &dev->ib_dev; devr->s0->pd = devr->p0; - devr->s0->uobject = NULL; - devr->s0->event_handler = NULL; - devr->s0->srq_context = NULL; devr->s0->srq_type = IB_SRQT_XRC; devr->s0->ext.xrc.xrcd = devr->x0; devr->s0->ext.cq = devr->c0; + ret = mlx5_ib_create_srq(devr->s0, &attr, NULL); + if (ret) + goto err_create; + atomic_inc(&devr->s0->ext.xrc.xrcd->usecnt); atomic_inc(&devr->s0->ext.cq->usecnt); atomic_inc(&devr->p0->usecnt); @@@ -4843,21 -4819,18 +4847,21 @@@ attr.attr.max_sge = 1; attr.attr.max_wr = 1; attr.srq_type = IB_SRQT_BASIC; - devr->s1 = mlx5_ib_create_srq(devr->p0, &attr, NULL); - if (IS_ERR(devr->s1)) { - ret = PTR_ERR(devr->s1); + devr->s1 = rdma_zalloc_drv_obj(ibdev, ib_srq); + if (!devr->s1) { + ret = -ENOMEM; goto error5; } + devr->s1->device = &dev->ib_dev; devr->s1->pd = devr->p0; - devr->s1->uobject = NULL; - devr->s1->event_handler = NULL; - devr->s1->srq_context = NULL; devr->s1->srq_type = IB_SRQT_BASIC; devr->s1->ext.cq = devr->c0; + + ret = mlx5_ib_create_srq(devr->s1, &attr, NULL); + if (ret) + goto error6; + atomic_inc(&devr->p0->usecnt); atomic_set(&devr->s1->usecnt, 0);
@@@ -4869,20 -4842,16 +4873,20 @@@
return 0;
+error6: + kfree(devr->s1); error5: - mlx5_ib_destroy_srq(devr->s0); + mlx5_ib_destroy_srq(devr->s0, NULL); +err_create: + kfree(devr->s0); error4: - mlx5_ib_dealloc_xrcd(devr->x1); + mlx5_ib_dealloc_xrcd(devr->x1, NULL); error3: - mlx5_ib_dealloc_xrcd(devr->x0); + mlx5_ib_dealloc_xrcd(devr->x0, NULL); error2: - mlx5_ib_destroy_cq(devr->c0); + mlx5_ib_destroy_cq(devr->c0, NULL); error1: - mlx5_ib_dealloc_pd(devr->p0); + mlx5_ib_dealloc_pd(devr->p0, NULL); error0: kfree(devr->p0); return ret; @@@ -4890,20 -4859,20 +4894,20 @@@
static void destroy_dev_resources(struct mlx5_ib_resources *devr) { - struct mlx5_ib_dev *dev = - container_of(devr, struct mlx5_ib_dev, devr); int port;
- mlx5_ib_destroy_srq(devr->s1); - mlx5_ib_destroy_srq(devr->s0); - mlx5_ib_dealloc_xrcd(devr->x0); - mlx5_ib_dealloc_xrcd(devr->x1); - mlx5_ib_destroy_cq(devr->c0); - mlx5_ib_dealloc_pd(devr->p0); + mlx5_ib_destroy_srq(devr->s1, NULL); + kfree(devr->s1); + mlx5_ib_destroy_srq(devr->s0, NULL); + kfree(devr->s0); + mlx5_ib_dealloc_xrcd(devr->x0, NULL); + mlx5_ib_dealloc_xrcd(devr->x1, NULL); + mlx5_ib_destroy_cq(devr->c0, NULL); + mlx5_ib_dealloc_pd(devr->p0, NULL); kfree(devr->p0);
/* Make sure no change P_Key work items are still executing */ - for (port = 0; port < dev->num_ports; ++port) + for (port = 0; port < ARRAY_SIZE(devr->ports); ++port) cancel_work_sync(&devr->ports[port].pkey_change_work); }
@@@ -5046,10 -5015,10 +5050,10 @@@ static int mlx5_add_netdev_notifier(str { int err;
- dev->roce[port_num].nb.notifier_call = mlx5_netdev_event; - err = register_netdevice_notifier(&dev->roce[port_num].nb); + dev->port[port_num].roce.nb.notifier_call = mlx5_netdev_event; + err = register_netdevice_notifier(&dev->port[port_num].roce.nb); if (err) { - dev->roce[port_num].nb.notifier_call = NULL; + dev->port[port_num].roce.nb.notifier_call = NULL; return err; }
@@@ -5058,9 -5027,9 +5062,9 @@@
static void mlx5_remove_netdev_notifier(struct mlx5_ib_dev *dev, u8 port_num) { - if (dev->roce[port_num].nb.notifier_call) { - unregister_netdevice_notifier(&dev->roce[port_num].nb); - dev->roce[port_num].nb.notifier_call = NULL; + if (dev->port[port_num].roce.nb.notifier_call) { + unregister_netdevice_notifier(&dev->port[port_num].roce.nb); + dev->port[port_num].roce.nb.notifier_call = NULL; } }
@@@ -5609,7 -5578,7 +5613,7 @@@ static void mlx5_ib_unbind_slave_port(s mlx5_ib_err(ibdev, "Failed to unaffiliate port %u\n", port_num + 1);
- ibdev->roce[port_num].last_port_state = IB_PORT_DOWN; + ibdev->port[port_num].roce.last_port_state = IB_PORT_DOWN; }
/* The mlx5_ib_multiport_mutex should be held when calling this function */ @@@ -5710,7 -5679,8 +5714,8 @@@ static int mlx5_ib_init_multiport_maste }
if (bound) { - dev_dbg(&mpi->mdev->pdev->dev, "removing port from unaffiliated list.\n"); + dev_dbg(mpi->mdev->device, + "removing port from unaffiliated list.\n"); mlx5_ib_dbg(dev, "port %d bound\n", i + 1); list_del(&mpi->list); break; @@@ -5859,36 -5829,35 +5864,36 @@@ static struct ib_counters *mlx5_ib_crea return &mcounters->ibcntrs; }
-void mlx5_ib_stage_init_cleanup(struct mlx5_ib_dev *dev) +static void mlx5_ib_stage_init_cleanup(struct mlx5_ib_dev *dev) { mlx5_ib_cleanup_multiport_master(dev); if (IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING)) { srcu_barrier(&dev->mr_srcu); cleanup_srcu_struct(&dev->mr_srcu); } - kfree(dev->port); }
-int mlx5_ib_stage_init_init(struct mlx5_ib_dev *dev) +static int mlx5_ib_stage_init_init(struct mlx5_ib_dev *dev) { struct mlx5_core_dev *mdev = dev->mdev; int err; int i;
- dev->port = kcalloc(dev->num_ports, sizeof(*dev->port), - GFP_KERNEL); - if (!dev->port) - return -ENOMEM; - for (i = 0; i < dev->num_ports; i++) { spin_lock_init(&dev->port[i].mp.mpi_lock); - rwlock_init(&dev->roce[i].netdev_lock); + rwlock_init(&dev->port[i].roce.netdev_lock); + dev->port[i].roce.dev = dev; + dev->port[i].roce.native_port_num = i + 1; + dev->port[i].roce.last_port_state = IB_PORT_DOWN; }
err = mlx5_ib_init_multiport_master(dev); if (err) - goto err_free_port; + return err; + + err = set_has_smi_cap(dev); + if (err) + return err;
if (!mlx5_core_mp_enabled(mdev)) { for (i = 1; i <= dev->num_ports; i++) { @@@ -5910,7 -5879,7 +5915,7 @@@ dev->ib_dev.local_dma_lkey = 0 /* not supported for now */; dev->ib_dev.phys_port_cnt = dev->num_ports; dev->ib_dev.num_comp_vectors = mlx5_comp_vectors_count(mdev); - dev->ib_dev.dev.parent = &mdev->pdev->dev; + dev->ib_dev.dev.parent = mdev->device;
mutex_init(&dev->cap_mask_mutex); INIT_LIST_HEAD(&dev->qp_list); @@@ -5929,6 -5898,9 +5934,6 @@@ err_mp: mlx5_ib_cleanup_multiport_master(dev);
-err_free_port: - kfree(dev->port); - return -ENOMEM; }
@@@ -5944,6 -5916,20 +5949,6 @@@ static int mlx5_ib_stage_flow_db_init(s return 0; }
-int mlx5_ib_stage_rep_flow_db_init(struct mlx5_ib_dev *dev) -{ - struct mlx5_ib_dev *nic_dev; - - nic_dev = mlx5_ib_get_uplink_ibdev(dev->mdev->priv.eswitch); - - if (!nic_dev) - return -EINVAL; - - dev->flow_db = nic_dev->flow_db; - - return 0; -} - static void mlx5_ib_stage_flow_db_cleanup(struct mlx5_ib_dev *dev) { kfree(dev->flow_db); @@@ -6003,10 -5989,7 +6008,10 @@@ static const struct ib_device_ops mlx5_ .req_notify_cq = mlx5_ib_arm_cq, .rereg_user_mr = mlx5_ib_rereg_user_mr, .resize_cq = mlx5_ib_resize_cq, + + INIT_RDMA_OBJ_SIZE(ib_ah, mlx5_ib_ah, ibah), INIT_RDMA_OBJ_SIZE(ib_pd, mlx5_ib_pd, ibpd), + INIT_RDMA_OBJ_SIZE(ib_srq, mlx5_ib_srq, ibsrq), INIT_RDMA_OBJ_SIZE(ib_ucontext, mlx5_ib_ucontext, ibucontext), };
@@@ -6042,7 -6025,7 +6047,7 @@@ static const struct ib_device_ops mlx5_ .reg_dm_mr = mlx5_ib_reg_dm_mr, };
-int mlx5_ib_stage_caps_init(struct mlx5_ib_dev *dev) +static int mlx5_ib_stage_caps_init(struct mlx5_ib_dev *dev) { struct mlx5_core_dev *mdev = dev->mdev; int err; @@@ -6148,7 -6131,7 +6153,7 @@@ static const struct ib_device_ops mlx5_ .query_port = mlx5_ib_rep_query_port, };
-int mlx5_ib_stage_rep_non_default_cb(struct mlx5_ib_dev *dev) +static int mlx5_ib_stage_rep_non_default_cb(struct mlx5_ib_dev *dev) { ib_set_device_ops(&dev->ib_dev, &mlx5_ib_dev_port_rep_ops); return 0; @@@ -6166,6 -6149,13 +6171,6 @@@ static const struct ib_device_ops mlx5_ static int mlx5_ib_stage_common_roce_init(struct mlx5_ib_dev *dev) { u8 port_num; - int i; - - for (i = 0; i < dev->num_ports; i++) { - dev->roce[i].dev = dev; - dev->roce[i].native_port_num = i + 1; - dev->roce[i].last_port_state = IB_PORT_DOWN; - }
dev->ib_dev.uverbs_ex_cmd_mask |= (1ull << IB_USER_VERBS_EX_CMD_CREATE_WQ) | @@@ -6177,7 -6167,6 +6182,7 @@@
port_num = mlx5_core_native_port_num(dev->mdev) - 1;
+ /* Register only for native ports */ return mlx5_add_netdev_notifier(dev, port_num); }
@@@ -6188,7 -6177,7 +6193,7 @@@ static void mlx5_ib_stage_common_roce_c mlx5_remove_netdev_notifier(dev, port_num); }
-int mlx5_ib_stage_rep_roce_init(struct mlx5_ib_dev *dev) +static int mlx5_ib_stage_rep_roce_init(struct mlx5_ib_dev *dev) { struct mlx5_core_dev *mdev = dev->mdev; enum rdma_link_layer ll; @@@ -6204,7 -6193,7 +6209,7 @@@ return err; }
-void mlx5_ib_stage_rep_roce_cleanup(struct mlx5_ib_dev *dev) +static void mlx5_ib_stage_rep_roce_cleanup(struct mlx5_ib_dev *dev) { mlx5_ib_stage_common_roce_cleanup(dev); } @@@ -6251,12 -6240,12 +6256,12 @@@ static void mlx5_ib_stage_roce_cleanup( } }
-int mlx5_ib_stage_dev_res_init(struct mlx5_ib_dev *dev) +static int mlx5_ib_stage_dev_res_init(struct mlx5_ib_dev *dev) { return create_dev_resources(&dev->devr); }
-void mlx5_ib_stage_dev_res_cleanup(struct mlx5_ib_dev *dev) +static void mlx5_ib_stage_dev_res_cleanup(struct mlx5_ib_dev *dev) { destroy_dev_resources(&dev->devr); } @@@ -6278,7 -6267,7 +6283,7 @@@ static const struct ib_device_ops mlx5_ .get_hw_stats = mlx5_ib_get_hw_stats, };
-int mlx5_ib_stage_counters_init(struct mlx5_ib_dev *dev) +static int mlx5_ib_stage_counters_init(struct mlx5_ib_dev *dev) { if (MLX5_CAP_GEN(dev->mdev, max_qp_cnt)) { ib_set_device_ops(&dev->ib_dev, &mlx5_ib_dev_hw_stats_ops); @@@ -6289,7 -6278,7 +6294,7 @@@ return 0; }
-void mlx5_ib_stage_counters_cleanup(struct mlx5_ib_dev *dev) +static void mlx5_ib_stage_counters_cleanup(struct mlx5_ib_dev *dev) { if (MLX5_CAP_GEN(dev->mdev, max_qp_cnt)) mlx5_ib_dealloc_counters(dev); @@@ -6319,7 -6308,7 +6324,7 @@@ static void mlx5_ib_stage_uar_cleanup(s mlx5_put_uars_page(dev->mdev, dev->mdev->priv.uar); }
-int mlx5_ib_stage_bfrag_init(struct mlx5_ib_dev *dev) +static int mlx5_ib_stage_bfrag_init(struct mlx5_ib_dev *dev) { int err;
@@@ -6334,13 -6323,13 +6339,13 @@@ return err; }
-void mlx5_ib_stage_bfrag_cleanup(struct mlx5_ib_dev *dev) +static void mlx5_ib_stage_bfrag_cleanup(struct mlx5_ib_dev *dev) { mlx5_free_bfreg(dev->mdev, &dev->fp_bfreg); mlx5_free_bfreg(dev->mdev, &dev->bfreg); }
-int mlx5_ib_stage_ib_reg_init(struct mlx5_ib_dev *dev) +static int mlx5_ib_stage_ib_reg_init(struct mlx5_ib_dev *dev) { const char *name;
@@@ -6352,17 -6341,17 +6357,17 @@@ return ib_register_device(&dev->ib_dev, name); }
-void mlx5_ib_stage_pre_ib_reg_umr_cleanup(struct mlx5_ib_dev *dev) +static void mlx5_ib_stage_pre_ib_reg_umr_cleanup(struct mlx5_ib_dev *dev) { destroy_umrc_res(dev); }
-void mlx5_ib_stage_ib_reg_cleanup(struct mlx5_ib_dev *dev) +static void mlx5_ib_stage_ib_reg_cleanup(struct mlx5_ib_dev *dev) { ib_unregister_device(&dev->ib_dev); }
-int mlx5_ib_stage_post_ib_reg_umr_init(struct mlx5_ib_dev *dev) +static int mlx5_ib_stage_post_ib_reg_umr_init(struct mlx5_ib_dev *dev) { return create_umr_res(dev); } @@@ -6417,9 -6406,6 +6422,9 @@@ void __mlx5_ib_remove(struct mlx5_ib_de if (profile->stage[stage].cleanup) profile->stage[stage].cleanup(dev); } + + kfree(dev->port); + ib_dealloc_device(&dev->ib_dev); }
void *__mlx5_ib_add(struct mlx5_ib_dev *dev, @@@ -6541,9 -6527,6 +6546,9 @@@ const struct mlx5_ib_profile uplink_rep STAGE_CREATE(MLX5_IB_STAGE_PRE_IB_REG_UMR, NULL, mlx5_ib_stage_pre_ib_reg_umr_cleanup), + STAGE_CREATE(MLX5_IB_STAGE_WHITELIST_UID, + mlx5_ib_stage_devx_init, + mlx5_ib_stage_devx_cleanup), STAGE_CREATE(MLX5_IB_STAGE_IB_REG, mlx5_ib_stage_ib_reg_init, mlx5_ib_stage_ib_reg_cleanup), @@@ -6585,7 -6568,8 +6590,8 @@@ static void *mlx5_ib_add_slave_port(str
if (!bound) { list_add_tail(&mpi->list, &mlx5_ib_unaffiliated_port_list); - dev_dbg(&mdev->pdev->dev, "no suitable IB device found to bind to, added to unaffiliated list.\n"); + dev_dbg(mdev->device, + "no suitable IB device found to bind to, added to unaffiliated list.\n"); } mutex_unlock(&mlx5_ib_multiport_mutex);
@@@ -6597,14 -6581,12 +6603,14 @@@ static void *mlx5_ib_add(struct mlx5_co enum rdma_link_layer ll; struct mlx5_ib_dev *dev; int port_type_cap; + int num_ports;
printk_once(KERN_INFO "%s", mlx5_version);
if (MLX5_ESWITCH_MANAGER(mdev) && mlx5_ib_eswitch_mode(mdev->priv.eswitch) == SRIOV_OFFLOADS) { - mlx5_ib_register_vport_reps(mdev); + if (!mlx5_core_mp_enabled(mdev)) + mlx5_ib_register_vport_reps(mdev); return mdev; }
@@@ -6614,20 -6596,13 +6620,20 @@@ if (mlx5_core_is_mp_slave(mdev) && ll == IB_LINK_LAYER_ETHERNET) return mlx5_ib_add_slave_port(mdev);
+ num_ports = max(MLX5_CAP_GEN(mdev, num_ports), + MLX5_CAP_GEN(mdev, num_vhca_ports)); dev = ib_alloc_device(mlx5_ib_dev, ib_dev); if (!dev) return NULL; + dev->port = kcalloc(num_ports, sizeof(*dev->port), + GFP_KERNEL); + if (!dev->port) { + ib_dealloc_device((struct ib_device *)dev); + return NULL; + }
dev->mdev = mdev; - dev->num_ports = max(MLX5_CAP_GEN(mdev, num_ports), - MLX5_CAP_GEN(mdev, num_vhca_ports)); + dev->num_ports = num_ports;
return __mlx5_ib_add(dev, &pf_profile); } @@@ -6654,6 -6629,8 +6660,6 @@@ static void mlx5_ib_remove(struct mlx5_
dev = context; __mlx5_ib_remove(dev, dev->profile, MLX5_IB_STAGE_MAX); - - ib_dealloc_device((struct ib_device *)dev); }
static struct mlx5_interface mlx5_ib_interface = { diff --combined drivers/infiniband/hw/nes/nes_cm.c index 79a43531c66d,0010a3ed64f1..62bf986eba67 --- a/drivers/infiniband/hw/nes/nes_cm.c +++ b/drivers/infiniband/hw/nes/nes_cm.c @@@ -1407,7 -1407,7 +1407,7 @@@ static int nes_addr_resolve_neigh(struc if (neigh->nud_state & NUD_VALID) { nes_debug(NES_DBG_CM, "Neighbor MAC address for 0x%08X" " is %pM, Gateway is 0x%08X \n", dst_ip, - neigh->ha, ntohl(rt->rt_gateway)); + neigh->ha, ntohl(rt->rt_gw4));
if (arpindex >= 0) { if (ether_addr_equal(nesadapter->arp_table[arpindex].mac_addr, neigh->ha)) { @@@ -3033,8 -3033,7 +3033,8 @@@ static int nes_disconnect(struct nes_q /* Need to free the Last Streaming Mode Message */ if (nesqp->ietf_frame) { if (nesqp->lsmm_mr) - nesibdev->ibdev.ops.dereg_mr(nesqp->lsmm_mr); + nesibdev->ibdev.ops.dereg_mr(nesqp->lsmm_mr, + NULL); pci_free_consistent(nesdev->pcidev, nesqp->private_data_len + nesqp->ietf_frame_size, nesqp->ietf_frame, nesqp->ietf_frame_pbase); diff --combined drivers/net/ethernet/aeroflex/greth.c index 3155f7fa83eb,7c5cf0224a70..90080a886cd9 --- a/drivers/net/ethernet/aeroflex/greth.c +++ b/drivers/net/ethernet/aeroflex/greth.c @@@ -613,6 -613,7 +613,6 @@@ static irqreturn_t greth_interrupt(int napi_schedule(&greth->napi); }
- mmiowb(); spin_unlock(&greth->devlock);
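This deletion, and the long run of matching bnx2x/bnxt hunks below, remove mmiowb() wholesale: as of this merge window, spin_unlock() itself guarantees that posted MMIO writes issued inside the critical section are ordered before the lock is observed as released, so the explicit barrier is dead code. A sketch of the resulting doorbell pattern (struct foo_priv is a hypothetical driver private):

	static void foo_ring_doorbell(struct foo_priv *priv, u32 val)
	{
		spin_lock(&priv->lock);
		writel(val, priv->doorbell);	/* posted MMIO write */
		/* no mmiowb() needed: the unlock orders the write */
		spin_unlock(&priv->lock);
	}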
return retval; @@@ -1458,7 -1459,7 +1458,7 @@@ static int greth_of_probe(struct platfo const u8 *addr;
addr = of_get_mac_address(ofdev->dev.of_node); - if (addr) { + if (!IS_ERR(addr)) { for (i = 0; i < 6; i++) macaddr[i] = (unsigned int) addr[i]; } else { diff --combined drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c index 0c8f5b546c6f,6012fe61735e..008ad0ca89ba --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c @@@ -1909,8 -1909,7 +1909,7 @@@ void bnx2x_netif_stop(struct bnx2x *bp }
u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb, - struct net_device *sb_dev, - select_queue_fallback_t fallback) + struct net_device *sb_dev) { struct bnx2x *bp = netdev_priv(dev);
@@@ -1932,7 -1931,7 +1931,7 @@@ }
/* select a non-FCoE queue */ - return fallback(dev, skb, NULL) % + return netdev_pick_tx(dev, skb, NULL) % (BNX2X_NUM_ETH_QUEUES(bp) * bp->max_cos); }
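The two-line change above (and the matching hfi1 vnic hunk earlier) follows the removal of select_queue_fallback_t from .ndo_select_queue: drivers now call netdev_pick_tx() directly for the default queue policy. A minimal sketch under the new three-argument signature (foo_select_queue is hypothetical):

	static u16 foo_select_queue(struct net_device *dev, struct sk_buff *skb,
				    struct net_device *sb_dev)
	{
		/* pin control traffic to queue 0, default policy otherwise */
		if (skb->priority == TC_PRIO_CONTROL)
			return 0;
		return netdev_pick_tx(dev, skb, sb_dev);
	}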
@@@ -4166,6 -4165,8 +4165,6 @@@ netdev_tx_t bnx2x_start_xmit(struct sk_
DOORBELL_RELAXED(bp, txdata->cid, txdata->tx_db.raw);
- mmiowb(); - txdata->tx_bd_prod += nbd;
if (unlikely(bnx2x_tx_avail(bp, txdata) < MAX_DESC_PER_TX_PKT)) { diff --combined drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h index 2d57af9c061c,7f8df08a7a4c..c2f6e44e9a3f --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h @@@ -498,8 -498,7 +498,7 @@@ int bnx2x_set_vf_spoofchk(struct net_de
/* select_queue callback */ u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb, - struct net_device *sb_dev, - select_queue_fallback_t fallback); + struct net_device *sb_dev);
static inline void bnx2x_update_rx_prod(struct bnx2x *bp, struct bnx2x_fastpath *fp, @@@ -527,6 -526,8 +526,6 @@@ REG_WR_RELAXED(bp, fp->ustorm_rx_prods_offset + i * 4, ((u32 *)&rx_prods)[i]);
- mmiowb(); /* keep prod updates ordered */ - DP(NETIF_MSG_RX_STATUS, "queue[%d]: wrote bd_prod %u cqe_prod %u sge_prod %u\n", fp->index, bd_prod, rx_comp_prod, rx_sge_prod); @@@ -651,6 -652,7 +650,6 @@@ static inline void bnx2x_igu_ack_sb_gen REG_WR(bp, igu_addr, cmd_data.sb_id_and_flags);
/* Make sure that ACK is written */ - mmiowb(); barrier(); }
@@@ -671,6 -673,7 +670,6 @@@ static inline void bnx2x_hc_ack_sb(stru REG_WR(bp, hc_addr, (*(u32 *)&igu_ack));
/* Make sure that ACK is written */ - mmiowb(); barrier(); }
diff --combined drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c index 3716c828ff5d,0d6c98a9e07b..03ac10b1cd1e --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c @@@ -869,6 -869,9 +869,6 @@@ static void bnx2x_hc_int_disable(struc "write %x to HC %d (addr 0x%x)\n", val, port, addr);
- /* flush all outstanding writes */ - mmiowb(); - REG_WR(bp, addr, val); if (REG_RD(bp, addr) != val) BNX2X_ERR("BUG! Proper val not read from IGU!\n"); @@@ -884,6 -887,9 +884,6 @@@ static void bnx2x_igu_int_disable(struc
DP(NETIF_MSG_IFDOWN, "write %x to IGU\n", val);
- /* flush all outstanding writes */ - mmiowb(); - REG_WR(bp, IGU_REG_PF_CONFIGURATION, val); if (REG_RD(bp, IGU_REG_PF_CONFIGURATION) != val) BNX2X_ERR("BUG! Proper val not read from IGU!\n"); @@@ -1589,6 -1595,7 +1589,6 @@@ static void bnx2x_hc_int_enable(struct /* * Ensure that HC_CONFIG is written before leading/trailing edge config */ - mmiowb(); barrier();
if (!CHIP_IS_E1(bp)) { @@@ -1604,6 -1611,9 +1604,6 @@@ REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val); REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val); } - - /* Make sure that interrupts are indeed enabled from here on */ - mmiowb(); }
static void bnx2x_igu_int_enable(struct bnx2x *bp) @@@ -1664,6 -1674,9 +1664,6 @@@
REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, val); REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, val); - - /* Make sure that interrupts are indeed enabled from here on */ - mmiowb(); }
void bnx2x_int_enable(struct bnx2x *bp) @@@ -3820,6 -3833,7 +3820,6 @@@ static void bnx2x_sp_prod_update(struc
REG_WR16_RELAXED(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func), bp->spq_prod_idx); - mmiowb(); }
/** @@@ -5230,6 -5244,7 +5230,6 @@@ static void bnx2x_update_eq_prod(struc { /* No memory barriers */ storm_memset_eq_prod(bp, prod, BP_FUNC(bp)); - mmiowb(); /* keep prod updates ordered */ }
static int bnx2x_cnic_handle_cfc_del(struct bnx2x *bp, u32 cid, @@@ -6498,6 -6513,7 +6498,6 @@@ void bnx2x_nic_init_cnic(struct bnx2x *
/* flush all */ mb(); - mmiowb(); }
void bnx2x_pre_irq_nic_init(struct bnx2x *bp) @@@ -6537,6 -6553,7 +6537,6 @@@ void bnx2x_post_irq_nic_init(struct bnx
/* flush all before enabling interrupts */ mb(); - mmiowb();
bnx2x_int_enable(bp);
@@@ -7758,10 -7775,12 +7758,10 @@@ void bnx2x_igu_clear_sb_gen(struct bnx2 DP(NETIF_MSG_HW, "write 0x%08x to IGU(via GRC) addr 0x%x\n", data, igu_addr_data); REG_WR(bp, igu_addr_data, data); - mmiowb(); barrier(); DP(NETIF_MSG_HW, "write 0x%08x to IGU(via GRC) addr 0x%x\n", ctl, igu_addr_ctl); REG_WR(bp, igu_addr_ctl, ctl); - mmiowb(); barrier();
/* wait for clean up to finish */ @@@ -9531,6 -9550,7 +9531,6 @@@ static void bnx2x_set_234_gates(struct
DP(NETIF_MSG_HW | NETIF_MSG_IFUP, "%s gates #2, #3 and #4\n", close ? "closing" : "opening"); - mmiowb(); }
#define SHARED_MF_CLP_MAGIC 0x80000000 /* `magic' bit */ @@@ -9654,6 -9674,7 +9654,6 @@@ static void bnx2x_pxp_prep(struct bnx2 if (!CHIP_IS_E1(bp)) { REG_WR(bp, PXP2_REG_RD_START_INIT, 0); REG_WR(bp, PXP2_REG_RQ_RBC_DONE, 0); - mmiowb(); } }
@@@ -9753,13 -9774,16 +9753,13 @@@ static void bnx2x_process_kill_chip_res reset_mask1 & (~not_reset_mask1));
barrier(); - mmiowb();
REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, reset_mask2 & (~stay_reset2));
barrier(); - mmiowb();
REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, reset_mask1); - mmiowb(); }
/** @@@ -9843,6 -9867,9 +9843,6 @@@ static int bnx2x_process_kill(struct bn REG_WR(bp, MISC_REG_UNPREPARED, 0); barrier();
- /* Make sure all is written to the chip before the reset */ - mmiowb(); - /* Wait for 1ms to empty GLUE and PCI-E core queues, * PSWHST, GRC and PSWRD Tetris buffer. */ @@@ -14801,6 -14828,7 +14801,6 @@@ static int bnx2x_drv_ctl(struct net_dev if (rc) break;
- mmiowb(); barrier();
/* Start accepting on iSCSI L2 ring */ @@@ -14835,6 -14863,7 +14835,6 @@@ if (!bnx2x_wait_sp_comp(bp, sp_bits)) BNX2X_ERR("rx_mode completion timed out!\n");
- mmiowb(); barrier();
/* Unset iSCSI L2 MAC */ @@@ -15347,27 -15376,47 +15347,47 @@@ static int bnx2x_enable_ptp_packets(str return 0; }
+ #define BNX2X_P2P_DETECT_PARAM_MASK 0x5F5 + #define BNX2X_P2P_DETECT_RULE_MASK 0x3DBB + #define BNX2X_PTP_TX_ON_PARAM_MASK (BNX2X_P2P_DETECT_PARAM_MASK & 0x6AA) + #define BNX2X_PTP_TX_ON_RULE_MASK (BNX2X_P2P_DETECT_RULE_MASK & 0x3EEE) + #define BNX2X_PTP_V1_L4_PARAM_MASK (BNX2X_P2P_DETECT_PARAM_MASK & 0x7EE) + #define BNX2X_PTP_V1_L4_RULE_MASK (BNX2X_P2P_DETECT_RULE_MASK & 0x3FFE) + #define BNX2X_PTP_V2_L4_PARAM_MASK (BNX2X_P2P_DETECT_PARAM_MASK & 0x7EA) + #define BNX2X_PTP_V2_L4_RULE_MASK (BNX2X_P2P_DETECT_RULE_MASK & 0x3FEE) + #define BNX2X_PTP_V2_L2_PARAM_MASK (BNX2X_P2P_DETECT_PARAM_MASK & 0x6BF) + #define BNX2X_PTP_V2_L2_RULE_MASK (BNX2X_P2P_DETECT_RULE_MASK & 0x3EFF) + #define BNX2X_PTP_V2_PARAM_MASK (BNX2X_P2P_DETECT_PARAM_MASK & 0x6AA) + #define BNX2X_PTP_V2_RULE_MASK (BNX2X_P2P_DETECT_RULE_MASK & 0x3EEE) + int bnx2x_configure_ptp_filters(struct bnx2x *bp) { int port = BP_PORT(bp); + u32 param, rule; int rc;
if (!bp->hwtstamp_ioctl_called) return 0;
+ param = port ? NIG_REG_P1_TLLH_PTP_PARAM_MASK : + NIG_REG_P0_TLLH_PTP_PARAM_MASK; + rule = port ? NIG_REG_P1_TLLH_PTP_RULE_MASK : + NIG_REG_P0_TLLH_PTP_RULE_MASK; switch (bp->tx_type) { case HWTSTAMP_TX_ON: bp->flags |= TX_TIMESTAMPING_EN; - REG_WR(bp, port ? NIG_REG_P1_TLLH_PTP_PARAM_MASK : - NIG_REG_P0_TLLH_PTP_PARAM_MASK, 0x6AA); - REG_WR(bp, port ? NIG_REG_P1_TLLH_PTP_RULE_MASK : - NIG_REG_P0_TLLH_PTP_RULE_MASK, 0x3EEE); + REG_WR(bp, param, BNX2X_PTP_TX_ON_PARAM_MASK); + REG_WR(bp, rule, BNX2X_PTP_TX_ON_RULE_MASK); break; case HWTSTAMP_TX_ONESTEP_SYNC: BNX2X_ERR("One-step timestamping is not supported\n"); return -ERANGE; }
+ param = port ? NIG_REG_P1_LLH_PTP_PARAM_MASK : + NIG_REG_P0_LLH_PTP_PARAM_MASK; + rule = port ? NIG_REG_P1_LLH_PTP_RULE_MASK : + NIG_REG_P0_LLH_PTP_RULE_MASK; switch (bp->rx_filter) { case HWTSTAMP_FILTER_NONE: break; @@@ -15381,30 -15430,24 +15401,24 @@@ case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ: bp->rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT; /* Initialize PTP detection for UDP/IPv4 events */ - REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_PARAM_MASK : - NIG_REG_P0_LLH_PTP_PARAM_MASK, 0x7EE); - REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_RULE_MASK : - NIG_REG_P0_LLH_PTP_RULE_MASK, 0x3FFE); + REG_WR(bp, param, BNX2X_PTP_V1_L4_PARAM_MASK); + REG_WR(bp, rule, BNX2X_PTP_V1_L4_RULE_MASK); break; case HWTSTAMP_FILTER_PTP_V2_L4_EVENT: case HWTSTAMP_FILTER_PTP_V2_L4_SYNC: case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ: bp->rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT; /* Initialize PTP detection for UDP/IPv4 or UDP/IPv6 events */ - REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_PARAM_MASK : - NIG_REG_P0_LLH_PTP_PARAM_MASK, 0x7EA); - REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_RULE_MASK : - NIG_REG_P0_LLH_PTP_RULE_MASK, 0x3FEE); + REG_WR(bp, param, BNX2X_PTP_V2_L4_PARAM_MASK); + REG_WR(bp, rule, BNX2X_PTP_V2_L4_RULE_MASK); break; case HWTSTAMP_FILTER_PTP_V2_L2_EVENT: case HWTSTAMP_FILTER_PTP_V2_L2_SYNC: case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ: bp->rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT; /* Initialize PTP detection L2 events */ - REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_PARAM_MASK : - NIG_REG_P0_LLH_PTP_PARAM_MASK, 0x6BF); - REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_RULE_MASK : - NIG_REG_P0_LLH_PTP_RULE_MASK, 0x3EFF); + REG_WR(bp, param, BNX2X_PTP_V2_L2_PARAM_MASK); + REG_WR(bp, rule, BNX2X_PTP_V2_L2_RULE_MASK);
break; case HWTSTAMP_FILTER_PTP_V2_EVENT: @@@ -15412,10 -15455,8 +15426,8 @@@ case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ: bp->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT; /* Initialize PTP detection L2, UDP/IPv4 or UDP/IPv6 events */ - REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_PARAM_MASK : - NIG_REG_P0_LLH_PTP_PARAM_MASK, 0x6AA); - REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_RULE_MASK : - NIG_REG_P0_LLH_PTP_RULE_MASK, 0x3EEE); + REG_WR(bp, param, BNX2X_PTP_V2_PARAM_MASK); + REG_WR(bp, rule, BNX2X_PTP_V2_RULE_MASK); break; }
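Note that the new BNX2X_PTP_*_MASK macros are not a pure rename: each one ANDs the previous literal with a P2P-detect filter, so the values written to the NIG PTP registers actually change (peer-to-peer delay messages are no longer matched). A standalone arithmetic check of two of the constants:

	#include <stdio.h>

	#define BNX2X_P2P_DETECT_PARAM_MASK 0x5F5
	#define BNX2X_P2P_DETECT_RULE_MASK  0x3DBB
	#define BNX2X_PTP_TX_ON_PARAM_MASK  (BNX2X_P2P_DETECT_PARAM_MASK & 0x6AA)
	#define BNX2X_PTP_TX_ON_RULE_MASK   (BNX2X_P2P_DETECT_RULE_MASK & 0x3EEE)

	int main(void)
	{
		/* old literal vs. the value the new macro programs */
		printf("TX_ON param: was 0x6AA, now 0x%03X\n",
		       BNX2X_PTP_TX_ON_PARAM_MASK);	/* prints 0x4A0 */
		printf("TX_ON rule:  was 0x3EEE, now 0x%04X\n",
		       BNX2X_PTP_TX_ON_RULE_MASK);	/* prints 0x3CAA */
		return 0;
	}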
diff --combined drivers/net/ethernet/broadcom/bnxt/bnxt.c index 2a4341708c0f,e2c022eff256..8314c00d7537 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@@ -114,6 -114,7 +114,7 @@@ enum board_idx BCM5745x_NPAR, BCM57508, BCM57504, + BCM57502, BCM58802, BCM58804, BCM58808, @@@ -158,6 -159,7 +159,7 @@@ static const struct [BCM5745x_NPAR] = { "Broadcom BCM5745x NetXtreme-E Ethernet Partition" }, [BCM57508] = { "Broadcom BCM57508 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb Ethernet" }, [BCM57504] = { "Broadcom BCM57504 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb Ethernet" }, + [BCM57502] = { "Broadcom BCM57502 NetXtreme-E 10Gb/25Gb/50Gb Ethernet" }, [BCM58802] = { "Broadcom BCM58802 NetXtreme-S 10Gb/25Gb/40Gb/50Gb Ethernet" }, [BCM58804] = { "Broadcom BCM58804 NetXtreme-S 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" }, [BCM58808] = { "Broadcom BCM58808 NetXtreme-S 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" }, @@@ -205,6 -207,7 +207,7 @@@ static const struct pci_device_id bnxt_ { PCI_VDEVICE(BROADCOM, 0x16f1), .driver_data = BCM57452 }, { PCI_VDEVICE(BROADCOM, 0x1750), .driver_data = BCM57508 }, { PCI_VDEVICE(BROADCOM, 0x1751), .driver_data = BCM57504 }, + { PCI_VDEVICE(BROADCOM, 0x1752), .driver_data = BCM57502 }, { PCI_VDEVICE(BROADCOM, 0xd802), .driver_data = BCM58802 }, { PCI_VDEVICE(BROADCOM, 0xd804), .driver_data = BCM58804 }, #ifdef CONFIG_BNXT_SRIOV @@@ -216,6 -219,7 +219,7 @@@ { PCI_VDEVICE(BROADCOM, 0x16dc), .driver_data = NETXTREME_E_VF }, { PCI_VDEVICE(BROADCOM, 0x16e1), .driver_data = NETXTREME_C_VF }, { PCI_VDEVICE(BROADCOM, 0x16e5), .driver_data = NETXTREME_C_VF }, + { PCI_VDEVICE(BROADCOM, 0x1806), .driver_data = NETXTREME_E_P5_VF }, { PCI_VDEVICE(BROADCOM, 0x1807), .driver_data = NETXTREME_E_P5_VF }, { PCI_VDEVICE(BROADCOM, 0xd800), .driver_data = NETXTREME_S_VF }, #endif @@@ -551,13 -555,15 +555,13 @@@ normal_tx prod = NEXT_TX(prod); txr->tx_prod = prod;
- if (!skb->xmit_more || netif_xmit_stopped(txq)) + if (!netdev_xmit_more() || netif_xmit_stopped(txq)) bnxt_db_write(bp, &txr->tx_db, prod);
tx_done:
- mmiowb(); - if (unlikely(bnxt_tx_avail(bp, txr) <= MAX_SKB_FRAGS + 1)) { - if (skb->xmit_more && !tx_buf->is_push) + if (netdev_xmit_more() && !tx_buf->is_push) bnxt_db_write(bp, &txr->tx_db, prod);
netif_tx_stop_queue(txq); @@@ -897,7 -903,7 +901,7 @@@ static struct sk_buff *bnxt_rx_page_skb DMA_ATTR_WEAK_ORDERING);
if (unlikely(!payload)) - payload = eth_get_headlen(data_ptr, len); + payload = eth_get_headlen(bp->dev, data_ptr, len);
skb = napi_alloc_skb(&rxr->bnapi->napi, payload); if (!skb) { @@@ -2132,6 -2138,7 +2136,6 @@@ static int bnxt_poll(struct napi_struc &dim_sample); net_dim(&cpr->dim, dim_sample); } - mmiowb(); return work_done; }
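The skb->xmit_more tests replaced in the bnxt TX path a few hunks up reflect the removal of the xmit_more bit from struct sk_buff: the "more packets queued" hint now lives in per-CPU softnet state and is read through netdev_xmit_more() at doorbell time. A condensed sketch of the batching idiom (foo_* names hypothetical):

	/* ring the TX doorbell only when the stack has flushed its batch
	 * or the queue is about to stop
	 */
	static void foo_maybe_kick(struct foo_tx_ring *txr,
				   struct netdev_queue *txq)
	{
		if (!netdev_xmit_more() || netif_xmit_stopped(txq))
			foo_db_write(txr);	/* hypothetical doorbell helper */
	}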
@@@ -3393,6 -3400,12 +3397,12 @@@ static void bnxt_free_port_stats(struc bp->hw_rx_port_stats_ext_map); bp->hw_rx_port_stats_ext = NULL; } + + if (bp->hw_pcie_stats) { + dma_free_coherent(&pdev->dev, sizeof(struct pcie_ctx_hw_stats), + bp->hw_pcie_stats, bp->hw_pcie_stats_map); + bp->hw_pcie_stats = NULL; + } }
static void bnxt_free_ring_stats(struct bnxt *bp) @@@ -3437,56 -3450,68 +3447,68 @@@ static int bnxt_alloc_stats(struct bnx cpr->hw_stats_ctx_id = INVALID_STATS_CTX_ID; }
- if (BNXT_PF(bp) && bp->chip_num != CHIP_NUM_58700) { - if (bp->hw_rx_port_stats) - goto alloc_ext_stats; + if (BNXT_VF(bp) || bp->chip_num == CHIP_NUM_58700) + return 0;
- bp->hw_port_stats_size = sizeof(struct rx_port_stats) + - sizeof(struct tx_port_stats) + 1024; + if (bp->hw_rx_port_stats) + goto alloc_ext_stats;
- bp->hw_rx_port_stats = - dma_alloc_coherent(&pdev->dev, bp->hw_port_stats_size, - &bp->hw_rx_port_stats_map, - GFP_KERNEL); - if (!bp->hw_rx_port_stats) - return -ENOMEM; + bp->hw_port_stats_size = sizeof(struct rx_port_stats) + + sizeof(struct tx_port_stats) + 1024;
- bp->hw_tx_port_stats = (void *)(bp->hw_rx_port_stats + 1) + - 512; - bp->hw_tx_port_stats_map = bp->hw_rx_port_stats_map + - sizeof(struct rx_port_stats) + 512; - bp->flags |= BNXT_FLAG_PORT_STATS; + bp->hw_rx_port_stats = + dma_alloc_coherent(&pdev->dev, bp->hw_port_stats_size, + &bp->hw_rx_port_stats_map, + GFP_KERNEL); + if (!bp->hw_rx_port_stats) + return -ENOMEM; + + bp->hw_tx_port_stats = (void *)(bp->hw_rx_port_stats + 1) + 512; + bp->hw_tx_port_stats_map = bp->hw_rx_port_stats_map + + sizeof(struct rx_port_stats) + 512; + bp->flags |= BNXT_FLAG_PORT_STATS;
alloc_ext_stats: - /* Display extended statistics only if FW supports it */ - if (bp->hwrm_spec_code < 0x10804 || - bp->hwrm_spec_code == 0x10900) + /* Display extended statistics only if FW supports it */ + if (bp->hwrm_spec_code < 0x10804 || bp->hwrm_spec_code == 0x10900) + if (!(bp->fw_cap & BNXT_FW_CAP_EXT_STATS_SUPPORTED)) return 0;
- if (bp->hw_rx_port_stats_ext) - goto alloc_tx_ext_stats; + if (bp->hw_rx_port_stats_ext) + goto alloc_tx_ext_stats;
- bp->hw_rx_port_stats_ext = - dma_alloc_coherent(&pdev->dev, - sizeof(struct rx_port_stats_ext), - &bp->hw_rx_port_stats_ext_map, - GFP_KERNEL); - if (!bp->hw_rx_port_stats_ext) - return 0; + bp->hw_rx_port_stats_ext = + dma_alloc_coherent(&pdev->dev, sizeof(struct rx_port_stats_ext), + &bp->hw_rx_port_stats_ext_map, GFP_KERNEL); + if (!bp->hw_rx_port_stats_ext) + return 0;
alloc_tx_ext_stats: - if (bp->hw_tx_port_stats_ext) - return 0; + if (bp->hw_tx_port_stats_ext) + goto alloc_pcie_stats;
- if (bp->hwrm_spec_code >= 0x10902) { - bp->hw_tx_port_stats_ext = - dma_alloc_coherent(&pdev->dev, - sizeof(struct tx_port_stats_ext), - &bp->hw_tx_port_stats_ext_map, - GFP_KERNEL); - } - bp->flags |= BNXT_FLAG_PORT_STATS_EXT; + if (bp->hwrm_spec_code >= 0x10902 || + (bp->fw_cap & BNXT_FW_CAP_EXT_STATS_SUPPORTED)) { + bp->hw_tx_port_stats_ext = + dma_alloc_coherent(&pdev->dev, + sizeof(struct tx_port_stats_ext), + &bp->hw_tx_port_stats_ext_map, + GFP_KERNEL); } + bp->flags |= BNXT_FLAG_PORT_STATS_EXT; + + alloc_pcie_stats: + if (bp->hw_pcie_stats || + !(bp->fw_cap & BNXT_FW_CAP_PCIE_STATS_SUPPORTED)) + return 0; + + bp->hw_pcie_stats = + dma_alloc_coherent(&pdev->dev, sizeof(struct pcie_ctx_hw_stats), + &bp->hw_pcie_stats_map, GFP_KERNEL); + if (!bp->hw_pcie_stats) + return 0; + + bp->flags |= BNXT_FLAG_PCIE_STATS; return 0; }
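The bnxt_alloc_stats() rework above flattens nested PF-only conditionals into early returns plus a label chain, so each optional block (port stats, extended Rx/Tx stats, and the new PCIe stats) is allocated at most once and only when the firmware advertises it. A toy model of that goto ladder (hypothetical capability bits and sizes, shape only):

#include <stdio.h>
#include <stdlib.h>

#define CAP_EXT_STATS  0x1
#define CAP_PCIE_STATS 0x2

static void *ext_stats, *pcie_stats;

static int alloc_stats(unsigned int caps)
{
    if (ext_stats)
        goto alloc_pcie_stats;
    if (!(caps & CAP_EXT_STATS))
        return 0;
    ext_stats = calloc(1, 64);
    if (!ext_stats)
        return 0;           /* optional block: not fatal, as in the driver */

alloc_pcie_stats:
    if (pcie_stats || !(caps & CAP_PCIE_STATS))
        return 0;
    pcie_stats = calloc(1, 64);
    return 0;
}

int main(void)
{
    alloc_stats(CAP_EXT_STATS | CAP_PCIE_STATS);
    alloc_stats(CAP_EXT_STATS | CAP_PCIE_STATS); /* idempotent on re-entry */
    printf("ext=%p pcie=%p\n", ext_stats, pcie_stats);
    free(ext_stats);
    free(pcie_stats);
    return 0;
}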
@@@ -4205,16 -4230,25 +4227,25 @@@ static int bnxt_hwrm_cfa_ntuple_filter_ static int bnxt_hwrm_cfa_ntuple_filter_alloc(struct bnxt *bp, struct bnxt_ntuple_filter *fltr) { - struct bnxt_vnic_info *vnic = &bp->vnic_info[fltr->rxq + 1]; struct hwrm_cfa_ntuple_filter_alloc_input req = {0}; struct hwrm_cfa_ntuple_filter_alloc_output *resp; struct flow_keys *keys = &fltr->fkeys; + struct bnxt_vnic_info *vnic; + u32 dst_ena = 0; int rc = 0;
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_NTUPLE_FILTER_ALLOC, -1, -1); req.l2_filter_id = bp->vnic_info[0].fw_l2_filter_id[fltr->l2_fltr_idx];
- req.enables = cpu_to_le32(BNXT_NTP_FLTR_FLAGS); + if (bp->fw_cap & BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX) { + dst_ena = CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_RFS_RING_TBL_IDX; + req.rfs_ring_tbl_idx = cpu_to_le16(fltr->rxq); + vnic = &bp->vnic_info[0]; + } else { + vnic = &bp->vnic_info[fltr->rxq + 1]; + } + req.dst_id = cpu_to_le16(vnic->fw_vnic_id); + req.enables = cpu_to_le32(BNXT_NTP_FLTR_FLAGS | dst_ena);
req.ethertype = htons(ETH_P_IP); memcpy(req.src_macaddr, fltr->src_mac_addr, ETH_ALEN); @@@ -4252,7 -4286,6 +4283,6 @@@ req.dst_port = keys->ports.dst; req.dst_port_mask = cpu_to_be16(0xffff);
- req.dst_id = cpu_to_le16(vnic->fw_vnic_id); mutex_lock(&bp->hwrm_cmd_lock); rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); if (!rc) { @@@ -5500,11 -5533,13 +5530,13 @@@ static bool bnxt_need_reserve_rings(str stat = bnxt_get_func_stat_ctxs(bp); if (BNXT_NEW_RM(bp) && (hw_resc->resv_rx_rings != rx || hw_resc->resv_cp_rings != cp || - hw_resc->resv_irqs < nq || hw_resc->resv_vnics != vnic || - hw_resc->resv_stat_ctxs != stat || + hw_resc->resv_vnics != vnic || hw_resc->resv_stat_ctxs != stat || (hw_resc->resv_hw_ring_grps != grp && !(bp->flags & BNXT_FLAG_CHIP_P5)))) return true; + if ((bp->flags & BNXT_FLAG_CHIP_P5) && BNXT_PF(bp) && + hw_resc->resv_irqs != nq) + return true; return false; }
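In the ntuple-filter hunk above, firmware that reports BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX lets a filter name its destination Rx ring directly via rfs_ring_tbl_idx against the default VNIC, instead of requiring one VNIC per ring (vnic_info[rxq + 1]); this is also what later enables aRFS on P5 chips in bnxt_rfs_supported(). A sketch of that destination selection (simplified request struct; the enables constant is a stand-in, not the real HWRM value):

#include <stdint.h>
#include <stdio.h>

#define FW_CAP_RFS_RING_TBL_IDX  0x1   /* stand-in capability bit */
#define ENABLES_RFS_RING_TBL_IDX 0x80  /* stand-in enables bit */

struct req {
    uint32_t enables;
    uint16_t rfs_ring_tbl_idx;
    uint16_t dst_id;
};

static void fill_dst(struct req *req, uint32_t fw_cap, uint16_t rxq,
                     const uint16_t *vnic_ids)
{
    uint32_t dst_ena = 0;

    if (fw_cap & FW_CAP_RFS_RING_TBL_IDX) {
        dst_ena = ENABLES_RFS_RING_TBL_IDX;
        req->rfs_ring_tbl_idx = rxq;     /* steer by ring, single VNIC */
        req->dst_id = vnic_ids[0];       /* default VNIC */
    } else {
        req->dst_id = vnic_ids[rxq + 1]; /* legacy: one VNIC per ring */
    }
    req->enables |= dst_ena;
}

int main(void)
{
    uint16_t vnics[] = { 10, 11, 12, 13 };
    struct req a = { 0 }, b = { 0 };

    fill_dst(&a, FW_CAP_RFS_RING_TBL_IDX, 2, vnics);
    fill_dst(&b, 0, 2, vnics);
    printf("new fw: dst_id=%u tbl_idx=%u; old fw: dst_id=%u\n",
           a.dst_id, a.rfs_ring_tbl_idx, b.dst_id);
    return 0;
}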
@@@ -6053,6 -6088,8 +6085,8 @@@ static int bnxt_hwrm_func_backing_store ctx->tqm_entries_multiple = 1; ctx->mrav_max_entries = le32_to_cpu(resp->mrav_max_entries); ctx->mrav_entry_size = le16_to_cpu(resp->mrav_entry_size); + ctx->mrav_num_entries_units = + le16_to_cpu(resp->mrav_num_entries_units); ctx->tim_entry_size = le16_to_cpu(resp->tim_entry_size); ctx->tim_max_entries = le32_to_cpu(resp->tim_max_entries); } else { @@@ -6099,6 -6136,7 +6133,7 @@@ static int bnxt_hwrm_func_backing_store struct bnxt_ctx_pg_info *ctx_pg; __le32 *num_entries; __le64 *pg_dir; + u32 flags = 0; u8 *pg_attr; int i, rc; u32 ena; @@@ -6158,6 -6196,9 +6193,9 @@@ if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_MRAV) { ctx_pg = &ctx->mrav_mem; req.mrav_num_entries = cpu_to_le32(ctx_pg->entries); + if (ctx->mrav_num_entries_units) + flags |= + FUNC_BACKING_STORE_CFG_REQ_FLAGS_MRAV_RESERVATION_SPLIT; req.mrav_entry_size = cpu_to_le16(ctx->mrav_entry_size); bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, &req.mrav_pg_size_mrav_lvl, @@@ -6184,6 -6225,7 +6222,7 @@@ *num_entries = cpu_to_le32(ctx_pg->entries); bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, pg_attr, pg_dir); } + req.flags = cpu_to_le32(flags); rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); if (rc) rc = -EIO; @@@ -6322,6 -6364,7 +6361,7 @@@ static int bnxt_alloc_ctx_mem(struct bn struct bnxt_ctx_pg_info *ctx_pg; struct bnxt_ctx_mem_info *ctx; u32 mem_size, ena, entries; + u32 num_mr, num_ah; u32 extra_srqs = 0; u32 extra_qps = 0; u8 pg_lvl = 1; @@@ -6385,12 -6428,21 +6425,21 @@@ goto skip_rdma;
ctx_pg = &ctx->mrav_mem; - ctx_pg->entries = extra_qps * 4; + /* 128K extra is needed to accommodate static AH context + * allocation by f/w. + */ + num_mr = 1024 * 256; + num_ah = 1024 * 128; + ctx_pg->entries = num_mr + num_ah; mem_size = ctx->mrav_entry_size * ctx_pg->entries; rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 2); if (rc) return rc; ena = FUNC_BACKING_STORE_CFG_REQ_ENABLES_MRAV; + if (ctx->mrav_num_entries_units) + ctx_pg->entries = + ((num_mr / ctx->mrav_num_entries_units) << 16) | + (num_ah / ctx->mrav_num_entries_units);
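When the firmware reports mrav_num_entries_units, the MRAV entry count written above stops being a plain count: the high 16 bits carry MR entries and the low 16 bits carry AH entries, both expressed in those units. A standalone check of that packing (the units value of 64 is an assumption for illustration):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint32_t num_mr = 1024 * 256, num_ah = 1024 * 128, units = 64;
    uint32_t entries = ((num_mr / units) << 16) | (num_ah / units);

    printf("encoded:  0x%08x\n", entries);
    printf("mr units: %u, ah units: %u\n", entries >> 16, entries & 0xffff);
    return 0;
}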
ctx_pg = &ctx->tim_mem; ctx_pg->entries = ctx->qp_mem.entries; @@@ -6505,6 -6557,10 +6554,10 @@@ static int __bnxt_hwrm_func_qcaps(struc bp->flags |= BNXT_FLAG_ROCEV1_CAP; if (flags & FUNC_QCAPS_RESP_FLAGS_ROCE_V2_SUPPORTED) bp->flags |= BNXT_FLAG_ROCEV2_CAP; + if (flags & FUNC_QCAPS_RESP_FLAGS_PCIE_STATS_SUPPORTED) + bp->fw_cap |= BNXT_FW_CAP_PCIE_STATS_SUPPORTED; + if (flags & FUNC_QCAPS_RESP_FLAGS_EXT_STATS_SUPPORTED) + bp->fw_cap |= BNXT_FW_CAP_EXT_STATS_SUPPORTED;
bp->tx_push_thresh = 0; if (flags & FUNC_QCAPS_RESP_FLAGS_PUSH_MODE_SUPPORTED) @@@ -6577,6 -6633,34 +6630,34 @@@ static int bnxt_hwrm_func_qcaps(struct return 0; }
+ static int bnxt_hwrm_cfa_adv_flow_mgnt_qcaps(struct bnxt *bp) + { + struct hwrm_cfa_adv_flow_mgnt_qcaps_input req = {0}; + struct hwrm_cfa_adv_flow_mgnt_qcaps_output *resp; + int rc = 0; + u32 flags; + + if (!(bp->fw_cap & BNXT_FW_CAP_CFA_ADV_FLOW)) + return 0; + + resp = bp->hwrm_cmd_resp_addr; + bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_ADV_FLOW_MGNT_QCAPS, -1, -1); + + mutex_lock(&bp->hwrm_cmd_lock); + rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + if (rc) + goto hwrm_cfa_adv_qcaps_exit; + + flags = le32_to_cpu(resp->flags); + if (flags & + CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_RFS_RING_TBL_IDX_SUPPORTED) + bp->fw_cap |= BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX; + + hwrm_cfa_adv_qcaps_exit: + mutex_unlock(&bp->hwrm_cmd_lock); + return rc; + } + static int bnxt_hwrm_func_reset(struct bnxt *bp) { struct hwrm_func_reset_input req = {0}; @@@ -6668,6 -6752,15 +6749,15 @@@ static int bnxt_hwrm_ver_get(struct bnx resp->hwrm_fw_maj_8b, resp->hwrm_fw_min_8b, resp->hwrm_fw_bld_8b, resp->hwrm_fw_rsvd_8b);
+ if (strlen(resp->active_pkg_name)) { + int fw_ver_len = strlen(bp->fw_ver_str); + + snprintf(bp->fw_ver_str + fw_ver_len, + FW_VER_STR_LEN - fw_ver_len - 1, "/pkg %s", + resp->active_pkg_name); + bp->fw_cap |= BNXT_FW_CAP_PKG_VER; + } + bp->hwrm_cmd_timeout = le16_to_cpu(resp->def_req_timeout); if (!bp->hwrm_cmd_timeout) bp->hwrm_cmd_timeout = DFLT_HWRM_CMD_TIMEOUT; @@@ -6700,6 -6793,10 +6790,10 @@@ VER_GET_RESP_DEV_CAPS_CFG_TRUSTED_VF_SUPPORTED) bp->fw_cap |= BNXT_FW_CAP_TRUSTED_VF;
+ if (dev_caps_cfg & + VER_GET_RESP_DEV_CAPS_CFG_CFA_ADV_FLOW_MGNT_SUPPORTED) + bp->fw_cap |= BNXT_FW_CAP_CFA_ADV_FLOW; + hwrm_ver_get_exit: mutex_unlock(&bp->hwrm_cmd_lock); return rc; @@@ -6805,6 -6902,19 +6899,19 @@@ static int bnxt_hwrm_port_qstats_ext(st return rc; }
+ static int bnxt_hwrm_pcie_qstats(struct bnxt *bp) + { + struct hwrm_pcie_qstats_input req = {0}; + + if (!(bp->flags & BNXT_FLAG_PCIE_STATS)) + return 0; + + bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PCIE_QSTATS, -1, -1); + req.pcie_stat_size = cpu_to_le16(sizeof(struct pcie_ctx_hw_stats)); + req.pcie_stat_host_addr = cpu_to_le64(bp->hw_pcie_stats_map); + return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + } + static void bnxt_hwrm_free_tunnel_ports(struct bnxt *bp) { if (bp->vxlan_port_cnt) { @@@ -8652,7 -8762,7 +8759,7 @@@ static int bnxt_hwrm_port_phy_read(stru req.port_id = cpu_to_le16(bp->pf.port_id); req.phy_addr = phy_addr; req.reg_addr = cpu_to_le16(reg & 0x1f); - if (bp->link_info.support_speeds & BNXT_LINK_SPEED_MSK_10GB) { + if (mdio_phy_id_is_c45(phy_addr)) { req.cl45_mdio = 1; req.phy_addr = mdio_phy_id_prtad(phy_addr); req.dev_addr = mdio_phy_id_devad(phy_addr); @@@ -8679,7 -8789,7 +8786,7 @@@ static int bnxt_hwrm_port_phy_write(str req.port_id = cpu_to_le16(bp->pf.port_id); req.phy_addr = phy_addr; req.reg_addr = cpu_to_le16(reg & 0x1f); - if (bp->link_info.support_speeds & BNXT_LINK_SPEED_MSK_10GB) { + if (mdio_phy_id_is_c45(phy_addr)) { req.cl45_mdio = 1; req.phy_addr = mdio_phy_id_prtad(phy_addr); req.dev_addr = mdio_phy_id_devad(phy_addr); @@@ -8997,8 -9107,11 +9104,11 @@@ static bool bnxt_can_reserve_rings(stru /* If the chip and firmware supports RFS */ static bool bnxt_rfs_supported(struct bnxt *bp) { - if (bp->flags & BNXT_FLAG_CHIP_P5) + if (bp->flags & BNXT_FLAG_CHIP_P5) { + if (bp->fw_cap & BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX) + return true; return false; + } if (BNXT_PF(bp) && !BNXT_CHIP_TYPE_NITRO_A0(bp)) return true; if (bp->flags & BNXT_FLAG_NEW_RSS_CAP) @@@ -9013,7 -9126,7 +9123,7 @@@ static bool bnxt_rfs_capable(struct bnx int vnics, max_vnics, max_rss_ctxs;
if (bp->flags & BNXT_FLAG_CHIP_P5) - return false; + return bnxt_rfs_supported(bp); if (!(bp->flags & BNXT_FLAG_MSIX_CAP) || !bnxt_can_reserve_rings(bp)) return false;
@@@ -9395,6 -9508,7 +9505,7 @@@ static void bnxt_sp_task(struct work_st if (test_and_clear_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event)) { bnxt_hwrm_port_qstats(bp); bnxt_hwrm_port_qstats_ext(bp); + bnxt_hwrm_pcie_qstats(bp); }
if (test_and_clear_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event)) { @@@ -10065,23 -10179,6 +10176,6 @@@ static int bnxt_bridge_setlink(struct n return rc; }
- static int bnxt_get_phys_port_name(struct net_device *dev, char *buf, - size_t len) - { - struct bnxt *bp = netdev_priv(dev); - int rc; - - /* The PF and it's VF-reps only support the switchdev framework */ - if (!BNXT_PF(bp)) - return -EOPNOTSUPP; - - rc = snprintf(buf, len, "p%d", bp->pf.port_id); - - if (rc >= len) - return -EOPNOTSUPP; - return 0; - } - int bnxt_get_port_parent_id(struct net_device *dev, struct netdev_phys_item_id *ppid) { @@@ -10100,6 -10197,13 +10194,13 @@@ return 0; }
+ static struct devlink_port *bnxt_get_devlink_port(struct net_device *dev) + { + struct bnxt *bp = netdev_priv(dev); + + return &bp->dl_port; + } + static const struct net_device_ops bnxt_netdev_ops = { .ndo_open = bnxt_open, .ndo_start_xmit = bnxt_start_xmit, @@@ -10131,8 -10235,7 +10232,7 @@@ .ndo_bpf = bnxt_xdp, .ndo_bridge_getlink = bnxt_bridge_getlink, .ndo_bridge_setlink = bnxt_bridge_setlink, - .ndo_get_port_parent_id = bnxt_get_port_parent_id, - .ndo_get_phys_port_name = bnxt_get_phys_port_name + .ndo_get_devlink_port = bnxt_get_devlink_port, };
static void bnxt_remove_one(struct pci_dev *pdev) @@@ -10456,6 -10559,26 +10556,26 @@@ static int bnxt_init_mac_addr(struct bn return rc; }
+ static int bnxt_pcie_dsn_get(struct bnxt *bp, u8 dsn[]) + { + struct pci_dev *pdev = bp->pdev; + int pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_DSN); + u32 dw; + + if (!pos) { + netdev_info(bp->dev, "Unable to read adapter's DSN"); + return -EOPNOTSUPP; + } + + /* DSN (two dw) is at an offset of 4 from the cap pos */ + pos += 4; + pci_read_config_dword(pdev, pos, &dw); + put_unaligned_le32(dw, &dsn[0]); + pci_read_config_dword(pdev, pos + 4, &dw); + put_unaligned_le32(dw, &dsn[4]); + return 0; + } + static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) { static int version_printed; @@@ -10589,6 -10712,12 +10709,12 @@@ rc = -1; goto init_err_pci_clean; } + + rc = bnxt_hwrm_cfa_adv_flow_mgnt_qcaps(bp); + if (rc) + netdev_warn(bp->dev, "hwrm query adv flow mgnt failure rc: %d\n", + rc); + + rc = bnxt_init_mac_addr(bp); if (rc) { dev_err(&pdev->dev, "Unable to initialize mac address.\n"); @@@ -10596,6 -10725,11 +10722,11 @@@ goto init_err_pci_clean; }
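bnxt_pcie_dsn_get() above reads the PCIe Device Serial Number extended capability: two little-endian config-space dwords starting 4 bytes past the capability header, stored as an 8-byte switch_id. A userspace model of the byte layout (put_le32 mirrors the kernel's put_unaligned_le32; the dword values are made up):

#include <stdint.h>
#include <stdio.h>

static void put_le32(uint32_t v, uint8_t *p)
{
    p[0] = v; p[1] = v >> 8; p[2] = v >> 16; p[3] = v >> 24;
}

int main(void)
{
    uint8_t dsn[8];

    put_le32(0x11223344, &dsn[0]);   /* first config dword at cap + 4 */
    put_le32(0x55667788, &dsn[4]);   /* second config dword at cap + 8 */

    for (int i = 0; i < 8; i++)
        printf("%02x%s", dsn[i], i == 7 ? "\n" : ":");
    return 0;
}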
+ /* Read the adapter's DSN to use as the eswitch switch_id */ + rc = bnxt_pcie_dsn_get(bp, bp->switch_id); + if (rc) + goto init_err_pci_clean; + bnxt_hwrm_func_qcfg(bp); bnxt_hwrm_vnic_qcaps(bp); bnxt_hwrm_port_led_qcaps(bp); diff --combined drivers/net/ethernet/broadcom/tg3.c index 2aebd4bbb67d,664fedf0cd80..6d1f9c822548 --- a/drivers/net/ethernet/broadcom/tg3.c +++ b/drivers/net/ethernet/broadcom/tg3.c @@@ -1073,6 -1073,7 +1073,6 @@@ static void tg3_int_reenable(struct tg3 struct tg3 *tp = tnapi->tp;
tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24); - mmiowb();
/* When doing tagged status, this work check is unnecessary. * The last_tag we write above tells the chip which piece of @@@ -6998,6 -6999,7 +6998,6 @@@ next_pkt_nopost tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx); } - mmiowb(); } else if (work_mask) { /* rx_std_buffers[] and rx_jmb_buffers[] entries must be * updated before the producer indices can be updated. @@@ -7208,6 -7210,8 +7208,6 @@@ static int tg3_poll_work(struct tg3_nap tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, dpr->rx_jmb_prod_idx);
- mmiowb(); - if (err) tw32_f(HOSTCC_MODE, tp->coal_now); } @@@ -7274,6 -7278,7 +7274,6 @@@ static int tg3_poll_msix(struct napi_st HOSTCC_MODE_ENABLE | tnapi->coal_now); } - mmiowb(); break; } } @@@ -8151,9 -8156,10 +8151,9 @@@ static netdev_tx_t tg3_start_xmit(struc netif_tx_wake_queue(txq); }
- if (!skb->xmit_more || netif_xmit_stopped(txq)) { + if (!netdev_xmit_more() || netif_xmit_stopped(txq)) { /* Packets are ready, update Tx producer idx on card. */ tw32_tx_mbox(tnapi->prodmbox, entry); - mmiowb(); }
return NETDEV_TX_OK; @@@ -12757,9 -12763,6 +12757,6 @@@ static int tg3_set_phys_id(struct net_d { struct tg3 *tp = netdev_priv(dev);
- if (!netif_running(tp->dev)) - return -EAGAIN; - switch (state) { case ETHTOOL_ID_ACTIVE: return 1; /* cycle on/off once per second */ diff --combined drivers/net/ethernet/intel/e1000/e1000_main.c index 466bf1ea186d,6f72ab139fd9..551de8c2fef2 --- a/drivers/net/ethernet/intel/e1000/e1000_main.c +++ b/drivers/net/ethernet/intel/e1000/e1000_main.c @@@ -820,7 -820,7 +820,7 @@@ static int e1000_set_features(struct ne else e1000_reset(adapter);
- return 0; + return 1; }
static const struct net_device_ops e1000_netdev_ops = { @@@ -3267,9 -3267,14 +3267,9 @@@ static netdev_tx_t e1000_xmit_frame(str /* Make sure there is space in the ring for the next send. */ e1000_maybe_stop_tx(netdev, tx_ring, desc_needed);
- if (!skb->xmit_more || + if (!netdev_xmit_more() || netif_xmit_stopped(netdev_get_tx_queue(netdev, 0))) { writel(tx_ring->next_to_use, hw->hw_addr + tx_ring->tdt); - /* we need this if more than one processor can write to - * our tail at a time, it synchronizes IO on IA64/Altix - * systems - */ - mmiowb(); } } else { dev_kfree_skb_any(skb); diff --combined drivers/net/ethernet/intel/e1000e/netdev.c index 022c3ac0e40f,a8fa4a1628f5..0e09bede42a2 --- a/drivers/net/ethernet/intel/e1000e/netdev.c +++ b/drivers/net/ethernet/intel/e1000e/netdev.c @@@ -3816,6 -3816,7 +3816,6 @@@ static void e1000_flush_tx_ring(struct if (tx_ring->next_to_use == tx_ring->count) tx_ring->next_to_use = 0; ew32(TDT(0), tx_ring->next_to_use); - mmiowb(); usleep_range(200, 250); }
@@@ -5896,13 -5897,19 +5896,13 @@@ static netdev_tx_t e1000_xmit_frame(str DIV_ROUND_UP(PAGE_SIZE, adapter->tx_fifo_limit) + 2));
- if (!skb->xmit_more || + if (!netdev_xmit_more() || netif_xmit_stopped(netdev_get_tx_queue(netdev, 0))) { if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA) e1000e_update_tdt_wa(tx_ring, tx_ring->next_to_use); else writel(tx_ring->next_to_use, tx_ring->tail); - - /* we need this if more than one processor can write - * to our tail at a time, it synchronizes IO on - *IA64/Altix systems - */ - mmiowb(); } } else { dev_kfree_skb_any(skb); @@@ -6996,7 -7003,7 +6996,7 @@@ static int e1000_set_features(struct ne else e1000e_reset(adapter);
- return 0; + return 1; }
static const struct net_device_ops e1000e_netdev_ops = { @@@ -7343,7 -7350,7 +7343,7 @@@ static int e1000_probe(struct pci_dev *
dev_pm_set_driver_flags(&pdev->dev, DPM_FLAG_NEVER_SKIP);
- if (pci_dev_run_wake(pdev)) + if (pci_dev_run_wake(pdev) && hw->mac.type < e1000_pch_cnp) pm_runtime_put_noidle(&pdev->dev);
return 0; diff --combined drivers/net/ethernet/intel/fm10k/fm10k_main.c index cbf76a96e94e,b4d970e44163..90270b4a1682 --- a/drivers/net/ethernet/intel/fm10k/fm10k_main.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_main.c @@@ -280,7 -280,7 +280,7 @@@ static bool fm10k_add_rx_frag(struct fm /* we need the header to contain the greater of either ETH_HLEN or * 60 bytes if the skb->len is less than 60 for skb_pad. */ - pull_len = eth_get_headlen(va, FM10K_RX_HDR_LEN); + pull_len = eth_get_headlen(skb->dev, va, FM10K_RX_HDR_LEN);
/* align pull length to size of long to optimize memcpy performance */ memcpy(__skb_put(skb, pull_len), va, ALIGN(pull_len, sizeof(long))); @@@ -1037,8 -1037,13 +1037,8 @@@ static void fm10k_tx_map(struct fm10k_r fm10k_maybe_stop_tx(tx_ring, DESC_NEEDED);
/* notify HW of packet */ - if (netif_xmit_stopped(txring_txq(tx_ring)) || !skb->xmit_more) { + if (netif_xmit_stopped(txring_txq(tx_ring)) || !netdev_xmit_more()) { writel(i, tx_ring->tail); - - /* we need this if more than one processor can write to our tail - * at a time, it synchronizes IO on IA64/Altix systems - */ - mmiowb(); }
return; diff --combined drivers/net/ethernet/intel/i40e/i40e_txrx.c index ffb611bbedfa,e1931701cd7e..20a283702c9f --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c @@@ -2035,7 -2035,8 +2035,8 @@@ static struct sk_buff *i40e_construct_s /* Determine available headroom for copy */ headlen = size; if (headlen > I40E_RX_HDR_SIZE) - headlen = eth_get_headlen(xdp->data, I40E_RX_HDR_SIZE); + headlen = eth_get_headlen(skb->dev, xdp->data, + I40E_RX_HDR_SIZE);
/* align pull length to size of long to optimize memcpy performance */ memcpy(__skb_put(skb, headlen), xdp->data, @@@ -3469,8 -3470,13 +3470,8 @@@ static inline int i40e_tx_map(struct i4 first->next_to_watch = tx_desc;
/* notify HW of packet */ - if (netif_xmit_stopped(txring_txq(tx_ring)) || !skb->xmit_more) { + if (netif_xmit_stopped(txring_txq(tx_ring)) || !netdev_xmit_more()) { writel(i, tx_ring->tail); - - /* we need this if more than one processor can write to our tail - * at a time, it synchronizes IO on IA64/Altix systems - */ - mmiowb(); }
return 0; diff --combined drivers/net/ethernet/intel/iavf/iavf_txrx.c index 6bfef82e7607,cf8be63a8a4f..06d1509d57f7 --- a/drivers/net/ethernet/intel/iavf/iavf_txrx.c +++ b/drivers/net/ethernet/intel/iavf/iavf_txrx.c @@@ -1315,7 -1315,7 +1315,7 @@@ static struct sk_buff *iavf_construct_s /* Determine available headroom for copy */ headlen = size; if (headlen > IAVF_RX_HDR_SIZE) - headlen = eth_get_headlen(va, IAVF_RX_HDR_SIZE); + headlen = eth_get_headlen(skb->dev, va, IAVF_RX_HDR_SIZE);
/* align pull length to size of long to optimize memcpy performance */ memcpy(__skb_put(skb, headlen), va, ALIGN(headlen, sizeof(long))); @@@ -2358,8 -2358,13 +2358,8 @@@ static inline void iavf_tx_map(struct i first->next_to_watch = tx_desc;
/* notify HW of packet */ - if (netif_xmit_stopped(txring_txq(tx_ring)) || !skb->xmit_more) { + if (netif_xmit_stopped(txring_txq(tx_ring)) || !netdev_xmit_more()) { writel(i, tx_ring->tail); - - /* we need this if more than one processor can write to our tail - * at a time, it synchronizes IO on IA64/Altix systems - */ - mmiowb(); }
return; diff --combined drivers/net/ethernet/intel/ice/ice_txrx.c index 1af21bbe180e,30f9060c8b3f..2364eaf33d23 --- a/drivers/net/ethernet/intel/ice/ice_txrx.c +++ b/drivers/net/ethernet/intel/ice/ice_txrx.c @@@ -6,6 -6,7 +6,7 @@@ #include <linux/prefetch.h> #include <linux/mm.h> #include "ice.h" + #include "ice_dcb_lib.h"
#define ICE_RX_HDR_SIZE 256
@@@ -100,8 -101,8 +101,8 @@@ void ice_free_tx_ring(struct ice_ring * * * Returns true if there's any budget left (e.g. the clean is finished) */ - static bool ice_clean_tx_irq(struct ice_vsi *vsi, struct ice_ring *tx_ring, - int napi_budget) + static bool + ice_clean_tx_irq(struct ice_vsi *vsi, struct ice_ring *tx_ring, int napi_budget) { unsigned int total_bytes = 0, total_pkts = 0; unsigned int budget = vsi->work_lmt; @@@ -236,9 -237,9 +237,9 @@@ int ice_setup_tx_ring(struct ice_ring * if (!tx_ring->tx_buf) return -ENOMEM;
- /* round up to nearest 4K */ + /* round up to nearest page */ tx_ring->size = ALIGN(tx_ring->count * sizeof(struct ice_tx_desc), - 4096); + PAGE_SIZE); tx_ring->desc = dmam_alloc_coherent(dev, tx_ring->size, &tx_ring->dma, GFP_KERNEL); if (!tx_ring->desc) { @@@ -282,8 -283,17 +283,17 @@@ void ice_clean_rx_ring(struct ice_ring if (!rx_buf->page) continue;
- dma_unmap_page(dev, rx_buf->dma, PAGE_SIZE, DMA_FROM_DEVICE); - __free_pages(rx_buf->page, 0); + /* Invalidate cache lines that may have been written to by + * device so that we avoid corrupting memory. + */ + dma_sync_single_range_for_cpu(dev, rx_buf->dma, + rx_buf->page_offset, + ICE_RXBUF_2048, DMA_FROM_DEVICE); + + /* free resources associated with mapping */ + dma_unmap_page_attrs(dev, rx_buf->dma, PAGE_SIZE, + DMA_FROM_DEVICE, ICE_RX_DMA_ATTR); + __page_frag_cache_drain(rx_buf->page, rx_buf->pagecnt_bias);
rx_buf->page = NULL; rx_buf->page_offset = 0; @@@ -339,9 -349,9 +349,9 @@@ int ice_setup_rx_ring(struct ice_ring * if (!rx_ring->rx_buf) return -ENOMEM;
- /* round up to nearest 4K */ - rx_ring->size = rx_ring->count * sizeof(union ice_32byte_rx_desc); - rx_ring->size = ALIGN(rx_ring->size, 4096); + /* round up to nearest page */ + rx_ring->size = ALIGN(rx_ring->count * sizeof(union ice_32byte_rx_desc), + PAGE_SIZE); rx_ring->desc = dmam_alloc_coherent(dev, rx_ring->size, &rx_ring->dma, GFP_KERNEL); if (!rx_ring->desc) { @@@ -389,8 -399,8 +399,8 @@@ static void ice_release_rx_desc(struct * Returns true if the page was successfully allocated or * reused. */ - static bool ice_alloc_mapped_page(struct ice_ring *rx_ring, - struct ice_rx_buf *bi) + static bool + ice_alloc_mapped_page(struct ice_ring *rx_ring, struct ice_rx_buf *bi) { struct page *page = bi->page; dma_addr_t dma; @@@ -409,7 -419,8 +419,8 @@@ }
/* map page for use */ - dma = dma_map_page(rx_ring->dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE); + dma = dma_map_page_attrs(rx_ring->dev, page, 0, PAGE_SIZE, + DMA_FROM_DEVICE, ICE_RX_DMA_ATTR);
/* if mapping failed free memory back to system since * there isn't much point in holding memory we can't use @@@ -423,6 -434,8 +434,8 @@@ bi->dma = dma; bi->page = page; bi->page_offset = 0; + page_ref_add(page, USHRT_MAX - 1); + bi->pagecnt_bias = USHRT_MAX;
return true; } @@@ -444,7 -457,7 +457,7 @@@ bool ice_alloc_rx_bufs(struct ice_ring if (!rx_ring->netdev || !cleaned_count) return false;
- /* get the RX descriptor and buffer based on next_to_use */ + /* get the Rx descriptor and buffer based on next_to_use */ rx_desc = ICE_RX_DESC(rx_ring, ntu); bi = &rx_ring->rx_buf[ntu];
@@@ -452,6 -465,12 +465,12 @@@ if (!ice_alloc_mapped_page(rx_ring, bi)) goto no_bufs;
+ /* sync the buffer for use by the device */ + dma_sync_single_range_for_device(rx_ring->dev, bi->dma, + bi->page_offset, + ICE_RXBUF_2048, + DMA_FROM_DEVICE); + /* Refresh the desc even if buffer_addrs didn't change * because each write-back erases this info. */ @@@ -497,61 -516,43 +516,43 @@@ static bool ice_page_is_reserved(struc }
/** - * ice_add_rx_frag - Add contents of Rx buffer to sk_buff - * @rx_buf: buffer containing page to add - * @rx_desc: descriptor containing length of buffer written by hardware - * @skb: sk_buf to place the data into - * - * This function will add the data contained in rx_buf->page to the skb. - * This is done either through a direct copy if the data in the buffer is - * less than the skb header size, otherwise it will just attach the page as - * a frag to the skb. + * ice_rx_buf_adjust_pg_offset - Prepare Rx buffer for reuse + * @rx_buf: Rx buffer to adjust + * @size: Size of adjustment * - * The function will then update the page offset if necessary and return - * true if the buffer can be reused by the adapter. + * Update the offset within page so that Rx buf will be ready to be reused. + * For systems with PAGE_SIZE < 8192 this function will flip the page offset + * so the second half of page assigned to Rx buffer will be used, otherwise + * the offset is moved by the @size bytes */ - static bool ice_add_rx_frag(struct ice_rx_buf *rx_buf, - union ice_32b_rx_flex_desc *rx_desc, - struct sk_buff *skb) + static void + ice_rx_buf_adjust_pg_offset(struct ice_rx_buf *rx_buf, unsigned int size) { #if (PAGE_SIZE < 8192) - unsigned int truesize = ICE_RXBUF_2048; + /* flip page offset to other buffer */ + rx_buf->page_offset ^= size; #else - unsigned int last_offset = PAGE_SIZE - ICE_RXBUF_2048; - unsigned int truesize; - #endif /* PAGE_SIZE < 8192) */ - - struct page *page; - unsigned int size; - - size = le16_to_cpu(rx_desc->wb.pkt_len) & - ICE_RX_FLX_DESC_PKT_LEN_M; - - page = rx_buf->page; + /* move offset up to the next cache line */ + rx_buf->page_offset += size; + #endif + }
+ /** + * ice_can_reuse_rx_page - Determine if page can be reused for another Rx + * @rx_buf: buffer containing the page + * + * If page is reusable, we have a green light for calling ice_reuse_rx_page, + * which will assign the current buffer to the buffer that next_to_alloc is + * pointing to; otherwise, the DMA mapping needs to be destroyed and + * page freed + */ + static bool ice_can_reuse_rx_page(struct ice_rx_buf *rx_buf) + { #if (PAGE_SIZE >= 8192) - truesize = ALIGN(size, L1_CACHE_BYTES); - #endif /* PAGE_SIZE >= 8192) */ - - /* will the data fit in the skb we allocated? if so, just - * copy it as it is pretty small anyway - */ - if (size <= ICE_RX_HDR_SIZE && !skb_is_nonlinear(skb)) { - unsigned char *va = page_address(page) + rx_buf->page_offset; - - memcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long))); - - /* page is not reserved, we can reuse buffer as-is */ - if (likely(!ice_page_is_reserved(page))) - return true; - - /* this page cannot be reused so discard it */ - __free_pages(page, 0); - return false; - } - - skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, - rx_buf->page_offset, size, truesize); + unsigned int last_offset = PAGE_SIZE - ICE_RXBUF_2048; + #endif + unsigned int pagecnt_bias = rx_buf->pagecnt_bias; + struct page *page = rx_buf->page;
/* avoid re-using remote pages */ if (unlikely(ice_page_is_reserved(page))) @@@ -559,36 -560,61 +560,61 @@@
#if (PAGE_SIZE < 8192) /* if we are only owner of page we can reuse it */ - if (unlikely(page_count(page) != 1)) + if (unlikely((page_count(page) - pagecnt_bias) > 1)) return false; - - /* flip page offset to other buffer */ - rx_buf->page_offset ^= truesize; #else - /* move offset up to the next cache line */ - rx_buf->page_offset += truesize; - if (rx_buf->page_offset > last_offset) return false; #endif /* PAGE_SIZE < 8192) */
- /* Even if we own the page, we are not allowed to use atomic_set() - * This would break get_page_unless_zero() users. + /* If we have drained the page fragment pool we need to update + * the pagecnt_bias and page count so that we fully restock the + * number of references the driver holds. */ - get_page(rx_buf->page); + if (unlikely(pagecnt_bias == 1)) { + page_ref_add(page, USHRT_MAX - 1); + rx_buf->pagecnt_bias = USHRT_MAX; + }
return true; }
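The pagecnt_bias scheme the ice driver adopts here (following i40e and igb) pre-charges the page refcount once with page_ref_add(USHRT_MAX - 1) and then pays for buffers with a plain, non-atomic decrement of the local bias; a page remains recyclable while at most one reference is held outside the driver. A userspace model (the refcount field stands in for the real struct-page count):

#include <limits.h>
#include <stdio.h>

struct rx_buf {
    unsigned int refcount;      /* stands in for page_count(page) */
    unsigned int pagecnt_bias;  /* references still owned by the driver */
};

static int can_reuse(const struct rx_buf *b)
{
    /* mirror of: (page_count(page) - pagecnt_bias) > 1 -> not reusable */
    return (b->refcount - b->pagecnt_bias) <= 1;
}

int main(void)
{
    struct rx_buf b = { .refcount = 1 + (USHRT_MAX - 1),
                        .pagecnt_bias = USHRT_MAX };

    b.pagecnt_bias--;                       /* half page handed to an skb */
    printf("one half in flight : reuse=%d\n", can_reuse(&b));

    b.pagecnt_bias--;                       /* second half handed out too */
    printf("both halves in use : reuse=%d\n", can_reuse(&b));

    b.refcount--;                           /* stack freed one skb frag */
    printf("after one skb free : reuse=%d\n", can_reuse(&b));
    return 0;
}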
/** + * ice_add_rx_frag - Add contents of Rx buffer to sk_buff as a frag + * @rx_buf: buffer containing page to add + * @skb: sk_buff to place the data into + * @size: packet length from rx_desc + * + * This function will add the data contained in rx_buf->page to the skb. + * It will just attach the page as a frag to the skb. + * The function will then update the page offset. + */ + static void + ice_add_rx_frag(struct ice_rx_buf *rx_buf, struct sk_buff *skb, + unsigned int size) + { + #if (PAGE_SIZE >= 8192) + unsigned int truesize = SKB_DATA_ALIGN(size); + #else + unsigned int truesize = ICE_RXBUF_2048; + #endif + + skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buf->page, + rx_buf->page_offset, size, truesize); + + /* page is being used so we must update the page offset */ + ice_rx_buf_adjust_pg_offset(rx_buf, truesize); + } + + /** * ice_reuse_rx_page - page flip buffer and store it back on the ring * @rx_ring: Rx descriptor ring to store buffers on * @old_buf: donor buffer to have page reused * * Synchronizes page for reuse by the adapter */ - static void ice_reuse_rx_page(struct ice_ring *rx_ring, - struct ice_rx_buf *old_buf) + static void + ice_reuse_rx_page(struct ice_ring *rx_ring, struct ice_rx_buf *old_buf) { u16 nta = rx_ring->next_to_alloc; struct ice_rx_buf *new_buf; @@@ -599,121 -625,132 +625,132 @@@ nta++; rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;
- /* transfer page from old buffer to new buffer */ - *new_buf = *old_buf; + /* Transfer page from old buffer to new buffer. + * Move each member individually to avoid possible store + * forwarding stalls and unnecessary copy of skb. + */ + new_buf->dma = old_buf->dma; + new_buf->page = old_buf->page; + new_buf->page_offset = old_buf->page_offset; + new_buf->pagecnt_bias = old_buf->pagecnt_bias; }
/** - * ice_fetch_rx_buf - Allocate skb and populate it + * ice_get_rx_buf - Fetch Rx buffer and synchronize data for use * @rx_ring: Rx descriptor ring to transact packets on - * @rx_desc: descriptor containing info written by hardware + * @skb: skb to be used + * @size: size of buffer to add to skb * - * This function allocates an skb on the fly, and populates it with the page - * data from the current receive descriptor, taking care to set up the skb - * correctly, as well as handling calling the page recycle function if - * necessary. + * This function will pull an Rx buffer from the ring and synchronize it + * for use by the CPU. */ - static struct sk_buff *ice_fetch_rx_buf(struct ice_ring *rx_ring, - union ice_32b_rx_flex_desc *rx_desc) + static struct ice_rx_buf * + ice_get_rx_buf(struct ice_ring *rx_ring, struct sk_buff **skb, + const unsigned int size) { struct ice_rx_buf *rx_buf; - struct sk_buff *skb; - struct page *page;
rx_buf = &rx_ring->rx_buf[rx_ring->next_to_clean]; - page = rx_buf->page; - prefetchw(page); + prefetchw(rx_buf->page); + *skb = rx_buf->skb; + + /* we are reusing so sync this buffer for CPU use */ + dma_sync_single_range_for_cpu(rx_ring->dev, rx_buf->dma, + rx_buf->page_offset, size, + DMA_FROM_DEVICE);
- skb = rx_buf->skb; + /* We have pulled a buffer for use, so decrement pagecnt_bias */ + rx_buf->pagecnt_bias--;
- if (likely(!skb)) { - u8 *page_addr = page_address(page) + rx_buf->page_offset; + return rx_buf; + }
- /* prefetch first cache line of first page */ - prefetch(page_addr); + /** + * ice_construct_skb - Allocate skb and populate it + * @rx_ring: Rx descriptor ring to transact packets on + * @rx_buf: Rx buffer to pull data from + * @size: the length of the packet + * + * This function allocates an skb. It then populates it with the page + * data from the current receive descriptor, taking care to set up the + * skb correctly. + */ + static struct sk_buff * + ice_construct_skb(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf, + unsigned int size) + { + void *va = page_address(rx_buf->page) + rx_buf->page_offset; + unsigned int headlen; + struct sk_buff *skb; + + /* prefetch first cache line of first page */ + prefetch(va); #if L1_CACHE_BYTES < 128 - prefetch((void *)(page_addr + L1_CACHE_BYTES)); + prefetch((u8 *)va + L1_CACHE_BYTES); #endif /* L1_CACHE_BYTES */
- /* allocate a skb to store the frags */ - skb = __napi_alloc_skb(&rx_ring->q_vector->napi, - ICE_RX_HDR_SIZE, - GFP_ATOMIC | __GFP_NOWARN); - if (unlikely(!skb)) { - rx_ring->rx_stats.alloc_buf_failed++; - return NULL; - } - - /* we will be copying header into skb->data in - * pskb_may_pull so it is in our interest to prefetch - * it now to avoid a possible cache miss - */ - prefetchw(skb->data); + /* allocate a skb to store the frags */ + skb = __napi_alloc_skb(&rx_ring->q_vector->napi, ICE_RX_HDR_SIZE, + GFP_ATOMIC | __GFP_NOWARN); + if (unlikely(!skb)) + return NULL;
- skb_record_rx_queue(skb, rx_ring->q_index); - } else { - /* we are reusing so sync this buffer for CPU use */ - dma_sync_single_range_for_cpu(rx_ring->dev, rx_buf->dma, - rx_buf->page_offset, - ICE_RXBUF_2048, - DMA_FROM_DEVICE); + skb_record_rx_queue(skb, rx_ring->q_index); + /* Determine available headroom for copy */ + headlen = size; + if (headlen > ICE_RX_HDR_SIZE) + headlen = eth_get_headlen(skb->dev, va, ICE_RX_HDR_SIZE);
- rx_buf->skb = NULL; - } + /* align pull length to size of long to optimize memcpy performance */ + memcpy(__skb_put(skb, headlen), va, ALIGN(headlen, sizeof(long)));
- /* pull page into skb */ - if (ice_add_rx_frag(rx_buf, rx_desc, skb)) { - /* hand second half of page back to the ring */ - ice_reuse_rx_page(rx_ring, rx_buf); - rx_ring->rx_stats.page_reuse_count++; + /* if we exhaust the linear part then add what is left as a frag */ + size -= headlen; + if (size) { + #if (PAGE_SIZE >= 8192) + unsigned int truesize = SKB_DATA_ALIGN(size); + #else + unsigned int truesize = ICE_RXBUF_2048; + #endif + skb_add_rx_frag(skb, 0, rx_buf->page, + rx_buf->page_offset + headlen, size, truesize); + /* buffer is used by skb, update page_offset */ + ice_rx_buf_adjust_pg_offset(rx_buf, truesize); } else { - /* we are not reusing the buffer so unmap it */ - dma_unmap_page(rx_ring->dev, rx_buf->dma, PAGE_SIZE, - DMA_FROM_DEVICE); + /* buffer is unused, reset bias back to rx_buf; data was copied + * onto skb's linear part so there's no need for adjusting + * page offset and we can reuse this buffer as-is + */ + rx_buf->pagecnt_bias++; }
- /* clear contents of buffer_info */ - rx_buf->page = NULL; - return skb; }
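ice_construct_skb() above copies at most ICE_RX_HDR_SIZE bytes into the skb's linear area, letting eth_get_headlen() find the real header length, and attaches whatever remains as a page frag. A model of just the size arithmetic, skipping the flow-dissector step that the real headlen computation performs:

#include <stdio.h>

#define ICE_RX_HDR_SIZE 256

int main(void)
{
    unsigned int sizes[] = { 60, 256, 1514 };

    for (int i = 0; i < 3; i++) {
        unsigned int size = sizes[i];
        unsigned int headlen = size > ICE_RX_HDR_SIZE ? ICE_RX_HDR_SIZE : size;
        /* align pull length to sizeof(long), as the driver's memcpy does */
        unsigned int copy = (headlen + sizeof(long) - 1) & ~(sizeof(long) - 1);

        printf("pkt %4u: copy %3u to linear, %4u left as frag\n",
               size, copy, size - headlen);
    }
    return 0;
}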
/** - * ice_pull_tail - ice specific version of skb_pull_tail - * @skb: pointer to current skb being adjusted + * ice_put_rx_buf - Clean up used buffer and either recycle or free + * @rx_ring: Rx descriptor ring to transact packets on + * @rx_buf: Rx buffer to pull data from * - * This function is an ice specific version of __pskb_pull_tail. The - * main difference between this version and the original function is that - * this function can make several assumptions about the state of things - * that allow for significant optimizations versus the standard function. - * As a result we can do things like drop a frag and maintain an accurate - * truesize for the skb. + * This function will clean up the contents of the rx_buf. It will + * either recycle the buffer or unmap it and free the associated resources. */ - static void ice_pull_tail(struct sk_buff *skb) + static void ice_put_rx_buf(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf) { - struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0]; - unsigned int pull_len; - unsigned char *va; - - /* it is valid to use page_address instead of kmap since we are - * working with pages allocated out of the lomem pool per - * alloc_page(GFP_ATOMIC) - */ - va = skb_frag_address(frag); - - /* we need the header to contain the greater of either ETH_HLEN or - * 60 bytes if the skb->len is less than 60 for skb_pad. - */ - pull_len = eth_get_headlen(va, ICE_RX_HDR_SIZE); - - /* align pull length to size of long to optimize memcpy performance */ - skb_copy_to_linear_data(skb, va, ALIGN(pull_len, sizeof(long))); + /* hand second half of page back to the ring */ + if (ice_can_reuse_rx_page(rx_buf)) { + ice_reuse_rx_page(rx_ring, rx_buf); + rx_ring->rx_stats.page_reuse_count++; + } else { + /* we are not reusing the buffer so unmap it */ + dma_unmap_page_attrs(rx_ring->dev, rx_buf->dma, PAGE_SIZE, + DMA_FROM_DEVICE, ICE_RX_DMA_ATTR); + __page_frag_cache_drain(rx_buf->page, rx_buf->pagecnt_bias); + }
- /* update all of the pointers */ - skb_frag_size_sub(frag, pull_len); - frag->page_offset += pull_len; - skb->data_len -= pull_len; - skb->tail += pull_len; + /* clear contents of buffer_info */ + rx_buf->page = NULL; + rx_buf->skb = NULL; }
/** @@@ -730,10 -767,6 +767,6 @@@ */ static bool ice_cleanup_headers(struct sk_buff *skb) { - /* place header in linear portion of buffer */ - if (skb_is_nonlinear(skb)) - ice_pull_tail(skb); - /* if eth_skb_pad returns an error the skb was freed */ if (eth_skb_pad(skb)) return true; @@@ -751,8 -784,8 +784,8 @@@ * The status_error_len doesn't need to be shifted because it begins * at offset zero. */ - static bool ice_test_staterr(union ice_32b_rx_flex_desc *rx_desc, - const u16 stat_err_bits) + static bool + ice_test_staterr(union ice_32b_rx_flex_desc *rx_desc, const u16 stat_err_bits) { return !!(rx_desc->wb.status_error0 & cpu_to_le16(stat_err_bits)); @@@ -769,9 -802,9 +802,9 @@@ * sk_buff in the next buffer to be chained and return true indicating * that this is in fact a non-EOP buffer. */ - static bool ice_is_non_eop(struct ice_ring *rx_ring, - union ice_32b_rx_flex_desc *rx_desc, - struct sk_buff *skb) + static bool + ice_is_non_eop(struct ice_ring *rx_ring, union ice_32b_rx_flex_desc *rx_desc, + struct sk_buff *skb) { u32 ntc = rx_ring->next_to_clean + 1;
@@@ -838,8 -871,9 +871,9 @@@ ice_rx_hash(struct ice_ring *rx_ring, u * * skb->protocol must be set before this function is called */ - static void ice_rx_csum(struct ice_vsi *vsi, struct sk_buff *skb, - union ice_32b_rx_flex_desc *rx_desc, u8 ptype) + static void + ice_rx_csum(struct ice_vsi *vsi, struct sk_buff *skb, + union ice_32b_rx_flex_desc *rx_desc, u8 ptype) { struct ice_rx_ptype_decoded decoded; u32 rx_error, rx_status; @@@ -909,9 -943,10 +943,10 @@@ checksum_fail * order to populate the hash, checksum, VLAN, protocol, and * other fields within the skb. */ - static void ice_process_skb_fields(struct ice_ring *rx_ring, - union ice_32b_rx_flex_desc *rx_desc, - struct sk_buff *skb, u8 ptype) + static void + ice_process_skb_fields(struct ice_ring *rx_ring, + union ice_32b_rx_flex_desc *rx_desc, + struct sk_buff *skb, u8 ptype) { ice_rx_hash(rx_ring, rx_desc, skb, ptype);
@@@ -925,18 -960,17 +960,17 @@@ * ice_receive_skb - Send a completed packet up the stack * @rx_ring: Rx ring in play * @skb: packet to send up - * @vlan_tag: vlan tag for packet + * @vlan_tag: VLAN tag for packet * * This function sends the completed packet (via. skb) up the stack using - * gro receive functions (with/without vlan tag) + * gro receive functions (with/without VLAN tag) */ - static void ice_receive_skb(struct ice_ring *rx_ring, struct sk_buff *skb, - u16 vlan_tag) + static void + ice_receive_skb(struct ice_ring *rx_ring, struct sk_buff *skb, u16 vlan_tag) { if ((rx_ring->netdev->features & NETIF_F_HW_VLAN_CTAG_RX) && - (vlan_tag & VLAN_VID_MASK)) { + (vlan_tag & VLAN_VID_MASK)) __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag); - } napi_gro_receive(&rx_ring->q_vector->napi, skb); }
@@@ -958,10 -992,12 +992,12 @@@ static int ice_clean_rx_irq(struct ice_ u16 cleaned_count = ICE_DESC_UNUSED(rx_ring); bool failure = false;
- /* start the loop to process RX packets bounded by 'budget' */ + /* start the loop to process Rx packets bounded by 'budget' */ while (likely(total_rx_pkts < (unsigned int)budget)) { union ice_32b_rx_flex_desc *rx_desc; + struct ice_rx_buf *rx_buf; struct sk_buff *skb; + unsigned int size; u16 stat_err_bits; u16 vlan_tag = 0; u8 rx_ptype; @@@ -973,7 -1009,7 +1009,7 @@@ cleaned_count = 0; }
- /* get the RX desc from RX ring based on 'next_to_clean' */ + /* get the Rx desc from Rx ring based on 'next_to_clean' */ rx_desc = ICE_RX_DESC(rx_ring, rx_ring->next_to_clean);
/* status_error_len will always be zero for unused descriptors @@@ -991,11 -1027,24 +1027,24 @@@ */ dma_rmb();
+ size = le16_to_cpu(rx_desc->wb.pkt_len) & + ICE_RX_FLX_DESC_PKT_LEN_M; + + rx_buf = ice_get_rx_buf(rx_ring, &skb, size); /* allocate (if needed) and populate skb */ - skb = ice_fetch_rx_buf(rx_ring, rx_desc); - if (!skb) + if (skb) + ice_add_rx_frag(rx_buf, skb, size); + else + skb = ice_construct_skb(rx_ring, rx_buf, size); + + /* exit if we failed to retrieve a buffer */ + if (!skb) { + rx_ring->rx_stats.alloc_buf_failed++; + rx_buf->pagecnt_bias++; break; + }
+ ice_put_rx_buf(rx_ring, rx_buf); cleaned_count++;
/* skip if it is NOP desc */ @@@ -1049,17 -1098,247 +1098,247 @@@ }
/** + * ice_adjust_itr_by_size_and_speed - Adjust ITR based on current traffic + * @port_info: port_info structure containing the current link speed + * @avg_pkt_size: average size of Tx or Rx packets based on clean routine + * @itr: itr value to update + * + * Calculate how big of an increment should be applied to the ITR value passed + * in based on wmem_default, SKB overhead, Ethernet overhead, and the current + * link speed. + * + * The following is a calculation derived from: + * wmem_default / (size + overhead) = desired_pkts_per_int + * rate / bits_per_byte / (size + Ethernet overhead) = pkt_rate + * (desired_pkt_rate / pkt_rate) * usecs_per_sec = ITR value + * + * Assuming wmem_default is 212992 and overhead is 640 bytes per + * packet, (256 skb, 64 headroom, 320 shared info), we can reduce the + * formula down to: + * + * wmem_default * bits_per_byte * usecs_per_sec pkt_size + 24 + * ITR = -------------------------------------------- * -------------- + * rate pkt_size + 640 + */ + static unsigned int + ice_adjust_itr_by_size_and_speed(struct ice_port_info *port_info, + unsigned int avg_pkt_size, + unsigned int itr) + { + switch (port_info->phy.link_info.link_speed) { + case ICE_AQ_LINK_SPEED_100GB: + itr += DIV_ROUND_UP(17 * (avg_pkt_size + 24), + avg_pkt_size + 640); + break; + case ICE_AQ_LINK_SPEED_50GB: + itr += DIV_ROUND_UP(34 * (avg_pkt_size + 24), + avg_pkt_size + 640); + break; + case ICE_AQ_LINK_SPEED_40GB: + itr += DIV_ROUND_UP(43 * (avg_pkt_size + 24), + avg_pkt_size + 640); + break; + case ICE_AQ_LINK_SPEED_25GB: + itr += DIV_ROUND_UP(68 * (avg_pkt_size + 24), + avg_pkt_size + 640); + break; + case ICE_AQ_LINK_SPEED_20GB: + itr += DIV_ROUND_UP(85 * (avg_pkt_size + 24), + avg_pkt_size + 640); + break; + case ICE_AQ_LINK_SPEED_10GB: + /* fall through */ + default: + itr += DIV_ROUND_UP(170 * (avg_pkt_size + 24), + avg_pkt_size + 640); + break; + } + + if ((itr & ICE_ITR_MASK) > ICE_ITR_ADAPTIVE_MAX_USECS) { + itr &= ICE_ITR_ADAPTIVE_LATENCY; + itr += ICE_ITR_ADAPTIVE_MAX_USECS; + } + + return itr; + } + + /** + * ice_update_itr - update the adaptive ITR value based on statistics + * @q_vector: structure containing interrupt and ring information + * @rc: structure containing ring performance data + * + * Stores a new ITR value based on packets and byte + * counts during the last interrupt. The advantage of per interrupt + * computation is faster updates and more accurate ITR for the current + * traffic pattern. Constants in this function were computed + * based on theoretical maximum wire speed and thresholds were set based + * on testing data as well as attempting to minimize response time + * while increasing bulk throughput. + */ + static void + ice_update_itr(struct ice_q_vector *q_vector, struct ice_ring_container *rc) + { + unsigned long next_update = jiffies; + unsigned int packets, bytes, itr; + bool container_is_rx; + + if (!rc->ring || !ITR_IS_DYNAMIC(rc->itr_setting)) + return; + + /* If itr_countdown is set it means we programmed an ITR within + * the last 4 interrupt cycles. This has a side effect of us + * potentially firing an early interrupt. In order to work around + * this we need to throw out any data received for a few + * interrupts following the update. + */ + if (q_vector->itr_countdown) { + itr = rc->target_itr; + goto clear_counts; + } + + container_is_rx = (&q_vector->rx == rc); + /* For Rx we want to push the delay up and default to low latency. + * for Tx we want to pull the delay down and default to high latency. + */ + itr = container_is_rx ? 
+ ICE_ITR_ADAPTIVE_MIN_USECS | ICE_ITR_ADAPTIVE_LATENCY : + ICE_ITR_ADAPTIVE_MAX_USECS | ICE_ITR_ADAPTIVE_LATENCY; + + /* If we didn't update within up to 1 - 2 jiffies we can assume + * that either packets are coming in so slow there hasn't been + * any work, or that there is so much work that NAPI is dealing + * with interrupt moderation and we don't need to do anything. + */ + if (time_after(next_update, rc->next_update)) + goto clear_counts; + + packets = rc->total_pkts; + bytes = rc->total_bytes; + + if (container_is_rx) { + /* If Rx there are 1 to 4 packets and bytes are less than + * 9000 assume insufficient data to use bulk rate limiting + * approach unless Tx is already in bulk rate limiting. We + * are likely latency driven. + */ + if (packets && packets < 4 && bytes < 9000 && + (q_vector->tx.target_itr & ICE_ITR_ADAPTIVE_LATENCY)) { + itr = ICE_ITR_ADAPTIVE_LATENCY; + goto adjust_by_size_and_speed; + } + } else if (packets < 4) { + /* If we have Tx and Rx ITR maxed and Tx ITR is running in + * bulk mode and we are receiving 4 or fewer packets just + * reset the ITR_ADAPTIVE_LATENCY bit for latency mode so + * that the Rx can relax. + */ + if (rc->target_itr == ICE_ITR_ADAPTIVE_MAX_USECS && + (q_vector->rx.target_itr & ICE_ITR_MASK) == + ICE_ITR_ADAPTIVE_MAX_USECS) + goto clear_counts; + } else if (packets > 32) { + /* If we have processed over 32 packets in a single interrupt + * for Tx assume we need to switch over to "bulk" mode. + */ + rc->target_itr &= ~ICE_ITR_ADAPTIVE_LATENCY; + } + + /* We have no packets to actually measure against. This means + * either one of the other queues on this vector is active or + * we are a Tx queue doing TSO with too high of an interrupt rate. + * + * Between 4 and 56 we can assume that our current interrupt delay + * is only slightly too low. As such we should increase it by a small + * fixed amount. + */ + if (packets < 56) { + itr = rc->target_itr + ICE_ITR_ADAPTIVE_MIN_INC; + if ((itr & ICE_ITR_MASK) > ICE_ITR_ADAPTIVE_MAX_USECS) { + itr &= ICE_ITR_ADAPTIVE_LATENCY; + itr += ICE_ITR_ADAPTIVE_MAX_USECS; + } + goto clear_counts; + } + + if (packets <= 256) { + itr = min(q_vector->tx.current_itr, q_vector->rx.current_itr); + itr &= ICE_ITR_MASK; + + /* Between 56 and 112 is our "goldilocks" zone where we are + * working out "just right". Just report that our current + * ITR is good for us. + */ + if (packets <= 112) + goto clear_counts; + + /* If packet count is 128 or greater we are likely looking + * at a slight overrun of the delay we want. Try halving + * our delay to see if that will cut the number of packets + * in half per interrupt. + */ + itr >>= 1; + itr &= ICE_ITR_MASK; + if (itr < ICE_ITR_ADAPTIVE_MIN_USECS) + itr = ICE_ITR_ADAPTIVE_MIN_USECS; + + goto clear_counts; + } + + /* The paths below assume we are dealing with a bulk ITR since + * number of packets is greater than 256. We are just going to have + * to compute a value and try to bring the count under control, + * though for smaller packet sizes there isn't much we can do as + * NAPI polling will likely be kicking in sooner rather than later. 
+ */ + itr = ICE_ITR_ADAPTIVE_BULK; + + adjust_by_size_and_speed: + + /* based on checks above packets cannot be 0 so division is safe */ + itr = ice_adjust_itr_by_size_and_speed(q_vector->vsi->port_info, + bytes / packets, itr); + + clear_counts: + /* write back value */ + rc->target_itr = itr; + + /* next update should occur within next jiffy */ + rc->next_update = next_update + 1; + + rc->total_bytes = 0; + rc->total_pkts = 0; + } + + /** * ice_buildreg_itr - build value for writing to the GLINT_DYN_CTL register * @itr_idx: interrupt throttling index - * @reg_itr: interrupt throttling value adjusted based on ITR granularity + * @itr: interrupt throttling value in usecs */ - static u32 ice_buildreg_itr(int itr_idx, u16 reg_itr) + static u32 ice_buildreg_itr(u16 itr_idx, u16 itr) { + /* The itr value is reported in microseconds, and the register value is + * recorded in 2 microsecond units. For this reason we only need to + * shift by the GLINT_DYN_CTL_INTERVAL_S - ICE_ITR_GRAN_S to apply this + * granularity as a shift instead of division. The mask makes sure the + * ITR value is never odd so we don't accidentally write into the field + * prior to the ITR field. + */ + itr &= ICE_ITR_MASK; + return GLINT_DYN_CTL_INTENA_M | GLINT_DYN_CTL_CLEARPBA_M | (itr_idx << GLINT_DYN_CTL_ITR_INDX_S) | - (reg_itr << GLINT_DYN_CTL_INTERVAL_S); + (itr << (GLINT_DYN_CTL_INTERVAL_S - ICE_ITR_GRAN_S)); }
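The per-speed constants in ice_adjust_itr_by_size_and_speed() above fall out of the comment's formula: wmem_default (212992 bytes) times 8 bits times 10^6 usec/sec, divided by the link rate and rounded to the nearest integer, then scaled by (size + 24)/(size + 640) for per-packet overhead; e.g. 212992 * 8e6 / 10e9 is roughly 170 for 10GbE. A standalone reproduction of that arithmetic (the MAX_USECS clamp is omitted):

#include <stdio.h>

/* increment = DIV_ROUND_UP(base * (avg_pkt + 24), avg_pkt + 640), where
 * base = round(212992 * 8 bits * 1e6 usec / link rate); this reproduces
 * the driver's 170/85/68/43/34/17 table. */
static unsigned int itr_inc(unsigned long long rate_bps, unsigned int avg_pkt)
{
    unsigned long long base =
        (212992ULL * 8 * 1000000 + rate_bps / 2) / rate_bps;
    unsigned long long num = base * (avg_pkt + 24);

    return (unsigned int)((num + avg_pkt + 640 - 1) / (avg_pkt + 640));
}

int main(void)
{
    const unsigned long long rates[] = {
        10000000000ULL, 20000000000ULL, 25000000000ULL,
        40000000000ULL, 50000000000ULL, 100000000000ULL,
    };

    for (int i = 0; i < 6; i++)
        printf("%3lluG: base %llu, +%u for 1500B packets\n",
               rates[i] / 1000000000ULL,
               (212992ULL * 8 * 1000000 + rates[i] / 2) / rates[i],
               itr_inc(rates[i], 1500));
    return 0;
}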
+ /* The act of updating the ITR will cause it to immediately trigger. In order + * to prevent this from throwing off adaptive update statistics we defer the + * update so that it can only happen so often. So after either Tx or Rx are + * updated we make the adaptive scheme wait until either the ITR completely + * expires via the next_update expiration or we have been through at least + * 3 interrupts. + */ + #define ITR_COUNTDOWN_START 3 + /** * ice_update_ena_itr - Update ITR and re-enable MSIX interrupt * @vsi: the VSI associated with the q_vector @@@ -1068,10 -1347,14 +1347,14 @@@ static void ice_update_ena_itr(struct ice_vsi *vsi, struct ice_q_vector *q_vector) { - struct ice_hw *hw = &vsi->back->hw; - struct ice_ring_container *rc; + struct ice_ring_container *tx = &q_vector->tx; + struct ice_ring_container *rx = &q_vector->rx; u32 itr_val;
+ /* This will do nothing if dynamic updates are not enabled */ + ice_update_itr(q_vector, tx); + ice_update_itr(q_vector, rx); + /* This block of logic allows us to get away with only updating * one ITR value with each interrupt. The idea is to perform a * pseudo-lazy update with the following criteria. @@@ -1080,35 -1363,36 +1363,36 @@@ * 2. If we must reduce an ITR that is given highest priority. * 3. We then give priority to increasing ITR based on amount. */ - if (q_vector->rx.target_itr < q_vector->rx.current_itr) { - rc = &q_vector->rx; + if (rx->target_itr < rx->current_itr) { /* Rx ITR needs to be reduced, this is highest priority */ - itr_val = ice_buildreg_itr(rc->itr_idx, rc->target_itr); - rc->current_itr = rc->target_itr; - } else if ((q_vector->tx.target_itr < q_vector->tx.current_itr) || - ((q_vector->rx.target_itr - q_vector->rx.current_itr) < - (q_vector->tx.target_itr - q_vector->tx.current_itr))) { - rc = &q_vector->tx; + itr_val = ice_buildreg_itr(rx->itr_idx, rx->target_itr); + rx->current_itr = rx->target_itr; + q_vector->itr_countdown = ITR_COUNTDOWN_START; + } else if ((tx->target_itr < tx->current_itr) || + ((rx->target_itr - rx->current_itr) < + (tx->target_itr - tx->current_itr))) { /* Tx ITR needs to be reduced, this is second priority * Tx ITR needs to be increased more than Rx, fourth priority */ - itr_val = ice_buildreg_itr(rc->itr_idx, rc->target_itr); - rc->current_itr = rc->target_itr; - } else if (q_vector->rx.current_itr != q_vector->rx.target_itr) { - rc = &q_vector->rx; + itr_val = ice_buildreg_itr(tx->itr_idx, tx->target_itr); + tx->current_itr = tx->target_itr; + q_vector->itr_countdown = ITR_COUNTDOWN_START; + } else if (rx->current_itr != rx->target_itr) { /* Rx ITR needs to be increased, third priority */ - itr_val = ice_buildreg_itr(rc->itr_idx, rc->target_itr); - rc->current_itr = rc->target_itr; + itr_val = ice_buildreg_itr(rx->itr_idx, rx->target_itr); + rx->current_itr = rx->target_itr; + q_vector->itr_countdown = ITR_COUNTDOWN_START; } else { /* Still have to re-enable the interrupts */ itr_val = ice_buildreg_itr(ICE_ITR_NONE, 0); + if (q_vector->itr_countdown) + q_vector->itr_countdown--; }
- if (!test_bit(__ICE_DOWN, vsi->state)) { - int vector = vsi->hw_base_vector + q_vector->v_idx; - - wr32(hw, GLINT_DYN_CTL(vector), itr_val); - } + if (!test_bit(__ICE_DOWN, vsi->state)) + wr32(&vsi->back->hw, + GLINT_DYN_CTL(q_vector->reg_idx), + itr_val); }
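ice_update_ena_itr() above writes at most one ITR value per interrupt, ordered so reductions take effect immediately: an Rx reduction wins, then a Tx reduction or whichever increase is larger, then the Rx increase; with nothing to change it only re-enables the interrupt. A compact model of that arbitration (scenario values are made up):

#include <stdio.h>

struct rc { int target, current; };

static const char *pick(const struct rc *rx, const struct rc *tx)
{
    if (rx->target < rx->current)
        return "write rx";          /* Rx reduction: highest priority */
    if (tx->target < tx->current ||
        (rx->target - rx->current) < (tx->target - tx->current))
        return "write tx";          /* Tx reduction, or larger Tx increase */
    if (rx->current != rx->target)
        return "write rx";          /* Rx increase: third priority */
    return "re-enable only";
}

int main(void)
{
    struct rc rx = { 50, 100 }, tx = { 80, 60 };

    printf("%s\n", pick(&rx, &tx));  /* rx reduction wins */
    rx = (struct rc){ 100, 100 };
    printf("%s\n", pick(&rx, &tx));  /* tx increase handled next */
    return 0;
}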
/** @@@ -1354,8 -1638,13 +1638,8 @@@ ice_tx_map(struct ice_ring *tx_ring, st ice_maybe_stop_tx(tx_ring, DESC_NEEDED);
/* notify HW of packet */ - if (netif_xmit_stopped(txring_txq(tx_ring)) || !skb->xmit_more) { + if (netif_xmit_stopped(txring_txq(tx_ring)) || !netdev_xmit_more()) { writel(i, tx_ring->tail); - - /* we need this if more than one processor can write to our tail - * at a time, it synchronizes IO on IA64/Altix systems - */ - mmiowb(); }
return; @@@ -1475,7 -1764,7 +1759,7 @@@ int ice_tx_csum(struct ice_tx_buf *firs }
/** - * ice_tx_prepare_vlan_flags - prepare generic TX VLAN tagging flags for HW + * ice_tx_prepare_vlan_flags - prepare generic Tx VLAN tagging flags for HW * @tx_ring: ring to send buffer on * @first: pointer to struct ice_tx_buf * @@@ -1501,7 -1790,7 +1785,7 @@@ ice_tx_prepare_vlan_flags(struct ice_ri * to the encapsulated ethertype. */ skb->protocol = vlan_get_protocol(skb); - goto out; + return 0; }
/* if we have a HW VLAN tag being added, default to the HW one */ @@@ -1523,8 -1812,7 +1807,7 @@@ first->tx_flags |= ICE_TX_FLAGS_SW_VLAN; }
- out: - return 0; + return ice_tx_prepare_vlan_flags_dcb(tx_ring, first); }
/** @@@ -1561,6 -1849,7 +1844,7 @@@ int ice_tso(struct ice_tx_buf *first, s if (err < 0) return err;
+ /* cppcheck-suppress unreadVariable */ ip.hdr = skb_network_header(skb); l4.hdr = skb_transport_header(skb);
diff --combined drivers/net/ethernet/intel/igb/igb_main.c index 1d71ec360b1c,9b8a4bb25327..39f33afc479c --- a/drivers/net/ethernet/intel/igb/igb_main.c +++ b/drivers/net/ethernet/intel/igb/igb_main.c @@@ -2480,7 -2480,7 +2480,7 @@@ static int igb_set_features(struct net_ else igb_reset(adapter);
- return 0; + return 1; }
static int igb_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[], @@@ -3452,6 -3452,9 +3452,9 @@@ static int igb_probe(struct pci_dev *pd break; } } + + dev_pm_set_driver_flags(&pdev->dev, DPM_FLAG_NEVER_SKIP); + pm_runtime_put_noidle(&pdev->dev); return 0;
@@@ -6026,8 -6029,13 +6029,8 @@@ static int igb_tx_map(struct igb_ring * /* Make sure there is space in the ring for the next send. */ igb_maybe_stop_tx(tx_ring, DESC_NEEDED);
- if (netif_xmit_stopped(txring_txq(tx_ring)) || !skb->xmit_more) { + if (netif_xmit_stopped(txring_txq(tx_ring)) || !netdev_xmit_more()) { writel(i, tx_ring->tail); - - /* we need this if more than one processor can write to our tail - * at a time, it synchronizes IO on IA64/Altix systems - */ - mmiowb(); } return 0;
@@@ -8043,7 -8051,7 +8046,7 @@@ static struct sk_buff *igb_construct_sk /* Determine available headroom for copy */ headlen = size; if (headlen > IGB_RX_HDR_LEN) - headlen = eth_get_headlen(va, IGB_RX_HDR_LEN); + headlen = eth_get_headlen(skb->dev, va, IGB_RX_HDR_LEN);
/* align pull length to size of long to optimize memcpy performance */ memcpy(__skb_put(skb, headlen), va, ALIGN(headlen, sizeof(long))); diff --combined drivers/net/ethernet/intel/igc/igc_main.c index f8d692f6aa4f,e58a6e0dc4d9..34fa0e60a780 --- a/drivers/net/ethernet/intel/igc/igc_main.c +++ b/drivers/net/ethernet/intel/igc/igc_main.c @@@ -620,6 -620,55 +620,55 @@@ static void igc_configure_tx(struct igc */ static void igc_setup_mrqc(struct igc_adapter *adapter) { + struct igc_hw *hw = &adapter->hw; + u32 j, num_rx_queues; + u32 mrqc, rxcsum; + u32 rss_key[10]; + + netdev_rss_key_fill(rss_key, sizeof(rss_key)); + for (j = 0; j < 10; j++) + wr32(IGC_RSSRK(j), rss_key[j]); + + num_rx_queues = adapter->rss_queues; + + if (adapter->rss_indir_tbl_init != num_rx_queues) { + for (j = 0; j < IGC_RETA_SIZE; j++) + adapter->rss_indir_tbl[j] = + (j * num_rx_queues) / IGC_RETA_SIZE; + adapter->rss_indir_tbl_init = num_rx_queues; + } + igc_write_rss_indir_tbl(adapter); + + /* Disable raw packet checksumming so that RSS hash is placed in + * descriptor on writeback. No need to enable TCP/UDP/IP checksum + * offloads as they are enabled by default + */ + rxcsum = rd32(IGC_RXCSUM); + rxcsum |= IGC_RXCSUM_PCSD; + + /* Enable Receive Checksum Offload for SCTP */ + rxcsum |= IGC_RXCSUM_CRCOFL; + + /* Don't need to set TUOFL or IPOFL, they default to 1 */ + wr32(IGC_RXCSUM, rxcsum); + + /* Generate RSS hash based on packet types, TCP/UDP + * port numbers and/or IPv4/v6 src and dst addresses + */ + mrqc = IGC_MRQC_RSS_FIELD_IPV4 | + IGC_MRQC_RSS_FIELD_IPV4_TCP | + IGC_MRQC_RSS_FIELD_IPV6 | + IGC_MRQC_RSS_FIELD_IPV6_TCP | + IGC_MRQC_RSS_FIELD_IPV6_TCP_EX; + + if (adapter->flags & IGC_FLAG_RSS_FIELD_IPV4_UDP) + mrqc |= IGC_MRQC_RSS_FIELD_IPV4_UDP; + if (adapter->flags & IGC_FLAG_RSS_FIELD_IPV6_UDP) + mrqc |= IGC_MRQC_RSS_FIELD_IPV6_UDP; + + mrqc |= IGC_MRQC_ENABLE_RSS_MQ; + + wr32(IGC_MRQC, mrqc); }
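The default RSS indirection fill programmed in igc_setup_mrqc() above maps entry j to queue (j * num_rx_queues) / IGC_RETA_SIZE, which hands each queue a contiguous, equal slice of the table. A standalone look at that spread (a RETA size of 128 is an assumption here, for illustration):

#include <stdio.h>

#define RETA_SIZE 128

int main(void)
{
    unsigned int nq = 4, counts[4] = { 0 };

    for (unsigned int j = 0; j < RETA_SIZE; j++)
        counts[(j * nq) / RETA_SIZE]++;  /* same arithmetic as the driver */

    for (unsigned int q = 0; q < nq; q++)
        printf("queue %u: %u entries\n", q, counts[q]);
    return 0;
}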
/** @@@ -890,8 -939,13 +939,8 @@@ static int igc_tx_map(struct igc_ring * /* Make sure there is space in the ring for the next send. */ igc_maybe_stop_tx(tx_ring, DESC_NEEDED);
- if (netif_xmit_stopped(txring_txq(tx_ring)) || !skb->xmit_more) { + if (netif_xmit_stopped(txring_txq(tx_ring)) || !netdev_xmit_more()) { writel(i, tx_ring->tail); - - /* we need this if more than one processor can write to our tail - * at a time, it synchronizes IO on IA64/Altix systems - */ - mmiowb(); }
return 0; @@@ -1145,7 -1199,7 +1194,7 @@@ static struct sk_buff *igc_construct_sk /* Determine available headroom for copy */ headlen = size; if (headlen > IGC_RX_HDR_LEN) - headlen = eth_get_headlen(va, IGC_RX_HDR_LEN); + headlen = eth_get_headlen(skb->dev, va, IGC_RX_HDR_LEN);
/* align pull length to size of long to optimize memcpy performance */ memcpy(__skb_put(skb, headlen), va, ALIGN(headlen, sizeof(long))); @@@ -1733,12 -1787,200 +1782,200 @@@ void igc_up(struct igc_adapter *adapter * igc_update_stats - Update the board statistics counters * @adapter: board private structure */ - static void igc_update_stats(struct igc_adapter *adapter) + void igc_update_stats(struct igc_adapter *adapter) { + struct rtnl_link_stats64 *net_stats = &adapter->stats64; + struct pci_dev *pdev = adapter->pdev; + struct igc_hw *hw = &adapter->hw; + u64 _bytes, _packets; + u64 bytes, packets; + unsigned int start; + u32 mpc; + int i; + + /* Prevent stats update while adapter is being reset, or if the pci + * connection is down. + */ + if (adapter->link_speed == 0) + return; + if (pci_channel_offline(pdev)) + return; + + packets = 0; + bytes = 0; + + rcu_read_lock(); + for (i = 0; i < adapter->num_rx_queues; i++) { + struct igc_ring *ring = adapter->rx_ring[i]; + u32 rqdpc = rd32(IGC_RQDPC(i)); + + if (hw->mac.type >= igc_i225) + wr32(IGC_RQDPC(i), 0); + + if (rqdpc) { + ring->rx_stats.drops += rqdpc; + net_stats->rx_fifo_errors += rqdpc; + } + + do { + start = u64_stats_fetch_begin_irq(&ring->rx_syncp); + _bytes = ring->rx_stats.bytes; + _packets = ring->rx_stats.packets; + } while (u64_stats_fetch_retry_irq(&ring->rx_syncp, start)); + bytes += _bytes; + packets += _packets; + } + + net_stats->rx_bytes = bytes; + net_stats->rx_packets = packets; + + packets = 0; + bytes = 0; + for (i = 0; i < adapter->num_tx_queues; i++) { + struct igc_ring *ring = adapter->tx_ring[i]; + + do { + start = u64_stats_fetch_begin_irq(&ring->tx_syncp); + _bytes = ring->tx_stats.bytes; + _packets = ring->tx_stats.packets; + } while (u64_stats_fetch_retry_irq(&ring->tx_syncp, start)); + bytes += _bytes; + packets += _packets; + } + net_stats->tx_bytes = bytes; + net_stats->tx_packets = packets; + rcu_read_unlock(); + + /* read stats registers */ + adapter->stats.crcerrs += rd32(IGC_CRCERRS); + adapter->stats.gprc += rd32(IGC_GPRC); + adapter->stats.gorc += rd32(IGC_GORCL); + rd32(IGC_GORCH); /* clear GORCL */ + adapter->stats.bprc += rd32(IGC_BPRC); + adapter->stats.mprc += rd32(IGC_MPRC); + adapter->stats.roc += rd32(IGC_ROC); + + adapter->stats.prc64 += rd32(IGC_PRC64); + adapter->stats.prc127 += rd32(IGC_PRC127); + adapter->stats.prc255 += rd32(IGC_PRC255); + adapter->stats.prc511 += rd32(IGC_PRC511); + adapter->stats.prc1023 += rd32(IGC_PRC1023); + adapter->stats.prc1522 += rd32(IGC_PRC1522); + adapter->stats.symerrs += rd32(IGC_SYMERRS); + adapter->stats.sec += rd32(IGC_SEC); + + mpc = rd32(IGC_MPC); + adapter->stats.mpc += mpc; + net_stats->rx_fifo_errors += mpc; + adapter->stats.scc += rd32(IGC_SCC); + adapter->stats.ecol += rd32(IGC_ECOL); + adapter->stats.mcc += rd32(IGC_MCC); + adapter->stats.latecol += rd32(IGC_LATECOL); + adapter->stats.dc += rd32(IGC_DC); + adapter->stats.rlec += rd32(IGC_RLEC); + adapter->stats.xonrxc += rd32(IGC_XONRXC); + adapter->stats.xontxc += rd32(IGC_XONTXC); + adapter->stats.xoffrxc += rd32(IGC_XOFFRXC); + adapter->stats.xofftxc += rd32(IGC_XOFFTXC); + adapter->stats.fcruc += rd32(IGC_FCRUC); + adapter->stats.gptc += rd32(IGC_GPTC); + adapter->stats.gotc += rd32(IGC_GOTCL); + rd32(IGC_GOTCH); /* clear GOTCL */ + adapter->stats.rnbc += rd32(IGC_RNBC); + adapter->stats.ruc += rd32(IGC_RUC); + adapter->stats.rfc += rd32(IGC_RFC); + adapter->stats.rjc += rd32(IGC_RJC); + adapter->stats.tor += rd32(IGC_TORH); + adapter->stats.tot += rd32(IGC_TOTH); + adapter->stats.tpr += 
rd32(IGC_TPR); + + adapter->stats.ptc64 += rd32(IGC_PTC64); + adapter->stats.ptc127 += rd32(IGC_PTC127); + adapter->stats.ptc255 += rd32(IGC_PTC255); + adapter->stats.ptc511 += rd32(IGC_PTC511); + adapter->stats.ptc1023 += rd32(IGC_PTC1023); + adapter->stats.ptc1522 += rd32(IGC_PTC1522); + + adapter->stats.mptc += rd32(IGC_MPTC); + adapter->stats.bptc += rd32(IGC_BPTC); + + adapter->stats.tpt += rd32(IGC_TPT); + adapter->stats.colc += rd32(IGC_COLC); + + adapter->stats.algnerrc += rd32(IGC_ALGNERRC); + + adapter->stats.tsctc += rd32(IGC_TSCTC); + adapter->stats.tsctfc += rd32(IGC_TSCTFC); + + adapter->stats.iac += rd32(IGC_IAC); + adapter->stats.icrxoc += rd32(IGC_ICRXOC); + adapter->stats.icrxptc += rd32(IGC_ICRXPTC); + adapter->stats.icrxatc += rd32(IGC_ICRXATC); + adapter->stats.ictxptc += rd32(IGC_ICTXPTC); + adapter->stats.ictxatc += rd32(IGC_ICTXATC); + adapter->stats.ictxqec += rd32(IGC_ICTXQEC); + adapter->stats.ictxqmtc += rd32(IGC_ICTXQMTC); + adapter->stats.icrxdmtc += rd32(IGC_ICRXDMTC); + + /* Fill out the OS statistics structure */ + net_stats->multicast = adapter->stats.mprc; + net_stats->collisions = adapter->stats.colc; + + /* Rx Errors */ + + /* RLEC on some newer hardware can be incorrect so build + * our own version based on RUC and ROC + */ + net_stats->rx_errors = adapter->stats.rxerrc + + adapter->stats.crcerrs + adapter->stats.algnerrc + + adapter->stats.ruc + adapter->stats.roc + + adapter->stats.cexterr; + net_stats->rx_length_errors = adapter->stats.ruc + + adapter->stats.roc; + net_stats->rx_crc_errors = adapter->stats.crcerrs; + net_stats->rx_frame_errors = adapter->stats.algnerrc; + net_stats->rx_missed_errors = adapter->stats.mpc; + + /* Tx Errors */ + net_stats->tx_errors = adapter->stats.ecol + + adapter->stats.latecol; + net_stats->tx_aborted_errors = adapter->stats.ecol; + net_stats->tx_window_errors = adapter->stats.latecol; + net_stats->tx_carrier_errors = adapter->stats.tncrs; + + /* Tx Dropped needs to be maintained elsewhere */ + + /* Management Stats */ + adapter->stats.mgptc += rd32(IGC_MGTPTC); + adapter->stats.mgprc += rd32(IGC_MGTPRC); + adapter->stats.mgpdc += rd32(IGC_MGTPDC); }
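The per-ring bytes/packets totals in igc_update_stats() are sampled with the u64_stats sequence-retry idiom: the reader re-reads until no writer interleaved, so 64-bit counters stay consistent on 32-bit machines without taking a lock on the hot path. A userspace model of the retry loop (the kernel's u64_stats_fetch_begin_irq()/_retry_irq() helpers wrap the same sequence-counter idea):

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

struct ring_stats {
	atomic_uint seq;	/* even = stable, odd = write in progress */
	uint64_t bytes, packets;
};

/* Reader: retry while a write was in progress or completed in between. */
static void stats_read(struct ring_stats *s, uint64_t *b, uint64_t *p)
{
	unsigned int start;

	do {
		start = atomic_load(&s->seq);
		*b = s->bytes;
		*p = s->packets;
	} while ((start & 1) || atomic_load(&s->seq) != start);
}

int main(void)
{
	struct ring_stats s = { .seq = 0, .bytes = 1500, .packets = 1 };
	uint64_t b, p;

	stats_read(&s, &b, &p);
	printf("%llu bytes / %llu packets\n",
	       (unsigned long long)b, (unsigned long long)p);
	return 0;
}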
static void igc_nfc_filter_exit(struct igc_adapter *adapter) { + struct igc_nfc_filter *rule; + + spin_lock(&adapter->nfc_lock); + + hlist_for_each_entry(rule, &adapter->nfc_filter_list, nfc_node) + igc_erase_filter(adapter, rule); + + hlist_for_each_entry(rule, &adapter->cls_flower_list, nfc_node) + igc_erase_filter(adapter, rule); + + spin_unlock(&adapter->nfc_lock); + } + + static void igc_nfc_filter_restore(struct igc_adapter *adapter) + { + struct igc_nfc_filter *rule; + + spin_lock(&adapter->nfc_lock); + + hlist_for_each_entry(rule, &adapter->nfc_filter_list, nfc_node) + igc_add_filter(adapter, rule); + + spin_unlock(&adapter->nfc_lock); }
/** @@@ -1885,6 -2127,86 +2122,86 @@@ static struct net_device_stats *igc_get return &netdev->stats; }
+ static netdev_features_t igc_fix_features(struct net_device *netdev, + netdev_features_t features) + { + /* Since there is no support for separate Rx/Tx vlan accel + * enable/disable make sure Tx flag is always in same state as Rx. + */ + if (features & NETIF_F_HW_VLAN_CTAG_RX) + features |= NETIF_F_HW_VLAN_CTAG_TX; + else + features &= ~NETIF_F_HW_VLAN_CTAG_TX; + + return features; + } + + static int igc_set_features(struct net_device *netdev, + netdev_features_t features) + { + netdev_features_t changed = netdev->features ^ features; + struct igc_adapter *adapter = netdev_priv(netdev); + + /* Add VLAN support */ + if (!(changed & (NETIF_F_RXALL | NETIF_F_NTUPLE))) + return 0; + + if (!(features & NETIF_F_NTUPLE)) { + struct hlist_node *node2; + struct igc_nfc_filter *rule; + + spin_lock(&adapter->nfc_lock); + hlist_for_each_entry_safe(rule, node2, + &adapter->nfc_filter_list, nfc_node) { + igc_erase_filter(adapter, rule); + hlist_del(&rule->nfc_node); + kfree(rule); + } + spin_unlock(&adapter->nfc_lock); + adapter->nfc_filter_count = 0; + } + + netdev->features = features; + + if (netif_running(netdev)) + igc_reinit_locked(adapter); + else + igc_reset(adapter); + + return 1; + } + + static netdev_features_t + igc_features_check(struct sk_buff *skb, struct net_device *dev, + netdev_features_t features) + { + unsigned int network_hdr_len, mac_hdr_len; + + /* Make certain the headers can be described by a context descriptor */ + mac_hdr_len = skb_network_header(skb) - skb->data; + if (unlikely(mac_hdr_len > IGC_MAX_MAC_HDR_LEN)) + return features & ~(NETIF_F_HW_CSUM | + NETIF_F_SCTP_CRC | + NETIF_F_HW_VLAN_CTAG_TX | + NETIF_F_TSO | + NETIF_F_TSO6); + + network_hdr_len = skb_checksum_start(skb) - skb_network_header(skb); + if (unlikely(network_hdr_len > IGC_MAX_NETWORK_HDR_LEN)) + return features & ~(NETIF_F_HW_CSUM | + NETIF_F_SCTP_CRC | + NETIF_F_TSO | + NETIF_F_TSO6); + + /* We can only support IPv4 TSO in tunnels if we can mangle the + * inner IP ID field, so strip TSO if MANGLEID is not supported. + */ + if (skb->encapsulation && !(features & NETIF_F_TSO_MANGLEID)) + features &= ~NETIF_F_TSO; + + return features; + } + /** * igc_configure - configure the hardware for RX and TX * @adapter: private board structure @@@ -1901,6 -2223,7 +2218,7 @@@ static void igc_configure(struct igc_ad igc_setup_mrqc(adapter); igc_setup_rctl(adapter);
+ igc_nfc_filter_restore(adapter); igc_configure_tx(adapter); igc_configure_rx(adapter);
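The igc_fix_features() hook added above enforces an all-or-nothing rule for VLAN offload: per the comment, the hardware has no separate Rx/Tx VLAN acceleration toggle, so the TX flag is forced to track the RX flag before the core applies the feature set. The rule in isolation, with illustrative bit values in place of the NETIF_F_* constants:

#include <stdint.h>
#include <stdio.h>

#define F_VLAN_RX (1u << 0)	/* stands in for NETIF_F_HW_VLAN_CTAG_RX */
#define F_VLAN_TX (1u << 1)	/* stands in for NETIF_F_HW_VLAN_CTAG_TX */

/* Mirror of the igc_fix_features() rule: TX VLAN offload follows RX. */
static uint32_t fix_features(uint32_t features)
{
	if (features & F_VLAN_RX)
		features |= F_VLAN_TX;
	else
		features &= ~F_VLAN_TX;
	return features;
}

int main(void)
{
	printf("rx only -> %#x\n", (unsigned)fix_features(F_VLAN_RX)); /* 0x3: both on */
	printf("tx only -> %#x\n", (unsigned)fix_features(F_VLAN_TX)); /* 0x0: both off */
	return 0;
}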
@@@ -1962,6 -2285,127 +2280,127 @@@ static void igc_set_default_mac_filter( igc_rar_set_index(adapter, 0); }
+ /* If the filter to be added and an already existing filter express + * the same address and address type, it should be possible to only + * override the other configurations, for example the queue to steer + * traffic. + */ + static bool igc_mac_entry_can_be_used(const struct igc_mac_addr *entry, + const u8 *addr, const u8 flags) + { + if (!(entry->state & IGC_MAC_STATE_IN_USE)) + return true; + + if ((entry->state & IGC_MAC_STATE_SRC_ADDR) != + (flags & IGC_MAC_STATE_SRC_ADDR)) + return false; + + if (!ether_addr_equal(addr, entry->addr)) + return false; + + return true; + } + + /* Add a MAC filter for 'addr' directing matching traffic to 'queue', + * 'flags' is used to indicate what kind of match is made, match is by + * default for the destination address, if matching by source address + * is desired the flag IGC_MAC_STATE_SRC_ADDR can be used. + */ + static int igc_add_mac_filter_flags(struct igc_adapter *adapter, + const u8 *addr, const u8 queue, + const u8 flags) + { + struct igc_hw *hw = &adapter->hw; + int rar_entries = hw->mac.rar_entry_count; + int i; + + if (is_zero_ether_addr(addr)) + return -EINVAL; + + /* Search for the first empty entry in the MAC table. + * Do not touch entries at the end of the table reserved for the VF MAC + * addresses. + */ + for (i = 0; i < rar_entries; i++) { + if (!igc_mac_entry_can_be_used(&adapter->mac_table[i], + addr, flags)) + continue; + + ether_addr_copy(adapter->mac_table[i].addr, addr); + adapter->mac_table[i].queue = queue; + adapter->mac_table[i].state |= IGC_MAC_STATE_IN_USE | flags; + + igc_rar_set_index(adapter, i); + return i; + } + + return -ENOSPC; + } + + int igc_add_mac_steering_filter(struct igc_adapter *adapter, + const u8 *addr, u8 queue, u8 flags) + { + return igc_add_mac_filter_flags(adapter, addr, queue, + IGC_MAC_STATE_QUEUE_STEERING | flags); + } + + /* Remove a MAC filter for 'addr' directing matching traffic to + * 'queue', 'flags' is used to indicate what kind of match need to be + * removed, match is by default for the destination address, if + * matching by source address is to be removed the flag + * IGC_MAC_STATE_SRC_ADDR can be used. + */ + static int igc_del_mac_filter_flags(struct igc_adapter *adapter, + const u8 *addr, const u8 queue, + const u8 flags) + { + struct igc_hw *hw = &adapter->hw; + int rar_entries = hw->mac.rar_entry_count; + int i; + + if (is_zero_ether_addr(addr)) + return -EINVAL; + + /* Search for matching entry in the MAC table based on given address + * and queue. Do not touch entries at the end of the table reserved + * for the VF MAC addresses. 
+ */ + for (i = 0; i < rar_entries; i++) { + if (!(adapter->mac_table[i].state & IGC_MAC_STATE_IN_USE)) + continue; + if ((adapter->mac_table[i].state & flags) != flags) + continue; + if (adapter->mac_table[i].queue != queue) + continue; + if (!ether_addr_equal(adapter->mac_table[i].addr, addr)) + continue; + + /* When a filter for the default address is "deleted", + * we return it to its initial configuration + */ + if (adapter->mac_table[i].state & IGC_MAC_STATE_DEFAULT) { + adapter->mac_table[i].state = + IGC_MAC_STATE_DEFAULT | IGC_MAC_STATE_IN_USE; + } else { + adapter->mac_table[i].state = 0; + adapter->mac_table[i].queue = 0; + memset(adapter->mac_table[i].addr, 0, ETH_ALEN); + } + + igc_rar_set_index(adapter, i); + return 0; + } + + return -ENOENT; + } + + int igc_del_mac_steering_filter(struct igc_adapter *adapter, + const u8 *addr, u8 queue, u8 flags) + { + return igc_del_mac_filter_flags(adapter, addr, queue, + IGC_MAC_STATE_QUEUE_STEERING | flags); + } + /** * igc_set_rx_mode - Secondary Unicast, Multicast and Promiscuous mode set * @netdev: network interface device structure @@@ -3429,6 -3873,9 +3868,9 @@@ static const struct net_device_ops igc_ .ndo_set_mac_address = igc_set_mac, .ndo_change_mtu = igc_change_mtu, .ndo_get_stats = igc_get_stats, + .ndo_fix_features = igc_fix_features, + .ndo_set_features = igc_set_features, + .ndo_features_check = igc_features_check, };
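The two RAR-table walks above share one shape: a linear scan over hw->mac.rar_entry_count slots, where an add reuses a slot that already matches the address and match type or takes the first free one, and a delete restores the slot (or, for the default MAC, its initial state). A compact userspace sketch of the add path, with a hypothetical table size and a byte-array table in place of the adapter state:

#include <stdio.h>
#include <string.h>

#define TABLE_SIZE 16		/* stands in for hw->mac.rar_entry_count */
#define STATE_IN_USE 0x1

struct mac_entry { unsigned char addr[6]; unsigned char state; };

static struct mac_entry table[TABLE_SIZE];

/* First-fit insert, as in igc_add_mac_filter_flags(): a slot that already
 * holds the same address is reused, otherwise the first free slot wins.
 */
static int mac_add(const unsigned char *addr)
{
	for (int i = 0; i < TABLE_SIZE; i++) {
		if ((table[i].state & STATE_IN_USE) &&
		    memcmp(table[i].addr, addr, 6) != 0)
			continue;
		memcpy(table[i].addr, addr, 6);
		table[i].state |= STATE_IN_USE;
		return i;	/* index doubles as the RAR slot number */
	}
	return -1;		/* models -ENOSPC */
}

int main(void)
{
	const unsigned char a[6] = { 0x02, 0, 0, 0, 0, 0x01 };

	printf("slot %d\n", mac_add(a));	/* 0 */
	printf("slot %d\n", mac_add(a));	/* 0 again: entry reused */
	return 0;
}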
/* PCIe configuration access */ @@@ -3658,6 -4105,9 +4100,9 @@@ static int igc_probe(struct pci_dev *pd if (err) goto err_sw_init;
+ /* copy netdev features into list of user selectable features */ + netdev->hw_features |= NETIF_F_NTUPLE; + /* MTU range: 68 - 9216 */ netdev->min_mtu = ETH_MIN_MTU; netdev->max_mtu = MAX_STD_JUMBO_FRAME_SIZE; diff --combined drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index 99e23cf6a73a,7b903206b534..57fd9ee6de66 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@@ -1800,7 -1800,7 +1800,7 @@@ static void ixgbe_pull_tail(struct ixgb * we need the header to contain the greater of either ETH_HLEN or * 60 bytes if the skb->len is less than 60 for skb_pad. */ - pull_len = eth_get_headlen(va, IXGBE_RX_HDR_SIZE); + pull_len = eth_get_headlen(skb->dev, va, IXGBE_RX_HDR_SIZE);
/* align pull length to size of long to optimize memcpy performance */ skb_copy_to_linear_data(skb, va, ALIGN(pull_len, sizeof(long))); @@@ -8297,8 -8297,13 +8297,8 @@@ static int ixgbe_tx_map(struct ixgbe_ri
ixgbe_maybe_stop_tx(tx_ring, DESC_NEEDED);
- if (netif_xmit_stopped(txring_txq(tx_ring)) || !skb->xmit_more) { + if (netif_xmit_stopped(txring_txq(tx_ring)) || !netdev_xmit_more()) { writel(i, tx_ring->tail); - - /* we need this if more than one processor can write to our tail - * at a time, it synchronizes IO on IA64/Altix systems - */ - mmiowb(); }
return 0; @@@ -8478,8 -8483,7 +8478,7 @@@ static void ixgbe_atr(struct ixgbe_rin
#ifdef IXGBE_FCOE static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb, - struct net_device *sb_dev, - select_queue_fallback_t fallback) + struct net_device *sb_dev) { struct ixgbe_adapter *adapter; struct ixgbe_ring_feature *f; @@@ -8509,7 -8513,7 +8508,7 @@@ break; /* fall through */ default: - return fallback(dev, skb, sb_dev); + return netdev_pick_tx(dev, skb, sb_dev); }
f = &adapter->ring_feature[RING_F_FCOE]; @@@ -9791,7 -9795,7 +9790,7 @@@ static int ixgbe_set_features(struct ne NETIF_F_HW_VLAN_CTAG_FILTER)) ixgbe_set_rx_mode(netdev);
- return 0; + return 1; }
/** diff --combined drivers/net/ethernet/marvell/sky2.c index 49486c10ef81,c4050ec594f4..9d070cca3e9e --- a/drivers/net/ethernet/marvell/sky2.c +++ b/drivers/net/ethernet/marvell/sky2.c @@@ -1139,6 -1139,9 +1139,6 @@@ static inline void sky2_put_idx(struct /* Make sure write' to descriptors are complete before we tell hardware */ wmb(); sky2_write16(hw, Y2_QADDR(q, PREF_UNIT_PUT_IDX), idx); - - /* Synchronize I/O on since next processor may write to tail */ - mmiowb(); }
@@@ -1351,6 -1354,7 +1351,6 @@@ stopped
/* reset the Rx prefetch unit */ sky2_write32(hw, Y2_QADDR(rxq, PREF_UNIT_CTRL), PREF_UNIT_RST_SET); - mmiowb(); }
/* Clean out receive buffer area, assumes receiver hardware stopped */ @@@ -4804,7 -4808,7 +4804,7 @@@ static struct net_device *sky2_init_net * 2) from internal registers set by bootloader */ iap = of_get_mac_address(hw->pdev->dev.of_node); - if (iap) + if (!IS_ERR(iap)) memcpy(dev->dev_addr, iap, ETH_ALEN); else memcpy_fromio(dev->dev_addr, hw->regs + B2_MAC_1 + port * 8, diff --combined drivers/net/ethernet/mellanox/mlx5/core/cmd.c index d84d3cdbaae1,746c8cc95e48..937ba4bcb056 --- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c @@@ -917,6 -917,7 +917,6 @@@ static void cmd_work_handler(struct wor mlx5_core_dbg(dev, "writing 0x%x to command doorbell\n", 1 << ent->idx); wmb(); iowrite32be(1 << ent->idx, &dev->iseg->cmd_dbell); - mmiowb(); /* if not in polling don't use ent after this point */ if (cmd_mode == CMD_MODE_POLLING || poll_cmd) { poll_timeout(ent); @@@ -1346,7 -1347,7 +1346,7 @@@ static void set_wqname(struct mlx5_core struct mlx5_cmd *cmd = &dev->cmd;
snprintf(cmd->wq_name, sizeof(cmd->wq_name), "mlx5_cmd_%s", - dev->priv.name); + dev_name(dev->device)); }
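The sky2 hunk above tracks an of_get_mac_address() API change: failure is now reported as an ERR_PTR()-encoded pointer rather than NULL, so the NULL test became IS_ERR(). A userspace model of that encoding (the real macros live in include/linux/err.h; the scheme reserves the topmost 4095 addresses for negative errnos):

#include <stdio.h>

#define MAX_ERRNO 4095

/* Model of ERR_PTR()/IS_ERR(): error codes live in the topmost page. */
static void *err_ptr(long err) { return (void *)err; }

static int is_err(const void *p)
{
	return (unsigned long)p >= (unsigned long)-MAX_ERRNO;
}

int main(void)
{
	int x = 0;
	void *ok = &x;
	void *bad = err_ptr(-2);	/* -ENOENT */

	printf("ok:  %d\n", is_err(ok));	/* 0 */
	printf("bad: %d\n", is_err(bad));	/* 1 */
	return 0;
}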
static void clean_debug_files(struct mlx5_core_dev *dev) @@@ -1851,7 -1852,7 +1851,7 @@@ static void create_msg_cache(struct mlx
static int alloc_cmd_page(struct mlx5_core_dev *dev, struct mlx5_cmd *cmd) { - struct device *ddev = &dev->pdev->dev; + struct device *ddev = dev->device;
cmd->cmd_alloc_buf = dma_alloc_coherent(ddev, MLX5_ADAPTER_PAGE_SIZE, &cmd->alloc_dma, GFP_KERNEL); @@@ -1882,7 -1883,7 +1882,7 @@@
static void free_cmd_page(struct mlx5_core_dev *dev, struct mlx5_cmd *cmd) { - struct device *ddev = &dev->pdev->dev; + struct device *ddev = dev->device;
dma_free_coherent(ddev, cmd->alloc_size, cmd->cmd_alloc_buf, cmd->alloc_dma); @@@ -1907,8 -1908,7 +1907,7 @@@ int mlx5_cmd_init(struct mlx5_core_dev return -EINVAL; }
- cmd->pool = dma_pool_create("mlx5_cmd", &dev->pdev->dev, size, align, - 0); + cmd->pool = dma_pool_create("mlx5_cmd", dev->device, size, align, 0); if (!cmd->pool) return -ENOMEM;
diff --combined drivers/net/ethernet/mellanox/mlxsw/Kconfig index 9050aa5f5f52,b6b3ff0fe17f..7ccb950aa7d4 --- a/drivers/net/ethernet/mellanox/mlxsw/Kconfig +++ b/drivers/net/ethernet/mellanox/mlxsw/Kconfig @@@ -4,6 -4,7 +4,7 @@@
config MLXSW_CORE tristate "Mellanox Technologies Switch ASICs support" + select NET_DEVLINK ---help--- This driver supports Mellanox Technologies Switch ASICs family.
@@@ -21,6 -22,7 +22,6 @@@ config MLXSW_CORE_HWMO config MLXSW_CORE_THERMAL bool "Thermal zone support for Mellanox Technologies Switch ASICs" depends on MLXSW_CORE && THERMAL - depends on !(MLXSW_CORE=y && THERMAL=m) default y ---help--- Say Y here if you want to automatically control fans speed according diff --combined drivers/net/ethernet/qlogic/qede/qede_ethtool.c index 4555c0b161ef,f0a2ca23f63a..8911a97ab0ca --- a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c +++ b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c @@@ -652,9 -652,9 +652,9 @@@ static void qede_get_drvinfo(struct net { char mfw[ETHTOOL_FWVERS_LEN], storm[ETHTOOL_FWVERS_LEN]; struct qede_dev *edev = netdev_priv(ndev); + char mbi[ETHTOOL_FWVERS_LEN];
strlcpy(info->driver, "qede", sizeof(info->driver)); - strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
snprintf(storm, ETHTOOL_FWVERS_LEN, "%d.%d.%d.%d", edev->dev_info.common.fw_major, @@@ -668,13 -668,27 +668,27 @@@ (edev->dev_info.common.mfw_rev >> 8) & 0xFF, edev->dev_info.common.mfw_rev & 0xFF);
- if ((strlen(storm) + strlen(mfw) + strlen("mfw storm ")) < - sizeof(info->fw_version)) { + if ((strlen(storm) + strlen(DRV_MODULE_VERSION) + strlen("[storm] ")) < + sizeof(info->version)) + snprintf(info->version, sizeof(info->version), + "%s [storm %s]", DRV_MODULE_VERSION, storm); + else + snprintf(info->version, sizeof(info->version), + "%s %s", DRV_MODULE_VERSION, storm); + + if (edev->dev_info.common.mbi_version) { + snprintf(mbi, ETHTOOL_FWVERS_LEN, "%d.%d.%d", + (edev->dev_info.common.mbi_version & + QED_MBI_VERSION_2_MASK) >> QED_MBI_VERSION_2_OFFSET, + (edev->dev_info.common.mbi_version & + QED_MBI_VERSION_1_MASK) >> QED_MBI_VERSION_1_OFFSET, + (edev->dev_info.common.mbi_version & + QED_MBI_VERSION_0_MASK) >> QED_MBI_VERSION_0_OFFSET); snprintf(info->fw_version, sizeof(info->fw_version), - "mfw %s storm %s", mfw, storm); + "mbi %s [mfw %s]", mbi, mfw); } else { snprintf(info->fw_version, sizeof(info->fw_version), - "%s %s", mfw, storm); + "mfw %s", mfw); }
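The new mbi string above is built by masking and shifting three packed version fields out of a single 32-bit word. The decode step on its own, with an illustrative byte layout assumed in place of the real QED_MBI_VERSION_* masks and offsets:

#include <stdint.h>
#include <stdio.h>

/* Illustrative layout: byte 2 = major, byte 1 = minor, byte 0 = rev. */
#define MBI_2_MASK 0x00ff0000u
#define MBI_2_OFF  16
#define MBI_1_MASK 0x0000ff00u
#define MBI_1_OFF  8
#define MBI_0_MASK 0x000000ffu
#define MBI_0_OFF  0

int main(void)
{
	uint32_t mbi = 0x00070a03;
	char buf[16];

	snprintf(buf, sizeof(buf), "%u.%u.%u",
		 (unsigned)((mbi & MBI_2_MASK) >> MBI_2_OFF),
		 (unsigned)((mbi & MBI_1_MASK) >> MBI_1_OFF),
		 (unsigned)((mbi & MBI_0_MASK) >> MBI_0_OFF));
	puts(buf);	/* prints 7.10.3 */
	return 0;
}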
strlcpy(info->bus_info, pci_name(edev->pdev), sizeof(info->bus_info)); @@@ -1526,6 -1540,14 +1540,6 @@@ static int qede_selftest_transmit_traff barrier(); writel(txq->tx_db.raw, txq->doorbell_addr);
- /* mmiowb is needed to synchronize doorbell writes from more than one - * processor. It guarantees that the write arrives to the device before - * the queue lock is released and another start_xmit is called (possibly - * on another CPU). Without this barrier, the next doorbell can bypass - * this doorbell. This is applicable to IA64/Altix systems. - */ - mmiowb(); - for (i = 0; i < QEDE_SELFTEST_POLL_COUNT; i++) { if (qede_txq_has_work(txq)) break; diff --combined drivers/net/ethernet/qlogic/qede/qede_fp.c index 6f7e3622c6b4,954015d2011a..0ae28f0d2523 --- a/drivers/net/ethernet/qlogic/qede/qede_fp.c +++ b/drivers/net/ethernet/qlogic/qede/qede_fp.c @@@ -580,6 -580,14 +580,6 @@@ void qede_update_rx_prod(struct qede_de
internal_ram_wr(rxq->hw_rxq_prod_addr, sizeof(rx_prods), (u32 *)&rx_prods); - - /* mmiowb is needed to synchronize doorbell writes from more than one - * processor. It guarantees that the write arrives to the device before - * the napi lock is released and another qede_poll is called (possibly - * on another CPU). Without this barrier, the next doorbell can bypass - * this doorbell. This is applicable to IA64/Altix systems. - */ - mmiowb(); }
static void qede_get_rxhash(struct sk_buff *skb, u8 bitfields, __le32 rss_hash) @@@ -1657,12 -1665,12 +1657,12 @@@ netdev_tx_t qede_start_xmit(struct sk_b txq->tx_db.data.bd_prod = cpu_to_le16(qed_chain_get_prod_idx(&txq->tx_pbl));
- if (!skb->xmit_more || netif_xmit_stopped(netdev_txq)) + if (!netdev_xmit_more() || netif_xmit_stopped(netdev_txq)) qede_update_tx_producer(txq);
if (unlikely(qed_chain_get_elem_left(&txq->tx_pbl) < (MAX_SKB_FRAGS + 1))) { - if (skb->xmit_more) + if (netdev_xmit_more()) qede_update_tx_producer(txq);
netif_tx_stop_queue(netdev_txq); @@@ -1688,8 -1696,7 +1688,7 @@@ }
u16 qede_select_queue(struct net_device *dev, struct sk_buff *skb, - struct net_device *sb_dev, - select_queue_fallback_t fallback) + struct net_device *sb_dev) { struct qede_dev *edev = netdev_priv(dev); int total_txq; @@@ -1697,7 -1704,7 +1696,7 @@@ total_txq = QEDE_TSS_COUNT(edev) * edev->dev_info.num_tc;
return QEDE_TSS_COUNT(edev) ? - fallback(dev, skb, NULL) % total_txq : 0; + netdev_pick_tx(dev, skb, NULL) % total_txq : 0; }
/* 8B udp header + 8B base tunnel header + 32B option length */ diff --combined drivers/net/ethernet/realtek/r8169.c index f66208353e3b,549be1c76a89..2e20334b76a1 --- a/drivers/net/ethernet/realtek/r8169.c +++ b/drivers/net/ethernet/realtek/r8169.c @@@ -491,10 -491,6 +491,6 @@@ enum rtl_register_content PCIDAC = (1 << 4), PCIMulRW = (1 << 3), #define INTT_MASK GENMASK(1, 0) - INTT_0 = 0x0000, // 8168 - INTT_1 = 0x0001, // 8168 - INTT_2 = 0x0002, // 8168 - INTT_3 = 0x0003, // 8168
/* rtl8169_PHYstatus */ TBI_Enable = 0x80, @@@ -703,6 -699,8 +699,8 @@@ struct rtl8169_private u32 ocp_base; };
+ typedef void (*rtl_generic_fct)(struct rtl8169_private *tp); + MODULE_AUTHOR("Realtek and the Linux r8169 crew netdev@vger.kernel.org"); MODULE_DESCRIPTION("RealTek RTL-8169 Gigabit Ethernet driver"); module_param_named(debug, debug.msg_enable, int, 0); @@@ -777,9 -775,9 +775,9 @@@ static bool rtl_loop_wait(struct rtl816 int i;
for (i = 0; i < n; i++) { - delay(d); if (c->check(tp) == high) return true; + delay(d); } netif_err(tp, drv, tp->dev, "%s == %d (loop: %d, delay: %d).\n", c->msg, !high, n, d); @@@ -1067,8 -1065,8 +1065,8 @@@ DECLARE_RTL_COND(rtl_eriar_cond return RTL_R32(tp, ERIAR) & ERIAR_FLAG; }
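The rtl_loop_wait() reorder above moves the check ahead of the delay, so a condition that is already true succeeds immediately instead of paying one delay period first; with call sites such as rtl_udelay_loop_wait_low(tp, &rtl_eriar_cond, 100, 100), that shaves a guaranteed stall off every already-satisfied wait. The loop shape in isolation:

#include <stdbool.h>
#include <stdio.h>

/* Poll cond(arg) up to n times, delaying only between attempts --
 * the reordered r8169 loop: check first, then sleep.
 */
static bool loop_wait(bool (*cond)(void *), void *arg, int n,
		      void (*delay)(void))
{
	for (int i = 0; i < n; i++) {
		if (cond(arg))
			return true;
		delay();
	}
	return false;
}

static bool already_true(void *arg) { (void)arg; return true; }
static void tick(void) { puts("delay"); }

int main(void)
{
	/* Succeeds on the first check; "delay" is never printed. */
	printf("%d\n", loop_wait(already_true, NULL, 20, tick));
	return 0;
}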
- static void rtl_eri_write(struct rtl8169_private *tp, int addr, u32 mask, - u32 val, int type) + static void _rtl_eri_write(struct rtl8169_private *tp, int addr, u32 mask, + u32 val, int type) { BUG_ON((addr & 3) || (mask == 0)); RTL_W32(tp, ERIDR, val); @@@ -1077,7 -1075,13 +1075,13 @@@ rtl_udelay_loop_wait_low(tp, &rtl_eriar_cond, 100, 100); }
- static u32 rtl_eri_read(struct rtl8169_private *tp, int addr, int type) + static void rtl_eri_write(struct rtl8169_private *tp, int addr, u32 mask, + u32 val) + { + _rtl_eri_write(tp, addr, mask, val, ERIAR_EXGMAC); + } + + static u32 _rtl_eri_read(struct rtl8169_private *tp, int addr, int type) { RTL_W32(tp, ERIAR, ERIAR_READ_CMD | type | ERIAR_MASK_1111 | addr);
@@@ -1085,13 -1089,30 +1089,30 @@@ RTL_R32(tp, ERIDR) : ~0; }
+ static u32 rtl_eri_read(struct rtl8169_private *tp, int addr) + { + return _rtl_eri_read(tp, addr, ERIAR_EXGMAC); + } + static void rtl_w0w1_eri(struct rtl8169_private *tp, int addr, u32 mask, u32 p, - u32 m, int type) + u32 m) { u32 val;
- val = rtl_eri_read(tp, addr, type); - rtl_eri_write(tp, addr, mask, (val & ~m) | p, type); + val = rtl_eri_read(tp, addr); + rtl_eri_write(tp, addr, mask, (val & ~m) | p); + } + + static void rtl_eri_set_bits(struct rtl8169_private *tp, int addr, u32 mask, + u32 p) + { + rtl_w0w1_eri(tp, addr, mask, p, 0); + } + + static void rtl_eri_clear_bits(struct rtl8169_private *tp, int addr, u32 mask, + u32 m) + { + rtl_w0w1_eri(tp, addr, mask, 0, m); }
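rtl_eri_set_bits() and rtl_eri_clear_bits() above are thin wrappers around one read-modify-write primitive: read the register, clear the bits in m, or in the bits in p, write it back. The core transform, separated from the ERI access plumbing:

#include <stdint.h>
#include <stdio.h>

/* (val & ~m) | p -- the heart of rtl_w0w1_eri(). */
static uint32_t w0w1(uint32_t val, uint32_t p, uint32_t m)
{
	return (val & ~m) | p;
}

static uint32_t set_bits(uint32_t val, uint32_t p)   { return w0w1(val, p, 0); }
static uint32_t clear_bits(uint32_t val, uint32_t m) { return w0w1(val, 0, m); }

int main(void)
{
	uint32_t v = 0x00f0;

	printf("%#x\n", (unsigned)set_bits(v, 0x0003));   /* 0xf3 */
	printf("%#x\n", (unsigned)clear_bits(v, 0x0030)); /* 0xc0 */
	return 0;
}

Naming the two common cases lets call sites like the __rtl8169_set_wol() hunk below say what they mean instead of passing paired value/mask arguments.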
static u32 r8168dp_ocp_read(struct rtl8169_private *tp, u8 mask, u16 reg) @@@ -1103,7 -1124,7 +1124,7 @@@
static u32 r8168ep_ocp_read(struct rtl8169_private *tp, u8 mask, u16 reg) { - return rtl_eri_read(tp, reg, ERIAR_OOB); + return _rtl_eri_read(tp, reg, ERIAR_OOB); }
static void r8168dp_ocp_write(struct rtl8169_private *tp, u8 mask, u16 reg, @@@ -1117,13 -1138,13 +1138,13 @@@ static void r8168ep_ocp_write(struct rtl8169_private *tp, u8 mask, u16 reg, u32 data) { - rtl_eri_write(tp, reg, ((u32)mask & 0x0f) << ERIAR_MASK_SHIFT, - data, ERIAR_OOB); + _rtl_eri_write(tp, reg, ((u32)mask & 0x0f) << ERIAR_MASK_SHIFT, + data, ERIAR_OOB); }
static void r8168dp_oob_notify(struct rtl8169_private *tp, u8 cmd) { - rtl_eri_write(tp, 0xe8, ERIAR_MASK_0001, cmd, ERIAR_EXGMAC); + rtl_eri_write(tp, 0xe8, ERIAR_MASK_0001, cmd);
r8168dp_ocp_write(tp, 0x1, 0x30, 0x00000001); } @@@ -1259,19 -1280,10 +1280,10 @@@ static bool r8168_check_dash(struct rtl } }
- struct exgmac_reg { - u16 addr; - u16 mask; - u32 val; - }; - - static void rtl_write_exgmac_batch(struct rtl8169_private *tp, - const struct exgmac_reg *r, int len) + static void rtl_reset_packet_filter(struct rtl8169_private *tp) { - while (len-- > 0) { - rtl_eri_write(tp, r->addr, r->mask, r->val, ERIAR_EXGMAC); - r++; - } + rtl_eri_clear_bits(tp, 0xdc, ERIAR_MASK_0001, BIT(0)); + rtl_eri_set_bits(tp, 0xdc, ERIAR_MASK_0001, BIT(0)); }
DECLARE_RTL_COND(rtl_efusear_cond) @@@ -1327,48 -1339,31 +1339,31 @@@ static void rtl_link_chg_patch(struct r if (tp->mac_version == RTL_GIGA_MAC_VER_34 || tp->mac_version == RTL_GIGA_MAC_VER_38) { if (phydev->speed == SPEED_1000) { - rtl_eri_write(tp, 0x1bc, ERIAR_MASK_1111, 0x00000011, - ERIAR_EXGMAC); - rtl_eri_write(tp, 0x1dc, ERIAR_MASK_1111, 0x00000005, - ERIAR_EXGMAC); + rtl_eri_write(tp, 0x1bc, ERIAR_MASK_1111, 0x00000011); + rtl_eri_write(tp, 0x1dc, ERIAR_MASK_1111, 0x00000005); } else if (phydev->speed == SPEED_100) { - rtl_eri_write(tp, 0x1bc, ERIAR_MASK_1111, 0x0000001f, - ERIAR_EXGMAC); - rtl_eri_write(tp, 0x1dc, ERIAR_MASK_1111, 0x00000005, - ERIAR_EXGMAC); + rtl_eri_write(tp, 0x1bc, ERIAR_MASK_1111, 0x0000001f); + rtl_eri_write(tp, 0x1dc, ERIAR_MASK_1111, 0x00000005); } else { - rtl_eri_write(tp, 0x1bc, ERIAR_MASK_1111, 0x0000001f, - ERIAR_EXGMAC); - rtl_eri_write(tp, 0x1dc, ERIAR_MASK_1111, 0x0000003f, - ERIAR_EXGMAC); + rtl_eri_write(tp, 0x1bc, ERIAR_MASK_1111, 0x0000001f); + rtl_eri_write(tp, 0x1dc, ERIAR_MASK_1111, 0x0000003f); } - /* Reset packet filter */ - rtl_w0w1_eri(tp, 0xdc, ERIAR_MASK_0001, 0x00, 0x01, - ERIAR_EXGMAC); - rtl_w0w1_eri(tp, 0xdc, ERIAR_MASK_0001, 0x01, 0x00, - ERIAR_EXGMAC); + rtl_reset_packet_filter(tp); } else if (tp->mac_version == RTL_GIGA_MAC_VER_35 || tp->mac_version == RTL_GIGA_MAC_VER_36) { if (phydev->speed == SPEED_1000) { - rtl_eri_write(tp, 0x1bc, ERIAR_MASK_1111, 0x00000011, - ERIAR_EXGMAC); - rtl_eri_write(tp, 0x1dc, ERIAR_MASK_1111, 0x00000005, - ERIAR_EXGMAC); + rtl_eri_write(tp, 0x1bc, ERIAR_MASK_1111, 0x00000011); + rtl_eri_write(tp, 0x1dc, ERIAR_MASK_1111, 0x00000005); } else { - rtl_eri_write(tp, 0x1bc, ERIAR_MASK_1111, 0x0000001f, - ERIAR_EXGMAC); - rtl_eri_write(tp, 0x1dc, ERIAR_MASK_1111, 0x0000003f, - ERIAR_EXGMAC); + rtl_eri_write(tp, 0x1bc, ERIAR_MASK_1111, 0x0000001f); + rtl_eri_write(tp, 0x1dc, ERIAR_MASK_1111, 0x0000003f); } } else if (tp->mac_version == RTL_GIGA_MAC_VER_37) { if (phydev->speed == SPEED_10) { - rtl_eri_write(tp, 0x1d0, ERIAR_MASK_0011, 0x4d02, - ERIAR_EXGMAC); - rtl_eri_write(tp, 0x1dc, ERIAR_MASK_0011, 0x0060, - ERIAR_EXGMAC); + rtl_eri_write(tp, 0x1d0, ERIAR_MASK_0011, 0x4d02); + rtl_eri_write(tp, 0x1dc, ERIAR_MASK_0011, 0x0060); } else { - rtl_eri_write(tp, 0x1d0, ERIAR_MASK_0011, 0x0000, - ERIAR_EXGMAC); + rtl_eri_write(tp, 0x1d0, ERIAR_MASK_0011, 0x0000); } } } @@@ -1409,19 -1404,11 +1404,11 @@@ static void __rtl8169_set_wol(struct rt case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_51: tmp = ARRAY_SIZE(cfg) - 1; if (wolopts & WAKE_MAGIC) - rtl_w0w1_eri(tp, - 0x0dc, - ERIAR_MASK_0100, - MagicPacket_v2, - 0x0000, - ERIAR_EXGMAC); + rtl_eri_set_bits(tp, 0x0dc, ERIAR_MASK_0100, + MagicPacket_v2); else - rtl_w0w1_eri(tp, - 0x0dc, - ERIAR_MASK_0100, - 0x0000, - MagicPacket_v2, - ERIAR_EXGMAC); + rtl_eri_clear_bits(tp, 0x0dc, ERIAR_MASK_0100, + MagicPacket_v2); break; default: tmp = ARRAY_SIZE(cfg); @@@ -2293,8 -2280,8 +2280,8 @@@ struct phy_reg u16 val; };
- static void rtl_writephy_batch(struct rtl8169_private *tp, - const struct phy_reg *regs, int len) + static void __rtl_writephy_batch(struct rtl8169_private *tp, + const struct phy_reg *regs, int len) { while (len-- > 0) { rtl_writephy(tp, regs->reg, regs->val); @@@ -2302,6 -2289,8 +2289,8 @@@ } }
+ #define rtl_writephy_batch(tp, a) __rtl_writephy_batch(tp, a, ARRAY_SIZE(a)) + #define PHY_READ 0x00000000 #define PHY_DATA_OR 0x10000000 #define PHY_DATA_AND 0x20000000 @@@ -2564,7 -2553,11 +2553,11 @@@ static void rtl_apply_firmware_cond(str
static void rtl8168_config_eee_mac(struct rtl8169_private *tp) { - rtl_w0w1_eri(tp, 0x1b0, ERIAR_MASK_1111, 0x0003, 0x0000, ERIAR_EXGMAC); + /* Adjust EEE LED frequency */ + if (tp->mac_version != RTL_GIGA_MAC_VER_38) + RTL_W8(tp, EEE_LED, RTL_R8(tp, EEE_LED) & ~0x07); + + rtl_eri_set_bits(tp, 0x1b0, ERIAR_MASK_1111, 0x0003); }
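The rtl_writephy_batch() macro introduced above folds the length argument into the call via ARRAY_SIZE, which is why every call site below shrinks by one argument and can no longer pass a stale count. The pattern in miniature (valid only for true arrays, not decayed pointers):

#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

struct phy_reg { unsigned short reg, val; };

static void write_batch(const struct phy_reg *regs, int len)
{
	while (len-- > 0) {
		printf("phy[%#x] = %#x\n", regs->reg, regs->val);
		regs++;
	}
}

/* Callers no longer repeat (and possibly mistype) the array length. */
#define writephy_batch(a) write_batch(a, ARRAY_SIZE(a))

int main(void)
{
	static const struct phy_reg init[] = {
		{ 0x1f, 0x0001 }, { 0x06, 0x4064 }, { 0x1f, 0x0000 },
	};

	writephy_batch(init);
	return 0;
}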
static void rtl8168f_config_eee_phy(struct rtl8169_private *tp) @@@ -2653,7 -2646,7 +2646,7 @@@ static void rtl8169s_hw_phy_config(stru { 0x00, 0x9200 } };
- rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init)); + rtl_writephy_batch(tp, phy_reg_init); }
static void rtl8169sb_hw_phy_config(struct rtl8169_private *tp) @@@ -2664,7 -2657,7 +2657,7 @@@ { 0x1f, 0x0000 } };
- rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init)); + rtl_writephy_batch(tp, phy_reg_init); }
static void rtl8169scd_hw_phy_config_quirk(struct rtl8169_private *tp) @@@ -2722,7 -2715,7 +2715,7 @@@ static void rtl8169scd_hw_phy_config(st { 0x1f, 0x0000 } };
- rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init)); + rtl_writephy_batch(tp, phy_reg_init);
rtl8169scd_hw_phy_config_quirk(tp); } @@@ -2777,7 -2770,7 +2770,7 @@@ static void rtl8169sce_hw_phy_config(st { 0x1f, 0x0000 } };
- rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init)); + rtl_writephy_batch(tp, phy_reg_init); }
static void rtl8168bb_hw_phy_config(struct rtl8169_private *tp) @@@ -2790,7 -2783,7 +2783,7 @@@ rtl_writephy(tp, 0x1f, 0x0001); rtl_patchphy(tp, 0x16, 1 << 0);
- rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init)); + rtl_writephy_batch(tp, phy_reg_init); }
static void rtl8168bef_hw_phy_config(struct rtl8169_private *tp) @@@ -2801,7 -2794,7 +2794,7 @@@ { 0x1f, 0x0000 } };
- rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init)); + rtl_writephy_batch(tp, phy_reg_init); }
static void rtl8168cp_1_hw_phy_config(struct rtl8169_private *tp) @@@ -2814,7 -2807,7 +2807,7 @@@ { 0x1f, 0x0000 } };
- rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init)); + rtl_writephy_batch(tp, phy_reg_init); }
static void rtl8168cp_2_hw_phy_config(struct rtl8169_private *tp) @@@ -2829,7 -2822,7 +2822,7 @@@ rtl_patchphy(tp, 0x14, 1 << 5); rtl_patchphy(tp, 0x0d, 1 << 5);
- rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init)); + rtl_writephy_batch(tp, phy_reg_init); }
static void rtl8168c_1_hw_phy_config(struct rtl8169_private *tp) @@@ -2854,7 -2847,7 +2847,7 @@@ { 0x09, 0x0000 } };
- rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init)); + rtl_writephy_batch(tp, phy_reg_init);
rtl_patchphy(tp, 0x14, 1 << 5); rtl_patchphy(tp, 0x0d, 1 << 5); @@@ -2881,7 -2874,7 +2874,7 @@@ static void rtl8168c_2_hw_phy_config(st { 0x1f, 0x0000 } };
- rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init)); + rtl_writephy_batch(tp, phy_reg_init);
rtl_patchphy(tp, 0x16, 1 << 0); rtl_patchphy(tp, 0x14, 1 << 5); @@@ -2903,7 -2896,7 +2896,7 @@@ static void rtl8168c_3_hw_phy_config(st { 0x1f, 0x0000 } };
- rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init)); + rtl_writephy_batch(tp, phy_reg_init);
rtl_patchphy(tp, 0x16, 1 << 0); rtl_patchphy(tp, 0x14, 1 << 5); @@@ -2959,7 -2952,7 +2952,7 @@@ static void rtl8168d_1_hw_phy_config(st { 0x0d, 0xf880 } };
- rtl_writephy_batch(tp, phy_reg_init_0, ARRAY_SIZE(phy_reg_init_0)); + rtl_writephy_batch(tp, phy_reg_init_0);
/* * Rx Error Issue @@@ -2980,7 -2973,7 +2973,7 @@@ }; int val;
- rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init)); + rtl_writephy_batch(tp, phy_reg_init);
val = rtl_readphy(tp, 0x0d);
@@@ -3006,7 -2999,7 +2999,7 @@@ { 0x06, 0x6662 } };
- rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init)); + rtl_writephy_batch(tp, phy_reg_init); }
/* RSET couple improve */ @@@ -3070,7 -3063,7 +3063,7 @@@ static void rtl8168d_2_hw_phy_config(st { 0x0d, 0xf880 } };
- rtl_writephy_batch(tp, phy_reg_init_0, ARRAY_SIZE(phy_reg_init_0)); + rtl_writephy_batch(tp, phy_reg_init_0);
if (rtl8168d_efuse_read(tp, 0x01) == 0xb1) { static const struct phy_reg phy_reg_init[] = { @@@ -3084,7 -3077,7 +3077,7 @@@ }; int val;
- rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init)); + rtl_writephy_batch(tp, phy_reg_init);
val = rtl_readphy(tp, 0x0d); if ((val & 0x00ff) != 0x006c) { @@@ -3109,7 -3102,7 +3102,7 @@@ { 0x06, 0x2642 } };
- rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init)); + rtl_writephy_batch(tp, phy_reg_init); }
/* Fine tune PLL performance */ @@@ -3187,7 -3180,7 +3180,7 @@@ static void rtl8168d_3_hw_phy_config(st { 0x1f, 0x0000 } };
- rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init)); + rtl_writephy_batch(tp, phy_reg_init); }
static void rtl8168d_4_hw_phy_config(struct rtl8169_private *tp) @@@ -3202,7 -3195,7 +3195,7 @@@ { 0x1f, 0x0000 } };
- rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init)); + rtl_writephy_batch(tp, phy_reg_init); rtl_patchphy(tp, 0x0d, 1 << 5); }
@@@ -3238,7 -3231,7 +3231,7 @@@ static void rtl8168e_1_hw_phy_config(st
rtl_apply_firmware(tp);
- rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init)); + rtl_writephy_batch(tp, phy_reg_init);
/* DCO enable for 10M IDLE Power */ rtl_writephy(tp, 0x1f, 0x0007); @@@ -3286,14 -3279,11 +3279,11 @@@ static void rtl_rar_exgmac_set(struct r addr[2] | (addr[3] << 8), addr[4] | (addr[5] << 8) }; - const struct exgmac_reg e[] = { - { .addr = 0xe0, ERIAR_MASK_1111, .val = w[0] | (w[1] << 16) }, - { .addr = 0xe4, ERIAR_MASK_1111, .val = w[2] }, - { .addr = 0xf0, ERIAR_MASK_1111, .val = w[0] << 16 }, - { .addr = 0xf4, ERIAR_MASK_1111, .val = w[1] | (w[2] << 16) } - };
- rtl_write_exgmac_batch(tp, e, ARRAY_SIZE(e)); + rtl_eri_write(tp, 0xe0, ERIAR_MASK_1111, w[0] | (w[1] << 16)); + rtl_eri_write(tp, 0xe4, ERIAR_MASK_1111, w[2]); + rtl_eri_write(tp, 0xf0, ERIAR_MASK_1111, w[0] << 16); + rtl_eri_write(tp, 0xf4, ERIAR_MASK_1111, w[1] | (w[2] << 16)); }
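rtl_rar_exgmac_set() above first folds the six MAC-address bytes into three little-endian 16-bit words, then writes them out as the register payloads shown in the hunk. Just the packing step, runnable on its own:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	const uint8_t addr[6] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };
	const uint16_t w[3] = {
		(uint16_t)(addr[0] | (addr[1] << 8)),
		(uint16_t)(addr[2] | (addr[3] << 8)),
		(uint16_t)(addr[4] | (addr[5] << 8)),
	};

	/* Payloads as written above: 0xe0 <- w[0] | w[1] << 16, 0xe4 <- w[2] */
	printf("0xe0 <- %#010x\n", (unsigned)((uint32_t)w[0] | ((uint32_t)w[1] << 16)));
	printf("0xe4 <- %#010x\n", (unsigned)w[2]);
	return 0;
}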
static void rtl8168e_2_hw_phy_config(struct rtl8169_private *tp) @@@ -3327,7 -3317,7 +3317,7 @@@
rtl_apply_firmware(tp);
- rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init)); + rtl_writephy_batch(tp, phy_reg_init);
/* For 4-corner performance improve */ rtl_writephy(tp, 0x1f, 0x0005); @@@ -3436,7 -3426,7 +3426,7 @@@ static void rtl8168f_1_hw_phy_config(st
rtl_apply_firmware(tp);
- rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init)); + rtl_writephy_batch(tp, phy_reg_init);
rtl8168f_hw_phy_config(tp);
@@@ -3502,7 -3492,7 +3492,7 @@@ static void rtl8411_hw_phy_config(struc rtl_w0w1_phy(tp, 0x06, 0x4000, 0x0000); rtl_writephy(tp, 0x1f, 0x0000);
- rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init)); + rtl_writephy_batch(tp, phy_reg_init);
/* Modify green table for giga */ rtl_writephy(tp, 0x1f, 0x0005); @@@ -3922,7 -3912,7 +3912,7 @@@ static void rtl8102e_hw_phy_config(stru rtl_patchphy(tp, 0x19, 1 << 13); rtl_patchphy(tp, 0x10, 1 << 15);
- rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init)); + rtl_writephy_batch(tp, phy_reg_init); }
static void rtl8105e_hw_phy_config(struct rtl8169_private *tp) @@@ -3948,7 -3938,7 +3938,7 @@@
rtl_apply_firmware(tp);
- rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init)); + rtl_writephy_batch(tp, phy_reg_init); }
static void rtl8402_hw_phy_config(struct rtl8169_private *tp) @@@ -3961,7 -3951,7 +3951,7 @@@ rtl_apply_firmware(tp);
/* EEE setting */ - rtl_eri_write(tp, 0x1b0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC); + rtl_eri_write(tp, 0x1b0, ERIAR_MASK_0011, 0x0000); rtl_writephy(tp, 0x1f, 0x0004); rtl_writephy(tp, 0x10, 0x401f); rtl_writephy(tp, 0x19, 0x7030); @@@ -3984,139 -3974,73 +3974,73 @@@ static void rtl8106e_hw_phy_config(stru
rtl_apply_firmware(tp);
- rtl_eri_write(tp, 0x1b0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC); - rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init)); + rtl_eri_write(tp, 0x1b0, ERIAR_MASK_0011, 0x0000); + rtl_writephy_batch(tp, phy_reg_init);
- rtl_eri_write(tp, 0x1d0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC); + rtl_eri_write(tp, 0x1d0, ERIAR_MASK_0011, 0x0000); }
static void rtl_hw_phy_config(struct net_device *dev) { + static const rtl_generic_fct phy_configs[] = { + /* PCI devices. */ + [RTL_GIGA_MAC_VER_01] = NULL, + [RTL_GIGA_MAC_VER_02] = rtl8169s_hw_phy_config, + [RTL_GIGA_MAC_VER_03] = rtl8169s_hw_phy_config, + [RTL_GIGA_MAC_VER_04] = rtl8169sb_hw_phy_config, + [RTL_GIGA_MAC_VER_05] = rtl8169scd_hw_phy_config, + [RTL_GIGA_MAC_VER_06] = rtl8169sce_hw_phy_config, + /* PCI-E devices. */ + [RTL_GIGA_MAC_VER_07] = rtl8102e_hw_phy_config, + [RTL_GIGA_MAC_VER_08] = rtl8102e_hw_phy_config, + [RTL_GIGA_MAC_VER_09] = rtl8102e_hw_phy_config, + [RTL_GIGA_MAC_VER_10] = NULL, + [RTL_GIGA_MAC_VER_11] = rtl8168bb_hw_phy_config, + [RTL_GIGA_MAC_VER_12] = rtl8168bef_hw_phy_config, + [RTL_GIGA_MAC_VER_13] = NULL, + [RTL_GIGA_MAC_VER_14] = NULL, + [RTL_GIGA_MAC_VER_15] = NULL, + [RTL_GIGA_MAC_VER_16] = NULL, + [RTL_GIGA_MAC_VER_17] = rtl8168bef_hw_phy_config, + [RTL_GIGA_MAC_VER_18] = rtl8168cp_1_hw_phy_config, + [RTL_GIGA_MAC_VER_19] = rtl8168c_1_hw_phy_config, + [RTL_GIGA_MAC_VER_20] = rtl8168c_2_hw_phy_config, + [RTL_GIGA_MAC_VER_21] = rtl8168c_3_hw_phy_config, + [RTL_GIGA_MAC_VER_22] = rtl8168c_4_hw_phy_config, + [RTL_GIGA_MAC_VER_23] = rtl8168cp_2_hw_phy_config, + [RTL_GIGA_MAC_VER_24] = rtl8168cp_2_hw_phy_config, + [RTL_GIGA_MAC_VER_25] = rtl8168d_1_hw_phy_config, + [RTL_GIGA_MAC_VER_26] = rtl8168d_2_hw_phy_config, + [RTL_GIGA_MAC_VER_27] = rtl8168d_3_hw_phy_config, + [RTL_GIGA_MAC_VER_28] = rtl8168d_4_hw_phy_config, + [RTL_GIGA_MAC_VER_29] = rtl8105e_hw_phy_config, + [RTL_GIGA_MAC_VER_30] = rtl8105e_hw_phy_config, + [RTL_GIGA_MAC_VER_31] = NULL, + [RTL_GIGA_MAC_VER_32] = rtl8168e_1_hw_phy_config, + [RTL_GIGA_MAC_VER_33] = rtl8168e_1_hw_phy_config, + [RTL_GIGA_MAC_VER_34] = rtl8168e_2_hw_phy_config, + [RTL_GIGA_MAC_VER_35] = rtl8168f_1_hw_phy_config, + [RTL_GIGA_MAC_VER_36] = rtl8168f_2_hw_phy_config, + [RTL_GIGA_MAC_VER_37] = rtl8402_hw_phy_config, + [RTL_GIGA_MAC_VER_38] = rtl8411_hw_phy_config, + [RTL_GIGA_MAC_VER_39] = rtl8106e_hw_phy_config, + [RTL_GIGA_MAC_VER_40] = rtl8168g_1_hw_phy_config, + [RTL_GIGA_MAC_VER_41] = NULL, + [RTL_GIGA_MAC_VER_42] = rtl8168g_2_hw_phy_config, + [RTL_GIGA_MAC_VER_43] = rtl8168g_2_hw_phy_config, + [RTL_GIGA_MAC_VER_44] = rtl8168g_2_hw_phy_config, + [RTL_GIGA_MAC_VER_45] = rtl8168h_1_hw_phy_config, + [RTL_GIGA_MAC_VER_46] = rtl8168h_2_hw_phy_config, + [RTL_GIGA_MAC_VER_47] = rtl8168h_1_hw_phy_config, + [RTL_GIGA_MAC_VER_48] = rtl8168h_2_hw_phy_config, + [RTL_GIGA_MAC_VER_49] = rtl8168ep_1_hw_phy_config, + [RTL_GIGA_MAC_VER_50] = rtl8168ep_2_hw_phy_config, + [RTL_GIGA_MAC_VER_51] = rtl8168ep_2_hw_phy_config, + }; struct rtl8169_private *tp = netdev_priv(dev);
- switch (tp->mac_version) { - case RTL_GIGA_MAC_VER_01: - break; - case RTL_GIGA_MAC_VER_02: - case RTL_GIGA_MAC_VER_03: - rtl8169s_hw_phy_config(tp); - break; - case RTL_GIGA_MAC_VER_04: - rtl8169sb_hw_phy_config(tp); - break; - case RTL_GIGA_MAC_VER_05: - rtl8169scd_hw_phy_config(tp); - break; - case RTL_GIGA_MAC_VER_06: - rtl8169sce_hw_phy_config(tp); - break; - case RTL_GIGA_MAC_VER_07: - case RTL_GIGA_MAC_VER_08: - case RTL_GIGA_MAC_VER_09: - rtl8102e_hw_phy_config(tp); - break; - case RTL_GIGA_MAC_VER_11: - rtl8168bb_hw_phy_config(tp); - break; - case RTL_GIGA_MAC_VER_12: - rtl8168bef_hw_phy_config(tp); - break; - case RTL_GIGA_MAC_VER_17: - rtl8168bef_hw_phy_config(tp); - break; - case RTL_GIGA_MAC_VER_18: - rtl8168cp_1_hw_phy_config(tp); - break; - case RTL_GIGA_MAC_VER_19: - rtl8168c_1_hw_phy_config(tp); - break; - case RTL_GIGA_MAC_VER_20: - rtl8168c_2_hw_phy_config(tp); - break; - case RTL_GIGA_MAC_VER_21: - rtl8168c_3_hw_phy_config(tp); - break; - case RTL_GIGA_MAC_VER_22: - rtl8168c_4_hw_phy_config(tp); - break; - case RTL_GIGA_MAC_VER_23: - case RTL_GIGA_MAC_VER_24: - rtl8168cp_2_hw_phy_config(tp); - break; - case RTL_GIGA_MAC_VER_25: - rtl8168d_1_hw_phy_config(tp); - break; - case RTL_GIGA_MAC_VER_26: - rtl8168d_2_hw_phy_config(tp); - break; - case RTL_GIGA_MAC_VER_27: - rtl8168d_3_hw_phy_config(tp); - break; - case RTL_GIGA_MAC_VER_28: - rtl8168d_4_hw_phy_config(tp); - break; - case RTL_GIGA_MAC_VER_29: - case RTL_GIGA_MAC_VER_30: - rtl8105e_hw_phy_config(tp); - break; - case RTL_GIGA_MAC_VER_31: - /* None. */ - break; - case RTL_GIGA_MAC_VER_32: - case RTL_GIGA_MAC_VER_33: - rtl8168e_1_hw_phy_config(tp); - break; - case RTL_GIGA_MAC_VER_34: - rtl8168e_2_hw_phy_config(tp); - break; - case RTL_GIGA_MAC_VER_35: - rtl8168f_1_hw_phy_config(tp); - break; - case RTL_GIGA_MAC_VER_36: - rtl8168f_2_hw_phy_config(tp); - break; - - case RTL_GIGA_MAC_VER_37: - rtl8402_hw_phy_config(tp); - break; - - case RTL_GIGA_MAC_VER_38: - rtl8411_hw_phy_config(tp); - break; - - case RTL_GIGA_MAC_VER_39: - rtl8106e_hw_phy_config(tp); - break; - - case RTL_GIGA_MAC_VER_40: - rtl8168g_1_hw_phy_config(tp); - break; - case RTL_GIGA_MAC_VER_42: - case RTL_GIGA_MAC_VER_43: - case RTL_GIGA_MAC_VER_44: - rtl8168g_2_hw_phy_config(tp); - break; - case RTL_GIGA_MAC_VER_45: - case RTL_GIGA_MAC_VER_47: - rtl8168h_1_hw_phy_config(tp); - break; - case RTL_GIGA_MAC_VER_46: - case RTL_GIGA_MAC_VER_48: - rtl8168h_2_hw_phy_config(tp); - break; - - case RTL_GIGA_MAC_VER_49: - rtl8168ep_1_hw_phy_config(tp); - break; - case RTL_GIGA_MAC_VER_50: - case RTL_GIGA_MAC_VER_51: - rtl8168ep_2_hw_phy_config(tp); - break; - - case RTL_GIGA_MAC_VER_41: - default: - break; - } + if (phy_configs[tp->mac_version]) + phy_configs[tp->mac_version](tp); }
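The rewrite above replaces a switch of well over a hundred lines with a function-pointer table indexed by mac_version; NULL entries mark chips whose PHY needs no configuration, and designated initializers keep each version's handler visible at a glance. The dispatch idiom, reduced to its essentials:

#include <stdio.h>

enum mac_ver { VER_01, VER_02, VER_03, NR_VERS };

typedef void (*config_fct)(void);

static void cfg_a(void) { puts("config A"); }
static void cfg_b(void) { puts("config B"); }

/* NULL entries are legal: those versions simply skip configuration. */
static const config_fct configs[NR_VERS] = {
	[VER_01] = NULL,
	[VER_02] = cfg_a,
	[VER_03] = cfg_b,
};

int main(void)
{
	enum mac_ver v = VER_02;

	if (configs[v])
		configs[v]();	/* prints "config A" */
	return 0;
}

One table entry per version also makes gaps obvious: an unhandled new version reads as a missing initializer rather than a silently absent case label.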
static void rtl_schedule_task(struct rtl8169_private *tp, enum rtl_flag flag) @@@ -4147,14 -4071,6 +4071,6 @@@ static void rtl8169_init_phy(struct net phy_speed_up(tp->phydev);
genphy_soft_reset(tp->phydev); - - /* It was reported that several chips end up with 10MBit/Half on a - * 1GBit link after resuming from S3. For whatever reason the PHY on - * these chips doesn't properly start a renegotiation when soft-reset. - * Explicitly requesting a renegotiation fixes this. - */ - if (tp->phydev->autoneg == AUTONEG_ENABLE) - phy_restart_aneg(tp->phydev); }
static void rtl_rar_set(struct rtl8169_private *tp, u8 *addr) @@@ -4283,8 -4199,7 +4199,7 @@@ static void r8168_pll_power_down(struc case RTL_GIGA_MAC_VER_40: case RTL_GIGA_MAC_VER_41: case RTL_GIGA_MAC_VER_49: - rtl_w0w1_eri(tp, 0x1a8, ERIAR_MASK_1111, 0x00000000, - 0xfc000000, ERIAR_EXGMAC); + rtl_eri_clear_bits(tp, 0x1a8, ERIAR_MASK_1111, 0xfc000000); RTL_W8(tp, PMCH, RTL_R8(tp, PMCH) & ~0x80); break; } @@@ -4312,8 -4227,7 +4227,7 @@@ static void r8168_pll_power_up(struct r case RTL_GIGA_MAC_VER_41: case RTL_GIGA_MAC_VER_49: RTL_W8(tp, PMCH, RTL_R8(tp, PMCH) | 0xc0); - rtl_w0w1_eri(tp, 0x1a8, ERIAR_MASK_1111, 0xfc000000, - 0x00000000, ERIAR_EXGMAC); + rtl_eri_set_bits(tp, 0x1a8, ERIAR_MASK_1111, 0xfc000000); break; }
@@@ -4703,6 -4617,8 +4617,8 @@@ static void rtl_hw_start(struct rtl816 rtl_set_rx_tx_desc_registers(tp); rtl_lock_config_regs(tp);
+ /* disable interrupt coalescing */ + RTL_W16(tp, IntrMitigate, 0x0000); /* Initially a 10 us delay. Turned it into a PCI commit. - FR */ RTL_R8(tp, IntrMask); RTL_W8(tp, ChipCmd, CmdTxEnb | CmdRxEnb); @@@ -4735,12 -4651,6 +4651,6 @@@ static void rtl_hw_start_8169(struct rt
rtl8169_set_magic_reg(tp, tp->mac_version);
- /* - * Undocumented corner. Supposedly: - * (TxTimer << 12) | (TxPackets << 8) | (RxTimer << 4) | RxPackets - */ - RTL_W16(tp, IntrMitigate, 0x0000); - RTL_W32(tp, RxMissed, 0); }
@@@ -4801,8 -4711,8 +4711,8 @@@ struct ephy_info u16 bits; };
- static void rtl_ephy_init(struct rtl8169_private *tp, const struct ephy_info *e, - int len) + static void __rtl_ephy_init(struct rtl8169_private *tp, + const struct ephy_info *e, int len) { u16 w;
@@@ -4813,6 -4723,8 +4723,8 @@@ } }
+ #define rtl_ephy_init(tp, a) __rtl_ephy_init(tp, a, ARRAY_SIZE(a)) + static void rtl_disable_clock_request(struct rtl8169_private *tp) { pcie_capability_clear_word(tp->pci_dev, PCI_EXP_LNKCTL, @@@ -4844,6 -4756,24 +4756,24 @@@ static void rtl_hw_aspm_clkreq_enable(s udelay(10); }
+ static void rtl_set_fifo_size(struct rtl8169_private *tp, u16 rx_stat, + u16 tx_stat, u16 rx_dyn, u16 tx_dyn) + { + /* Usage of dynamic vs. static FIFO is controlled by bit + * TXCFG_AUTO_FIFO. Exact meaning of FIFO values isn't known. + */ + rtl_eri_write(tp, 0xc8, ERIAR_MASK_1111, (rx_stat << 16) | rx_dyn); + rtl_eri_write(tp, 0xe8, ERIAR_MASK_1111, (tx_stat << 16) | tx_dyn); + } + + static void rtl8168g_set_pause_thresholds(struct rtl8169_private *tp, + u8 low, u8 high) + { + /* FIFO thresholds for pause flow control */ + rtl_eri_write(tp, 0xcc, ERIAR_MASK_0001, low); + rtl_eri_write(tp, 0xd0, ERIAR_MASK_0001, high); + } + static void rtl_hw_start_8168bb(struct rtl8169_private *tp) { RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Beacon_en); @@@ -4893,7 -4823,7 +4823,7 @@@ static void rtl_hw_start_8168cp_1(struc
rtl_set_def_aspm_entry_latency(tp);
- rtl_ephy_init(tp, e_info_8168cp, ARRAY_SIZE(e_info_8168cp)); + rtl_ephy_init(tp, e_info_8168cp);
__rtl_hw_start_8168cp(tp); } @@@ -4941,7 -4871,7 +4871,7 @@@ static void rtl_hw_start_8168c_1(struc
RTL_W8(tp, DBG_REG, 0x06 | FIX_NAK_1 | FIX_NAK_2);
- rtl_ephy_init(tp, e_info_8168c_1, ARRAY_SIZE(e_info_8168c_1)); + rtl_ephy_init(tp, e_info_8168c_1);
__rtl_hw_start_8168cp(tp); } @@@ -4955,7 -4885,7 +4885,7 @@@ static void rtl_hw_start_8168c_2(struc
rtl_set_def_aspm_entry_latency(tp);
- rtl_ephy_init(tp, e_info_8168c_2, ARRAY_SIZE(e_info_8168c_2)); + rtl_ephy_init(tp, e_info_8168c_2);
__rtl_hw_start_8168cp(tp); } @@@ -5013,7 -4943,7 +4943,7 @@@ static void rtl_hw_start_8168d_4(struc
RTL_W8(tp, MaxTxPacketSize, TxPacketMax);
- rtl_ephy_init(tp, e_info_8168d_4, ARRAY_SIZE(e_info_8168d_4)); + rtl_ephy_init(tp, e_info_8168d_4);
rtl_enable_clock_request(tp); } @@@ -5038,7 -4968,7 +4968,7 @@@ static void rtl_hw_start_8168e_1(struc
rtl_set_def_aspm_entry_latency(tp);
- rtl_ephy_init(tp, e_info_8168e_1, ARRAY_SIZE(e_info_8168e_1)); + rtl_ephy_init(tp, e_info_8168e_1);
if (tp->dev->mtu <= ETH_DATA_LEN) rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B); @@@ -5063,19 -4993,18 +4993,18 @@@ static void rtl_hw_start_8168e_2(struc
rtl_set_def_aspm_entry_latency(tp);
- rtl_ephy_init(tp, e_info_8168e_2, ARRAY_SIZE(e_info_8168e_2)); + rtl_ephy_init(tp, e_info_8168e_2);
if (tp->dev->mtu <= ETH_DATA_LEN) rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B);
- rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC); - rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC); - rtl_eri_write(tp, 0xc8, ERIAR_MASK_1111, 0x00100002, ERIAR_EXGMAC); - rtl_eri_write(tp, 0xe8, ERIAR_MASK_1111, 0x00100006, ERIAR_EXGMAC); - rtl_eri_write(tp, 0xcc, ERIAR_MASK_1111, 0x00000050, ERIAR_EXGMAC); - rtl_eri_write(tp, 0xd0, ERIAR_MASK_1111, 0x07ff0060, ERIAR_EXGMAC); - rtl_w0w1_eri(tp, 0x1b0, ERIAR_MASK_0001, 0x10, 0x00, ERIAR_EXGMAC); - rtl_w0w1_eri(tp, 0x0d4, ERIAR_MASK_0011, 0x0c00, 0xff00, ERIAR_EXGMAC); + rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000); + rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000); + rtl_set_fifo_size(tp, 0x10, 0x10, 0x02, 0x06); + rtl_eri_write(tp, 0xcc, ERIAR_MASK_1111, 0x00000050); + rtl_eri_write(tp, 0xd0, ERIAR_MASK_1111, 0x07ff0060); + rtl_eri_set_bits(tp, 0x1b0, ERIAR_MASK_0001, BIT(4)); + rtl_w0w1_eri(tp, 0x0d4, ERIAR_MASK_0011, 0x0c00, 0xff00);
RTL_W8(tp, MaxTxPacketSize, EarlySize);
@@@ -5083,9 -5012,6 +5012,6 @@@
RTL_W8(tp, MCU, RTL_R8(tp, MCU) & ~NOW_IS_OOB);
- /* Adjust EEE LED frequency */ - RTL_W8(tp, EEE_LED, RTL_R8(tp, EEE_LED) & ~0x07); - rtl8168_config_eee_mac(tp);
RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) | PFM_EN); @@@ -5101,16 -5027,14 +5027,14 @@@ static void rtl_hw_start_8168f(struct r
rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B);
- rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC); - rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC); - rtl_eri_write(tp, 0xc8, ERIAR_MASK_1111, 0x00100002, ERIAR_EXGMAC); - rtl_eri_write(tp, 0xe8, ERIAR_MASK_1111, 0x00100006, ERIAR_EXGMAC); - rtl_w0w1_eri(tp, 0xdc, ERIAR_MASK_0001, 0x00, 0x01, ERIAR_EXGMAC); - rtl_w0w1_eri(tp, 0xdc, ERIAR_MASK_0001, 0x01, 0x00, ERIAR_EXGMAC); - rtl_w0w1_eri(tp, 0x1b0, ERIAR_MASK_0001, 0x10, 0x00, ERIAR_EXGMAC); - rtl_w0w1_eri(tp, 0x1d0, ERIAR_MASK_0001, 0x10, 0x00, ERIAR_EXGMAC); - rtl_eri_write(tp, 0xcc, ERIAR_MASK_1111, 0x00000050, ERIAR_EXGMAC); - rtl_eri_write(tp, 0xd0, ERIAR_MASK_1111, 0x00000060, ERIAR_EXGMAC); + rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000); + rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000); + rtl_set_fifo_size(tp, 0x10, 0x10, 0x02, 0x06); + rtl_reset_packet_filter(tp); + rtl_eri_set_bits(tp, 0x1b0, ERIAR_MASK_0001, BIT(4)); + rtl_eri_set_bits(tp, 0x1d0, ERIAR_MASK_0001, BIT(4)); + rtl_eri_write(tp, 0xcc, ERIAR_MASK_1111, 0x00000050); + rtl_eri_write(tp, 0xd0, ERIAR_MASK_1111, 0x00000060);
RTL_W8(tp, MaxTxPacketSize, EarlySize);
@@@ -5135,12 -5059,9 +5059,9 @@@ static void rtl_hw_start_8168f_1(struc
rtl_hw_start_8168f(tp);
- rtl_ephy_init(tp, e_info_8168f_1, ARRAY_SIZE(e_info_8168f_1)); - - rtl_w0w1_eri(tp, 0x0d4, ERIAR_MASK_0011, 0x0c00, 0xff00, ERIAR_EXGMAC); + rtl_ephy_init(tp, e_info_8168f_1);
- /* Adjust EEE LED frequency */ - RTL_W8(tp, EEE_LED, RTL_R8(tp, EEE_LED) & ~0x07); + rtl_w0w1_eri(tp, 0x0d4, ERIAR_MASK_0011, 0x0c00, 0xff00); }
static void rtl_hw_start_8411(struct rtl8169_private *tp) @@@ -5155,39 -5076,33 +5076,33 @@@ rtl_hw_start_8168f(tp); rtl_pcie_state_l2l3_disable(tp);
- rtl_ephy_init(tp, e_info_8168f_1, ARRAY_SIZE(e_info_8168f_1)); + rtl_ephy_init(tp, e_info_8168f_1);
- rtl_w0w1_eri(tp, 0x0d4, ERIAR_MASK_0011, 0x0c00, 0x0000, ERIAR_EXGMAC); + rtl_eri_set_bits(tp, 0x0d4, ERIAR_MASK_0011, 0x0c00); }
static void rtl_hw_start_8168g(struct rtl8169_private *tp) { - rtl_eri_write(tp, 0xc8, ERIAR_MASK_0101, 0x080002, ERIAR_EXGMAC); - rtl_eri_write(tp, 0xcc, ERIAR_MASK_0001, 0x38, ERIAR_EXGMAC); - rtl_eri_write(tp, 0xd0, ERIAR_MASK_0001, 0x48, ERIAR_EXGMAC); - rtl_eri_write(tp, 0xe8, ERIAR_MASK_1111, 0x00100006, ERIAR_EXGMAC); + rtl_set_fifo_size(tp, 0x08, 0x10, 0x02, 0x06); + rtl8168g_set_pause_thresholds(tp, 0x38, 0x48);
rtl_set_def_aspm_entry_latency(tp);
rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B);
- rtl_w0w1_eri(tp, 0xdc, ERIAR_MASK_0001, 0x00, 0x01, ERIAR_EXGMAC); - rtl_w0w1_eri(tp, 0xdc, ERIAR_MASK_0001, 0x01, 0x00, ERIAR_EXGMAC); - rtl_eri_write(tp, 0x2f8, ERIAR_MASK_0011, 0x1d8f, ERIAR_EXGMAC); + rtl_reset_packet_filter(tp); + rtl_eri_write(tp, 0x2f8, ERIAR_MASK_0011, 0x1d8f);
RTL_W32(tp, MISC, RTL_R32(tp, MISC) & ~RXDV_GATED_EN); RTL_W8(tp, MaxTxPacketSize, EarlySize);
- rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC); - rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC); - - /* Adjust EEE LED frequency */ - RTL_W8(tp, EEE_LED, RTL_R8(tp, EEE_LED) & ~0x07); + rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000); + rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000);
rtl8168_config_eee_mac(tp);
- rtl_w0w1_eri(tp, 0x2fc, ERIAR_MASK_0001, 0x01, 0x06, ERIAR_EXGMAC); - rtl_w0w1_eri(tp, 0x1b0, ERIAR_MASK_0011, 0x0000, 0x1000, ERIAR_EXGMAC); + rtl_w0w1_eri(tp, 0x2fc, ERIAR_MASK_0001, 0x01, 0x06); + rtl_eri_clear_bits(tp, 0x1b0, ERIAR_MASK_0011, BIT(12));
rtl_pcie_state_l2l3_disable(tp); } @@@ -5205,7 -5120,7 +5120,7 @@@ static void rtl_hw_start_8168g_1(struc
/* disable aspm and clock request before access ephy */ rtl_hw_aspm_clkreq_enable(tp, false); - rtl_ephy_init(tp, e_info_8168g_1, ARRAY_SIZE(e_info_8168g_1)); + rtl_ephy_init(tp, e_info_8168g_1); rtl_hw_aspm_clkreq_enable(tp, true); }
@@@ -5223,7 -5138,7 +5138,7 @@@ static void rtl_hw_start_8168g_2(struc /* disable aspm and clock request before access ephy */ RTL_W8(tp, Config2, RTL_R8(tp, Config2) & ~ClkReqEn); RTL_W8(tp, Config5, RTL_R8(tp, Config5) & ~ASPM_en); - rtl_ephy_init(tp, e_info_8168g_2, ARRAY_SIZE(e_info_8168g_2)); + rtl_ephy_init(tp, e_info_8168g_2); }
static void rtl_hw_start_8411_2(struct rtl8169_private *tp) @@@ -5240,7 -5155,7 +5155,7 @@@
/* disable aspm and clock request before access ephy */ rtl_hw_aspm_clkreq_enable(tp, false); - rtl_ephy_init(tp, e_info_8411_2, ARRAY_SIZE(e_info_8411_2)); + rtl_ephy_init(tp, e_info_8411_2); rtl_hw_aspm_clkreq_enable(tp, true); }
@@@ -5259,34 -5174,28 +5174,28 @@@ static void rtl_hw_start_8168h_1(struc
/* disable aspm and clock request before access ephy */ rtl_hw_aspm_clkreq_enable(tp, false); - rtl_ephy_init(tp, e_info_8168h_1, ARRAY_SIZE(e_info_8168h_1)); + rtl_ephy_init(tp, e_info_8168h_1);
- rtl_eri_write(tp, 0xc8, ERIAR_MASK_0101, 0x00080002, ERIAR_EXGMAC); - rtl_eri_write(tp, 0xcc, ERIAR_MASK_0001, 0x38, ERIAR_EXGMAC); - rtl_eri_write(tp, 0xd0, ERIAR_MASK_0001, 0x48, ERIAR_EXGMAC); - rtl_eri_write(tp, 0xe8, ERIAR_MASK_1111, 0x00100006, ERIAR_EXGMAC); + rtl_set_fifo_size(tp, 0x08, 0x10, 0x02, 0x06); + rtl8168g_set_pause_thresholds(tp, 0x38, 0x48);
rtl_set_def_aspm_entry_latency(tp);
rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B);
- rtl_w0w1_eri(tp, 0xdc, ERIAR_MASK_0001, 0x00, 0x01, ERIAR_EXGMAC); - rtl_w0w1_eri(tp, 0xdc, ERIAR_MASK_0001, 0x01, 0x00, ERIAR_EXGMAC); + rtl_reset_packet_filter(tp);
- rtl_w0w1_eri(tp, 0xdc, ERIAR_MASK_1111, 0x0010, 0x00, ERIAR_EXGMAC); + rtl_eri_set_bits(tp, 0xdc, ERIAR_MASK_1111, BIT(4));
- rtl_w0w1_eri(tp, 0xd4, ERIAR_MASK_1111, 0x1f00, 0x00, ERIAR_EXGMAC); + rtl_eri_set_bits(tp, 0xd4, ERIAR_MASK_1111, 0x1f00);
- rtl_eri_write(tp, 0x5f0, ERIAR_MASK_0011, 0x4f87, ERIAR_EXGMAC); + rtl_eri_write(tp, 0x5f0, ERIAR_MASK_0011, 0x4f87);
RTL_W32(tp, MISC, RTL_R32(tp, MISC) & ~RXDV_GATED_EN); RTL_W8(tp, MaxTxPacketSize, EarlySize);
- rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC); - rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC); - - /* Adjust EEE LED frequency */ - RTL_W8(tp, EEE_LED, RTL_R8(tp, EEE_LED) & ~0x07); + rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000); + rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000);
rtl8168_config_eee_mac(tp);
@@@ -5295,7 -5204,7 +5204,7 @@@
RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) & ~TX_10M_PS_EN);
- rtl_w0w1_eri(tp, 0x1b0, ERIAR_MASK_0011, 0x0000, 0x1000, ERIAR_EXGMAC); + rtl_eri_clear_bits(tp, 0x1b0, ERIAR_MASK_0011, BIT(12));
rtl_pcie_state_l2l3_disable(tp);
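The FIFO hunks replace four raw ERI writes with two named helpers. Mapping the removed constants onto the new arguments (0x00080002 -> 0x08/0x02 at 0xc8, 0x00100006 -> 0x10/0x06 at 0xe8, and the 0xcc/0xd0 thresholds passed through) gives roughly the following; the exact ERI masks are an assumption:

static void rtl_set_fifo_size(struct rtl8169_private *tp, u16 rx_stat,
			      u16 tx_stat, u16 rx_dyn, u16 tx_dyn)
{
	rtl_eri_write(tp, 0xc8, ERIAR_MASK_1111, (rx_stat << 16) | rx_dyn);
	rtl_eri_write(tp, 0xe8, ERIAR_MASK_1111, (tx_stat << 16) | tx_dyn);
}

static void rtl8168g_set_pause_thresholds(struct rtl8169_private *tp,
					  u16 low, u16 high)
{
	/* FIFO thresholds used for pause flow control */
	rtl_eri_write(tp, 0xcc, ERIAR_MASK_0001, low);
	rtl_eri_write(tp, 0xd0, ERIAR_MASK_0001, high);
}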
@@@ -5345,34 -5254,28 +5254,28 @@@ static void rtl_hw_start_8168ep(struct { rtl8168ep_stop_cmac(tp);
- rtl_eri_write(tp, 0xc8, ERIAR_MASK_0101, 0x00080002, ERIAR_EXGMAC); - rtl_eri_write(tp, 0xcc, ERIAR_MASK_0001, 0x2f, ERIAR_EXGMAC); - rtl_eri_write(tp, 0xd0, ERIAR_MASK_0001, 0x5f, ERIAR_EXGMAC); - rtl_eri_write(tp, 0xe8, ERIAR_MASK_1111, 0x00100006, ERIAR_EXGMAC); + rtl_set_fifo_size(tp, 0x08, 0x10, 0x02, 0x06); + rtl8168g_set_pause_thresholds(tp, 0x2f, 0x5f);
rtl_set_def_aspm_entry_latency(tp);
rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B);
- rtl_w0w1_eri(tp, 0xdc, ERIAR_MASK_0001, 0x00, 0x01, ERIAR_EXGMAC); - rtl_w0w1_eri(tp, 0xdc, ERIAR_MASK_0001, 0x01, 0x00, ERIAR_EXGMAC); + rtl_reset_packet_filter(tp);
- rtl_w0w1_eri(tp, 0xd4, ERIAR_MASK_1111, 0x1f80, 0x00, ERIAR_EXGMAC); + rtl_eri_set_bits(tp, 0xd4, ERIAR_MASK_1111, 0x1f80);
- rtl_eri_write(tp, 0x5f0, ERIAR_MASK_0011, 0x4f87, ERIAR_EXGMAC); + rtl_eri_write(tp, 0x5f0, ERIAR_MASK_0011, 0x4f87);
RTL_W32(tp, MISC, RTL_R32(tp, MISC) & ~RXDV_GATED_EN); RTL_W8(tp, MaxTxPacketSize, EarlySize);
- rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC); - rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC); - - /* Adjust EEE LED frequency */ - RTL_W8(tp, EEE_LED, RTL_R8(tp, EEE_LED) & ~0x07); + rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000); + rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000);
rtl8168_config_eee_mac(tp);
- rtl_w0w1_eri(tp, 0x2fc, ERIAR_MASK_0001, 0x01, 0x06, ERIAR_EXGMAC); + rtl_w0w1_eri(tp, 0x2fc, ERIAR_MASK_0001, 0x01, 0x06);
RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) & ~TX_10M_PS_EN);
@@@ -5391,7 -5294,7 +5294,7 @@@ static void rtl_hw_start_8168ep_1(struc
/* disable aspm and clock request before accessing the ephy */ rtl_hw_aspm_clkreq_enable(tp, false); - rtl_ephy_init(tp, e_info_8168ep_1, ARRAY_SIZE(e_info_8168ep_1)); + rtl_ephy_init(tp, e_info_8168ep_1);
rtl_hw_start_8168ep(tp);
@@@ -5408,7 -5311,7 +5311,7 @@@ static void rtl_hw_start_8168ep_2(struc
/* disable aspm and clock request before accessing the ephy */ rtl_hw_aspm_clkreq_enable(tp, false); - rtl_ephy_init(tp, e_info_8168ep_2, ARRAY_SIZE(e_info_8168ep_2)); + rtl_ephy_init(tp, e_info_8168ep_2);
rtl_hw_start_8168ep(tp);
@@@ -5430,7 -5333,7 +5333,7 @@@ static void rtl_hw_start_8168ep_3(struc
/* disable aspm and clock request before accessing the ephy */ rtl_hw_aspm_clkreq_enable(tp, false); - rtl_ephy_init(tp, e_info_8168ep_3, ARRAY_SIZE(e_info_8168ep_3)); + rtl_ephy_init(tp, e_info_8168ep_3);
rtl_hw_start_8168ep(tp);
@@@ -5453,128 -5356,6 +5356,6 @@@ rtl_hw_aspm_clkreq_enable(tp, true); }
- static void rtl_hw_start_8168(struct rtl8169_private *tp) - { - RTL_W8(tp, MaxTxPacketSize, TxPacketMax); - - tp->cp_cmd &= ~INTT_MASK; - tp->cp_cmd |= PktCntrDisable | INTT_1; - RTL_W16(tp, CPlusCmd, tp->cp_cmd); - - RTL_W16(tp, IntrMitigate, 0x5100); - - /* Work around for RxFIFO overflow. */ - if (tp->mac_version == RTL_GIGA_MAC_VER_11) { - tp->irq_mask |= RxFIFOOver; - tp->irq_mask &= ~RxOverflow; - } - - switch (tp->mac_version) { - case RTL_GIGA_MAC_VER_11: - rtl_hw_start_8168bb(tp); - break; - - case RTL_GIGA_MAC_VER_12: - case RTL_GIGA_MAC_VER_17: - rtl_hw_start_8168bef(tp); - break; - - case RTL_GIGA_MAC_VER_18: - rtl_hw_start_8168cp_1(tp); - break; - - case RTL_GIGA_MAC_VER_19: - rtl_hw_start_8168c_1(tp); - break; - - case RTL_GIGA_MAC_VER_20: - rtl_hw_start_8168c_2(tp); - break; - - case RTL_GIGA_MAC_VER_21: - rtl_hw_start_8168c_3(tp); - break; - - case RTL_GIGA_MAC_VER_22: - rtl_hw_start_8168c_4(tp); - break; - - case RTL_GIGA_MAC_VER_23: - rtl_hw_start_8168cp_2(tp); - break; - - case RTL_GIGA_MAC_VER_24: - rtl_hw_start_8168cp_3(tp); - break; - - case RTL_GIGA_MAC_VER_25: - case RTL_GIGA_MAC_VER_26: - case RTL_GIGA_MAC_VER_27: - rtl_hw_start_8168d(tp); - break; - - case RTL_GIGA_MAC_VER_28: - rtl_hw_start_8168d_4(tp); - break; - - case RTL_GIGA_MAC_VER_31: - rtl_hw_start_8168dp(tp); - break; - - case RTL_GIGA_MAC_VER_32: - case RTL_GIGA_MAC_VER_33: - rtl_hw_start_8168e_1(tp); - break; - case RTL_GIGA_MAC_VER_34: - rtl_hw_start_8168e_2(tp); - break; - - case RTL_GIGA_MAC_VER_35: - case RTL_GIGA_MAC_VER_36: - rtl_hw_start_8168f_1(tp); - break; - - case RTL_GIGA_MAC_VER_38: - rtl_hw_start_8411(tp); - break; - - case RTL_GIGA_MAC_VER_40: - case RTL_GIGA_MAC_VER_41: - rtl_hw_start_8168g_1(tp); - break; - case RTL_GIGA_MAC_VER_42: - rtl_hw_start_8168g_2(tp); - break; - - case RTL_GIGA_MAC_VER_44: - rtl_hw_start_8411_2(tp); - break; - - case RTL_GIGA_MAC_VER_45: - case RTL_GIGA_MAC_VER_46: - rtl_hw_start_8168h_1(tp); - break; - - case RTL_GIGA_MAC_VER_49: - rtl_hw_start_8168ep_1(tp); - break; - - case RTL_GIGA_MAC_VER_50: - rtl_hw_start_8168ep_2(tp); - break; - - case RTL_GIGA_MAC_VER_51: - rtl_hw_start_8168ep_3(tp); - break; - - default: - netif_err(tp, drv, tp->dev, - "unknown chipset (mac_version = %d)\n", - tp->mac_version); - break; - } - } - static void rtl_hw_start_8102e_1(struct rtl8169_private *tp) { static const struct ephy_info e_info_8102e_1[] = { @@@ -5603,7 -5384,7 +5384,7 @@@ if ((cfg1 & LEDS0) && (cfg1 & LEDS1)) RTL_W8(tp, Config1, cfg1 & ~LEDS0);
- rtl_ephy_init(tp, e_info_8102e_1, ARRAY_SIZE(e_info_8102e_1)); + rtl_ephy_init(tp, e_info_8102e_1); }
static void rtl_hw_start_8102e_2(struct rtl8169_private *tp) @@@ -5645,7 -5426,7 +5426,7 @@@ static void rtl_hw_start_8105e_1(struc RTL_W8(tp, MCU, RTL_R8(tp, MCU) | EN_NDP | EN_OOB_RESET); RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) | PFM_EN);
- rtl_ephy_init(tp, e_info_8105e_1, ARRAY_SIZE(e_info_8105e_1)); + rtl_ephy_init(tp, e_info_8105e_1);
rtl_pcie_state_l2l3_disable(tp); } @@@ -5670,17 -5451,15 +5451,15 @@@ static void rtl_hw_start_8402(struct rt
RTL_W8(tp, MCU, RTL_R8(tp, MCU) & ~NOW_IS_OOB);
- rtl_ephy_init(tp, e_info_8402, ARRAY_SIZE(e_info_8402)); + rtl_ephy_init(tp, e_info_8402);
rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B);
- rtl_eri_write(tp, 0xc8, ERIAR_MASK_1111, 0x00000002, ERIAR_EXGMAC); - rtl_eri_write(tp, 0xe8, ERIAR_MASK_1111, 0x00000006, ERIAR_EXGMAC); - rtl_w0w1_eri(tp, 0xdc, ERIAR_MASK_0001, 0x00, 0x01, ERIAR_EXGMAC); - rtl_w0w1_eri(tp, 0xdc, ERIAR_MASK_0001, 0x01, 0x00, ERIAR_EXGMAC); - rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC); - rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC); - rtl_w0w1_eri(tp, 0x0d4, ERIAR_MASK_0011, 0x0e00, 0xff00, ERIAR_EXGMAC); + rtl_set_fifo_size(tp, 0x00, 0x00, 0x02, 0x06); + rtl_reset_packet_filter(tp); + rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000); + rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000); + rtl_w0w1_eri(tp, 0x0d4, ERIAR_MASK_0011, 0x0e00, 0xff00);
rtl_pcie_state_l2l3_disable(tp); } @@@ -5700,6 -5479,73 +5479,73 @@@ static void rtl_hw_start_8106(struct rt rtl_hw_aspm_clkreq_enable(tp, true); }
+ static void rtl_hw_config(struct rtl8169_private *tp) + { + static const rtl_generic_fct hw_configs[] = { + [RTL_GIGA_MAC_VER_07] = rtl_hw_start_8102e_1, + [RTL_GIGA_MAC_VER_08] = rtl_hw_start_8102e_3, + [RTL_GIGA_MAC_VER_09] = rtl_hw_start_8102e_2, + [RTL_GIGA_MAC_VER_10] = NULL, + [RTL_GIGA_MAC_VER_11] = rtl_hw_start_8168bb, + [RTL_GIGA_MAC_VER_12] = rtl_hw_start_8168bef, + [RTL_GIGA_MAC_VER_13] = NULL, + [RTL_GIGA_MAC_VER_14] = NULL, + [RTL_GIGA_MAC_VER_15] = NULL, + [RTL_GIGA_MAC_VER_16] = NULL, + [RTL_GIGA_MAC_VER_17] = rtl_hw_start_8168bef, + [RTL_GIGA_MAC_VER_18] = rtl_hw_start_8168cp_1, + [RTL_GIGA_MAC_VER_19] = rtl_hw_start_8168c_1, + [RTL_GIGA_MAC_VER_20] = rtl_hw_start_8168c_2, + [RTL_GIGA_MAC_VER_21] = rtl_hw_start_8168c_3, + [RTL_GIGA_MAC_VER_22] = rtl_hw_start_8168c_4, + [RTL_GIGA_MAC_VER_23] = rtl_hw_start_8168cp_2, + [RTL_GIGA_MAC_VER_24] = rtl_hw_start_8168cp_3, + [RTL_GIGA_MAC_VER_25] = rtl_hw_start_8168d, + [RTL_GIGA_MAC_VER_26] = rtl_hw_start_8168d, + [RTL_GIGA_MAC_VER_27] = rtl_hw_start_8168d, + [RTL_GIGA_MAC_VER_28] = rtl_hw_start_8168d_4, + [RTL_GIGA_MAC_VER_29] = rtl_hw_start_8105e_1, + [RTL_GIGA_MAC_VER_30] = rtl_hw_start_8105e_2, + [RTL_GIGA_MAC_VER_31] = rtl_hw_start_8168dp, + [RTL_GIGA_MAC_VER_32] = rtl_hw_start_8168e_1, + [RTL_GIGA_MAC_VER_33] = rtl_hw_start_8168e_1, + [RTL_GIGA_MAC_VER_34] = rtl_hw_start_8168e_2, + [RTL_GIGA_MAC_VER_35] = rtl_hw_start_8168f_1, + [RTL_GIGA_MAC_VER_36] = rtl_hw_start_8168f_1, + [RTL_GIGA_MAC_VER_37] = rtl_hw_start_8402, + [RTL_GIGA_MAC_VER_38] = rtl_hw_start_8411, + [RTL_GIGA_MAC_VER_39] = rtl_hw_start_8106, + [RTL_GIGA_MAC_VER_40] = rtl_hw_start_8168g_1, + [RTL_GIGA_MAC_VER_41] = rtl_hw_start_8168g_1, + [RTL_GIGA_MAC_VER_42] = rtl_hw_start_8168g_2, + [RTL_GIGA_MAC_VER_43] = rtl_hw_start_8168g_2, + [RTL_GIGA_MAC_VER_44] = rtl_hw_start_8411_2, + [RTL_GIGA_MAC_VER_45] = rtl_hw_start_8168h_1, + [RTL_GIGA_MAC_VER_46] = rtl_hw_start_8168h_1, + [RTL_GIGA_MAC_VER_47] = rtl_hw_start_8168h_1, + [RTL_GIGA_MAC_VER_48] = rtl_hw_start_8168h_1, + [RTL_GIGA_MAC_VER_49] = rtl_hw_start_8168ep_1, + [RTL_GIGA_MAC_VER_50] = rtl_hw_start_8168ep_2, + [RTL_GIGA_MAC_VER_51] = rtl_hw_start_8168ep_3, + }; + + if (hw_configs[tp->mac_version]) + hw_configs[tp->mac_version](tp); + } + + static void rtl_hw_start_8168(struct rtl8169_private *tp) + { + RTL_W8(tp, MaxTxPacketSize, TxPacketMax); + + /* Workaround for RxFIFO overflow. */ + if (tp->mac_version == RTL_GIGA_MAC_VER_11) { + tp->irq_mask |= RxFIFOOver; + tp->irq_mask &= ~RxOverflow; + } + + rtl_hw_config(tp); + } + static void rtl_hw_start_8101(struct rtl8169_private *tp) { if (tp->mac_version >= RTL_GIGA_MAC_VER_30) @@@ -5715,43 -5561,7 +5561,7 @@@ tp->cp_cmd &= CPCMD_QUIRK_MASK; RTL_W16(tp, CPlusCmd, tp->cp_cmd);
- switch (tp->mac_version) { - case RTL_GIGA_MAC_VER_07: - rtl_hw_start_8102e_1(tp); - break; - - case RTL_GIGA_MAC_VER_08: - rtl_hw_start_8102e_3(tp); - break; - - case RTL_GIGA_MAC_VER_09: - rtl_hw_start_8102e_2(tp); - break; - - case RTL_GIGA_MAC_VER_29: - rtl_hw_start_8105e_1(tp); - break; - case RTL_GIGA_MAC_VER_30: - rtl_hw_start_8105e_2(tp); - break; - - case RTL_GIGA_MAC_VER_37: - rtl_hw_start_8402(tp); - break; - - case RTL_GIGA_MAC_VER_39: - rtl_hw_start_8106(tp); - break; - case RTL_GIGA_MAC_VER_43: - rtl_hw_start_8168g_2(tp); - break; - case RTL_GIGA_MAC_VER_47: - case RTL_GIGA_MAC_VER_48: - rtl_hw_start_8168h_1(tp); - break; - } - - RTL_W16(tp, IntrMitigate, 0x0000); + rtl_hw_config(tp); }
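Both rtl_hw_start_8168() and rtl_hw_start_8101() now funnel into rtl_hw_config(), which dispatches through the hw_configs[] table instead of two parallel switch statements; supporting a new chip version becomes a one-line table entry instead of edits in two places. The array initializers imply a function-pointer type along the lines of:

typedef void (*rtl_generic_fct)(struct rtl8169_private *tp);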
static int rtl8169_change_mtu(struct net_device *dev, int new_mtu) @@@ -6268,7 -6078,7 +6078,7 @@@ static netdev_tx_t rtl8169_start_xmit(s */ smp_mb(); if (rtl_tx_slots_avail(tp, MAX_SKB_FRAGS)) - netif_wake_queue(dev); + netif_start_queue(dev); }
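On the netif_wake_queue() -> netif_start_queue() change in rtl8169_start_xmit(): simplified from include/linux/netdevice.h, the difference is

netif_start_queue(dev); /* clears __QUEUE_STATE_DRV_XOFF and nothing else */
netif_wake_queue(dev);  /* clears the bit and also reschedules the qdisc */

Inside ndo_start_xmit() the stack is already pushing packets, so rescheduling the qdisc buys nothing.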
return NETDEV_TX_OK; @@@ -6543,10 -6353,8 +6353,8 @@@ static irqreturn_t rtl8169_interrupt(in set_bit(RTL_FLAG_TASK_RESET_PENDING, tp->wk.flags); }
- if (status & (RTL_EVENT_NAPI | LinkChg)) { - rtl_irq_disable(tp); - napi_schedule_irqoff(&tp->napi); - } + rtl_irq_disable(tp); + napi_schedule_irqoff(&tp->napi); out: rtl_ack_events(tp, status);
@@@ -6645,8 -6453,7 +6453,7 @@@ static int r8169_phy_connect(struct rtl if (!tp->supports_gmii) phy_set_max_speed(phydev, SPEED_100);
- /* Ensure to advertise everything, incl. pause */ - linkmode_copy(phydev->advertising, phydev->supported); + phy_support_asym_pause(phydev);
phy_attached_info(phydev);
@@@ -7123,13 -6930,13 +6930,13 @@@ static void rtl_read_mac_address(struc switch (tp->mac_version) { case RTL_GIGA_MAC_VER_35 ... RTL_GIGA_MAC_VER_38: case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_51: - value = rtl_eri_read(tp, 0xe0, ERIAR_EXGMAC); + value = rtl_eri_read(tp, 0xe0); mac_addr[0] = (value >> 0) & 0xff; mac_addr[1] = (value >> 8) & 0xff; mac_addr[2] = (value >> 16) & 0xff; mac_addr[3] = (value >> 24) & 0xff;
- value = rtl_eri_read(tp, 0xe4, ERIAR_EXGMAC); + value = rtl_eri_read(tp, 0xe4); mac_addr[4] = (value >> 0) & 0xff; mac_addr[5] = (value >> 8) & 0xff; break; @@@ -7185,7 -6992,8 +6992,7 @@@ static int r8169_mdio_register(struct r new_bus->priv = tp; new_bus->parent = &pdev->dev; new_bus->irq[0] = PHY_IGNORE_INTERRUPT; - snprintf(new_bus->id, MII_BUS_ID_SIZE, "r8169-%x", - PCI_DEVID(pdev->bus->number, pdev->devfn)); + snprintf(new_bus->id, MII_BUS_ID_SIZE, "r8169-%x", pci_dev_id(pdev));
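pci_dev_id() folds the open-coded PCI_DEVID(pdev->bus->number, pdev->devfn) into a helper; by construction it is equivalent to:

static inline u16 pci_dev_id(struct pci_dev *dev)
{
	return PCI_DEVID(dev->bus->number, dev->devfn);
}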
new_bus->read = r8169_mdio_read_reg; new_bus->write = r8169_mdio_write_reg; diff --combined drivers/net/ethernet/renesas/ravb_main.c index 316b47741d3f,d3ffcf5b445a..ef8f08931fe8 --- a/drivers/net/ethernet/renesas/ravb_main.c +++ b/drivers/net/ethernet/renesas/ravb_main.c @@@ -111,7 -111,7 +111,7 @@@ static void ravb_set_buffer_align(struc */ static void ravb_read_mac_address(struct net_device *ndev, const u8 *mac) { - if (mac) { + if (!IS_ERR(mac)) { ether_addr_copy(ndev->dev_addr, mac); } else { u32 mahr = ravb_read(ndev, MAHR); @@@ -728,6 -728,7 +728,6 @@@ static irqreturn_t ravb_emac_interrupt(
spin_lock(&priv->lock); ravb_emac_interrupt_unlocked(ndev); - mmiowb(); spin_unlock(&priv->lock); return IRQ_HANDLED; } @@@ -847,6 -848,7 +847,6 @@@ static irqreturn_t ravb_interrupt(int i result = IRQ_HANDLED; }
- mmiowb(); spin_unlock(&priv->lock); return result; } @@@ -879,6 -881,7 +879,6 @@@ static irqreturn_t ravb_multi_interrupt result = IRQ_HANDLED; }
- mmiowb(); spin_unlock(&priv->lock); return result; } @@@ -895,6 -898,7 +895,6 @@@ static irqreturn_t ravb_dma_interrupt(i if (ravb_queue_interrupt(ndev, q)) result = IRQ_HANDLED;
- mmiowb(); spin_unlock(&priv->lock); return result; } @@@ -939,6 -943,7 +939,6 @@@ static int ravb_poll(struct napi_struc ravb_write(ndev, ~(mask | TIS_RESERVED), TIS); ravb_tx_free(ndev, q, true); netif_wake_subqueue(ndev, q); - mmiowb(); spin_unlock_irqrestore(&priv->lock, flags); } } @@@ -954,6 -959,7 +954,6 @@@ ravb_write(ndev, mask, RIE0); ravb_write(ndev, mask, TIE); } - mmiowb(); spin_unlock_irqrestore(&priv->lock, flags);
/* Receive error message handling */ @@@ -1002,6 -1008,7 +1002,6 @@@ static void ravb_adjust_link(struct net if (priv->no_avb_link && phydev->link) ravb_rcv_snd_enable(ndev);
- mmiowb(); spin_unlock_irqrestore(&priv->lock, flags);
if (new_state && netif_msg_link(priv)) @@@ -1594,6 -1601,7 +1594,6 @@@ static netdev_tx_t ravb_start_xmit(stru netif_stop_subqueue(ndev, q);
exit: - mmiowb(); spin_unlock_irqrestore(&priv->lock, flags); return NETDEV_TX_OK;
@@@ -1607,8 -1615,7 +1607,7 @@@ drop }
static u16 ravb_select_queue(struct net_device *ndev, struct sk_buff *skb, - struct net_device *sb_dev, - select_queue_fallback_t fallback) + struct net_device *sb_dev) { /* If skb needs TX timestamp, it is handled in network control queue */ return (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) ? RAVB_NC : @@@ -1665,6 -1672,7 +1664,6 @@@ static void ravb_set_rx_mode(struct net spin_lock_irqsave(&priv->lock, flags); ravb_modify(ndev, ECMR, ECMR_PRM, ndev->flags & IFF_PROMISC ? ECMR_PRM : 0); - mmiowb(); spin_unlock_irqrestore(&priv->lock, flags); }
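The mmiowb() deletions here in ravb — and in sh_eth, w5100 and iwlwifi further down — all rely on the same rework: mmiowb ordering is now provided implicitly by spin_unlock() on the architectures that need it, so the hand-placed barriers are redundant. The pattern being removed, sketched:

spin_lock(&priv->lock);
ravb_write(ndev, mask, RIE0); /* posted MMIO write */
mmiowb();                     /* old: order it before the lock release */
spin_unlock(&priv->lock);     /* new: the unlock itself provides this */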
@@@ -1961,6 -1969,13 +1960,13 @@@ static void ravb_set_config_mode(struc } }
+ static const struct soc_device_attribute ravb_delay_mode_quirk_match[] = { + { .soc_id = "r8a774c0" }, + { .soc_id = "r8a77990" }, + { .soc_id = "r8a77995" }, + { /* sentinel */ } + }; + /* Set tx and rx clock internal delay modes */ static void ravb_set_delay_mode(struct net_device *ndev) { @@@ -1972,8 -1987,12 +1978,12 @@@ set |= APSR_DM_RDM;
if (priv->phy_interface == PHY_INTERFACE_MODE_RGMII_ID || - priv->phy_interface == PHY_INTERFACE_MODE_RGMII_TXID) - set |= APSR_DM_TDM; + priv->phy_interface == PHY_INTERFACE_MODE_RGMII_TXID) { + if (!WARN(soc_device_match(ravb_delay_mode_quirk_match), + "phy-mode %s requires TX clock internal delay mode which is not supported by this hardware revision. Please update device tree", + phy_modes(priv->phy_interface))) + set |= APSR_DM_TDM; + }
ravb_modify(ndev, APSR, APSR_DM, set); } diff --combined drivers/net/ethernet/renesas/sh_eth.c index ed30aebdb941,4d4be6612583..7c4e282242d5 --- a/drivers/net/ethernet/renesas/sh_eth.c +++ b/drivers/net/ethernet/renesas/sh_eth.c @@@ -2010,6 -2010,7 +2010,6 @@@ static void sh_eth_adjust_link(struct n if ((mdp->cd->no_psr || mdp->no_ether_link) && phydev->link) sh_eth_rcv_snd_enable(ndev);
- mmiowb(); spin_unlock_irqrestore(&mdp->lock, flags);
if (new_state && netif_msg_link(mdp)) @@@ -3192,7 -3193,7 +3192,7 @@@ static struct sh_eth_plat_data *sh_eth_ pdata->phy_interface = ret;
mac_addr = of_get_mac_address(np); - if (mac_addr) + if (!IS_ERR(mac_addr)) memcpy(pdata->mac_addr, mac_addr, ETH_ALEN);
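The mac / !IS_ERR(mac) conversions in ravb, sh_eth and w5100 track of_get_mac_address() returning an ERR_PTR() instead of NULL on failure. The resulting caller shape:

const u8 *mac_addr = of_get_mac_address(np);

if (!IS_ERR(mac_addr))
	ether_addr_copy(ndev->dev_addr, mac_addr);
else
	eth_hw_addr_random(ndev);

w5100 keeps IS_ERR_OR_NULL() because its address may also come from platform data, where NULL is still a legitimate "not provided" value.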
pdata->no_ether_link = diff --combined drivers/net/ethernet/wiznet/w5100.c index 1713c2d2dccf,b0052933993b..8788953eaafd --- a/drivers/net/ethernet/wiznet/w5100.c +++ b/drivers/net/ethernet/wiznet/w5100.c @@@ -219,6 -219,7 +219,6 @@@ static inline int __w5100_write_direct( static inline int w5100_write_direct(struct net_device *ndev, u32 addr, u8 data) { __w5100_write_direct(ndev, addr, data); - mmiowb();
return 0; } @@@ -235,6 -236,7 +235,6 @@@ static int w5100_write16_direct(struct { __w5100_write_direct(ndev, addr, data >> 8); __w5100_write_direct(ndev, addr + 1, data); - mmiowb();
return 0; } @@@ -258,6 -260,8 +258,6 @@@ static int w5100_writebulk_direct(struc for (i = 0; i < len; i++, addr++) __w5100_write_direct(ndev, addr, *buf++);
- mmiowb(); - return 0; }
@@@ -371,6 -375,7 +371,6 @@@ static int w5100_readbulk_indirect(stru for (i = 0; i < len; i++) *buf++ = w5100_read_direct(ndev, W5100_IDM_DR);
- mmiowb(); spin_unlock_irqrestore(&mmio_priv->reg_lock, flags);
return 0; @@@ -389,6 -394,7 +389,6 @@@ static int w5100_writebulk_indirect(str for (i = 0; i < len; i++) __w5100_write_direct(ndev, W5100_IDM_DR, *buf++);
- mmiowb(); spin_unlock_irqrestore(&mmio_priv->reg_lock, flags);
return 0; @@@ -1158,7 -1164,7 +1158,7 @@@ int w5100_probe(struct device *dev, con INIT_WORK(&priv->setrx_work, w5100_setrx_work); INIT_WORK(&priv->restart_work, w5100_restart_work);
- if (mac_addr) + if (!IS_ERR_OR_NULL(mac_addr)) memcpy(ndev->dev_addr, mac_addr, ETH_ALEN); else eth_hw_addr_random(ndev); diff --combined drivers/net/wireless/intel/iwlwifi/pcie/trans.c index 4f5eec7e44bd,b55fa9efa1e3..803fcbac4152 --- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c @@@ -896,6 -896,8 +896,8 @@@ void iwl_pcie_apply_destination(struct if (!trans->num_blocks) return;
+ IWL_DEBUG_FW(trans, + "WRT: applying DRAM buffer[0] destination\n"); iwl_write_umac_prph(trans, MON_BUFF_BASE_ADDR_VER2, trans->fw_mon[0].physical >> MON_BUFF_SHIFT_VER2); @@@ -2067,6 -2069,7 +2069,6 @@@ static void iwl_trans_pcie_release_nic_ * MAC_ACCESS_REQ bit to be performed before any other writes * scheduled on different CPUs (after we drop reg_lock). */ - mmiowb(); out: spin_unlock_irqrestore(&trans_pcie->reg_lock, *flags); } @@@ -2441,9 -2444,8 +2443,8 @@@ void iwl_pcie_dump_csr(struct iwl_tran #ifdef CONFIG_IWLWIFI_DEBUGFS /* create and remove of files */ #define DEBUGFS_ADD_FILE(name, parent, mode) do { \ - if (!debugfs_create_file(#name, mode, parent, trans, \ - &iwl_dbgfs_##name##_ops)) \ - goto err; \ + debugfs_create_file(#name, mode, parent, trans, \ + &iwl_dbgfs_##name##_ops); \ } while (0)
/* file operation */ @@@ -2686,16 -2688,17 +2687,17 @@@ static ssize_t iwl_dbgfs_rfkill_write(s { struct iwl_trans *trans = file->private_data; struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - bool old = trans_pcie->debug_rfkill; + bool new_value; int ret;
- ret = kstrtobool_from_user(user_buf, count, &trans_pcie->debug_rfkill); + ret = kstrtobool_from_user(user_buf, count, &new_value); if (ret) return ret; - if (old == trans_pcie->debug_rfkill) + if (new_value == trans_pcie->debug_rfkill) return count; IWL_WARN(trans, "changing debug rfkill %d->%d\n", - old, trans_pcie->debug_rfkill); + trans_pcie->debug_rfkill, new_value); + trans_pcie->debug_rfkill = new_value; iwl_pcie_handle_rfkill_irq(trans);
return count; @@@ -2846,7 -2849,7 +2848,7 @@@ static const struct file_operations iwl };
/* Create the debugfs files and directories */ - int iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans) + void iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans) { struct dentry *dir = trans->dbgfs_dir;
@@@ -2857,11 -2860,6 +2859,6 @@@ DEBUGFS_ADD_FILE(fh_reg, dir, 0400); DEBUGFS_ADD_FILE(rfkill, dir, 0600); DEBUGFS_ADD_FILE(monitor_data, dir, 0400); - return 0; - - err: - IWL_ERR(trans, "failed to create the trans debugfs entry\n"); - return -ENOMEM; }
static void iwl_trans_pcie_debugfs_cleanup(struct iwl_trans *trans) @@@ -3011,10 -3009,14 +3008,14 @@@ static voi iwl_trans_pcie_dump_pointers(struct iwl_trans *trans, struct iwl_fw_error_dump_fw_mon *fw_mon_data) { - u32 base, write_ptr, wrap_cnt; + u32 base, base_high, write_ptr, write_ptr_val, wrap_cnt;
- /* If there was a dest TLV - use the values from there */ - if (trans->ini_valid) { + if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_AX210) { + base = DBGC_CUR_DBGBUF_BASE_ADDR_LSB; + base_high = DBGC_CUR_DBGBUF_BASE_ADDR_MSB; + write_ptr = DBGC_CUR_DBGBUF_STATUS; + wrap_cnt = DBGC_DBGBUF_WRAP_AROUND; + } else if (trans->ini_valid) { base = iwl_umac_prph(trans, MON_BUFF_BASE_ADDR_VER2); write_ptr = iwl_umac_prph(trans, MON_BUFF_WRPTR_VER2); wrap_cnt = iwl_umac_prph(trans, MON_BUFF_CYCLE_CNT_VER2); @@@ -3027,12 -3029,18 +3028,18 @@@ write_ptr = MON_BUFF_WRPTR; wrap_cnt = MON_BUFF_CYCLE_CNT; } - fw_mon_data->fw_mon_wr_ptr = - cpu_to_le32(iwl_read_prph(trans, write_ptr)); + + write_ptr_val = iwl_read_prph(trans, write_ptr); fw_mon_data->fw_mon_cycle_cnt = cpu_to_le32(iwl_read_prph(trans, wrap_cnt)); fw_mon_data->fw_mon_base_ptr = cpu_to_le32(iwl_read_prph(trans, base)); + if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_AX210) { + fw_mon_data->fw_mon_base_high_ptr = + cpu_to_le32(iwl_read_prph(trans, base_high)); + write_ptr_val &= DBGC_CUR_DBGBUF_STATUS_OFFSET_MSK; + } + fw_mon_data->fw_mon_wr_ptr = cpu_to_le32(write_ptr_val); }
static u32 @@@ -3043,9 -3051,10 +3050,10 @@@ iwl_trans_pcie_dump_monitor(struct iwl_ u32 len = 0;
if ((trans->num_blocks && - trans->cfg->device_family == IWL_DEVICE_FAMILY_7000) || - (trans->dbg_dest_tlv && !trans->ini_valid) || - (trans->ini_valid && trans->num_blocks)) { + (trans->cfg->device_family == IWL_DEVICE_FAMILY_7000 || + trans->cfg->device_family >= IWL_DEVICE_FAMILY_AX210 || + trans->ini_valid)) || + (trans->dbg_dest_tlv && !trans->ini_valid)) { struct iwl_fw_error_dump_fw_mon *fw_mon_data;
(*data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_FW_MONITOR); @@@ -3164,8 -3173,10 +3172,10 @@@ static struct iwl_trans_dump_dat len = sizeof(*dump_data);
/* host commands */ - len += sizeof(*data) + - cmdq->n_window * (sizeof(*txcmd) + TFD_MAX_PAYLOAD_SIZE); + if (dump_mask & BIT(IWL_FW_ERROR_DUMP_TXCMD)) + len += sizeof(*data) + + cmdq->n_window * (sizeof(*txcmd) + + TFD_MAX_PAYLOAD_SIZE);
/* FW monitor */ if (dump_mask & BIT(IWL_FW_ERROR_DUMP_FW_MONITOR)) @@@ -3411,7 -3422,7 +3421,7 @@@ struct iwl_trans *iwl_trans_pcie_alloc( ret = -ENOMEM; goto out_no_pci; } - + trans_pcie->debug_rfkill = -1;
if (!cfg->base_params->pcie_l1_allowed) { /* @@@ -3539,6 -3550,9 +3549,9 @@@ } else if (CSR_HW_RF_ID_TYPE_CHIP_ID(trans->hw_rf_id) == CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_GF)) { trans->cfg = &iwlax210_2ax_cfg_so_gf_a0; + } else if (CSR_HW_RF_ID_TYPE_CHIP_ID(trans->hw_rf_id) == + CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_GF4)) { + trans->cfg = &iwlax210_2ax_cfg_so_gf4_a0; } } else if (cfg == &iwl_ax101_cfg_qu_hr) { if (CSR_HW_RF_ID_TYPE_CHIP_ID(trans->hw_rf_id) == diff --combined drivers/net/wireless/mac80211_hwsim.c index c71adb1f1f41,0dcb511f44e2..60ca13e0f15b --- a/drivers/net/wireless/mac80211_hwsim.c +++ b/drivers/net/wireless/mac80211_hwsim.c @@@ -409,8 -409,8 +409,8 @@@ static int mac80211_hwsim_vendor_cmd_te int err; u32 val;
- err = nla_parse(tb, QCA_WLAN_VENDOR_ATTR_MAX, data, data_len, - hwsim_vendor_test_policy, NULL); + err = nla_parse_deprecated(tb, QCA_WLAN_VENDOR_ATTR_MAX, data, + data_len, hwsim_vendor_test_policy, NULL); if (err) return err; if (!tb[QCA_WLAN_VENDOR_ATTR_TEST]) @@@ -521,7 -521,7 +521,7 @@@ struct mac80211_hwsim_data unsigned int rx_filter; bool started, idle, scanning; struct mutex mutex; - struct tasklet_hrtimer beacon_timer; + struct hrtimer beacon_timer; enum ps_mode { PS_DISABLED, PS_ENABLED, PS_AUTO_POLL, PS_MANUAL_POLL } ps; @@@ -1460,7 -1460,7 +1460,7 @@@ static void mac80211_hwsim_stop(struct { struct mac80211_hwsim_data *data = hw->priv; data->started = false; - tasklet_hrtimer_cancel(&data->beacon_timer); + hrtimer_cancel(&data->beacon_timer); wiphy_dbg(hw->wiphy, "%s\n", __func__); }
@@@ -1583,12 -1583,14 +1583,12 @@@ static enum hrtimer_restar mac80211_hwsim_beacon(struct hrtimer *timer) { struct mac80211_hwsim_data *data = - container_of(timer, struct mac80211_hwsim_data, - beacon_timer.timer); + container_of(timer, struct mac80211_hwsim_data, beacon_timer); struct ieee80211_hw *hw = data->hw; u64 bcn_int = data->beacon_int; - ktime_t next_bcn;
if (!data->started) - goto out; + return HRTIMER_NORESTART;
ieee80211_iterate_active_interfaces_atomic( hw, IEEE80211_IFACE_ITER_NORMAL, @@@ -1599,9 -1601,12 +1599,9 @@@ bcn_int -= data->bcn_delta; data->bcn_delta = 0; } - - next_bcn = ktime_add(hrtimer_get_expires(timer), - ns_to_ktime(bcn_int * 1000)); - tasklet_hrtimer_start(&data->beacon_timer, next_bcn, HRTIMER_MODE_ABS); -out: - return HRTIMER_NORESTART; + hrtimer_forward(&data->beacon_timer, hrtimer_get_expires(timer), + ns_to_ktime(bcn_int * NSEC_PER_USEC)); + return HRTIMER_RESTART; }
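The tasklet_hrtimer -> hrtimer conversion leans on two plain-hrtimer facilities: the *_SOFT modes run the callback in softirq context (what tasklet_hrtimer provided through an extra tasklet hop), and a periodic callback re-arms itself with hrtimer_forward() plus HRTIMER_RESTART instead of calling a start function on itself. The generic shape, with hypothetical names:

static enum hrtimer_restart my_tick(struct hrtimer *t)
{
	/* advance the expiry by one period, then ask the core to re-arm */
	hrtimer_forward(t, hrtimer_get_expires(t), ns_to_ktime(period_ns));
	return HRTIMER_RESTART;
}

hrtimer_init(&timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_SOFT);
timer.function = my_tick;
hrtimer_start(&timer, first_expiry, HRTIMER_MODE_ABS_SOFT);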
static const char * const hwsim_chanwidths[] = { @@@ -1675,15 -1680,15 +1675,15 @@@ static int mac80211_hwsim_config(struc mutex_unlock(&data->mutex);
if (!data->started || !data->beacon_int) - tasklet_hrtimer_cancel(&data->beacon_timer); - else if (!hrtimer_is_queued(&data->beacon_timer.timer)) { + hrtimer_cancel(&data->beacon_timer); + else if (!hrtimer_is_queued(&data->beacon_timer)) { u64 tsf = mac80211_hwsim_get_tsf(hw, NULL); u32 bcn_int = data->beacon_int; u64 until_tbtt = bcn_int - do_div(tsf, bcn_int);
- tasklet_hrtimer_start(&data->beacon_timer, - ns_to_ktime(until_tbtt * 1000), - HRTIMER_MODE_REL); + hrtimer_start(&data->beacon_timer, + ns_to_ktime(until_tbtt * NSEC_PER_USEC), + HRTIMER_MODE_REL_SOFT); }
return 0; @@@ -1746,7 -1751,7 +1746,7 @@@ static void mac80211_hwsim_bss_info_cha info->enable_beacon, info->beacon_int); vp->bcn_en = info->enable_beacon; if (data->started && - !hrtimer_is_queued(&data->beacon_timer.timer) && + !hrtimer_is_queued(&data->beacon_timer) && info->enable_beacon) { u64 tsf, until_tbtt; u32 bcn_int; @@@ -1754,10 -1759,9 +1754,10 @@@ tsf = mac80211_hwsim_get_tsf(hw, vif); bcn_int = data->beacon_int; until_tbtt = bcn_int - do_div(tsf, bcn_int); - tasklet_hrtimer_start(&data->beacon_timer, - ns_to_ktime(until_tbtt * 1000), - HRTIMER_MODE_REL); + + hrtimer_start(&data->beacon_timer, + ns_to_ktime(until_tbtt * NSEC_PER_USEC), + HRTIMER_MODE_REL_SOFT); } else if (!info->enable_beacon) { unsigned int count = 0; ieee80211_iterate_active_interfaces_atomic( @@@ -1766,7 -1770,7 +1766,7 @@@ wiphy_dbg(hw->wiphy, " beaconing vifs remaining: %u", count); if (count == 0) { - tasklet_hrtimer_cancel(&data->beacon_timer); + hrtimer_cancel(&data->beacon_timer); data->beacon_int = 0; } } @@@ -1932,8 -1936,8 +1932,8 @@@ static int mac80211_hwsim_testmode_cmd( struct sk_buff *skb; int err, ps;
- err = nla_parse(tb, HWSIM_TM_ATTR_MAX, data, len, - hwsim_testmode_policy, NULL); + err = nla_parse_deprecated(tb, HWSIM_TM_ATTR_MAX, data, len, + hwsim_testmode_policy, NULL); if (err) return err;
@@@ -2806,6 -2810,12 +2806,12 @@@ static int mac80211_hwsim_new_radio(str ieee80211_hw_set(hw, SIGNAL_DBM); ieee80211_hw_set(hw, SUPPORTS_PS); ieee80211_hw_set(hw, TDLS_WIDER_BW); + + /* We only have SW crypto and only implement the A-MPDU API + * (but don't really build A-MPDUs) so we can have extended key + * support + */ + ieee80211_hw_set(hw, EXT_KEY_ID_NATIVE); if (rctbl) ieee80211_hw_set(hw, SUPPORTS_RC_TABLE); ieee80211_hw_set(hw, SUPPORTS_MULTI_BSSID); @@@ -2929,9 -2939,9 +2935,9 @@@
wiphy_ext_feature_set(hw->wiphy, NL80211_EXT_FEATURE_CQM_RSSI_LIST);
- tasklet_hrtimer_init(&data->beacon_timer, - mac80211_hwsim_beacon, - CLOCK_MONOTONIC, HRTIMER_MODE_ABS); + hrtimer_init(&data->beacon_timer, CLOCK_MONOTONIC, + HRTIMER_MODE_ABS_SOFT); + data->beacon_timer.function = mac80211_hwsim_beacon;
err = ieee80211_register_hw(hw); if (err < 0) { @@@ -3627,35 -3637,35 +3633,35 @@@ done static const struct genl_ops hwsim_ops[] = { { .cmd = HWSIM_CMD_REGISTER, - .policy = hwsim_genl_policy, + .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, .doit = hwsim_register_received_nl, .flags = GENL_UNS_ADMIN_PERM, }, { .cmd = HWSIM_CMD_FRAME, - .policy = hwsim_genl_policy, + .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, .doit = hwsim_cloned_frame_received_nl, }, { .cmd = HWSIM_CMD_TX_INFO_FRAME, - .policy = hwsim_genl_policy, + .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, .doit = hwsim_tx_info_frame_received_nl, }, { .cmd = HWSIM_CMD_NEW_RADIO, - .policy = hwsim_genl_policy, + .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, .doit = hwsim_new_radio_nl, .flags = GENL_UNS_ADMIN_PERM, }, { .cmd = HWSIM_CMD_DEL_RADIO, - .policy = hwsim_genl_policy, + .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, .doit = hwsim_del_radio_nl, .flags = GENL_UNS_ADMIN_PERM, }, { .cmd = HWSIM_CMD_GET_RADIO, - .policy = hwsim_genl_policy, + .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, .doit = hwsim_get_radio_nl, .dumpit = hwsim_dump_radio_nl, }, @@@ -3665,6 -3675,7 +3671,7 @@@ static struct genl_family hwsim_genl_fa .name = "MAC80211_HWSIM", .version = 1, .maxattr = HWSIM_ATTR_MAX, + .policy = hwsim_genl_policy, .netnsok = true, .module = THIS_MODULE, .ops = hwsim_ops, @@@ -3901,6 -3912,8 +3908,8 @@@ static int __init init_mac80211_hwsim(v param.p2p_device = support_p2p_device; param.use_chanctx = channels > 1; param.iftypes = HWSIM_IFTYPE_SUPPORT_MASK; + if (param.p2p_device) + param.iftypes |= BIT(NL80211_IFTYPE_P2P_DEVICE);
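The hwsim_ops churn above follows the genetlink rework that moved attribute policies from the individual ops up to the family, with per-op .validate flags opting legacy commands out of the new strict checking; nla_parse_deprecated() earlier in this file is the same series' rename of the old, non-strict nla_parse(). A family in the new style (names hypothetical):

static struct genl_family my_family __ro_after_init = {
	.name    = "MY_FAMILY",
	.version = 1,
	.maxattr = MY_ATTR_MAX,
	.policy  = my_policy,            /* applied once, to every op */
	.module  = THIS_MODULE,
	.ops     = my_ops,
	.n_ops   = ARRAY_SIZE(my_ops),
};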
err = mac80211_hwsim_new_radio(NULL, ¶m); if (err < 0) diff --combined drivers/staging/rtl8723bs/os_dep/os_intfs.c index 9021bf519605,0a20a4e9e19a..8a9d838af24e --- a/drivers/staging/rtl8723bs/os_dep/os_intfs.c +++ b/drivers/staging/rtl8723bs/os_dep/os_intfs.c @@@ -309,6 -309,9 +309,6 @@@ static uint loadparam(struct adapter *p registry_par->hw_wps_pbc = (u8)rtw_hw_wps_pbc;
registry_par->max_roaming_times = (u8)rtw_max_roaming_times; -#ifdef CONFIG_INTEL_WIDI - registry_par->max_roaming_times = (u8)rtw_max_roaming_times + 2; -#endif /* CONFIG_INTEL_WIDI */
registry_par->enable80211d = (u8)rtw_80211d;
@@@ -401,8 -404,7 +401,7 @@@ static unsigned int rtw_classify8021d(s
static u16 rtw_select_queue(struct net_device *dev, struct sk_buff *skb, - struct net_device *sb_dev, - select_queue_fallback_t fallback) + struct net_device *sb_dev) { struct adapter *padapter = rtw_netdev_priv(dev); struct mlme_priv *pmlmepriv = &padapter->mlmepriv; @@@ -754,7 -756,7 +753,7 @@@ u8 rtw_init_drv_sw(struct adapter *pada
rtw_init_hal_com_default_value(padapter);
- if ((rtw_init_cmd_priv(&padapter->cmdpriv)) == _FAIL) { + if (rtw_init_cmd_priv(&padapter->cmdpriv)) { RT_TRACE(_module_os_intfs_c_, _drv_err_, ("\n Can't init cmd_priv\n")); ret8 = _FAIL; goto exit; @@@ -762,7 -764,7 +761,7 @@@
padapter->cmdpriv.padapter = padapter;
- if ((rtw_init_evt_priv(&padapter->evtpriv)) == _FAIL) { + if (rtw_init_evt_priv(&padapter->evtpriv)) { RT_TRACE(_module_os_intfs_c_, _drv_err_, ("\n Can't init evt_priv\n")); ret8 = _FAIL; goto exit; @@@ -813,6 -815,14 +812,6 @@@
rtw_hal_dm_init(padapter);
-#ifdef CONFIG_INTEL_WIDI - if (rtw_init_intel_widi(padapter) == _FAIL) { - DBG_871X("Can't rtw_init_intel_widi\n"); - ret8 = _FAIL; - goto exit; - } -#endif /* CONFIG_INTEL_WIDI */ - exit:
RT_TRACE(_module_os_intfs_c_, _drv_info_, ("-rtw_init_drv_sw\n")); @@@ -849,6 -859,10 +848,6 @@@ u8 rtw_free_drv_sw(struct adapter *pada { RT_TRACE(_module_os_intfs_c_, _drv_info_, ("==>rtw_free_drv_sw"));
-#ifdef CONFIG_INTEL_WIDI - rtw_free_intel_widi(padapter); -#endif /* CONFIG_INTEL_WIDI */ - free_mlme_ext_priv(&padapter->mlmeextpriv);
rtw_free_cmd_priv(&padapter->cmdpriv); diff --combined include/linux/filter.h index 7d3abde3f183,fb0edad75971..7148bab96943 --- a/include/linux/filter.h +++ b/include/linux/filter.h @@@ -20,7 -20,6 +20,7 @@@ #include <linux/set_memory.h> #include <linux/kallsyms.h> #include <linux/if_vlan.h> +#include <linux/vmalloc.h>
#include <net/sch_generic.h>
@@@ -34,6 -33,8 +34,8 @@@ struct bpf_prog_aux struct xdp_rxq_info; struct xdp_buff; struct sock_reuseport; + struct ctl_table; + struct ctl_table_header;
/* ArgX, context and stack frame pointer register positions. Note, * Arg1, Arg2, Arg3, etc are used as argument mappings of function @@@ -504,6 -505,7 +506,6 @@@ struct bpf_prog u16 pages; /* Number of allocated pages */ u16 jited:1, /* Is our filter JIT'ed? */ jit_requested:1,/* archs need to JIT the prog */ - undo_set_mem:1, /* Passed set_memory_ro() checkpoint */ gpl_compatible:1, /* Is filter GPL compatible? */ cb_access:1, /* Is control block accessed? */ dst_needed:1, /* Do we need dst entry? */ @@@ -733,15 -735,24 +735,15 @@@ bpf_ctx_narrow_access_ok(u32 off, u32 s
static inline void bpf_prog_lock_ro(struct bpf_prog *fp) { - fp->undo_set_mem = 1; + set_vm_flush_reset_perms(fp); set_memory_ro((unsigned long)fp, fp->pages); }
-static inline void bpf_prog_unlock_ro(struct bpf_prog *fp) -{ - if (fp->undo_set_mem) - set_memory_rw((unsigned long)fp, fp->pages); -} - static inline void bpf_jit_binary_lock_ro(struct bpf_binary_header *hdr) { + set_vm_flush_reset_perms(hdr); set_memory_ro((unsigned long)hdr, hdr->pages); -} - -static inline void bpf_jit_binary_unlock_ro(struct bpf_binary_header *hdr) -{ - set_memory_rw((unsigned long)hdr, hdr->pages); + set_memory_x((unsigned long)hdr, hdr->pages); }
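Dropping bpf_prog_unlock_ro() and bpf_jit_binary_unlock_ro() works because set_vm_flush_reset_perms() — hence the new <linux/vmalloc.h> include above — tags the allocation with VM_FLUSH_RESET_PERMS: the eventual vfree() resets the direct-map permissions and flushes the TLB itself, so no set_memory_rw() is needed before freeing. The lifecycle, sketched under that assumption:

set_vm_flush_reset_perms(hdr);                /* mark for reset on free */
set_memory_ro((unsigned long)hdr, hdr->pages);
set_memory_x((unsigned long)hdr, hdr->pages);
/* ... run the JITed image ... */
vfree(hdr);                                   /* perms reset + TLB flush */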
static inline struct bpf_binary_header * @@@ -779,6 -790,7 +781,6 @@@ void __bpf_prog_free(struct bpf_prog *f
static inline void bpf_prog_unlock_free(struct bpf_prog *fp) { - bpf_prog_unlock_ro(fp); __bpf_prog_free(fp); }
@@@ -1167,4 -1179,18 +1169,18 @@@ struct bpf_sock_ops_kern */ };
+ struct bpf_sysctl_kern { + struct ctl_table_header *head; + struct ctl_table *table; + void *cur_val; + size_t cur_len; + void *new_val; + size_t new_len; + int new_updated; + int write; + loff_t *ppos; + /* Temporary "register" for indirect stores to ppos. */ + u64 tmp_reg; + }; + #endif /* __LINUX_FILTER_H__ */ diff --combined include/linux/mlx5/driver.h index 951039253105,5a39b323c52e..5a27246db883 --- a/include/linux/mlx5/driver.h +++ b/include/linux/mlx5/driver.h @@@ -56,7 -56,6 +56,6 @@@
enum { MLX5_BOARD_ID_LEN = 64, - MLX5_MAX_NAME_LEN = 16, };
enum { @@@ -513,8 -512,13 +512,13 @@@ struct mlx5_rl_table struct mlx5_rl_entry *rl_entry; };
+ struct mlx5_core_roce { + struct mlx5_flow_table *ft; + struct mlx5_flow_group *fg; + struct mlx5_flow_handle *allow_rule; + }; + struct mlx5_priv { - char name[MLX5_MAX_NAME_LEN]; struct mlx5_eq_table *eq_table;
/* pages stuff */ @@@ -567,6 -571,7 +571,7 @@@ struct mlx5_lag *lag; struct mlx5_devcom *devcom; unsigned long pci_dev_data; + struct mlx5_core_roce roce; struct mlx5_fc_stats fc_stats; struct mlx5_rl_table rl_table;
@@@ -643,6 -648,7 +648,7 @@@ struct mlx5_fw_tracer struct mlx5_vxlan;
struct mlx5_core_dev { + struct device *device; struct pci_dev *pdev; /* sync pci state */ struct mutex pci_status_mutex; @@@ -683,6 -689,7 +689,6 @@@ #endif struct mlx5_clock clock; struct mlx5_ib_clock_info *clock_info; - struct page *clock_info_page; struct mlx5_fw_tracer *tracer; };
diff --combined include/net/xfrm.h index 99f722c4d804,debcc5198e33..a2907873ed56 --- a/include/net/xfrm.h +++ b/include/net/xfrm.h @@@ -132,6 -132,17 +132,17 @@@ struct xfrm_state_offload u8 flags; };
+ struct xfrm_mode { + u8 encap; + u8 family; + u8 flags; + }; + + /* Flags for xfrm_mode. */ + enum { + XFRM_MODE_FLAG_TUNNEL = 1, + }; + /* Full description of state of transformer. */ struct xfrm_state { possible_net_t xs_net; @@@ -219,7 -230,7 +230,7 @@@ struct xfrm_stats stats;
struct xfrm_lifetime_cur curlft; - struct tasklet_hrtimer mtimer; + struct hrtimer mtimer;
struct xfrm_state_offload xso;
@@@ -234,9 -245,9 +245,9 @@@ /* Reference to data common to all the instances of this * transformer. */ const struct xfrm_type *type; - struct xfrm_mode *inner_mode; - struct xfrm_mode *inner_mode_iaf; - struct xfrm_mode *outer_mode; + struct xfrm_mode inner_mode; + struct xfrm_mode inner_mode_iaf; + struct xfrm_mode outer_mode;
const struct xfrm_type_offload *type_offload;
@@@ -316,13 -327,6 +327,6 @@@ struct xfrm_policy_afinfo xfrm_address_t *saddr, xfrm_address_t *daddr, u32 mark); - void (*decode_session)(struct sk_buff *skb, - struct flowi *fl, - int reverse); - int (*get_tos)(const struct flowi *fl); - int (*init_path)(struct xfrm_dst *path, - struct dst_entry *dst, - int nfheader_len); int (*fill_dst)(struct xfrm_dst *xdst, struct net_device *dev, const struct flowi *fl); @@@ -348,7 -352,6 +352,6 @@@ struct xfrm_state_afinfo struct module *owner; const struct xfrm_type *type_map[IPPROTO_MAX]; const struct xfrm_type_offload *type_offload_map[IPPROTO_MAX]; - struct xfrm_mode *mode_map[XFRM_MODE_MAX];
int (*init_flags)(struct xfrm_state *x); void (*init_tempsel)(struct xfrm_selector *sel, @@@ -423,78 -426,6 +426,6 @@@ struct xfrm_type_offload int xfrm_register_type_offload(const struct xfrm_type_offload *type, unsigned short family); int xfrm_unregister_type_offload(const struct xfrm_type_offload *type, unsigned short family);
- struct xfrm_mode { - /* - * Remove encapsulation header. - * - * The IP header will be moved over the top of the encapsulation - * header. - * - * On entry, the transport header shall point to where the IP header - * should be and the network header shall be set to where the IP - * header currently is. skb->data shall point to the start of the - * payload. - */ - int (*input2)(struct xfrm_state *x, struct sk_buff *skb); - - /* - * This is the actual input entry point. - * - * For transport mode and equivalent this would be identical to - * input2 (which does not need to be set). While tunnel mode - * and equivalent would set this to the tunnel encapsulation function - * xfrm4_prepare_input that would in turn call input2. - */ - int (*input)(struct xfrm_state *x, struct sk_buff *skb); - - /* - * Add encapsulation header. - * - * On exit, the transport header will be set to the start of the - * encapsulation header to be filled in by x->type->output and - * the mac header will be set to the nextheader (protocol for - * IPv4) field of the extension header directly preceding the - * encapsulation header, or in its absence, that of the top IP - * header. The value of the network header will always point - * to the top IP header while skb->data will point to the payload. - */ - int (*output2)(struct xfrm_state *x,struct sk_buff *skb); - - /* - * This is the actual output entry point. - * - * For transport mode and equivalent this would be identical to - * output2 (which does not need to be set). While tunnel mode - * and equivalent would set this to a tunnel encapsulation function - * (xfrm4_prepare_output or xfrm6_prepare_output) that would in turn - * call output2. - */ - int (*output)(struct xfrm_state *x, struct sk_buff *skb); - - /* - * Adjust pointers into the packet and do GSO segmentation. - */ - struct sk_buff *(*gso_segment)(struct xfrm_state *x, struct sk_buff *skb, netdev_features_t features); - - /* - * Adjust pointers into the packet when IPsec is done at layer2. - */ - void (*xmit)(struct xfrm_state *x, struct sk_buff *skb); - - struct xfrm_state_afinfo *afinfo; - struct module *owner; - unsigned int encap; - int flags; - }; - - /* Flags for xfrm_mode. */ - enum { - XFRM_MODE_FLAG_TUNNEL = 1, - }; - - int xfrm_register_mode(struct xfrm_mode *mode, int family); - int xfrm_unregister_mode(struct xfrm_mode *mode, int family); - static inline int xfrm_af2proto(unsigned int family) { switch(family) { @@@ -507,13 -438,13 +438,13 @@@ } }
- static inline struct xfrm_mode *xfrm_ip2inner_mode(struct xfrm_state *x, int ipproto) + static inline const struct xfrm_mode *xfrm_ip2inner_mode(struct xfrm_state *x, int ipproto) { if ((ipproto == IPPROTO_IPIP && x->props.family == AF_INET) || (ipproto == IPPROTO_IPV6 && x->props.family == AF_INET6)) - return x->inner_mode; + return &x->inner_mode; else - return x->inner_mode_iaf; + return &x->inner_mode_iaf; }
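With struct xfrm_mode shrunk to three bytes of data (encap, family, flags) embedded by value in struct xfrm_state, mode checks collapse into plain field reads, e.g.:

const struct xfrm_mode *inner = xfrm_ip2inner_mode(x, ipproto);

if (inner->flags & XFRM_MODE_FLAG_TUNNEL)
	/* tunnel-mode handling */;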
struct xfrm_tmpl { @@@ -1623,7 -1554,6 +1554,6 @@@ int xfrm_init_replay(struct xfrm_state int xfrm_state_mtu(struct xfrm_state *x, int mtu); int __xfrm_init_state(struct xfrm_state *x, bool init_replay, bool offload); int xfrm_init_state(struct xfrm_state *x); - int xfrm_prepare_input(struct xfrm_state *x, struct sk_buff *skb); int xfrm_input(struct sk_buff *skb, int nexthdr, __be32 spi, int encap_type); int xfrm_input_resume(struct sk_buff *skb, int nexthdr); int xfrm_trans_queue(struct sk_buff *skb, @@@ -1631,7 -1561,11 +1561,11 @@@ struct sk_buff *)); int xfrm_output_resume(struct sk_buff *skb, int err); int xfrm_output(struct sock *sk, struct sk_buff *skb); - int xfrm_inner_extract_output(struct xfrm_state *x, struct sk_buff *skb); + + #if IS_ENABLED(CONFIG_NET_PKTGEN) + int pktgen_xfrm_outer_mode_output(struct xfrm_state *x, struct sk_buff *skb); + #endif + void xfrm_local_error(struct sk_buff *skb, int mtu); int xfrm4_extract_header(struct sk_buff *skb); int xfrm4_extract_input(struct xfrm_state *x, struct sk_buff *skb); @@@ -1650,10 -1584,8 +1584,8 @@@ static inline int xfrm4_rcv_spi(struct }
int xfrm4_extract_output(struct xfrm_state *x, struct sk_buff *skb); - int xfrm4_prepare_output(struct xfrm_state *x, struct sk_buff *skb); int xfrm4_output(struct net *net, struct sock *sk, struct sk_buff *skb); int xfrm4_output_finish(struct sock *sk, struct sk_buff *skb); - int xfrm4_rcv_cb(struct sk_buff *skb, u8 protocol, int err); int xfrm4_protocol_register(struct xfrm4_protocol *handler, unsigned char protocol); int xfrm4_protocol_deregister(struct xfrm4_protocol *handler, unsigned char protocol); int xfrm4_tunnel_register(struct xfrm_tunnel *handler, unsigned short family); @@@ -1669,7 -1601,6 +1601,6 @@@ int xfrm6_rcv(struct sk_buff *skb) int xfrm6_input_addr(struct sk_buff *skb, xfrm_address_t *daddr, xfrm_address_t *saddr, u8 proto); void xfrm6_local_error(struct sk_buff *skb, u32 mtu); - int xfrm6_rcv_cb(struct sk_buff *skb, u8 protocol, int err); int xfrm6_protocol_register(struct xfrm6_protocol *handler, unsigned char protocol); int xfrm6_protocol_deregister(struct xfrm6_protocol *handler, unsigned char protocol); int xfrm6_tunnel_register(struct xfrm6_tunnel *handler, unsigned short family); @@@ -1677,7 -1608,6 +1608,6 @@@ int xfrm6_tunnel_deregister(struct xfrm __be32 xfrm6_tunnel_alloc_spi(struct net *net, xfrm_address_t *saddr); __be32 xfrm6_tunnel_spi_lookup(struct net *net, const xfrm_address_t *saddr); int xfrm6_extract_output(struct xfrm_state *x, struct sk_buff *skb); - int xfrm6_prepare_output(struct xfrm_state *x, struct sk_buff *skb); int xfrm6_output(struct net *net, struct sock *sk, struct sk_buff *skb); int xfrm6_output_finish(struct sock *sk, struct sk_buff *skb); int xfrm6_find_1stfragopt(struct xfrm_state *x, struct sk_buff *skb, @@@ -2069,7 -1999,7 +1999,7 @@@ static inline int xfrm_tunnel_check(str tunnel = true; break; } - if (tunnel && !(x->outer_mode->flags & XFRM_MODE_FLAG_TUNNEL)) + if (tunnel && !(x->outer_mode.flags & XFRM_MODE_FLAG_TUNNEL)) return -EINVAL;
return 0; diff --combined kernel/bpf/core.c index c605397c79f0,ace8c22c8b0e..3ba56e73c90e --- a/kernel/bpf/core.c +++ b/kernel/bpf/core.c @@@ -292,7 -292,8 +292,8 @@@ int bpf_prog_calc_tag(struct bpf_prog * dst[i] = fp->insnsi[i]; if (!was_ld_map && dst[i].code == (BPF_LD | BPF_IMM | BPF_DW) && - dst[i].src_reg == BPF_PSEUDO_MAP_FD) { + (dst[i].src_reg == BPF_PSEUDO_MAP_FD || + dst[i].src_reg == BPF_PSEUDO_MAP_VALUE)) { was_ld_map = true; dst[i].imm = 0; } else if (was_ld_map && @@@ -438,6 -439,7 +439,7 @@@ struct bpf_prog *bpf_patch_insn_single( u32 insn_adj_cnt, insn_rest, insn_delta = len - 1; const u32 cnt_max = S16_MAX; struct bpf_prog *prog_adj; + int err;
/* Since our patchlet doesn't expand the image, we're done. */ if (insn_delta == 0) { @@@ -453,8 -455,8 +455,8 @@@ * we afterwards may not fail anymore. */ if (insn_adj_cnt > cnt_max && - bpf_adj_branches(prog, off, off + 1, off + len, true)) - return NULL; + (err = bpf_adj_branches(prog, off, off + 1, off + len, true))) + return ERR_PTR(err);
/* Several new instructions need to be inserted. Make room * for them. Likely, there's no need for a new allocation as @@@ -463,7 -465,7 +465,7 @@@ prog_adj = bpf_prog_realloc(prog, bpf_prog_size(insn_adj_cnt), GFP_USER); if (!prog_adj) - return NULL; + return ERR_PTR(-ENOMEM);
prog_adj->len = insn_adj_cnt;
@@@ -848,6 -850,7 +850,6 @@@ void __weak bpf_jit_free(struct bpf_pro if (fp->jited) { struct bpf_binary_header *hdr = bpf_jit_binary_hdr(fp);
- bpf_jit_binary_unlock_ro(hdr); bpf_jit_binary_free(hdr);
WARN_ON_ONCE(!bpf_prog_kallsyms_verify_off(fp)); @@@ -1095,13 -1098,13 +1097,13 @@@ struct bpf_prog *bpf_jit_blind_constant continue;
tmp = bpf_patch_insn_single(clone, i, insn_buff, rewritten); - if (!tmp) { + if (IS_ERR(tmp)) { /* Patching may have repointed aux->prog during * realloc from the original one, so we need to * fix it up here on error. */ bpf_jit_prog_release_other(prog, clone); - return ERR_PTR(-ENOMEM); + return tmp; }
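bpf_patch_insn_single() now reports why it failed, so callers switch from a NULL check to the ERR_PTR convention, as bpf_jit_blind_constants() does above. A caller sketch:

struct bpf_prog *new_prog;

new_prog = bpf_patch_insn_single(prog, off, patch, patch_len);
if (IS_ERR(new_prog))
	return PTR_ERR(new_prog); /* -ENOMEM, or the bpf_adj_branches() error */
prog = new_prog;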
clone = tmp; diff --combined kernel/time/time.c index 86656bbac232,9e3f79d4f5a8..7f7d6914ddd5 --- a/kernel/time/time.c +++ b/kernel/time/time.c @@@ -171,7 -171,7 +171,7 @@@ int do_sys_settimeofday64(const struct static int firsttime = 1; int error = 0;
- if (tv && !timespec64_valid(tv)) + if (tv && !timespec64_valid_settod(tv)) return -EINVAL;
error = security_settime64(tv, tz); @@@ -783,6 -783,16 +783,16 @@@ u64 jiffies64_to_nsecs(u64 j } EXPORT_SYMBOL(jiffies64_to_nsecs);
+ u64 jiffies64_to_msecs(const u64 j) + { + #if HZ <= MSEC_PER_SEC && !(MSEC_PER_SEC % HZ) + return (MSEC_PER_SEC / HZ) * j; + #else + return div_u64(j * HZ_TO_MSEC_NUM, HZ_TO_MSEC_DEN); + #endif + } + EXPORT_SYMBOL(jiffies64_to_msecs); + /** * nsecs_to_jiffies64 - Convert nsecs in u64 to jiffies64 * diff --combined kernel/trace/bpf_trace.c index 94b0e37d90ef,8607aba1d882..b496ffdf5f36 --- a/kernel/trace/bpf_trace.c +++ b/kernel/trace/bpf_trace.c @@@ -14,8 -14,6 +14,8 @@@ #include <linux/syscalls.h> #include <linux/error-injection.h>
+#include <asm/tlb.h> + #include "trace_probe.h" #include "trace.h"
@@@ -165,10 -163,6 +165,10 @@@ BPF_CALL_3(bpf_probe_write_user, void * * access_ok() should prevent writing to non-user memory, but in * some situations (nommu, temporary switch, etc) access_ok() does * not provide enough validation, hence the check on KERNEL_DS. + * + * nmi_uaccess_okay() ensures the probe is not run in an interim + * state, while the task or mm is being switched. This is specifically + * required to prevent the use of a temporary mm. */
if (unlikely(in_interrupt() || @@@ -176,8 -170,6 +176,8 @@@ return -EPERM; if (unlikely(uaccess_kernel())) return -EPERM; + if (unlikely(!nmi_uaccess_okay())) + return -EPERM; if (!access_ok(unsafe_ptr, size)) return -EPERM;
@@@ -577,6 -569,12 +577,12 @@@ tracing_func_proto(enum bpf_func_id fun return &bpf_map_update_elem_proto; case BPF_FUNC_map_delete_elem: return &bpf_map_delete_elem_proto; + case BPF_FUNC_map_push_elem: + return &bpf_map_push_elem_proto; + case BPF_FUNC_map_pop_elem: + return &bpf_map_pop_elem_proto; + case BPF_FUNC_map_peek_elem: + return &bpf_map_peek_elem_proto; case BPF_FUNC_probe_read: return &bpf_probe_read_proto; case BPF_FUNC_ktime_get_ns: @@@ -917,6 -915,27 +923,27 @@@ const struct bpf_verifier_ops raw_trace const struct bpf_prog_ops raw_tracepoint_prog_ops = { };
+ static bool raw_tp_writable_prog_is_valid_access(int off, int size, + enum bpf_access_type type, + const struct bpf_prog *prog, + struct bpf_insn_access_aux *info) + { + if (off == 0) { + if (size != sizeof(u64) || type != BPF_READ) + return false; + info->reg_type = PTR_TO_TP_BUFFER; + } + return raw_tp_prog_is_valid_access(off, size, type, prog, info); + } + + const struct bpf_verifier_ops raw_tracepoint_writable_verifier_ops = { + .get_func_proto = raw_tp_prog_func_proto, + .is_valid_access = raw_tp_writable_prog_is_valid_access, + }; + + const struct bpf_prog_ops raw_tracepoint_writable_prog_ops = { + }; + static bool pe_prog_is_valid_access(int off, int size, enum bpf_access_type type, const struct bpf_prog *prog, struct bpf_insn_access_aux *info) @@@ -1206,6 -1225,9 +1233,9 @@@ static int __bpf_probe_register(struct if (prog->aux->max_ctx_offset > btp->num_args * sizeof(u64)) return -EINVAL;
+ if (prog->aux->max_tp_access > btp->writable_size) + return -EINVAL; + return tracepoint_probe_register(tp, (void *)btp->bpf_func, prog); }
diff --combined lib/Kconfig index 27261e506ae8,ac1fcf06d8ea..f871282e0612 --- a/lib/Kconfig +++ b/lib/Kconfig @@@ -18,6 -18,23 +18,23 @@@ config RAID6_PQ_BENCHMAR Benchmark all available RAID6 PQ functions on init and choose the fastest one.
+ config PACKING + bool "Generic bitfield packing and unpacking" + default n + help + This option provides the packing() helper function, which permits + converting bitfields between a CPU-usable representation and a + memory representation that can have any combination of these quirks: + - Is little endian (bytes are reversed within a 32-bit group) + - The least-significant 32-bit word comes first (within a 64-bit + group) + - The most significant bit of a byte is at its right (bit 0 of a + register description is numerically 2^7). + Drivers may use these helpers to match the bit indices as described + in the data sheets of the peripherals they are in control of. + + When in doubt, say N. + config BITREVERSE tristate
@@@ -591,20 -608,12 +608,20 @@@ config ARCH_NO_SG_CHAI config ARCH_HAS_PMEM_API bool
+# use memcpy to implement user copies for nommu architectures +config UACCESS_MEMCPY + bool + config ARCH_HAS_UACCESS_FLUSHCACHE bool
config ARCH_HAS_UACCESS_MCSAFE bool
+# Temporary. Goes away when all archs are cleaned up +config ARCH_STACKWALK + bool + config STACKDEPOT bool select STACKTRACE diff --combined lib/Kconfig.debug index 4c54a89f06ee,8ed7d276fe7d..c7f4947e8a61 --- a/lib/Kconfig.debug +++ b/lib/Kconfig.debug @@@ -219,6 -219,14 +219,14 @@@ config DEBUG_INFO_DWARF But it significantly improves the success of resolving variables in gdb on optimized code.
+ config DEBUG_INFO_BTF + bool "Generate BTF typeinfo" + depends on DEBUG_INFO + help + Generate deduplicated BTF type information from DWARF debug info. + Turning this on requires the pahole tool, which converts + DWARF type info into equivalent deduplicated BTF type info. + config GDB_SCRIPTS bool "Provide GDB scripts for kernel debugging" depends on DEBUG_INFO @@@ -1769,9 -1777,6 +1777,9 @@@ config TEST_HEXDUM config TEST_STRING_HELPERS tristate "Test functions located in the string_helpers module at runtime"
+config TEST_STRSCPY + tristate "Test strscpy*() family of functions at runtime" + config TEST_KSTRTOX tristate "Test kstrto*() family of functions at runtime"
diff --combined lib/Makefile index 07506e3891a0,7d4db18fabf1..83d7df2661ff --- a/lib/Makefile +++ b/lib/Makefile @@@ -17,17 -17,6 +17,17 @@@ KCOV_INSTRUMENT_list_debug.o := KCOV_INSTRUMENT_debugobjects.o := n KCOV_INSTRUMENT_dynamic_debug.o := n
+# Early boot use of cmdline, don't instrument it +ifdef CONFIG_AMD_MEM_ENCRYPT +KASAN_SANITIZE_string.o := n + +ifdef CONFIG_FUNCTION_TRACER +CFLAGS_REMOVE_string.o = -pg +endif + +CFLAGS_string.o := $(call cc-option, -fno-stack-protector) +endif + lib-y := ctype.o string.o vsprintf.o cmdline.o \ rbtree.o radix-tree.o timerqueue.o xarray.o \ idr.o int_sqrt.o extable.o \ @@@ -81,7 -70,6 +81,7 @@@ obj-$(CONFIG_TEST_STATIC_KEYS) += test_ obj-$(CONFIG_TEST_STATIC_KEYS) += test_static_key_base.o obj-$(CONFIG_TEST_PRINTF) += test_printf.o obj-$(CONFIG_TEST_BITMAP) += test_bitmap.o +obj-$(CONFIG_TEST_STRSCPY) += test_strscpy.o obj-$(CONFIG_TEST_BITFIELD) += test_bitfield.o obj-$(CONFIG_TEST_UUID) += test_uuid.o obj-$(CONFIG_TEST_XARRAY) += test_xarray.o @@@ -120,6 -108,7 +120,7 @@@ obj-$(CONFIG_DEBUG_LIST) += list_debug. obj-$(CONFIG_DEBUG_OBJECTS) += debugobjects.o
obj-$(CONFIG_BITREVERSE) += bitrev.o + obj-$(CONFIG_PACKING) += packing.o obj-$(CONFIG_RATIONAL) += rational.o obj-$(CONFIG_CRC_CCITT) += crc-ccitt.o obj-$(CONFIG_CRC16) += crc16.o @@@ -280,7 -269,6 +281,7 @@@ obj-$(CONFIG_UCS2_STRING) += ucs2_strin obj-$(CONFIG_UBSAN) += ubsan.o
UBSAN_SANITIZE_ubsan.o := n +CFLAGS_ubsan.o := $(call cc-option, -fno-conserve-stack -fno-stack-protector)
obj-$(CONFIG_SBITMAP) += sbitmap.o
diff --combined net/batman-adv/icmp_socket.c index 3ff32125f4b5,de81b5ecad91..0a91c8661357 --- a/net/batman-adv/icmp_socket.c +++ b/net/batman-adv/icmp_socket.c @@@ -2,18 -2,6 +2,6 @@@ /* Copyright (C) 2007-2019 B.A.T.M.A.N. contributors: * * Marek Lindner - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of version 2 of the GNU General Public - * License as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, see http://www.gnu.org/licenses/. */
#include "icmp_socket.h" @@@ -77,7 -65,7 +65,7 @@@ static int batadv_socket_open(struct in
batadv_debugfs_deprecated(file, "");
- nonseekable_open(inode, file); + stream_open(inode, file);
socket_client = kmalloc(sizeof(*socket_client), GFP_KERNEL); if (!socket_client) { diff --combined net/batman-adv/log.c index e8ff13598c08,60ce11e16a90..f79ebd5b46e9 --- a/net/batman-adv/log.c +++ b/net/batman-adv/log.c @@@ -2,18 -2,6 +2,6 @@@ /* Copyright (C) 2010-2019 B.A.T.M.A.N. contributors: * * Marek Lindner - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of version 2 of the GNU General Public - * License as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, see http://www.gnu.org/licenses/. */
#include "log.h" @@@ -102,7 -90,7 +90,7 @@@ static int batadv_log_open(struct inod batadv_debugfs_deprecated(file, "Use tracepoint batadv:batadv_dbg instead\n");
- nonseekable_open(inode, file); + stream_open(inode, file); file->private_data = inode->i_private; return 0; } diff --combined net/core/net-sysfs.c index 530e5b04b97d,e4fd68389d6f..d9c4360257ce --- a/net/core/net-sysfs.c +++ b/net/core/net-sysfs.c @@@ -754,9 -754,9 +754,9 @@@ static ssize_t store_rps_map(struct net rcu_assign_pointer(queue->rps_map, map);
if (map) - static_key_slow_inc(&rps_needed); + static_branch_inc(&rps_needed); if (old_map) - static_key_slow_dec(&rps_needed); + static_branch_dec(&rps_needed);
mutex_unlock(&rps_map_mutex);
@@@ -863,7 -863,6 +863,7 @@@ static struct attribute *rx_queue_defau #endif NULL }; +ATTRIBUTE_GROUPS(rx_queue_default);
static void rx_queue_release(struct kobject *kobj) { @@@ -912,7 -911,7 +912,7 @@@ static void rx_queue_get_ownership(stru static struct kobj_type rx_queue_ktype __ro_after_init = { .sysfs_ops = &rx_queue_sysfs_ops, .release = rx_queue_release, - .default_attrs = rx_queue_default_attrs, + .default_groups = rx_queue_default_groups, .namespace = rx_queue_namespace, .get_ownership = rx_queue_get_ownership, }; @@@ -1417,7 -1416,6 +1417,7 @@@ static struct attribute *netdev_queue_d #endif NULL }; +ATTRIBUTE_GROUPS(netdev_queue_default);
static void netdev_queue_release(struct kobject *kobj) { @@@ -1450,7 -1448,7 +1450,7 @@@ static void netdev_queue_get_ownership( static struct kobj_type netdev_queue_ktype __ro_after_init = { .sysfs_ops = &netdev_queue_sysfs_ops, .release = netdev_queue_release, - .default_attrs = netdev_queue_default_attrs, + .default_groups = netdev_queue_default_groups, .namespace = netdev_queue_namespace, .get_ownership = netdev_queue_get_ownership, }; diff --combined net/core/netpoll.c index bf5446192d6a,e365e8fb1c40..a0f05416657b --- a/net/core/netpoll.c +++ b/net/core/netpoll.c @@@ -149,7 -149,7 +149,7 @@@ static void poll_one_napi(struct napi_s * indicate that we are clearing the Tx path only. */ work = napi->poll(napi, 0); - WARN_ONCE(work, "%pF exceeded budget in poll\n", napi->poll); + WARN_ONCE(work, "%pS exceeded budget in poll\n", napi->poll); trace_napi_poll(napi, work, 0);
clear_bit(NAPI_STATE_NPSVC, &napi->state); @@@ -323,7 -323,7 +323,7 @@@ void netpoll_send_skb_on_dev(struct net if (skb_queue_len(&npinfo->txq) == 0 && !netpoll_owner_active(dev)) { struct netdev_queue *txq;
-		txq = netdev_pick_tx(dev, skb, NULL);
+		txq = netdev_core_pick_tx(dev, skb, NULL);
		/* try until next clock tick */
		for (tries = jiffies_to_usecs(1)/USEC_PER_POLL;

@@@ -346,7 -346,7 +346,7 @@@
	}
	WARN_ONCE(!irqs_disabled(),
-		  "netpoll_send_skb_on_dev(): %s enabled interrupts in poll (%pF)\n",
+		  "netpoll_send_skb_on_dev(): %s enabled interrupts in poll (%pS)\n",
		  dev->name, dev->netdev_ops->ndo_start_xmit);
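Both netpoll hunks swap %pF for %pS. As far as I can tell from the series, %pF existed for architectures where a function pointer is really a descriptor (ia64, ppc64 ELFv1); now that %pS dereferences such descriptors itself, %pF is redundant and is being phased out tree-wide. Hypothetical usage:

/* %pS resolves a function pointer straight to its symbol name */
static void example_report(struct napi_struct *napi)
{
	pr_warn("handler %pS exceeded its budget\n", napi->poll);
}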
}

diff --combined net/netfilter/nf_conntrack_netlink.c
index d2715b4d2e72,8dcc064d518d..7db79c1b8084
--- a/net/netfilter/nf_conntrack_netlink.c
+++ b/net/netfilter/nf_conntrack_netlink.c
@@@ -46,7 -46,7 +46,7 @@@
 #include <net/netfilter/nf_conntrack_timestamp.h>
 #include <net/netfilter/nf_conntrack_labels.h>
 #include <net/netfilter/nf_conntrack_synproxy.h>
- #ifdef CONFIG_NF_NAT_NEEDED
+ #if IS_ENABLED(CONFIG_NF_NAT)
 #include <net/netfilter/nf_nat.h>
 #include <net/netfilter/nf_nat_helper.h>
 #endif

@@@ -63,7 -63,7 +63,7 @@@ static int ctnetlink_dump_tuples_proto(
	int ret = 0;
	struct nlattr *nest_parms;
- nest_parms = nla_nest_start(skb, CTA_TUPLE_PROTO | NLA_F_NESTED); + nest_parms = nla_nest_start(skb, CTA_TUPLE_PROTO); if (!nest_parms) goto nla_put_failure; if (nla_put_u8(skb, CTA_PROTO_NUM, tuple->dst.protonum)) @@@ -104,7 -104,7 +104,7 @@@ static int ctnetlink_dump_tuples_ip(str int ret = 0; struct nlattr *nest_parms;
- nest_parms = nla_nest_start(skb, CTA_TUPLE_IP | NLA_F_NESTED); + nest_parms = nla_nest_start(skb, CTA_TUPLE_IP); if (!nest_parms) goto nla_put_failure;
@@@ -187,7 -187,7 +187,7 @@@ static int ctnetlink_dump_protoinfo(str if (!l4proto->to_nlattr) return 0;
- nest_proto = nla_nest_start(skb, CTA_PROTOINFO | NLA_F_NESTED); + nest_proto = nla_nest_start(skb, CTA_PROTOINFO); if (!nest_proto) goto nla_put_failure;
@@@ -215,7 -215,7 +215,7 @@@ static int ctnetlink_dump_helpinfo(stru if (!helper) goto out;
- nest_helper = nla_nest_start(skb, CTA_HELP | NLA_F_NESTED); + nest_helper = nla_nest_start(skb, CTA_HELP); if (!nest_helper) goto nla_put_failure; if (nla_put_string(skb, CTA_HELP_NAME, helper->name)) @@@ -249,7 -249,7 +249,7 @@@ dump_counters(struct sk_buff *skb, stru bytes = atomic64_read(&counter[dir].bytes); }
- nest_count = nla_nest_start(skb, attr | NLA_F_NESTED); + nest_count = nla_nest_start(skb, attr); if (!nest_count) goto nla_put_failure;
@@@ -293,7 -293,7 +293,7 @@@ ctnetlink_dump_timestamp(struct sk_buf if (!tstamp) return 0;
- nest_count = nla_nest_start(skb, CTA_TIMESTAMP | NLA_F_NESTED); + nest_count = nla_nest_start(skb, CTA_TIMESTAMP); if (!nest_count) goto nla_put_failure;
@@@ -337,7 -337,7 +337,7 @@@ static int ctnetlink_dump_secctx(struc return 0;
ret = -1; - nest_secctx = nla_nest_start(skb, CTA_SECCTX | NLA_F_NESTED); + nest_secctx = nla_nest_start(skb, CTA_SECCTX); if (!nest_secctx) goto nla_put_failure;
@@@ -397,7 -397,7 +397,7 @@@ static int ctnetlink_dump_master(struc if (!(ct->status & IPS_EXPECTED)) return 0;
- nest_parms = nla_nest_start(skb, CTA_TUPLE_MASTER | NLA_F_NESTED); + nest_parms = nla_nest_start(skb, CTA_TUPLE_MASTER); if (!nest_parms) goto nla_put_failure; if (ctnetlink_dump_tuples(skb, master_tuple(ct)) < 0) @@@ -415,7 -415,7 +415,7 @@@ dump_ct_seq_adj(struct sk_buff *skb, co { struct nlattr *nest_parms;
- nest_parms = nla_nest_start(skb, type | NLA_F_NESTED); + nest_parms = nla_nest_start(skb, type); if (!nest_parms) goto nla_put_failure;
@@@ -467,7 -467,7 +467,7 @@@ static int ctnetlink_dump_ct_synproxy(s if (!synproxy) return 0;
- nest_parms = nla_nest_start(skb, CTA_SYNPROXY | NLA_F_NESTED); + nest_parms = nla_nest_start(skb, CTA_SYNPROXY); if (!nest_parms) goto nla_put_failure;
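All of the NLA_F_NESTED removals above follow from a netlink API split in this merge: nla_nest_start() now ORs in NLA_F_NESTED itself, while the old no-flag behaviour lives on as nla_nest_start_noflag(). A sketch with hypothetical EXAMPLE_* attributes:

#include <net/netlink.h>

enum {	/* hypothetical attribute set */
	EXAMPLE_ATTR_UNSPEC,
	EXAMPLE_ATTR_NESTED,
	EXAMPLE_ATTR_VALUE,
};

static int example_dump(struct sk_buff *skb, u32 val)
{
	struct nlattr *nest;

	nest = nla_nest_start(skb, EXAMPLE_ATTR_NESTED); /* NLA_F_NESTED implied */
	if (!nest)
		return -EMSGSIZE;
	if (nla_put_u32(skb, EXAMPLE_ATTR_VALUE, val)) {
		nla_nest_cancel(skb, nest);
		return -EMSGSIZE;
	}
	nla_nest_end(skb, nest);
	return 0;
}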
@@@ -528,7 -528,7 +528,7 @@@ ctnetlink_fill_info(struct sk_buff *skb
zone = nf_ct_zone(ct);
- nest_parms = nla_nest_start(skb, CTA_TUPLE_ORIG | NLA_F_NESTED); + nest_parms = nla_nest_start(skb, CTA_TUPLE_ORIG); if (!nest_parms) goto nla_put_failure; if (ctnetlink_dump_tuples(skb, nf_ct_tuple(ct, IP_CT_DIR_ORIGINAL)) < 0) @@@ -538,7 -538,7 +538,7 @@@ goto nla_put_failure; nla_nest_end(skb, nest_parms);
- nest_parms = nla_nest_start(skb, CTA_TUPLE_REPLY | NLA_F_NESTED); + nest_parms = nla_nest_start(skb, CTA_TUPLE_REPLY); if (!nest_parms) goto nla_put_failure; if (ctnetlink_dump_tuples(skb, nf_ct_tuple(ct, IP_CT_DIR_REPLY)) < 0) @@@ -658,7 -658,7 +658,7 @@@ static size_t ctnetlink_nlmsg_size(cons + nla_total_size(0) /* CTA_HELP */ + nla_total_size(NF_CT_HELPER_NAME_LEN) /* CTA_HELP_NAME */ + ctnetlink_secctx_size(ct) - #ifdef CONFIG_NF_NAT_NEEDED + #if IS_ENABLED(CONFIG_NF_NAT) + 2 * nla_total_size(0) /* CTA_NAT_SEQ_ADJ_ORIG|REPL */ + 6 * nla_total_size(sizeof(u_int32_t)) /* CTA_NAT_SEQ_OFFSET */ #endif @@@ -720,7 -720,7 +720,7 @@@ ctnetlink_conntrack_event(unsigned int
zone = nf_ct_zone(ct);
- nest_parms = nla_nest_start(skb, CTA_TUPLE_ORIG | NLA_F_NESTED); + nest_parms = nla_nest_start(skb, CTA_TUPLE_ORIG); if (!nest_parms) goto nla_put_failure; if (ctnetlink_dump_tuples(skb, nf_ct_tuple(ct, IP_CT_DIR_ORIGINAL)) < 0) @@@ -730,7 -730,7 +730,7 @@@ goto nla_put_failure; nla_nest_end(skb, nest_parms);
- nest_parms = nla_nest_start(skb, CTA_TUPLE_REPLY | NLA_F_NESTED); + nest_parms = nla_nest_start(skb, CTA_TUPLE_REPLY); if (!nest_parms) goto nla_put_failure; if (ctnetlink_dump_tuples(skb, nf_ct_tuple(ct, IP_CT_DIR_REPLY)) < 0) @@@ -1020,12 -1020,12 +1020,12 @@@ static int ctnetlink_parse_tuple_ip(str struct nlattr *tb[CTA_IP_MAX+1]; int ret = 0;
- ret = nla_parse_nested(tb, CTA_IP_MAX, attr, NULL, NULL); + ret = nla_parse_nested_deprecated(tb, CTA_IP_MAX, attr, NULL, NULL); if (ret < 0) return ret;
- ret = nla_validate_nested(attr, CTA_IP_MAX, - cta_ip_nla_policy, NULL); + ret = nla_validate_nested_deprecated(attr, CTA_IP_MAX, + cta_ip_nla_policy, NULL); if (ret) return ret;
@@@ -1052,8 -1052,8 +1052,8 @@@ static int ctnetlink_parse_tuple_proto( struct nlattr *tb[CTA_PROTO_MAX+1]; int ret = 0;
- ret = nla_parse_nested(tb, CTA_PROTO_MAX, attr, proto_nla_policy, - NULL); + ret = nla_parse_nested_deprecated(tb, CTA_PROTO_MAX, attr, + proto_nla_policy, NULL); if (ret < 0) return ret;
@@@ -1065,8 -1065,9 +1065,9 @@@ l4proto = nf_ct_l4proto_find(tuple->dst.protonum);
if (likely(l4proto->nlattr_to_tuple)) { - ret = nla_validate_nested(attr, CTA_PROTO_MAX, - l4proto->nla_policy, NULL); + ret = nla_validate_nested_deprecated(attr, CTA_PROTO_MAX, + l4proto->nla_policy, + NULL); if (ret == 0) ret = l4proto->nlattr_to_tuple(tb, tuple); } @@@ -1129,8 -1130,8 +1130,8 @@@ ctnetlink_parse_tuple(const struct nlat
memset(tuple, 0, sizeof(*tuple));
- err = nla_parse_nested(tb, CTA_TUPLE_MAX, cda[type], tuple_nla_policy, - NULL); + err = nla_parse_nested_deprecated(tb, CTA_TUPLE_MAX, cda[type], + tuple_nla_policy, NULL); if (err < 0) return err;
@@@ -1180,7 -1181,8 +1181,8 @@@ static int ctnetlink_parse_help(const s int err; struct nlattr *tb[CTA_HELP_MAX+1];
- err = nla_parse_nested(tb, CTA_HELP_MAX, attr, help_nla_policy, NULL); + err = nla_parse_nested_deprecated(tb, CTA_HELP_MAX, attr, + help_nla_policy, NULL); if (err < 0) return err;
@@@ -1254,7 -1256,7 +1256,7 @@@ static int ctnetlink_del_conntrack(stru struct nf_conntrack_tuple tuple; struct nf_conn *ct; struct nfgenmsg *nfmsg = nlmsg_data(nlh); - u_int8_t u3 = nfmsg->nfgen_family; + u_int8_t u3 = nfmsg->version ? nfmsg->nfgen_family : AF_UNSPEC; struct nf_conntrack_zone zone; int err;
@@@ -1498,7 -1500,7 +1500,7 @@@ static int ctnetlink_get_ct_unconfirmed return -EOPNOTSUPP; }
- #ifdef CONFIG_NF_NAT_NEEDED + #if IS_ENABLED(CONFIG_NF_NAT) static int ctnetlink_parse_nat_setup(struct nf_conn *ct, enum nf_nat_manip_type manip, @@@ -1590,7 -1592,7 +1592,7 @@@ ctnetlink_change_status(struct nf_conn static int ctnetlink_setup_nat(struct nf_conn *ct, const struct nlattr * const cda[]) { - #ifdef CONFIG_NF_NAT_NEEDED + #if IS_ENABLED(CONFIG_NF_NAT) int ret;
if (!cda[CTA_NAT_DST] && !cda[CTA_NAT_SRC]) @@@ -1721,8 -1723,8 +1723,8 @@@ static int ctnetlink_change_protoinfo(s struct nlattr *tb[CTA_PROTOINFO_MAX+1]; int err = 0;
- err = nla_parse_nested(tb, CTA_PROTOINFO_MAX, attr, protoinfo_policy, - NULL); + err = nla_parse_nested_deprecated(tb, CTA_PROTOINFO_MAX, attr, + protoinfo_policy, NULL); if (err < 0) return err;
@@@ -1745,7 -1747,8 +1747,8 @@@ static int change_seq_adj(struct nf_ct_ int err; struct nlattr *cda[CTA_SEQADJ_MAX+1];
- err = nla_parse_nested(cda, CTA_SEQADJ_MAX, attr, seqadj_policy, NULL); + err = nla_parse_nested_deprecated(cda, CTA_SEQADJ_MAX, attr, + seqadj_policy, NULL); if (err < 0) return err;
@@@ -1822,8 -1825,9 +1825,9 @@@ static int ctnetlink_change_synproxy(st if (!synproxy) return 0;
- err = nla_parse_nested(tb, CTA_SYNPROXY_MAX, cda[CTA_SYNPROXY], - synproxy_policy, NULL); + err = nla_parse_nested_deprecated(tb, CTA_SYNPROXY_MAX, + cda[CTA_SYNPROXY], synproxy_policy, + NULL); if (err < 0) return err;
@@@ -2373,7 -2377,7 +2377,7 @@@ ctnetlink_glue_build_size(const struct + nla_total_size(0) /* CTA_HELP */ + nla_total_size(NF_CT_HELPER_NAME_LEN) /* CTA_HELP_NAME */ + ctnetlink_secctx_size(ct) - #ifdef CONFIG_NF_NAT_NEEDED + #if IS_ENABLED(CONFIG_NF_NAT) + 2 * nla_total_size(0) /* CTA_NAT_SEQ_ADJ_ORIG|REPL */ + 6 * nla_total_size(sizeof(u_int32_t)) /* CTA_NAT_SEQ_OFFSET */ #endif @@@ -2400,7 -2404,7 +2404,7 @@@ static int __ctnetlink_glue_build(struc
zone = nf_ct_zone(ct);
- nest_parms = nla_nest_start(skb, CTA_TUPLE_ORIG | NLA_F_NESTED); + nest_parms = nla_nest_start(skb, CTA_TUPLE_ORIG); if (!nest_parms) goto nla_put_failure; if (ctnetlink_dump_tuples(skb, nf_ct_tuple(ct, IP_CT_DIR_ORIGINAL)) < 0) @@@ -2410,7 -2414,7 +2414,7 @@@ goto nla_put_failure; nla_nest_end(skb, nest_parms);
- nest_parms = nla_nest_start(skb, CTA_TUPLE_REPLY | NLA_F_NESTED); + nest_parms = nla_nest_start(skb, CTA_TUPLE_REPLY); if (!nest_parms) goto nla_put_failure; if (ctnetlink_dump_tuples(skb, nf_ct_tuple(ct, IP_CT_DIR_REPLY)) < 0) @@@ -2472,7 -2476,7 +2476,7 @@@ ctnetlink_glue_build(struct sk_buff *sk { struct nlattr *nest_parms;
- nest_parms = nla_nest_start(skb, ct_attr | NLA_F_NESTED); + nest_parms = nla_nest_start(skb, ct_attr); if (!nest_parms) goto nla_put_failure;
@@@ -2553,7 -2557,8 +2557,8 @@@ ctnetlink_glue_parse(const struct nlatt struct nlattr *cda[CTA_MAX+1]; int ret;
- ret = nla_parse_nested(cda, CTA_MAX, attr, ct_nla_policy, NULL); + ret = nla_parse_nested_deprecated(cda, CTA_MAX, attr, ct_nla_policy, + NULL); if (ret < 0) return ret;
@@@ -2586,8 -2591,8 +2591,8 @@@ ctnetlink_glue_attach_expect(const stru struct nf_conntrack_expect *exp; int err;
- err = nla_parse_nested(cda, CTA_EXPECT_MAX, attr, exp_nla_policy, - NULL); + err = nla_parse_nested_deprecated(cda, CTA_EXPECT_MAX, attr, + exp_nla_policy, NULL); if (err < 0) return err;
@@@ -2644,7 -2649,7 +2649,7 @@@ static int ctnetlink_exp_dump_tuple(str { struct nlattr *nest_parms;
- nest_parms = nla_nest_start(skb, type | NLA_F_NESTED); + nest_parms = nla_nest_start(skb, type); if (!nest_parms) goto nla_put_failure; if (ctnetlink_dump_tuples(skb, tuple) < 0) @@@ -2671,7 -2676,7 +2676,7 @@@ static int ctnetlink_exp_dump_mask(stru m.src.u.all = mask->src.u.all; m.dst.protonum = tuple->dst.protonum;
- nest_parms = nla_nest_start(skb, CTA_EXPECT_MASK | NLA_F_NESTED); + nest_parms = nla_nest_start(skb, CTA_EXPECT_MASK); if (!nest_parms) goto nla_put_failure;
@@@ -2722,7 -2727,7 +2727,7 @@@ ctnetlink_exp_dump_expect(struct sk_buf struct nf_conn *master = exp->master; long timeout = ((long)exp->timeout.expires - (long)jiffies) / HZ; struct nf_conn_help *help; - #ifdef CONFIG_NF_NAT_NEEDED + #if IS_ENABLED(CONFIG_NF_NAT) struct nlattr *nest_parms; struct nf_conntrack_tuple nat_tuple = {}; #endif @@@ -2740,10 -2745,10 +2745,10 @@@ CTA_EXPECT_MASTER) < 0) goto nla_put_failure;
- #ifdef CONFIG_NF_NAT_NEEDED + #if IS_ENABLED(CONFIG_NF_NAT) if (!nf_inet_addr_cmp(&exp->saved_addr, &any_addr) || exp->saved_proto.all) { - nest_parms = nla_nest_start(skb, CTA_EXPECT_NAT | NLA_F_NESTED); + nest_parms = nla_nest_start(skb, CTA_EXPECT_NAT); if (!nest_parms) goto nla_put_failure;
@@@ -3204,13 -3209,13 +3209,13 @@@ ctnetlink_parse_expect_nat(const struc struct nf_conntrack_expect *exp, u_int8_t u3) { - #ifdef CONFIG_NF_NAT_NEEDED + #if IS_ENABLED(CONFIG_NF_NAT) struct nlattr *tb[CTA_EXPECT_NAT_MAX+1]; struct nf_conntrack_tuple nat_tuple = {}; int err;
- err = nla_parse_nested(tb, CTA_EXPECT_NAT_MAX, attr, - exp_nat_nla_policy, NULL); + err = nla_parse_nested_deprecated(tb, CTA_EXPECT_NAT_MAX, attr, + exp_nat_nla_policy, NULL); if (err < 0) return err;
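The nla_parse_nested() to nla_parse_nested_deprecated() renames throughout this file are mechanical: as part of the strict netlink validation work merged here, the liberal parser keeps its old semantics under the _deprecated name, and the short name is reserved for strictly validating callers. A sketch with a hypothetical attribute set and policy:

#include <net/netlink.h>

enum {	/* hypothetical attributes */
	EXAMPLE_ATTR_UNSPEC,
	EXAMPLE_ATTR_VALUE,
	__EXAMPLE_ATTR_MAX
};
#define EXAMPLE_MAX (__EXAMPLE_ATTR_MAX - 1)

static const struct nla_policy example_policy[EXAMPLE_MAX + 1] = {
	[EXAMPLE_ATTR_VALUE] = { .type = NLA_U32 },
};

static int example_parse(const struct nlattr *attr,
			 struct netlink_ext_ack *extack)
{
	struct nlattr *tb[EXAMPLE_MAX + 1];

	/* same liberal semantics the old nla_parse_nested() had */
	return nla_parse_nested_deprecated(tb, EXAMPLE_MAX, attr,
					   example_policy, extack);
}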
diff --combined net/netfilter/nf_flow_table_ip.c index 46022a2867d7,6452550d187f..0d603e20b519 --- a/net/netfilter/nf_flow_table_ip.c +++ b/net/netfilter/nf_flow_table_ip.c @@@ -181,9 -181,6 +181,9 @@@ static int nf_flow_tuple_ip(struct sk_b iph->protocol != IPPROTO_UDP) return -1;
+	if (iph->ttl <= 1)
+		return -1;
+
	thoff = iph->ihl * 4;
	if (!pskb_may_pull(skb, thoff + sizeof(*ports)))
		return -1;

@@@ -238,13 -235,10 +238,10 @@@ nf_flow_offload_ip_hook(void *priv, str
	if (tuplehash == NULL)
		return NF_ACCEPT;

-	outdev = dev_get_by_index_rcu(state->net, tuplehash->tuple.oifidx);
-	if (!outdev)
-		return NF_ACCEPT;
-
	dir = tuplehash->tuple.dir;
	flow = container_of(tuplehash, struct flow_offload, tuplehash[dir]);
	rt = (struct rtable *)flow->tuplehash[dir].tuple.dst_cache;
+	outdev = rt->dst.dev;

	if (unlikely(nf_flow_exceeds_mtu(skb, flow->tuplehash[dir].tuple.mtu)) &&
	    (ip_hdr(skb)->frag_off & htons(IP_DF)) != 0)

@@@ -414,9 -408,6 +411,9 @@@ static int nf_flow_tuple_ipv6(struct sk
	    ip6h->nexthdr != IPPROTO_UDP)
		return -1;

+	if (ip6h->hop_limit <= 1)
+		return -1;
+
	thoff = sizeof(*ip6h);
	if (!pskb_may_pull(skb, thoff + sizeof(*ports)))
		return -1;

@@@ -458,13 -449,10 +455,10 @@@ nf_flow_offload_ipv6_hook(void *priv, s
	if (tuplehash == NULL)
		return NF_ACCEPT;

-	outdev = dev_get_by_index_rcu(state->net, tuplehash->tuple.oifidx);
-	if (!outdev)
-		return NF_ACCEPT;
-
	dir = tuplehash->tuple.dir;
	flow = container_of(tuplehash, struct flow_offload, tuplehash[dir]);
	rt = (struct rt6_info *)flow->tuplehash[dir].tuple.dst_cache;
+	outdev = rt->dst.dev;

	if (unlikely(nf_flow_exceeds_mtu(skb, flow->tuplehash[dir].tuple.mtu)))
		return NF_ACCEPT;

diff --combined net/netfilter/nf_tables_api.c
index e4f6ecac48c3,d98416e83d4e..28241e82fd15
--- a/net/netfilter/nf_tables_api.c
+++ b/net/netfilter/nf_tables_api.c
@@@ -53,7 -53,6 +53,6 @@@ static const struct rhashtable_params n
	.hashfn = nft_chain_hash,
	.obj_hashfn = nft_chain_hash_obj,
	.obj_cmpfn = nft_chain_hash_cmp,
- 	.locks_mul = 1,
	.automatic_shrinking = true,
};
@@@ -214,33 -213,33 +213,33 @@@ static int nft_deltable(struct nft_ctx return err; }
-static int nft_trans_chain_add(struct nft_ctx *ctx, int msg_type)
+static struct nft_trans *nft_trans_chain_add(struct nft_ctx *ctx, int msg_type)
 {
	struct nft_trans *trans;

	trans = nft_trans_alloc(ctx, msg_type, sizeof(struct nft_trans_chain));
	if (trans == NULL)
-		return -ENOMEM;
+		return ERR_PTR(-ENOMEM);

	if (msg_type == NFT_MSG_NEWCHAIN)
		nft_activate_next(ctx->net, ctx->chain);

	list_add_tail(&trans->list, &ctx->net->nft.commit_list);
-	return 0;
+	return trans;
 }

 static int nft_delchain(struct nft_ctx *ctx)
 {
-	int err;
+	struct nft_trans *trans;

-	err = nft_trans_chain_add(ctx, NFT_MSG_DELCHAIN);
-	if (err < 0)
-		return err;
+	trans = nft_trans_chain_add(ctx, NFT_MSG_DELCHAIN);
+	if (IS_ERR(trans))
+		return PTR_ERR(trans);

	ctx->table->use--;
	nft_deactivate_next(ctx->net, ctx->chain);

-	return err;
+	return 0;
 }
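nft_trans_chain_add() now hands the transaction back to the caller (so nf_tables_addchain() below can stash the chain policy in it), using the kernel's ERR_PTR() idiom to return either the object or a negative errno in a single pointer. A condensed, hypothetical example of the idiom:

#include <linux/err.h>
#include <linux/slab.h>

struct example_obj {	/* hypothetical */
	int policy;
};

static struct example_obj *example_obj_add(void)
{
	struct example_obj *obj = kzalloc(sizeof(*obj), GFP_KERNEL);

	if (!obj)
		return ERR_PTR(-ENOMEM);	/* errno encoded in the pointer */
	return obj;
}

static int example_caller(void)
{
	struct example_obj *obj = example_obj_add();

	if (IS_ERR(obj))
		return PTR_ERR(obj);	/* decode the errno back out */

	obj->policy = -1;	/* caller can now stash state in the object */
	return 0;
}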
static void nft_rule_expr_activate(const struct nft_ctx *ctx, @@@ -1190,9 -1189,6 +1189,9 @@@ static int nft_dump_stats(struct sk_buf u64 pkts, bytes; int cpu;
+ if (!stats) + return 0; + memset(&total, 0, sizeof(total)); for_each_possible_cpu(cpu) { cpu_stats = per_cpu_ptr(stats, cpu); @@@ -1204,7 -1200,7 +1203,7 @@@ total.pkts += pkts; total.bytes += bytes; } - nest = nla_nest_start(skb, NFTA_CHAIN_COUNTERS); + nest = nla_nest_start_noflag(skb, NFTA_CHAIN_COUNTERS); if (nest == NULL) goto nla_put_failure;
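The new NULL check in nft_dump_stats() covers base chains that never had counters attached. The surrounding loop is the usual per-CPU aggregation pattern; a self-contained sketch with hypothetical types (the u64_stats seqcount keeps the 64-bit reads tear-free on 32-bit hosts):

#include <linux/percpu.h>
#include <linux/u64_stats_sync.h>

struct example_stats {	/* hypothetical per-cpu counter pair */
	u64 pkts;
	u64 bytes;
	struct u64_stats_sync syncp;
};

static void example_sum(struct example_stats __percpu *stats,
			u64 *pkts, u64 *bytes)
{
	int cpu;

	*pkts = *bytes = 0;
	if (!stats)		/* no counters attached, as guarded above */
		return;

	for_each_possible_cpu(cpu) {
		const struct example_stats *s = per_cpu_ptr(stats, cpu);
		unsigned int seq;
		u64 p, b;

		do {
			seq = u64_stats_fetch_begin(&s->syncp);
			p = s->pkts;
			b = s->bytes;
		} while (u64_stats_fetch_retry(&s->syncp, seq));
		*pkts += p;
		*bytes += b;
	}
}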
@@@ -1250,10 -1246,9 +1249,10 @@@ static int nf_tables_fill_chain_info(st if (nft_is_base_chain(chain)) { const struct nft_base_chain *basechain = nft_base_chain(chain); const struct nf_hook_ops *ops = &basechain->ops; + struct nft_stats __percpu *stats; struct nlattr *nest;
- nest = nla_nest_start(skb, NFTA_CHAIN_HOOK); + nest = nla_nest_start_noflag(skb, NFTA_CHAIN_HOOK); if (nest == NULL) goto nla_put_failure; if (nla_put_be32(skb, NFTA_HOOK_HOOKNUM, htonl(ops->hooknum))) @@@ -1272,9 -1267,8 +1271,9 @@@ if (nla_put_string(skb, NFTA_CHAIN_TYPE, basechain->type->name)) goto nla_put_failure;
- if (rcu_access_pointer(basechain->stats) && - nft_dump_stats(skb, rcu_dereference(basechain->stats))) + stats = rcu_dereference_check(basechain->stats, + lockdep_commit_lock_is_held(net)); + if (nft_dump_stats(skb, stats)) goto nla_put_failure; }
@@@ -1426,8 -1420,8 +1425,8 @@@ static struct nft_stats __percpu *nft_s struct nft_stats *stats; int err;
- err = nla_parse_nested(tb, NFTA_COUNTER_MAX, attr, nft_counter_policy, - NULL); + err = nla_parse_nested_deprecated(tb, NFTA_COUNTER_MAX, attr, + nft_counter_policy, NULL); if (err < 0) return ERR_PTR(err);
@@@ -1531,8 -1525,9 +1530,9 @@@ static int nft_chain_parse_hook(struct lockdep_assert_held(&net->nft.commit_mutex); lockdep_nfnl_nft_mutex_not_held();
- err = nla_parse_nested(ha, NFTA_HOOK_MAX, nla[NFTA_CHAIN_HOOK], - nft_hook_policy, NULL); + err = nla_parse_nested_deprecated(ha, NFTA_HOOK_MAX, + nla[NFTA_CHAIN_HOOK], + nft_hook_policy, NULL); if (err < 0) return err;
@@@ -1620,7 -1615,6 +1620,7 @@@ static int nf_tables_addchain(struct nf struct nft_base_chain *basechain; struct nft_stats __percpu *stats; struct net *net = ctx->net; + struct nft_trans *trans; struct nft_chain *chain; struct nft_rule **rules; int err; @@@ -1668,7 -1662,7 +1668,7 @@@ ops->dev = hook.dev;
chain->flags |= NFT_BASE_CHAIN; - basechain->policy = policy; + basechain->policy = NF_ACCEPT; } else { chain = kzalloc(sizeof(*chain), GFP_KERNEL); if (chain == NULL) @@@ -1704,18 -1698,13 +1704,18 @@@ if (err) goto err2;
- err = nft_trans_chain_add(ctx, NFT_MSG_NEWCHAIN); - if (err < 0) { + trans = nft_trans_chain_add(ctx, NFT_MSG_NEWCHAIN); + if (IS_ERR(trans)) { + err = PTR_ERR(trans); rhltable_remove(&table->chains_ht, &chain->rhlhead, nft_chain_ht_params); goto err2; }
+ nft_trans_chain_policy(trans) = -1; + if (nft_is_base_chain(chain)) + nft_trans_chain_policy(trans) = policy; + table->use++; list_add_tail_rcu(&chain->list, &table->chains);
@@@ -2071,7 -2060,8 +2071,8 @@@ static int nf_tables_fill_expr_info(str goto nla_put_failure;
if (expr->ops->dump) { - struct nlattr *data = nla_nest_start(skb, NFTA_EXPR_DATA); + struct nlattr *data = nla_nest_start_noflag(skb, + NFTA_EXPR_DATA); if (data == NULL) goto nla_put_failure; if (expr->ops->dump(skb, expr) < 0) @@@ -2090,7 -2080,7 +2091,7 @@@ int nft_expr_dump(struct sk_buff *skb, { struct nlattr *nest;
- nest = nla_nest_start(skb, attr); + nest = nla_nest_start_noflag(skb, attr); if (!nest) goto nla_put_failure; if (nf_tables_fill_expr_info(skb, expr) < 0) @@@ -2116,7 -2106,8 +2117,8 @@@ static int nf_tables_expr_parse(const s struct nlattr *tb[NFTA_EXPR_MAX + 1]; int err;
- err = nla_parse_nested(tb, NFTA_EXPR_MAX, nla, nft_expr_policy, NULL); + err = nla_parse_nested_deprecated(tb, NFTA_EXPR_MAX, nla, + nft_expr_policy, NULL); if (err < 0) return err;
@@@ -2125,8 -2116,9 +2127,9 @@@ return PTR_ERR(type);
if (tb[NFTA_EXPR_DATA]) { - err = nla_parse_nested(info->tb, type->maxattr, - tb[NFTA_EXPR_DATA], type->policy, NULL); + err = nla_parse_nested_deprecated(info->tb, type->maxattr, + tb[NFTA_EXPR_DATA], + type->policy, NULL); if (err < 0) goto err1; } else @@@ -2301,7 -2293,7 +2304,7 @@@ static int nf_tables_fill_rule_info(str goto nla_put_failure; }
- list = nla_nest_start(skb, NFTA_RULE_EXPRESSIONS); + list = nla_nest_start_noflag(skb, NFTA_RULE_EXPRESSIONS); if (list == NULL) goto nla_put_failure; nft_rule_for_each_expr(expr, next, rule) { @@@ -3205,9 -3197,7 +3208,7 @@@ static int nf_msecs_to_jiffies64(const
static __be64 nf_jiffies64_to_msecs(u64 input) { - u64 ms = jiffies64_to_nsecs(input); - - return cpu_to_be64(div_u64(ms, NSEC_PER_MSEC)); + return cpu_to_be64(jiffies64_to_msecs(input)); }
static int nf_tables_fill_set(struct sk_buff *skb, const struct nft_ctx *ctx, @@@ -3272,7 -3262,7 +3273,7 @@@ if (nla_put(skb, NFTA_SET_USERDATA, set->udlen, set->udata)) goto nla_put_failure;
- desc = nla_nest_start(skb, NFTA_SET_DESC); + desc = nla_nest_start_noflag(skb, NFTA_SET_DESC); if (desc == NULL) goto nla_put_failure; if (set->size && @@@ -3450,15 -3440,14 +3451,14 @@@ err return err; }
- static int nf_tables_set_desc_parse(const struct nft_ctx *ctx, - struct nft_set_desc *desc, + static int nf_tables_set_desc_parse(struct nft_set_desc *desc, const struct nlattr *nla) { struct nlattr *da[NFTA_SET_DESC_MAX + 1]; int err;
- err = nla_parse_nested(da, NFTA_SET_DESC_MAX, nla, - nft_set_desc_policy, NULL); + err = nla_parse_nested_deprecated(da, NFTA_SET_DESC_MAX, nla, + nft_set_desc_policy, NULL); if (err < 0) return err;
@@@ -3577,7 -3566,7 +3577,7 @@@ static int nf_tables_newset(struct net policy = ntohl(nla_get_be32(nla[NFTA_SET_POLICY]));
if (nla[NFTA_SET_DESC] != NULL) { - err = nf_tables_set_desc_parse(&ctx, &desc, nla[NFTA_SET_DESC]); + err = nf_tables_set_desc_parse(&desc, nla[NFTA_SET_DESC]); if (err < 0) return err; } @@@ -3797,8 -3786,8 +3797,8 @@@ bind } EXPORT_SYMBOL_GPL(nf_tables_bind_set);
- void nf_tables_unbind_set(const struct nft_ctx *ctx, struct nft_set *set, - struct nft_set_binding *binding, bool event) + static void nf_tables_unbind_set(const struct nft_ctx *ctx, struct nft_set *set, + struct nft_set_binding *binding, bool event) { list_del_rcu(&binding->list);
@@@ -3809,7 -3798,6 +3809,6 @@@ GFP_KERNEL); } } - EXPORT_SYMBOL_GPL(nf_tables_unbind_set);
void nf_tables_deactivate_set(const struct nft_ctx *ctx, struct nft_set *set, struct nft_set_binding *binding, @@@ -3924,7 -3912,7 +3923,7 @@@ static int nf_tables_fill_setelem(struc unsigned char *b = skb_tail_pointer(skb); struct nlattr *nest;
- nest = nla_nest_start(skb, NFTA_LIST_ELEM); + nest = nla_nest_start_noflag(skb, NFTA_LIST_ELEM); if (nest == NULL) goto nla_put_failure;
@@@ -4068,7 -4056,7 +4067,7 @@@ static int nf_tables_dump_set(struct sk if (nla_put_string(skb, NFTA_SET_ELEM_LIST_SET, set->name)) goto nla_put_failure;
- nest = nla_nest_start(skb, NFTA_SET_ELEM_LIST_ELEMENTS); + nest = nla_nest_start_noflag(skb, NFTA_SET_ELEM_LIST_ELEMENTS); if (nest == NULL) goto nla_put_failure;
@@@ -4140,7 -4128,7 +4139,7 @@@ static int nf_tables_fill_setelem_info( if (nla_put_string(skb, NFTA_SET_NAME, set->name)) goto nla_put_failure;
- nest = nla_nest_start(skb, NFTA_SET_ELEM_LIST_ELEMENTS); + nest = nla_nest_start_noflag(skb, NFTA_SET_ELEM_LIST_ELEMENTS); if (nest == NULL) goto nla_put_failure;
@@@ -4185,8 -4173,8 +4184,8 @@@ static int nft_get_set_elem(struct nft_ void *priv; int err;
- err = nla_parse_nested(nla, NFTA_SET_ELEM_MAX, attr, - nft_set_elem_policy, NULL); + err = nla_parse_nested_deprecated(nla, NFTA_SET_ELEM_MAX, attr, + nft_set_elem_policy, NULL); if (err < 0) return err;
@@@ -4417,8 -4405,8 +4416,8 @@@ static int nft_add_set_elem(struct nft_ u8 ulen; int err;
- err = nla_parse_nested(nla, NFTA_SET_ELEM_MAX, attr, - nft_set_elem_policy, NULL); + err = nla_parse_nested_deprecated(nla, NFTA_SET_ELEM_MAX, attr, + nft_set_elem_policy, NULL); if (err < 0) return err;
@@@ -4711,8 -4699,8 +4710,8 @@@ static int nft_del_setelem(struct nft_c void *priv; int err;
- err = nla_parse_nested(nla, NFTA_SET_ELEM_MAX, attr, - nft_set_elem_policy, NULL); + err = nla_parse_nested_deprecated(nla, NFTA_SET_ELEM_MAX, attr, + nft_set_elem_policy, NULL); if (err < 0) goto err1;
@@@ -4986,8 -4974,8 +4985,8 @@@ static struct nft_object *nft_obj_init( goto err1;
if (attr) { - err = nla_parse_nested(tb, type->maxattr, attr, type->policy, - NULL); + err = nla_parse_nested_deprecated(tb, type->maxattr, attr, + type->policy, NULL); if (err < 0) goto err2; } else { @@@ -5030,7 -5018,7 +5029,7 @@@ static int nft_object_dump(struct sk_bu { struct nlattr *nest;
- nest = nla_nest_start(skb, attr); + nest = nla_nest_start_noflag(skb, attr); if (!nest) goto nla_put_failure; if (obj->ops->dump(skb, obj, reset) < 0) @@@ -5563,8 -5551,8 +5562,8 @@@ static int nf_tables_flowtable_parse_ho int hooknum, priority; int err, n = 0, i;
- err = nla_parse_nested(tb, NFTA_FLOWTABLE_HOOK_MAX, attr, - nft_flowtable_hook_policy, NULL); + err = nla_parse_nested_deprecated(tb, NFTA_FLOWTABLE_HOOK_MAX, attr, + nft_flowtable_hook_policy, NULL); if (err < 0) return err;
@@@ -5847,14 -5835,14 +5846,14 @@@ static int nf_tables_fill_flowtable_inf NFTA_FLOWTABLE_PAD)) goto nla_put_failure;
- nest = nla_nest_start(skb, NFTA_FLOWTABLE_HOOK); + nest = nla_nest_start_noflag(skb, NFTA_FLOWTABLE_HOOK); if (!nest) goto nla_put_failure; if (nla_put_be32(skb, NFTA_FLOWTABLE_HOOK_NUM, htonl(flowtable->hooknum)) || nla_put_be32(skb, NFTA_FLOWTABLE_HOOK_PRIORITY, htonl(flowtable->priority))) goto nla_put_failure;
- nest_devs = nla_nest_start(skb, NFTA_FLOWTABLE_HOOK_DEVS); + nest_devs = nla_nest_start_noflag(skb, NFTA_FLOWTABLE_HOOK_DEVS); if (!nest_devs) goto nla_put_failure;
@@@ -6322,27 -6310,6 +6321,27 @@@ static int nf_tables_validate(struct ne return 0; }
+/* a drop policy has to be deferred until all rules have been activated,
+ * otherwise a large ruleset that contains a drop-policy base chain will
+ * cause all packets to get dropped until the full transaction has been
+ * processed.
+ *
+ * We defer the drop policy until the transaction has been finalized.
+ */
+static void nft_chain_commit_drop_policy(struct nft_trans *trans)
+{
+	struct nft_base_chain *basechain;
+
+	if (nft_trans_chain_policy(trans) != NF_DROP)
+		return;
+
+	if (!nft_is_base_chain(trans->ctx.chain))
+		return;
+
+	basechain = nft_base_chain(trans->ctx.chain);
+	basechain->policy = NF_DROP;
+}
+
 static void nft_chain_commit_update(struct nft_trans *trans)
 {
	struct nft_base_chain *basechain;

@@@ -6664,7 -6631,6 +6663,7 @@@ static int nf_tables_commit(struct net
			nf_tables_chain_notify(&trans->ctx, NFT_MSG_NEWCHAIN);
			/* trans destroyed after rcu grace period */
		} else {
+			nft_chain_commit_drop_policy(trans);
			nft_clear(net, trans->ctx.chain);
			nf_tables_chain_notify(&trans->ctx, NFT_MSG_NEWCHAIN);
			nft_trans_destroy(trans);

@@@ -7243,8 -7209,8 +7242,8 @@@ static int nft_verdict_init(const struc
	struct nft_chain *chain;
	int err;
- err = nla_parse_nested(tb, NFTA_VERDICT_MAX, nla, nft_verdict_policy, - NULL); + err = nla_parse_nested_deprecated(tb, NFTA_VERDICT_MAX, nla, + nft_verdict_policy, NULL); if (err < 0) return err;
@@@ -7302,7 -7268,7 +7301,7 @@@ int nft_verdict_dump(struct sk_buff *sk { struct nlattr *nest;
- nest = nla_nest_start(skb, type); + nest = nla_nest_start_noflag(skb, type); if (!nest) goto nla_put_failure;
@@@ -7374,7 -7340,8 +7373,8 @@@ int nft_data_init(const struct nft_ctx struct nlattr *tb[NFTA_DATA_MAX + 1]; int err;
- err = nla_parse_nested(tb, NFTA_DATA_MAX, nla, nft_data_policy, NULL); + err = nla_parse_nested_deprecated(tb, NFTA_DATA_MAX, nla, + nft_data_policy, NULL); if (err < 0) return err;
@@@ -7415,7 -7382,7 +7415,7 @@@ int nft_data_dump(struct sk_buff *skb, struct nlattr *nest; int err;
- nest = nla_nest_start(skb, attr); + nest = nla_nest_start_noflag(skb, attr); if (nest == NULL) return -1;
@@@ -7567,6 -7534,7 +7567,7 @@@ static int __init nf_tables_module_init if (err < 0) goto err5;
+	nft_chain_route_init();
	return err;
err5:
	rhltable_destroy(&nft_objname_ht);

@@@ -7586,6 -7554,7 +7587,7 @@@ static void __exit nf_tables_module_exi
	nfnetlink_subsys_unregister(&nf_tables_subsys);
	unregister_netdevice_notifier(&nf_tables_flowtable_notifier);
	nft_chain_filter_fini();
+	nft_chain_route_fini();
	unregister_pernet_subsys(&nf_tables_net_ops);
	cancel_work_sync(&trans_destroy_work);
	rcu_barrier();

diff --combined net/xfrm/xfrm_policy.c
index a6b58df7a70f,410233c5681e..7a43ae6b2a44
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@@ -27,10 -27,14 +27,14 @@@
 #include <linux/cpu.h>
 #include <linux/audit.h>
 #include <linux/rhashtable.h>
+ #include <linux/if_tunnel.h>
 #include <net/dst.h>
 #include <net/flow.h>
 #include <net/xfrm.h>
 #include <net/ip.h>
+ #if IS_ENABLED(CONFIG_IPV6_MIP6)
+ #include <net/mip6.h>
+ #endif
 #ifdef CONFIG_XFRM_STATISTICS
 #include <net/snmp.h>
 #endif

@@@ -2450,18 -2454,10 +2454,10 @@@ xfrm_tmpl_resolve(struct xfrm_policy **
static int xfrm_get_tos(const struct flowi *fl, int family)
{
-	const struct xfrm_policy_afinfo *afinfo;
-	int tos;
+	if (family == AF_INET)
+		return IPTOS_RT_MASK & fl->u.ip4.flowi4_tos;

-	afinfo = xfrm_policy_get_afinfo(family);
-	if (!afinfo)
-		return 0;
-
-	tos = afinfo->get_tos(fl);
-
-	rcu_read_unlock();
-
-	return tos;
+	return 0;
}
static inline struct xfrm_dst *xfrm_alloc_dst(struct net *net, int family) @@@ -2499,21 -2495,14 +2495,14 @@@ return xdst; }
- static inline int xfrm_init_path(struct xfrm_dst *path, struct dst_entry *dst, - int nfheader_len) + static void xfrm_init_path(struct xfrm_dst *path, struct dst_entry *dst, + int nfheader_len) { - const struct xfrm_policy_afinfo *afinfo = - xfrm_policy_get_afinfo(dst->ops->family); - int err; - - if (!afinfo) - return -EINVAL; - - err = afinfo->init_path(path, dst, nfheader_len); - - rcu_read_unlock(); - - return err; + if (dst->ops->family == AF_INET6) { + struct rt6_info *rt = (struct rt6_info *)dst; + path->path_cookie = rt6_get_cookie(rt); + path->u.rt6.rt6i_nfheader_len = nfheader_len; + } }
static inline int xfrm_fill_dst(struct xfrm_dst *xdst, struct net_device *dev, @@@ -2545,10 -2534,11 +2534,11 @@@ static struct dst_entry *xfrm_bundle_cr const struct flowi *fl, struct dst_entry *dst) { + const struct xfrm_state_afinfo *afinfo; + const struct xfrm_mode *inner_mode; struct net *net = xp_net(policy); unsigned long now = jiffies; struct net_device *dev; - struct xfrm_mode *inner_mode; struct xfrm_dst *xdst_prev = NULL; struct xfrm_dst *xdst0 = NULL; int i = 0; @@@ -2594,7 -2584,7 +2584,7 @@@ goto put_states; } } else - inner_mode = xfrm[i]->inner_mode; + inner_mode = &xfrm[i]->inner_mode;
xdst->route = dst; dst_copy_metrics(dst1, dst); @@@ -2622,7 -2612,14 +2612,14 @@@ dst1->lastuse = now;
dst1->input = dst_discard; - dst1->output = inner_mode->afinfo->output; + + rcu_read_lock(); + afinfo = xfrm_state_afinfo_get_rcu(inner_mode->family); + if (likely(afinfo)) + dst1->output = afinfo->output; + else + dst1->output = dst_discard_out; + rcu_read_unlock();
xdst_prev = xdst;
@@@ -3263,20 -3260,229 +3260,231 @@@ xfrm_policy_ok(const struct xfrm_tmpl * return start; }
+ static void + decode_session4(struct sk_buff *skb, struct flowi *fl, bool reverse) + { + const struct iphdr *iph = ip_hdr(skb); - u8 *xprth = skb_network_header(skb) + iph->ihl * 4; ++ int ihl = iph->ihl; ++ u8 *xprth = skb_network_header(skb) + ihl * 4; + struct flowi4 *fl4 = &fl->u.ip4; + int oif = 0; + + if (skb_dst(skb)) + oif = skb_dst(skb)->dev->ifindex; + + memset(fl4, 0, sizeof(struct flowi4)); + fl4->flowi4_mark = skb->mark; + fl4->flowi4_oif = reverse ? skb->skb_iif : oif; + ++ fl4->flowi4_proto = iph->protocol; ++ fl4->daddr = reverse ? iph->saddr : iph->daddr; ++ fl4->saddr = reverse ? iph->daddr : iph->saddr; ++ fl4->flowi4_tos = iph->tos; ++ + if (!ip_is_fragment(iph)) { + switch (iph->protocol) { + case IPPROTO_UDP: + case IPPROTO_UDPLITE: + case IPPROTO_TCP: + case IPPROTO_SCTP: + case IPPROTO_DCCP: + if (xprth + 4 < skb->data || + pskb_may_pull(skb, xprth + 4 - skb->data)) { + __be16 *ports; + - xprth = skb_network_header(skb) + iph->ihl * 4; ++ xprth = skb_network_header(skb) + ihl * 4; + ports = (__be16 *)xprth; + + fl4->fl4_sport = ports[!!reverse]; + fl4->fl4_dport = ports[!reverse]; + } + break; + case IPPROTO_ICMP: + if (xprth + 2 < skb->data || + pskb_may_pull(skb, xprth + 2 - skb->data)) { + u8 *icmp; + - xprth = skb_network_header(skb) + iph->ihl * 4; ++ xprth = skb_network_header(skb) + ihl * 4; + icmp = xprth; + + fl4->fl4_icmp_type = icmp[0]; + fl4->fl4_icmp_code = icmp[1]; + } + break; + case IPPROTO_ESP: + if (xprth + 4 < skb->data || + pskb_may_pull(skb, xprth + 4 - skb->data)) { + __be32 *ehdr; + - xprth = skb_network_header(skb) + iph->ihl * 4; ++ xprth = skb_network_header(skb) + ihl * 4; + ehdr = (__be32 *)xprth; + + fl4->fl4_ipsec_spi = ehdr[0]; + } + break; + case IPPROTO_AH: + if (xprth + 8 < skb->data || + pskb_may_pull(skb, xprth + 8 - skb->data)) { + __be32 *ah_hdr; + - xprth = skb_network_header(skb) + iph->ihl * 4; ++ xprth = skb_network_header(skb) + ihl * 4; + ah_hdr = (__be32 *)xprth; + + fl4->fl4_ipsec_spi = ah_hdr[1]; + } + break; + case IPPROTO_COMP: + if (xprth + 4 < skb->data || + pskb_may_pull(skb, xprth + 4 - skb->data)) { + __be16 *ipcomp_hdr; + - xprth = skb_network_header(skb) + iph->ihl * 4; ++ xprth = skb_network_header(skb) + ihl * 4; + ipcomp_hdr = (__be16 *)xprth; + + fl4->fl4_ipsec_spi = htonl(ntohs(ipcomp_hdr[1])); + } + break; + case IPPROTO_GRE: + if (xprth + 12 < skb->data || + pskb_may_pull(skb, xprth + 12 - skb->data)) { + __be16 *greflags; + __be32 *gre_hdr; + - xprth = skb_network_header(skb) + iph->ihl * 4; ++ xprth = skb_network_header(skb) + ihl * 4; + greflags = (__be16 *)xprth; + gre_hdr = (__be32 *)xprth; + + if (greflags[0] & GRE_KEY) { + if (greflags[0] & GRE_CSUM) + gre_hdr++; + fl4->fl4_gre_key = gre_hdr[1]; + } + } + break; + default: + fl4->fl4_ipsec_spi = 0; + break; + } + } - fl4->flowi4_proto = iph->protocol; - fl4->daddr = reverse ? iph->saddr : iph->daddr; - fl4->saddr = reverse ? 
iph->daddr : iph->saddr; - fl4->flowi4_tos = iph->tos; + } + + #if IS_ENABLED(CONFIG_IPV6) + static void + decode_session6(struct sk_buff *skb, struct flowi *fl, bool reverse) + { + struct flowi6 *fl6 = &fl->u.ip6; + int onlyproto = 0; + const struct ipv6hdr *hdr = ipv6_hdr(skb); + u32 offset = sizeof(*hdr); + struct ipv6_opt_hdr *exthdr; + const unsigned char *nh = skb_network_header(skb); + u16 nhoff = IP6CB(skb)->nhoff; + int oif = 0; + u8 nexthdr; + + if (!nhoff) + nhoff = offsetof(struct ipv6hdr, nexthdr); + + nexthdr = nh[nhoff]; + + if (skb_dst(skb)) + oif = skb_dst(skb)->dev->ifindex; + + memset(fl6, 0, sizeof(struct flowi6)); + fl6->flowi6_mark = skb->mark; + fl6->flowi6_oif = reverse ? skb->skb_iif : oif; + + fl6->daddr = reverse ? hdr->saddr : hdr->daddr; + fl6->saddr = reverse ? hdr->daddr : hdr->saddr; + + while (nh + offset + sizeof(*exthdr) < skb->data || + pskb_may_pull(skb, nh + offset + sizeof(*exthdr) - skb->data)) { + nh = skb_network_header(skb); + exthdr = (struct ipv6_opt_hdr *)(nh + offset); + + switch (nexthdr) { + case NEXTHDR_FRAGMENT: + onlyproto = 1; + /* fall through */ + case NEXTHDR_ROUTING: + case NEXTHDR_HOP: + case NEXTHDR_DEST: + offset += ipv6_optlen(exthdr); + nexthdr = exthdr->nexthdr; + exthdr = (struct ipv6_opt_hdr *)(nh + offset); + break; + case IPPROTO_UDP: + case IPPROTO_UDPLITE: + case IPPROTO_TCP: + case IPPROTO_SCTP: + case IPPROTO_DCCP: + if (!onlyproto && (nh + offset + 4 < skb->data || + pskb_may_pull(skb, nh + offset + 4 - skb->data))) { + __be16 *ports; + + nh = skb_network_header(skb); + ports = (__be16 *)(nh + offset); + fl6->fl6_sport = ports[!!reverse]; + fl6->fl6_dport = ports[!reverse]; + } + fl6->flowi6_proto = nexthdr; + return; + case IPPROTO_ICMPV6: + if (!onlyproto && (nh + offset + 2 < skb->data || + pskb_may_pull(skb, nh + offset + 2 - skb->data))) { + u8 *icmp; + + nh = skb_network_header(skb); + icmp = (u8 *)(nh + offset); + fl6->fl6_icmp_type = icmp[0]; + fl6->fl6_icmp_code = icmp[1]; + } + fl6->flowi6_proto = nexthdr; + return; + #if IS_ENABLED(CONFIG_IPV6_MIP6) + case IPPROTO_MH: + offset += ipv6_optlen(exthdr); + if (!onlyproto && (nh + offset + 3 < skb->data || + pskb_may_pull(skb, nh + offset + 3 - skb->data))) { + struct ip6_mh *mh; + + nh = skb_network_header(skb); + mh = (struct ip6_mh *)(nh + offset); + fl6->fl6_mh_type = mh->ip6mh_type; + } + fl6->flowi6_proto = nexthdr; + return; + #endif + /* XXX Why are there these headers? */ + case IPPROTO_AH: + case IPPROTO_ESP: + case IPPROTO_COMP: + default: + fl6->fl6_ipsec_spi = 0; + fl6->flowi6_proto = nexthdr; + return; + } + } + } + #endif + int __xfrm_decode_session(struct sk_buff *skb, struct flowi *fl, unsigned int family, int reverse) { - const struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family); - int err; - - if (unlikely(afinfo == NULL)) + switch (family) { + case AF_INET: + decode_session4(skb, fl, reverse); + break; + #if IS_ENABLED(CONFIG_IPV6) + case AF_INET6: + decode_session6(skb, fl, reverse); + break; + #endif + default: return -EAFNOSUPPORT; + }
-	afinfo->decode_session(skb, fl, reverse);
-
-	err = security_xfrm_decode_session(skb, &fl->flowi_secid);
-	rcu_read_unlock();
-	return err;
+	return security_xfrm_decode_session(skb, &fl->flowi_secid);
}
EXPORT_SYMBOL(__xfrm_decode_session);
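A hypothetical condensed form of the repeated "xprth + n < skb->data || pskb_may_pull(...)" test in decode_session4() above: the len bytes at thoff past the network header are either already in the linear head, or we try to pull them there. Note that a successful pskb_may_pull() may reallocate the head, which is why the hunks recompute xprth after the check before dereferencing it.

#include <linux/skbuff.h>

/* Sketch only; name and helper shape are assumptions, not the xfrm API. */
static bool example_transport_bytes_ok(struct sk_buff *skb,
				       unsigned int thoff, unsigned int len)
{
	u8 *xprth = skb_network_header(skb) + thoff;

	return xprth + len < skb->data ||
	       pskb_may_pull(skb, xprth + len - skb->data);
}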
diff --combined net/xfrm/xfrm_state.c
index c62f712fdaf7,3edbf4b26116..c5d81316330b
--- a/net/xfrm/xfrm_state.c
+++ b/net/xfrm/xfrm_state.c
@@@ -173,7 -173,7 +173,7 @@@ static DEFINE_SPINLOCK(xfrm_state_gc_lo
int __xfrm_state_delete(struct xfrm_state *x);
int km_query(struct xfrm_state *x, struct xfrm_tmpl *t, struct xfrm_policy *pol); - bool km_is_alive(const struct km_event *c); + static bool km_is_alive(const struct km_event *c); void km_state_expired(struct xfrm_state *x, int hard, u32 portid);
static DEFINE_SPINLOCK(xfrm_type_lock); @@@ -330,100 -330,67 +330,67 @@@ static void xfrm_put_type_offload(cons module_put(type->owner); }
- static DEFINE_SPINLOCK(xfrm_mode_lock); - int xfrm_register_mode(struct xfrm_mode *mode, int family) - { - struct xfrm_state_afinfo *afinfo; - struct xfrm_mode **modemap; - int err; - - if (unlikely(mode->encap >= XFRM_MODE_MAX)) - return -EINVAL; - - afinfo = xfrm_state_get_afinfo(family); - if (unlikely(afinfo == NULL)) - return -EAFNOSUPPORT; - - err = -EEXIST; - modemap = afinfo->mode_map; - spin_lock_bh(&xfrm_mode_lock); - if (modemap[mode->encap]) - goto out; - - err = -ENOENT; - if (!try_module_get(afinfo->owner)) - goto out; - - mode->afinfo = afinfo; - modemap[mode->encap] = mode; - err = 0; - - out: - spin_unlock_bh(&xfrm_mode_lock); - rcu_read_unlock(); - return err; - } - EXPORT_SYMBOL(xfrm_register_mode); - - int xfrm_unregister_mode(struct xfrm_mode *mode, int family) - { - struct xfrm_state_afinfo *afinfo; - struct xfrm_mode **modemap; - int err; - - if (unlikely(mode->encap >= XFRM_MODE_MAX)) - return -EINVAL; - - afinfo = xfrm_state_get_afinfo(family); - if (unlikely(afinfo == NULL)) - return -EAFNOSUPPORT; - - err = -ENOENT; - modemap = afinfo->mode_map; - spin_lock_bh(&xfrm_mode_lock); - if (likely(modemap[mode->encap] == mode)) { - modemap[mode->encap] = NULL; - module_put(mode->afinfo->owner); - err = 0; - } - - spin_unlock_bh(&xfrm_mode_lock); - rcu_read_unlock(); - return err; - } - EXPORT_SYMBOL(xfrm_unregister_mode); - - static struct xfrm_mode *xfrm_get_mode(unsigned int encap, int family) - { - struct xfrm_state_afinfo *afinfo; - struct xfrm_mode *mode; - int modload_attempted = 0; + static const struct xfrm_mode xfrm4_mode_map[XFRM_MODE_MAX] = { + [XFRM_MODE_BEET] = { + .encap = XFRM_MODE_BEET, + .flags = XFRM_MODE_FLAG_TUNNEL, + .family = AF_INET, + }, + [XFRM_MODE_TRANSPORT] = { + .encap = XFRM_MODE_TRANSPORT, + .family = AF_INET, + }, + [XFRM_MODE_TUNNEL] = { + .encap = XFRM_MODE_TUNNEL, + .flags = XFRM_MODE_FLAG_TUNNEL, + .family = AF_INET, + }, + }; + + static const struct xfrm_mode xfrm6_mode_map[XFRM_MODE_MAX] = { + [XFRM_MODE_BEET] = { + .encap = XFRM_MODE_BEET, + .flags = XFRM_MODE_FLAG_TUNNEL, + .family = AF_INET6, + }, + [XFRM_MODE_ROUTEOPTIMIZATION] = { + .encap = XFRM_MODE_ROUTEOPTIMIZATION, + .family = AF_INET6, + }, + [XFRM_MODE_TRANSPORT] = { + .encap = XFRM_MODE_TRANSPORT, + .family = AF_INET6, + }, + [XFRM_MODE_TUNNEL] = { + .encap = XFRM_MODE_TUNNEL, + .flags = XFRM_MODE_FLAG_TUNNEL, + .family = AF_INET6, + }, + }; + + static const struct xfrm_mode *xfrm_get_mode(unsigned int encap, int family) + { + const struct xfrm_mode *mode;
if (unlikely(encap >= XFRM_MODE_MAX)) return NULL;
- retry: - afinfo = xfrm_state_get_afinfo(family); - if (unlikely(afinfo == NULL)) - return NULL; - - mode = READ_ONCE(afinfo->mode_map[encap]); - if (unlikely(mode && !try_module_get(mode->owner))) - mode = NULL; - - rcu_read_unlock(); - if (!mode && !modload_attempted) { - request_module("xfrm-mode-%d-%d", family, encap); - modload_attempted = 1; - goto retry; + switch (family) { + case AF_INET: + mode = &xfrm4_mode_map[encap]; + if (mode->family == family) + return mode; + break; + case AF_INET6: + mode = &xfrm6_mode_map[encap]; + if (mode->family == family) + return mode; + break; + default: + break; }
- return mode; - } - - static void xfrm_put_mode(struct xfrm_mode *mode) - { - module_put(mode->owner); + return NULL; }
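The deleted xfrm_register_mode()/xfrm_unregister_mode() machinery is replaced by the const, compiled-in per-family tables above, so mode lookup no longer needs module refcounts or a registration lock. A condensed sketch of the table-lookup pattern, with hypothetical names:

#include <linux/socket.h>	/* AF_INET */

enum {	/* hypothetical encap types */
	EXAMPLE_MODE_TRANSPORT,
	EXAMPLE_MODE_TUNNEL,
	EXAMPLE_MODE_MAX
};

struct example_mode {
	u8 encap;
	u8 family;	/* 0 marks an unpopulated slot */
};

static const struct example_mode example_mode_map[EXAMPLE_MODE_MAX] = {
	[EXAMPLE_MODE_TRANSPORT] = { EXAMPLE_MODE_TRANSPORT, AF_INET },
	[EXAMPLE_MODE_TUNNEL]    = { EXAMPLE_MODE_TUNNEL,    AF_INET },
};

static const struct example_mode *example_get_mode(unsigned int encap)
{
	if (encap >= EXAMPLE_MODE_MAX)
		return NULL;
	/* like the xfrm tables, validity is checked via the family field */
	return example_mode_map[encap].family ? &example_mode_map[encap] : NULL;
}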
void xfrm_state_free(struct xfrm_state *x) @@@ -434,7 -401,7 +401,7 @@@ EXPORT_SYMBOL(xfrm_state_free)
static void ___xfrm_state_destroy(struct xfrm_state *x) { - tasklet_hrtimer_cancel(&x->mtimer); + hrtimer_cancel(&x->mtimer); del_timer_sync(&x->rtimer); kfree(x->aead); kfree(x->aalg); @@@ -444,12 -411,6 +411,6 @@@ kfree(x->coaddr); kfree(x->replay_esn); kfree(x->preplay_esn); - if (x->inner_mode) - xfrm_put_mode(x->inner_mode); - if (x->inner_mode_iaf) - xfrm_put_mode(x->inner_mode_iaf); - if (x->outer_mode) - xfrm_put_mode(x->outer_mode); if (x->type_offload) xfrm_put_type_offload(x->type_offload); if (x->type) { @@@ -479,8 -440,8 +440,8 @@@ static void xfrm_state_gc_task(struct w
static enum hrtimer_restart xfrm_timer_handler(struct hrtimer *me) { - struct tasklet_hrtimer *thr = container_of(me, struct tasklet_hrtimer, timer); - struct xfrm_state *x = container_of(thr, struct xfrm_state, mtimer); + struct xfrm_state *x = container_of(me, struct xfrm_state, mtimer); + enum hrtimer_restart ret = HRTIMER_NORESTART; time64_t now = ktime_get_real_seconds(); time64_t next = TIME64_MAX; int warn = 0; @@@ -544,8 -505,7 +505,8 @@@ km_state_expired(x, 0, 0); resched: if (next != TIME64_MAX) { - tasklet_hrtimer_start(&x->mtimer, ktime_set(next, 0), HRTIMER_MODE_REL); + hrtimer_forward_now(&x->mtimer, ktime_set(next, 0)); + ret = HRTIMER_RESTART; }
goto out; @@@ -562,7 -522,7 +523,7 @@@ expired
out: spin_unlock(&x->lock); - return HRTIMER_NORESTART; + return ret; }
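These hunks migrate the state timer from the tasklet_hrtimer wrapper (being removed) to a plain hrtimer in a _SOFT mode, which expires in softirq context. Instead of calling a start function on itself, the handler now re-arms by forwarding the timer and returning HRTIMER_RESTART. A sketch with hypothetical names:

#include <linux/hrtimer.h>
#include <linux/ktime.h>

struct example_state {	/* hypothetical */
	struct hrtimer timer;
};

static bool example_work_pending(struct example_state *x)
{
	return false;	/* stub for the sketch */
}

static enum hrtimer_restart example_timer_fn(struct hrtimer *t)
{
	struct example_state *x = container_of(t, struct example_state, timer);

	if (!example_work_pending(x))
		return HRTIMER_NORESTART;

	/* re-arm from inside the handler: forward, then ask for restart */
	hrtimer_forward_now(t, ms_to_ktime(100));
	return HRTIMER_RESTART;
}

static void example_timer_setup(struct example_state *x)
{
	hrtimer_init(&x->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_SOFT);
	x->timer.function = example_timer_fn;
	hrtimer_start(&x->timer, ms_to_ktime(100), HRTIMER_MODE_REL_SOFT);
}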
static void xfrm_replay_timer_handler(struct timer_list *t); @@@ -581,8 -541,8 +542,8 @@@ struct xfrm_state *xfrm_state_alloc(str INIT_HLIST_NODE(&x->bydst); INIT_HLIST_NODE(&x->bysrc); INIT_HLIST_NODE(&x->byspi); - tasklet_hrtimer_init(&x->mtimer, xfrm_timer_handler, - CLOCK_BOOTTIME, HRTIMER_MODE_ABS); + hrtimer_init(&x->mtimer, CLOCK_BOOTTIME, HRTIMER_MODE_ABS_SOFT); + x->mtimer.function = xfrm_timer_handler; timer_setup(&x->rtimer, xfrm_replay_timer_handler, 0); x->curlft.add_time = ktime_get_real_seconds(); x->lft.soft_byte_limit = XFRM_INF; @@@ -591,8 -551,6 +552,6 @@@ x->lft.hard_packet_limit = XFRM_INF; x->replay_maxage = 0; x->replay_maxdiff = 0; - x->inner_mode = NULL; - x->inner_mode_iaf = NULL; spin_lock_init(&x->lock); } return x; @@@ -1048,9 -1006,7 +1007,9 @@@ found hlist_add_head_rcu(&x->byspi, net->xfrm.state_byspi + h); } x->lft.hard_add_expires_seconds = net->xfrm.sysctl_acq_expires; - tasklet_hrtimer_start(&x->mtimer, ktime_set(net->xfrm.sysctl_acq_expires, 0), HRTIMER_MODE_REL); + hrtimer_start(&x->mtimer, + ktime_set(net->xfrm.sysctl_acq_expires, 0), + HRTIMER_MODE_REL_SOFT); net->xfrm.state_num++; xfrm_hash_grow_check(net, x->bydst.next != NULL); spin_unlock_bh(&net->xfrm.xfrm_state_lock); @@@ -1162,7 -1118,7 +1121,7 @@@ static void __xfrm_state_insert(struct hlist_add_head_rcu(&x->byspi, net->xfrm.state_byspi + h); }
- tasklet_hrtimer_start(&x->mtimer, ktime_set(1, 0), HRTIMER_MODE_REL); + hrtimer_start(&x->mtimer, ktime_set(1, 0), HRTIMER_MODE_REL_SOFT); if (x->replay_maxage) mod_timer(&x->rtimer, jiffies + x->replay_maxage);
@@@ -1269,9 -1225,7 +1228,9 @@@ static struct xfrm_state *__find_acq_co x->mark.m = m->m; x->lft.hard_add_expires_seconds = net->xfrm.sysctl_acq_expires; xfrm_state_hold(x); - tasklet_hrtimer_start(&x->mtimer, ktime_set(net->xfrm.sysctl_acq_expires, 0), HRTIMER_MODE_REL); + hrtimer_start(&x->mtimer, + ktime_set(net->xfrm.sysctl_acq_expires, 0), + HRTIMER_MODE_REL_SOFT); list_add(&x->km.all, &net->xfrm.state_all); hlist_add_head_rcu(&x->bydst, net->xfrm.state_bydst + h); h = xfrm_src_hash(net, daddr, saddr, family); @@@ -1576,8 -1530,7 +1535,8 @@@ out memcpy(&x1->lft, &x->lft, sizeof(x1->lft)); x1->km.dying = 0;
- tasklet_hrtimer_start(&x1->mtimer, ktime_set(1, 0), HRTIMER_MODE_REL); + hrtimer_start(&x1->mtimer, ktime_set(1, 0), + HRTIMER_MODE_REL_SOFT); if (x1->curlft.use_time) xfrm_state_check_expire(x1);
@@@ -1616,7 -1569,7 +1575,7 @@@ int xfrm_state_check_expire(struct xfrm if (x->curlft.bytes >= x->lft.hard_byte_limit || x->curlft.packets >= x->lft.hard_packet_limit) { x->km.state = XFRM_STATE_EXPIRED; - tasklet_hrtimer_start(&x->mtimer, 0, HRTIMER_MODE_REL); + hrtimer_start(&x->mtimer, 0, HRTIMER_MODE_REL_SOFT); return -EINVAL; }
@@@ -2072,7 -2025,7 +2031,7 @@@ int km_report(struct net *net, u8 proto } EXPORT_SYMBOL(km_report);
- bool km_is_alive(const struct km_event *c) + static bool km_is_alive(const struct km_event *c) { struct xfrm_mgr *km; bool is_alive = false; @@@ -2088,7 -2041,6 +2047,6 @@@
return is_alive; } - EXPORT_SYMBOL(km_is_alive);
int xfrm_user_policy(struct sock *sk, int optname, u8 __user *optval, int optlen) { @@@ -2201,6 -2153,7 +2159,7 @@@ struct xfrm_state_afinfo *xfrm_state_af
return rcu_dereference(xfrm_state_afinfo[family]); } + EXPORT_SYMBOL_GPL(xfrm_state_afinfo_get_rcu);
struct xfrm_state_afinfo *xfrm_state_get_afinfo(unsigned int family) { @@@ -2248,8 -2201,9 +2207,9 @@@ int xfrm_state_mtu(struct xfrm_state *x
int __xfrm_init_state(struct xfrm_state *x, bool init_replay, bool offload) { - struct xfrm_state_afinfo *afinfo; - struct xfrm_mode *inner_mode; + const struct xfrm_state_afinfo *afinfo; + const struct xfrm_mode *inner_mode; + const struct xfrm_mode *outer_mode; int family = x->props.family; int err;
@@@ -2275,25 -2229,22 +2235,22 @@@ goto error;
if (!(inner_mode->flags & XFRM_MODE_FLAG_TUNNEL) && - family != x->sel.family) { - xfrm_put_mode(inner_mode); + family != x->sel.family) goto error; - }
- x->inner_mode = inner_mode; + x->inner_mode = *inner_mode; } else { - struct xfrm_mode *inner_mode_iaf; + const struct xfrm_mode *inner_mode_iaf; int iafamily = AF_INET;
inner_mode = xfrm_get_mode(x->props.mode, x->props.family); if (inner_mode == NULL) goto error;
- if (!(inner_mode->flags & XFRM_MODE_FLAG_TUNNEL)) { - xfrm_put_mode(inner_mode); + if (!(inner_mode->flags & XFRM_MODE_FLAG_TUNNEL)) goto error; - } - x->inner_mode = inner_mode; + + x->inner_mode = *inner_mode;
if (x->props.family == AF_INET) iafamily = AF_INET6; @@@ -2301,9 -2252,7 +2258,7 @@@ inner_mode_iaf = xfrm_get_mode(x->props.mode, iafamily); if (inner_mode_iaf) { if (inner_mode_iaf->flags & XFRM_MODE_FLAG_TUNNEL) - x->inner_mode_iaf = inner_mode_iaf; - else - xfrm_put_mode(inner_mode_iaf); + x->inner_mode_iaf = *inner_mode_iaf; } }
@@@ -2317,12 -2266,13 +2272,13 @@@ if (err) goto error;
- x->outer_mode = xfrm_get_mode(x->props.mode, family); - if (x->outer_mode == NULL) { + outer_mode = xfrm_get_mode(x->props.mode, family); + if (!outer_mode) { err = -EPROTONOSUPPORT; goto error; }
+	x->outer_mode = *outer_mode;
	if (init_replay) {
		err = xfrm_init_replay(x);
		if (err)

diff --combined scripts/link-vmlinux.sh
index e4383e0f476e,e3c06b9482a2..a7124f895b24
--- a/scripts/link-vmlinux.sh
+++ b/scripts/link-vmlinux.sh
@@@ -35,7 -35,7 +35,7 @@@ set -
info() {
	if [ "${quiet}" != "silent_" ]; then
-		printf "  %-7s %s\n" ${1} ${2}
+		printf "  %-7s %s\n" "${1}" "${2}"
	fi
}
@@@ -91,6 -91,25 +91,25 @@@ vmlinux_link(
	fi
}
+ # generate .BTF typeinfo from DWARF debuginfo
+ gen_btf()
+ {
+ 	local pahole_ver;
+
+ 	if ! [ -x "$(command -v ${PAHOLE})" ]; then
+ 		info "BTF" "${1}: pahole (${PAHOLE}) is not available"
+ 		return 0
+ 	fi
+
+ 	pahole_ver=$(${PAHOLE} --version | sed -E 's/v([0-9]+).([0-9]+)/\1\2/')
+ 	if [ "${pahole_ver}" -lt "113" ]; then
+ 		info "BTF" "${1}: pahole version $(${PAHOLE} --version) is too old, need at least v1.13"
+ 		return 0
+ 	fi
+
+ 	info "BTF" ${1}
+ 	LLVM_OBJCOPY=${OBJCOPY} ${PAHOLE} -J ${1}
+ }
# Create ${2} .o file with all symbols from the ${1} object file
kallsyms()

@@@ -193,9 -212,6 +212,9 @@@ modpost_link vmlinux.
# modpost vmlinux.o to check for section mismatches
${MAKE} -f "${srctree}/scripts/Makefile.modpost" vmlinux.o
+info MODINFO modules.builtin.modinfo
+${OBJCOPY} -j .modinfo -O binary vmlinux.o modules.builtin.modinfo
+
kallsymso=""
kallsyms_vmlinux=""
if [ -n "${CONFIG_KALLSYMS}" ]; then

@@@ -251,6 -267,10 +270,10 @@@ f
info LD vmlinux
vmlinux_link "${kallsymso}" vmlinux
+ if [ -n "${CONFIG_DEBUG_INFO_BTF}" ]; then
+ 	gen_btf vmlinux
+ fi
+
if [ -n "${CONFIG_BUILDTIME_EXTABLE_SORT}" ]; then
	info SORTEX vmlinux
	sortextable vmlinux