The following commit has been merged in the master branch:

commit 867484d92e088c97f69129c221254bb47a8a0b52
Merge: 1828dfc6f2e5db7d6fbb928421718d211e84045d a8eceea84a3a3504e42f6495cf462027c5d19cb0
Author: Stephen Rothwell <sfr@canb.auug.org.au>
Date:   Fri Mar 13 13:41:16 2020 +1100

    Merge remote-tracking branch 'net-next/master'

    # Conflicts:
    #	drivers/net/ethernet/mscc/ocelot.c
    #	fs/sysfs/group.c
    #	net/ipv4/inet_diag.c
diff --combined MAINTAINERS
index 61d6eb186d71,6918c3f0ff1c..a0f1f4757930
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@@ -693,7 -693,7 +693,7 @@@ ALLWINNER CPUFREQ DRIVE
 M:	Yangtao Li <tiny.windzz@gmail.com>
 L:	linux-pm@vger.kernel.org
 S:	Maintained
-F:	Documentation/devicetree/bindings/opp/sun50i-nvmem-cpufreq.txt
+F:	Documentation/devicetree/bindings/opp/allwinner,sun50i-h6-operating-points.yaml
 F:	drivers/cpufreq/sun50i-cpufreq-nvmem.c

 ALLWINNER CRYPTO DRIVERS
@@@ -2238,8 -2238,6 +2238,8 @@@ M:	Andreas Färber <afaerber@suse.de
 L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 L:	linux-realtek-soc@lists.infradead.org (moderated for non-subscribers)
 S:	Maintained
+F:	arch/arm/boot/dts/rtd*
+F:	arch/arm/mach-realtek/
 F:	arch/arm64/boot/dts/realtek/
 F:	Documentation/devicetree/bindings/arm/realtek.yaml

@@@ -2959,14 -2957,6 +2959,14 @@@ S:	Maintaine
 F:	Documentation/devicetree/bindings/sound/axentia,*
 F:	sound/soc/atmel/tse850-pcm5142.c

+AXI-FAN-CONTROL HARDWARE MONITOR DRIVER
+M:	Nuno Sá <nuno.sa@analog.com>
+W:	http://ez.analog.com/community/linux-device-drivers
+L:	linux-hwmon@vger.kernel.org
+S:	Supported
+F:	drivers/hwmon/axi-fan-control.c
+F:	Documentation/devicetree/bindings/hwmon/adi,axi-fan-control.yaml
+
 AXXIA I2C CONTROLLER
 M:	Krzysztof Adamski <krzysztof.adamski@nokia.com>
 L:	linux-i2c@vger.kernel.org
@@@ -3441,7 -3431,7 +3441,7 @@@ L:	linux-i2c@vger.kernel.or
 L:	bcm-kernel-feedback-list@broadcom.com
 S:	Supported
 F:	drivers/i2c/busses/i2c-brcmstb.c
-F:	Documentation/devicetree/bindings/i2c/i2c-brcmstb.txt
+F:	Documentation/devicetree/bindings/i2c/brcm,brcmstb-i2c.yaml

 BROADCOM BRCMSTB USB2 and USB3 PHY DRIVER
 M:	Al Cooper <alcooperx@gmail.com>
@@@ -4027,12 -4017,12 +4027,12 @@@ M:	Cheng-Yi Chiang <cychiang@chromium.o
 S:	Maintained
 R:	Enric Balletbo i Serra <enric.balletbo@collabora.com>
 R:	Guenter Roeck <groeck@chromium.org>
-F:	Documentation/devicetree/bindings/sound/google,cros-ec-codec.txt
+F:	Documentation/devicetree/bindings/sound/google,cros-ec-codec.yaml
 F:	sound/soc/codecs/cros_ec_codec.*

 CIRRUS LOGIC AUDIO CODEC DRIVERS
-M:	Brian Austin <brian.austin@cirrus.com>
-M:	Paul Handrigan <Paul.Handrigan@cirrus.com>
+M:	James Schulman <james.schulman@cirrus.com>
+M:	David Rhodes <david.rhodes@cirrus.com>
 L:	alsa-devel@alsa-project.org (moderated for non-subscribers)
 S:	Maintained
 F:	sound/soc/codecs/cs*
@@@ -4083,6 -4073,7 +4083,6 @@@ F:	drivers/scsi/snic
 CISCO VIC ETHERNET NIC DRIVER
 M:	Christian Benvenuti <benve@cisco.com>
 M:	Govindarajulu Varadarajan <_govind@gmx.com>
-M:	Parvi Kaustubhi <pkaustub@cisco.com>
 S:	Supported
 F:	drivers/net/ethernet/cisco/enic/

@@@ -4128,7 -4119,6 +4128,7 @@@ B:	https://github.com/ClangBuiltLinux/l
 C:	irc://chat.freenode.net/clangbuiltlinux
 S:	Supported
 K:	\b(?i:clang|llvm)\b
+F:	Documentation/kbuild/llvm.rst

 CLEANCACHE API
 M:	Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
@@@ -4485,7 -4475,7 +4485,7 @@@ L:	linux-media@vger.kernel.or
 T:	git git://linuxtv.org/media_tree.git
 S:	Maintained
 F:	drivers/media/platform/sunxi/sun6i-csi/
-F:	Documentation/devicetree/bindings/media/sun6i-csi.txt
+F:	Documentation/devicetree/bindings/media/allwinner,sun6i-a31-csi.yaml

 CW1200 WLAN driver
 M:	Solomon Peachy <pizza@shaftnet.org>
@@@ -4582,7 -4572,7 +4582,7 @@@ F:	drivers/infiniband/hw/cxgb4
 F:	include/uapi/rdma/cxgb4-abi.h

 CXGB4VF ETHERNET DRIVER (CXGB4VF)
-M:	Casey Leedom <leedom@chelsio.com>
+M:	Vishal Kulkarni <vishal@gmail.com>
 L:	netdev@vger.kernel.org
 W:	http://www.chelsio.com
 S:	Supported
@@@ -5212,7 -5202,7 +5212,7 @@@ M:	Greg Kroah-Hartman <gregkh@linuxfoun
 R:	"Rafael J. Wysocki" <rafael@kernel.org>
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/driver-core.git
 S:	Supported
-F:	Documentation/kobject.txt
+F:	Documentation/core-api/kobject.rst
 F:	drivers/base/
 F:	fs/debugfs/
 F:	fs/sysfs/
@@@ -5678,7 -5668,7 +5678,7 @@@ L:	dri-devel@lists.freedesktop.or
 T:	git git://anongit.freedesktop.org/drm/drm-misc
 S:	Maintained
 F:	drivers/gpu/drm/stm
-F:	Documentation/devicetree/bindings/display/st,stm32-ltdc.txt
+F:	Documentation/devicetree/bindings/display/st,stm32-ltdc.yaml

 DRM DRIVERS FOR TI LCDC
 M:	Jyri Sarha <jsarha@ti.com>
@@@ -6208,6 -6198,7 +6208,6 @@@ S:	Supporte
 F:	drivers/scsi/be2iscsi/

 Emulex 10Gbps NIC BE2, BE3-R, Lancer, Skyhawk-R DRIVER (be2net)
-M:	Sathya Perla <sathya.perla@broadcom.com>
 M:	Ajit Khaparde <ajit.khaparde@broadcom.com>
 M:	Sriharsha Basavapatna <sriharsha.basavapatna@broadcom.com>
 M:	Somnath Kotur <somnath.kotur@broadcom.com>
@@@ -6315,13 -6306,6 +6315,13 @@@ F:	include/trace/events/mdio.
 F:	include/uapi/linux/mdio.h
 F:	include/uapi/linux/mii.h

+EXFAT FILE SYSTEM
+M:	Namjae Jeon <namjae.jeon@samsung.com>
+M:	Sungjong Seo <sj1557.seo@samsung.com>
+L:	linux-fsdevel@vger.kernel.org
+S:	Maintained
+F:	fs/exfat/
+
 EXFAT FILE SYSTEM
 M:	Valdis Kletnieks <valdis.kletnieks@vt.edu>
 L:	linux-fsdevel@vger.kernel.org
@@@ -6951,7 -6935,7 +6951,7 @@@ S:	Maintaine
 F:	scripts/gcc-plugins/
 F:	scripts/gcc-plugin.sh
 F:	scripts/Makefile.gcc-plugins
-F:	Documentation/core-api/gcc-plugins.rst
+F:	Documentation/kbuild/gcc-plugins.rst

 GASKET DRIVER FRAMEWORK
 M:	Rob Springer <rspringer@google.com>
@@@ -7754,7 -7738,7 +7754,7 @@@ Hyper-V CORE AND DRIVER
 M:	"K. Y. Srinivasan" <kys@microsoft.com>
 M:	Haiyang Zhang <haiyangz@microsoft.com>
 M:	Stephen Hemminger <sthemmin@microsoft.com>
-M:	Sasha Levin <sashal@kernel.org>
+M:	Wei Liu <wei.liu@kernel.org>
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/hyperv/linux.git
 L:	linux-hyperv@vger.kernel.org
 S:	Supported
@@@ -8494,6 -8478,7 +8494,6 @@@ L:	dmaengine@vger.kernel.or
 S:	Supported
 F:	drivers/dma/idxd/*
 F:	include/uapi/linux/idxd.h
-F:	include/linux/idxd.h

 INTEL IDLE DRIVER
 M:	Jacob Pan <jacob.jun.pan@linux.intel.com>
@@@ -8585,15 -8570,15 +8585,15 @@@ M:	Ashutosh Dixit <ashutosh.dixit@intel
 S:	Supported
 W:	https://github.com/sudeepdutt/mic
 W:	http://software.intel.com/en-us/mic-developer
+F:	Documentation/misc-devices/mic/
+F:	drivers/dma/mic_x100_dma.c
+F:	drivers/dma/mic_x100_dma.h
+F:	drivers/misc/mic/
 F:	include/linux/mic_bus.h
 F:	include/linux/scif.h
 F:	include/uapi/linux/mic_common.h
 F:	include/uapi/linux/mic_ioctl.h
 F:	include/uapi/linux/scif_ioctl.h
-F:	drivers/misc/mic/
-F:	drivers/dma/mic_x100_dma.c
-F:	drivers/dma/mic_x100_dma.h
-F:	Documentation/mic/

 INTEL PMC CORE DRIVER
 M:	Rajneesh Bhardwaj <rajneesh.bhardwaj@intel.com>
@@@ -8700,7 -8685,7 +8700,7 @@@ M:	Emmanuel Grumbach <emmanuel.grumbach
 M:	Luca Coelho <luciano.coelho@intel.com>
 M:	Intel Linux Wireless <linuxwifi@intel.com>
 L:	linux-wireless@vger.kernel.org
-W:	http://intellinuxwireless.org
+W:	https://wireless.wiki.kernel.org/en/users/drivers/iwlwifi
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/iwlwifi/iwlwifi.git
 S:	Supported
 F:	drivers/net/wireless/intel/iwlwifi/
@@@ -10179,7 -10164,7 +10179,7 @@@ MAXBOTIX ULTRASONIC RANGER IIO DRIVE
 M:	Andreas Klinger <ak@it-klinger.de>
 L:	linux-iio@vger.kernel.org
 S:	Maintained
-F:	Documentation/devicetree/bindings/iio/proximity/maxbotix,mb1232.txt
+F:	Documentation/devicetree/bindings/iio/proximity/maxbotix,mb1232.yaml
 F:	drivers/iio/proximity/mb1232.c

 MAXIM MAX77650 PMIC MFD DRIVER
@@@ -10251,13 -10236,6 +10251,13 @@@ F:	drivers/net/can/m_can/m_can.
 F:	drivers/net/can/m_can/m_can.h
 F:	drivers/net/can/m_can/m_can_platform.c

+MCP2221A MICROCHIP USB-HID TO I2C BRIDGE DRIVER
+M:	Rishi Gupta <gupt21@gmail.com>
+L:	linux-i2c@vger.kernel.org
+L:	linux-input@vger.kernel.org
+S:	Maintained
+F:	drivers/hid/hid-mcp2221.c
+
 MCP4018 AND MCP4531 MICROCHIP DIGITAL POTENTIOMETER DRIVERS
 M:	Peter Rosin <peda@axentia.se>
 L:	linux-iio@vger.kernel.org
@@@ -10489,7 -10467,7 +10489,7 @@@ M:	Hugues Fruchet <hugues.fruchet@st.co
 L:	linux-media@vger.kernel.org
 T:	git git://linuxtv.org/media_tree.git
 S:	Supported
-F:	Documentation/devicetree/bindings/media/st,stm32-dcmi.txt
+F:	Documentation/devicetree/bindings/media/st,stm32-dcmi.yaml
 F:	drivers/media/platform/stm32/stm32-dcmi.c

 MEDIA DRIVERS FOR NVIDIA TEGRA - VDE
@@@ -10967,7 -10945,6 +10967,7 @@@ F:	drivers/media/platform/atmel/atmel-i
 F:	drivers/media/platform/atmel/atmel-isc-base.c
 F:	drivers/media/platform/atmel/atmel-isc-regs.h
 F:	Documentation/devicetree/bindings/media/atmel-isc.txt
+F:	include/linux/atmel-isc-media.h

 MICROCHIP ISI DRIVER
 M:	Eugen Hristev <eugen.hristev@microchip.com>
@@@ -11142,7 -11119,7 +11142,7 @@@ M:	Thomas Bogendoerfer <tsbogend@alpha.
 L:	linux-mips@vger.kernel.org
 W:	http://www.linux-mips.org/
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/mips/linux.git
-Q:	http://patchwork.linux-mips.org/project/linux-mips/list/
+Q:	https://patchwork.kernel.org/project/linux-mips/list/
 S:	Maintained
 F:	Documentation/devicetree/bindings/mips/
 F:	Documentation/mips/
@@@ -12762,7 -12739,7 +12762,7 @@@ M:	Tom Joseph <tjoseph@cadence.com
 L:	linux-pci@vger.kernel.org
 S:	Maintained
 F:	Documentation/devicetree/bindings/pci/cdns,*.txt
-F:	drivers/pci/controller/pcie-cadence*
+F:	drivers/pci/controller/cadence/

 PCI DRIVER FOR FREESCALE LAYERSCAPE
 M:	Minghuan Lian <minghuan.Lian@nxp.com>
@@@ -12774,14 -12751,6 +12774,14 @@@ L:	linux-arm-kernel@lists.infradead.or
 S:	Maintained
 F:	drivers/pci/controller/dwc/*layerscape*
+PCI DRIVER FOR NXP LAYERSCAPE GEN4 CONTROLLER
+M:	Hou Zhiqiang <Zhiqiang.Hou@nxp.com>
+L:	linux-pci@vger.kernel.org
+L:	linux-arm-kernel@lists.infradead.org
+S:	Maintained
+F:	Documentation/devicetree/bindings/pci/layerscape-pcie-gen4.txt
+F:	drivers/pci/controller/mobiveil/pcie-layerscape-gen4.c
+
 PCI DRIVER FOR GENERIC OF HOSTS
 M:	Will Deacon <will@kernel.org>
 L:	linux-pci@vger.kernel.org
@@@ -12824,7 -12793,7 +12824,7 @@@ M:	Hou Zhiqiang <Zhiqiang.Hou@nxp.com
 L:	linux-pci@vger.kernel.org
 S:	Supported
 F:	Documentation/devicetree/bindings/pci/mobiveil-pcie.txt
-F:	drivers/pci/controller/pcie-mobiveil.c
+F:	drivers/pci/controller/mobiveil/pcie-mobiveil*
 PCI DRIVER FOR MVEBU (Marvell Armada 370 and Armada XP SOC support)
 M:	Thomas Petazzoni <thomas.petazzoni@bootlin.com>
@@@ -12983,6 -12952,7 +12983,6 @@@ M:	Robert Richter <rrichter@marvell.com
 L:	linux-pci@vger.kernel.org
 L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S:	Supported
-F:	Documentation/devicetree/bindings/pci/pci-thunder-*
 F:	drivers/pci/controller/pci-thunder-*

 PCIE DRIVER FOR HISILICON
@@@ -13379,7 -13349,6 +13379,7 @@@ F:	Documentation/devicetree/bindings/ii
 PNP SUPPORT
 M:	"Rafael J. Wysocki" <rafael.j.wysocki@intel.com>
 S:	Maintained
+F:	include/linux/pnp.h
 F:	drivers/pnp/

 POSIX CLOCKS and TIMERS
@@@ -13693,6 -13662,12 +13693,12 @@@ L:	alsa-devel@alsa-project.org (moderat
 S:	Supported
 F:	sound/soc/qcom/

+ QCOM IPA DRIVER
+ M:	Alex Elder <elder@kernel.org>
+ L:	netdev@vger.kernel.org
+ S:	Supported
+ F:	drivers/net/ipa/
+
 QEMU MACHINE EMULATOR AND VIRTUALIZER SUPPORT
 M:	Gabriel Somlo <somlo@cmu.edu>
 M:	"Michael S. Tsirkin" <mst@redhat.com>
@@@ -13928,7 -13903,6 +13934,7 @@@ L:	linux-arm-msm@vger.kernel.or
 T:	git git://linuxtv.org/media_tree.git
 S:	Maintained
 F:	drivers/media/platform/qcom/venus/
+F:	Documentation/devicetree/bindings/media/*venus*

 QUALCOMM WCN36XX WIRELESS DRIVER
 M:	Kalle Valo <kvalo@codeaurora.org>
@@@ -14259,7 -14233,7 +14265,7 @@@ F:	include/dt-bindings/reset
 F:	include/linux/reset.h
 F:	include/linux/reset/
 F:	include/linux/reset-controller.h
-K:	\b(?:devm_|of_)?reset_control(?:ler_[a-z]+|_[a-z_]+)?\b
+K:	\b(?:devm_|of_)?reset_control(?:ler_[a-z]+|_[a-z_]+)?\b

 RESTARTABLE SEQUENCES SUPPORT
 M:	Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
@@@ -14392,14 -14366,6 +14398,14 @@@ F:	include/net/rose.
 F:	include/uapi/linux/rose.h
 F:	net/rose/

+ROTATION DRIVER FOR ALLWINNER A83T
+M:	Jernej Skrabec <jernej.skrabec@siol.net>
+L:	linux-media@vger.kernel.org
+T:	git git://linuxtv.org/media_tree.git
+S:	Maintained
+F:	drivers/media/platform/sunxi/sun8i-rotate/
+F:	Documentation/devicetree/bindings/media/allwinner,sun8i-a83t-de2-rotate.yaml
+
 RTL2830 MEDIA DRIVER
 M:	Antti Palosaari <crope@iki.fi>
 L:	linux-media@vger.kernel.org
@@@ -14500,7 -14466,7 +14506,7 @@@ F:	Documentation/s390
 F:	Documentation/driver-api/s390-drivers.rst

 S390 COMMON I/O LAYER
-M:	Sebastian Ott <sebott@linux.ibm.com>
+M:	Vineeth Vijayan <vneethv@linux.ibm.com>
 M:	Peter Oberparleiter <oberpar@linux.ibm.com>
 L:	linux-s390@vger.kernel.org
 W:	http://www.ibm.com/developerworks/linux/linux390/
@@@ -14542,7 -14508,7 +14548,7 @@@ S:	Supporte
 F:	drivers/s390/net/

 S390 PCI SUBSYSTEM
-M:	Sebastian Ott <sebott@linux.ibm.com>
+M:	Niklas Schnelle <schnelle@linux.ibm.com>
 M:	Gerald Schaefer <gerald.schaefer@de.ibm.com>
 L:	linux-s390@vger.kernel.org
 W:	http://www.ibm.com/developerworks/linux/linux390/
@@@ -15555,14 -15521,6 +15561,14 @@@ S:	Maintaine
 F:	drivers/media/i2c/imx214.c
 F:	Documentation/devicetree/bindings/media/i2c/sony,imx214.txt

+SONY IMX219 SENSOR DRIVER
+M:	Dave Stevenson <dave.stevenson@raspberrypi.com>
+L:	linux-media@vger.kernel.org
+T:	git git://linuxtv.org/media_tree.git
+S:	Maintained
+F:	drivers/media/i2c/imx219.c
+F:	Documentation/devicetree/bindings/media/i2c/imx219.yaml
+
 SONY IMX258 SENSOR DRIVER
 M:	Sakari Ailus <sakari.ailus@linux.intel.com>
 L:	linux-media@vger.kernel.org
@@@ -15664,17 -15622,6 +15670,17 @@@ F:	sound/soc
 F:	include/dt-bindings/sound/
 F:	include/sound/soc*

+SOUND - SOUND OPEN FIRMWARE (SOF) DRIVERS
+M:	Pierre-Louis Bossart <pierre-louis.bossart@linux.intel.com>
+M:	Liam Girdwood <lgirdwood@gmail.com>
+M:	Ranjani Sridharan <ranjani.sridharan@linux.intel.com>
+M:	Kai Vehmanen <kai.vehmanen@linux.intel.com>
+M:	Daniel Baluta <daniel.baluta@nxp.com>
+L:	sound-open-firmware@alsa-project.org (moderated for non-subscribers)
+W:	https://github.com/thesofproject/linux/
+S:	Supported
+F:	sound/soc/sof/
+
 SOUNDWIRE SUBSYSTEM
 M:	Vinod Koul <vkoul@kernel.org>
 M:	Sanyog Kale <sanyog.r.kale@intel.com>
@@@ -15981,7 -15928,7 +15987,7 @@@ F:	drivers/*/stm32-*timer
 F:	drivers/pwm/pwm-stm32*
 F:	include/linux/*/stm32-*tim*
 F:	Documentation/ABI/testing/*timer-stm32
-F:	Documentation/devicetree/bindings/*/stm32-*timer*
+F:	Documentation/devicetree/bindings/*/*stm32-*timer*
 F:	Documentation/devicetree/bindings/pwm/pwm-stm32*

 STMMAC ETHERNET DRIVER
@@@ -16140,8 -16087,6 +16146,8 @@@ SYNOPSYS DESIGNWARE 8250 UART DRIVE
 R:	Andy Shevchenko <andriy.shevchenko@linux.intel.com>
 S:	Maintained
 F:	drivers/tty/serial/8250/8250_dw.c
+F:	drivers/tty/serial/8250/8250_dwlib.*
+F:	drivers/tty/serial/8250/8250_lpss.c

 SYNOPSYS DESIGNWARE APB GPIO DRIVER
 M:	Hoan Tran <hoan@os.amperecomputing.com>
@@@ -16172,6 -16117,13 +16178,13 @@@ L:	netdev@vger.kernel.or
 S:	Supported
 F:	drivers/net/ethernet/synopsys/

+ SYNOPSYS DESIGNWARE ETHERNET XPCS DRIVER
+ M:	Jose Abreu <Jose.Abreu@synopsys.com>
+ L:	netdev@vger.kernel.org
+ S:	Supported
+ F:	drivers/net/phy/mdio-xpcs.c
+ F:	include/linux/mdio-xpcs.h
+
 SYNOPSYS DESIGNWARE I2C DRIVER
 M:	Jarkko Nikula <jarkko.nikula@linux.intel.com>
 R:	Andy Shevchenko <andriy.shevchenko@linux.intel.com>
@@@ -16807,12 -16759,12 +16820,12 @@@ F:	sound/soc/codecs/twl4030
 TI VPE/CAL DRIVERS
 M:	Benoit Parrot <bparrot@ti.com>
 L:	linux-media@vger.kernel.org
+S:	Maintained
 W:	http://linuxtv.org/
 Q:	http://patchwork.linuxtv.org/project/linux-media/list/
-S:	Maintained
-F:	drivers/media/platform/ti-vpe/
+F:	Documentation/devicetree/bindings/media/ti,cal.yaml
 F:	Documentation/devicetree/bindings/media/ti,vpe.yaml
-	Documentation/devicetree/bindings/media/ti,cal.yaml
+F:	drivers/media/platform/ti-vpe/

 TI WILINK WIRELESS DRIVERS
 L:	linux-wireless@vger.kernel.org
@@@ -17504,7 -17456,7 +17517,7 @@@ L:	linux-media@vger.kernel.or
 T:	git git://linuxtv.org/media_tree.git
 W:	https://linuxtv.org
 S:	Odd Fixes
-F:	drivers/media/usb/usbvision/
+F:	drivers/staging/media/usbvision/

 USB WEBCAM GADGET
 M:	Laurent Pinchart <laurent.pinchart@ideasonboard.com>
@@@ -17913,6 -17865,13 +17926,13 @@@ S:	Supporte
 F:	arch/x86/kernel/cpu/vmware.c
 F:	arch/x86/include/asm/vmware.h

+ VMWARE VIRTUAL PTP CLOCK DRIVER
+ M:	Vivek Thampi <vithampi@vmware.com>
+ M:	"VMware, Inc." <pv-drivers@vmware.com>
+ L:	netdev@vger.kernel.org
+ S:	Supported
+ F:	drivers/ptp/ptp_vmw.c
+
 VMWARE PVRDMA DRIVER
 M:	Adit Ranadive <aditr@vmware.com>
 M:	VMware PV-Drivers <pv-drivers@vmware.com>

diff --combined drivers/base/core.c
index dbb0f9130f42,fb8b7990f6fd..befc2722dbfc
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@@ -718,8 -718,6 +718,8 @@@ static void __device_links_queue_sync_s
 {
 	struct device_link *link;
+	if (!dev_has_sync_state(dev))
+		return;
 	if (dev->state_synced)
 		return;

@@@ -747,31 -745,25 +747,31 @@@
 /**
  * device_links_flush_sync_list - Call sync_state() on a list of devices
  * @list: List of devices to call sync_state() on
+ * @dont_lock_dev: Device for which lock is already held by the caller
  *
  * Calls sync_state() on all the devices that have been queued for it. This
- * function is used in conjunction with __device_links_queue_sync_state().
+ * function is used in conjunction with __device_links_queue_sync_state(). The
+ * @dont_lock_dev parameter is useful when this function is called from a
+ * context where a device lock is already held.
  */
-static void device_links_flush_sync_list(struct list_head *list)
+static void device_links_flush_sync_list(struct list_head *list,
+					 struct device *dont_lock_dev)
 {
 	struct device *dev, *tmp;

 	list_for_each_entry_safe(dev, tmp, list, links.defer_sync) {
 		list_del_init(&dev->links.defer_sync);

-		device_lock(dev);
+		if (dev != dont_lock_dev)
+			device_lock(dev);

 		if (dev->bus->sync_state)
 			dev->bus->sync_state(dev);
 		else if (dev->driver && dev->driver->sync_state)
 			dev->driver->sync_state(dev);

-		device_unlock(dev);
+		if (dev != dont_lock_dev)
+			device_unlock(dev);

 		put_device(dev);
 	}
@@@ -809,7 -801,7 +809,7 @@@ void device_links_supplier_sync_state_r
 out:
 	device_links_write_unlock();

-	device_links_flush_sync_list(&sync_list);
+	device_links_flush_sync_list(&sync_list, NULL);
 }

 static int sync_state_resume_initcall(void)
@@@ -821,7 -813,7 +821,7 @@@ late_initcall(sync_state_resume_initcal

 static void __device_links_supplier_defer_sync(struct device *sup)
 {
-	if (list_empty(&sup->links.defer_sync))
+	if (list_empty(&sup->links.defer_sync) && dev_has_sync_state(sup))
 		list_add_tail(&sup->links.defer_sync, &deferred_sync);
 }
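For context on the two dev_has_sync_state() checks added above: the helper was introduced in include/linux/device.h by the same series. From memory it reads roughly as follows (a sketch, not the authoritative definition):

	static inline bool dev_has_sync_state(struct device *dev)
	{
		if (!dev)
			return false;
		if (dev->driver && dev->driver->sync_state)
			return true;
		if (dev->bus && dev->bus->sync_state)
			return true;
		return false;
	}

With this in place, a device whose bus and driver provide no sync_state() callback is never queued or deferred, which is what lets the hunks above skip the bookkeeping entirely.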
@@@ -873,11 -865,6 +873,11 @@@ void device_links_driver_bound(struct d
 			driver_deferred_probe_add(link->consumer);
 	}

+	if (defer_sync_state_count)
+		__device_links_supplier_defer_sync(dev);
+	else
+		__device_links_queue_sync_state(dev, &sync_list);
+
 	list_for_each_entry(link, &dev->links.suppliers, c_node) {
 		if (!(link->flags & DL_FLAG_MANAGED))
 			continue;
@@@ -896,7 -883,7 +896,7 @@@

 	device_links_write_unlock();

-	device_links_flush_sync_list(&sync_list);
+	device_links_flush_sync_list(&sync_list, dev);
 }
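The @dont_lock_dev plumbing exists because device_links_driver_bound() runs while the driver core already holds the lock of the device being bound; taking it again in the flush would self-deadlock. A condensed view of the two call sites, elided to their final statements rather than shown as complete functions:

	/* Resume path: no device lock held, nothing to skip. */
	void device_links_supplier_sync_state_resume(void)
	{
		...
		device_links_flush_sync_list(&sync_list, NULL);
	}

	/* Bind path: the core holds dev's lock around probe, so the
	 * flush must not device_lock(dev) a second time. */
	void device_links_driver_bound(struct device *dev)
	{
		...
		device_links_flush_sync_list(&sync_list, dev);
	}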
 static void device_link_drop_managed(struct device_link *link)
@@@ -3471,6 -3458,126 +3471,126 @@@ out
 }
 EXPORT_SYMBOL_GPL(device_move);
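As I read this series, the device_change_owner() helper added in the next hunk is there so net-sysfs can re-own a netdevice's sysfs tree when the device moves into another network namespace. A hedged usage sketch; the caller name and wiring are hypothetical, only device_change_owner() itself comes from the hunk below:

	/* Hypothetical caller: hand a device's sysfs entries to the root
	 * user/group of a target user namespace. */
	static int example_reown_device(struct device *dev,
					struct user_namespace *ns)
	{
		kuid_t kuid = make_kuid(ns, 0);
		kgid_t kgid = make_kgid(ns, 0);

		if (!uid_valid(kuid) || !gid_valid(kgid))
			return -EINVAL;

		return device_change_owner(dev, kuid, kgid);
	}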
+ static int device_attrs_change_owner(struct device *dev, kuid_t kuid,
+ 				     kgid_t kgid)
+ {
+ 	struct kobject *kobj = &dev->kobj;
+ 	struct class *class = dev->class;
+ 	const struct device_type *type = dev->type;
+ 	int error;
+ 
+ 	if (class) {
+ 		/*
+ 		 * Change the device groups of the device class for @dev to
+ 		 * @kuid/@kgid.
+ 		 */
+ 		error = sysfs_groups_change_owner(kobj, class->dev_groups, kuid,
+ 						  kgid);
+ 		if (error)
+ 			return error;
+ 	}
+ 
+ 	if (type) {
+ 		/*
+ 		 * Change the device groups of the device type for @dev to
+ 		 * @kuid/@kgid.
+ 		 */
+ 		error = sysfs_groups_change_owner(kobj, type->groups, kuid,
+ 						  kgid);
+ 		if (error)
+ 			return error;
+ 	}
+ 
+ 	/* Change the device groups of @dev to @kuid/@kgid. */
+ 	error = sysfs_groups_change_owner(kobj, dev->groups, kuid, kgid);
+ 	if (error)
+ 		return error;
+ 
+ 	if (device_supports_offline(dev) && !dev->offline_disabled) {
+ 		/* Change online device attributes of @dev to @kuid/@kgid. */
+ 		error = sysfs_file_change_owner(kobj, dev_attr_online.attr.name,
+ 						kuid, kgid);
+ 		if (error)
+ 			return error;
+ 	}
+ 
+ 	return 0;
+ }
+ 
+ /**
+  * device_change_owner - change the owner of an existing device.
+  * @dev: device.
+  * @kuid: new owner's kuid
+  * @kgid: new owner's kgid
+  *
+  * This changes the owner of @dev and its corresponding sysfs entries to
+  * @kuid/@kgid. This function closely mirrors how @dev was added via driver
+  * core.
+  *
+  * Returns 0 on success or error code on failure.
+  */
+ int device_change_owner(struct device *dev, kuid_t kuid, kgid_t kgid)
+ {
+ 	int error;
+ 	struct kobject *kobj = &dev->kobj;
+ 
+ 	dev = get_device(dev);
+ 	if (!dev)
+ 		return -EINVAL;
+ 
+ 	/*
+ 	 * Change the kobject and the default attributes and groups of the
+ 	 * ktype associated with it to @kuid/@kgid.
+ 	 */
+ 	error = sysfs_change_owner(kobj, kuid, kgid);
+ 	if (error)
+ 		goto out;
+ 
+ 	/*
+ 	 * Change the uevent file for @dev to the new owner. The uevent file
+ 	 * was created in a separate step when @dev got added and we mirror
+ 	 * that step here.
+ 	 */
+ 	error = sysfs_file_change_owner(kobj, dev_attr_uevent.attr.name, kuid,
+ 					kgid);
+ 	if (error)
+ 		goto out;
+ 
+ 	/*
+ 	 * Change the device groups, the device groups associated with the
+ 	 * device class, and the groups associated with the device type of @dev
+ 	 * to @kuid/@kgid.
+ 	 */
+ 	error = device_attrs_change_owner(dev, kuid, kgid);
+ 	if (error)
+ 		goto out;
+ 
+ 	error = dpm_sysfs_change_owner(dev, kuid, kgid);
+ 	if (error)
+ 		goto out;
+ 
+ #ifdef CONFIG_BLOCK
+ 	if (sysfs_deprecated && dev->class == &block_class)
+ 		goto out;
+ #endif
+ 
+ 	/*
+ 	 * Change the owner of the symlink located in the class directory of
+ 	 * the device class associated with @dev which points to the actual
+ 	 * directory entry for @dev to @kuid/@kgid. This ensures that the
+ 	 * symlink shows the same permissions as its target.
+ 	 */
+ 	error = sysfs_link_change_owner(&dev->class->p->subsys.kobj, &dev->kobj,
+ 					dev_name(dev), kuid, kgid);
+ 	if (error)
+ 		goto out;
+ 
+ out:
+ 	put_device(dev);
+ 	return error;
+ }
+ EXPORT_SYMBOL_GPL(device_change_owner);
+ 
 /**
  * device_shutdown - call ->shutdown() on each device to shutdown.
  */

diff --combined drivers/infiniband/hw/mlx5/main.c
index 709ef3f57a06,4de380f3d581..d418d38c550d
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@@ -3570,7 -3570,8 +3570,8 @@@ static void mlx5_ib_set_rule_source_por
 	misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
 			    misc_parameters_2);
- MLX5_SET_TO_ONES(fte_match_set_misc2, misc, metadata_reg_c_0); + MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0, + mlx5_eswitch_get_vport_metadata_mask()); } else { misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters); @@@ -6545,8 -6546,7 +6546,8 @@@ static int mlx5_ib_init_var_table(struc doorbell_bar_offset); bar_size = (1ULL << log_doorbell_bar_size) * 4096; var_table->stride_size = 1ULL << log_doorbell_stride; - var_table->num_var_hw_entries = div64_u64(bar_size, var_table->stride_size); + var_table->num_var_hw_entries = div_u64(bar_size, + var_table->stride_size); mutex_init(&var_table->bitmap_lock); var_table->bitmap = bitmap_zalloc(var_table->num_var_hw_entries, GFP_KERNEL); @@@ -7078,9 -7078,6 +7079,9 @@@ const struct mlx5_ib_profile raw_eth_pr STAGE_CREATE(MLX5_IB_STAGE_COUNTERS, mlx5_ib_stage_counters_init, mlx5_ib_stage_counters_cleanup), + STAGE_CREATE(MLX5_IB_STAGE_CONG_DEBUGFS, + mlx5_ib_stage_cong_debugfs_init, + mlx5_ib_stage_cong_debugfs_cleanup), STAGE_CREATE(MLX5_IB_STAGE_UAR, mlx5_ib_stage_uar_init, mlx5_ib_stage_uar_cleanup), diff --combined drivers/infiniband/ulp/ipoib/ipoib_ethtool.c index a47097d4577c,a10a0c2ca2da..67a21fdf5367 --- a/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c @@@ -68,6 -68,9 +68,6 @@@ static void ipoib_get_drvinfo(struct ne strlcpy(drvinfo->bus_info, dev_name(priv->ca->dev.parent), sizeof(drvinfo->bus_info));
- strlcpy(drvinfo->version, ipoib_driver_version, - sizeof(drvinfo->version)); - strlcpy(drvinfo->driver, "ib_ipoib", sizeof(drvinfo->driver)); }
@@@ -210,6 -213,8 +210,8 @@@ static int ipoib_get_link_ksettings(str }
static const struct ethtool_ops ipoib_ethtool_ops = { + .supported_coalesce_params = ETHTOOL_COALESCE_RX_USECS | + ETHTOOL_COALESCE_RX_MAX_FRAMES, .get_link_ksettings = ipoib_get_link_ksettings, .get_drvinfo = ipoib_get_drvinfo, .get_coalesce = ipoib_get_coalesce, diff --combined drivers/net/dsa/mv88e6xxx/chip.c index 2f993e673ec7,483db9d133c3..fb4c97a58bd4 --- a/drivers/net/dsa/mv88e6xxx/chip.c +++ b/drivers/net/dsa/mv88e6xxx/chip.c @@@ -632,33 -632,78 +632,78 @@@ static void mv88e6xxx_mac_config(struc dev_err(ds->dev, "p%d: failed to configure MAC\n", port); }
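The .supported_coalesce_params addition in the ipoib hunk above (the bnxt ethtool hunk further down gets the same treatment) comes from the v5.7 ethtool rework: the core now validates coalescing requests centrally and rejects any field the driver did not declare, so per-driver validation boilerplate can go away. A minimal sketch for a hypothetical driver that only honours the two RX fields:

	static const struct ethtool_ops example_ethtool_ops = {
		/* Core returns -EOPNOTSUPP if userspace sets any other field. */
		.supported_coalesce_params = ETHTOOL_COALESCE_RX_USECS |
					     ETHTOOL_COALESCE_RX_MAX_FRAMES,
		.get_coalesce	= example_get_coalesce,
		.set_coalesce	= example_set_coalesce,
	};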
- static void mv88e6xxx_mac_link_force(struct dsa_switch *ds, int port, int link) + static void mv88e6xxx_mac_link_down(struct dsa_switch *ds, int port, + unsigned int mode, + phy_interface_t interface) { struct mv88e6xxx_chip *chip = ds->priv; - int err; + const struct mv88e6xxx_ops *ops; + int err = 0;
- mv88e6xxx_reg_lock(chip); - err = chip->info->ops->port_set_link(chip, port, link); - mv88e6xxx_reg_unlock(chip); + ops = chip->info->ops;
- if (err) - dev_err(chip->dev, "p%d: failed to force MAC link\n", port); - } + /* Internal PHYs propagate their configuration directly to the MAC. + * External PHYs depend on whether the PPU is enabled for this port. + * FIXME: we should be using the PPU enable state here. What about + * an automedia port? + */ + if (!mv88e6xxx_phy_is_internal(ds, port) && ops->port_set_link) { + mv88e6xxx_reg_lock(chip); + err = ops->port_set_link(chip, port, LINK_FORCED_DOWN); + mv88e6xxx_reg_unlock(chip);
- static void mv88e6xxx_mac_link_down(struct dsa_switch *ds, int port, - unsigned int mode, - phy_interface_t interface) - { - if (mode == MLO_AN_FIXED) - mv88e6xxx_mac_link_force(ds, port, LINK_FORCED_DOWN); + if (err) + dev_err(chip->dev, + "p%d: failed to force MAC link down\n", port); + } }
static void mv88e6xxx_mac_link_up(struct dsa_switch *ds, int port, unsigned int mode, phy_interface_t interface, - struct phy_device *phydev) + struct phy_device *phydev, + int speed, int duplex, + bool tx_pause, bool rx_pause) { - if (mode == MLO_AN_FIXED) - mv88e6xxx_mac_link_force(ds, port, LINK_FORCED_UP); + struct mv88e6xxx_chip *chip = ds->priv; + const struct mv88e6xxx_ops *ops; + int err = 0; + + ops = chip->info->ops; + + /* Internal PHYs propagate their configuration directly to the MAC. + * External PHYs depend on whether the PPU is enabled for this port. + * FIXME: we should be using the PPU enable state here. What about + * an automedia port? + */ + if (!mv88e6xxx_phy_is_internal(ds, port)) { + mv88e6xxx_reg_lock(chip); + /* FIXME: for an automedia port, should we force the link + * down here - what if the link comes up due to "other" media + * while we're bringing the port up, how is the exclusivity + * handled in the Marvell hardware? E.g. port 4 on 88E6532 + * shared between internal PHY and Serdes. + */ + if (ops->port_set_speed) { + err = ops->port_set_speed(chip, port, speed); + if (err && err != -EOPNOTSUPP) + goto error; + } + + if (ops->port_set_duplex) { + err = ops->port_set_duplex(chip, port, duplex); + if (err && err != -EOPNOTSUPP) + goto error; + } + + if (ops->port_set_link) + err = ops->port_set_link(chip, port, LINK_FORCED_UP); + error: + mv88e6xxx_reg_unlock(chip); + + if (err && err != -EOPNOTSUPP) + dev_err(ds->dev, + "p%d: failed to configure MAC link up\n", port); + } }
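The mac_link_up() rework in this hunk (sja1105 below is converted the same way) tracks a phylink API change in this cycle: the resolved speed, duplex and pause state are now passed straight into mac_link_up(), so the MAC can be programmed from the negotiation result instead of poking the PHY again. A stub with the new DSA-side signature, driver name hypothetical:

	static void example_mac_link_up(struct dsa_switch *ds, int port,
					unsigned int mode,
					phy_interface_t interface,
					struct phy_device *phydev,
					int speed, int duplex,
					bool tx_pause, bool rx_pause)
	{
		/* Program the MAC from the resolved link parameters here. */
	}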
static int mv88e6xxx_stats_snapshot(struct mv88e6xxx_chip *chip, int port) @@@ -1018,7 -1063,14 +1063,14 @@@ static void mv88e6xxx_get_ethtool_stats
static int mv88e6xxx_get_regs_len(struct dsa_switch *ds, int port) { - return 32 * sizeof(u16); + struct mv88e6xxx_chip *chip = ds->priv; + int len; + + len = 32 * sizeof(u16); + if (chip->info->ops->serdes_get_regs_len) + len += chip->info->ops->serdes_get_regs_len(chip, port); + + return len; }
static void mv88e6xxx_get_regs(struct dsa_switch *ds, int port, @@@ -1043,6 -1095,9 +1095,9 @@@ p[i] = reg; }
+ if (chip->info->ops->serdes_get_regs) + chip->info->ops->serdes_get_regs(chip, port, &p[i]); + mv88e6xxx_reg_unlock(chip); }
@@@ -1785,7 -1840,7 +1840,7 @@@ static int mv88e6xxx_broadcast_setup(st }
static int mv88e6xxx_port_vlan_join(struct mv88e6xxx_chip *chip, int port, - u16 vid, u8 member) + u16 vid, u8 member, bool warn) { const u8 non_member = MV88E6XXX_G1_VTU_DATA_MEMBER_TAG_NON_MEMBER; struct mv88e6xxx_vtu_entry vlan; @@@ -1830,7 -1885,7 +1885,7 @@@ err = mv88e6xxx_vtu_loadpurge(chip, &vlan); if (err) return err; - } else { + } else if (warn) { dev_info(chip->dev, "p%d: already a member of VLAN %d\n", port, vid); } @@@ -1844,6 -1899,7 +1899,7 @@@ static void mv88e6xxx_port_vlan_add(str struct mv88e6xxx_chip *chip = ds->priv; bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED; bool pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID; + bool warn; u8 member; u16 vid;
@@@ -1857,10 -1913,15 +1913,15 @@@ else member = MV88E6XXX_G1_VTU_DATA_MEMBER_TAG_TAGGED;
+ /* net/dsa/slave.c will call dsa_port_vlan_add() for the affected port + * and then the CPU port. Do not warn for duplicates for the CPU port. + */ + warn = !dsa_is_cpu_port(ds, port) && !dsa_is_dsa_port(ds, port); + mv88e6xxx_reg_lock(chip);
for (vid = vlan->vid_begin; vid <= vlan->vid_end; ++vid) - if (mv88e6xxx_port_vlan_join(chip, port, vid, member)) + if (mv88e6xxx_port_vlan_join(chip, port, vid, member, warn)) dev_err(ds->dev, "p%d: failed to add VLAN %d%c\n", port, vid, untagged ? 'u' : 't');
@@@ -2769,8 -2830,6 +2830,8 @@@ static u64 mv88e6xxx_devlink_atu_bin_ge goto unlock; }
+ occupancy &= MV88E6XXX_G2_ATU_STATS_MASK; + unlock: mv88e6xxx_reg_unlock(chip);
@@@ -3666,6 -3725,8 +3727,8 @@@ static const struct mv88e6xxx_ops mv88e .vtu_loadpurge = mv88e6352_g1_vtu_loadpurge, .serdes_get_lane = mv88e6352_serdes_get_lane, .serdes_power = mv88e6352_serdes_power, + .serdes_get_regs_len = mv88e6352_serdes_get_regs_len, + .serdes_get_regs = mv88e6352_serdes_get_regs, .gpio_ops = &mv88e6352_gpio_ops, .phylink_validate = mv88e6352_phylink_validate, }; @@@ -3760,6 -3821,8 +3823,8 @@@ static const struct mv88e6xxx_ops mv88e .serdes_irq_mapping = mv88e6352_serdes_irq_mapping, .serdes_irq_enable = mv88e6352_serdes_irq_enable, .serdes_irq_status = mv88e6352_serdes_irq_status, + .serdes_get_regs_len = mv88e6352_serdes_get_regs_len, + .serdes_get_regs = mv88e6352_serdes_get_regs, .gpio_ops = &mv88e6352_gpio_ops, .phylink_validate = mv88e6352_phylink_validate, }; @@@ -3849,6 -3912,8 +3914,8 @@@ static const struct mv88e6xxx_ops mv88e .serdes_irq_status = mv88e6390_serdes_irq_status, .serdes_get_strings = mv88e6390_serdes_get_strings, .serdes_get_stats = mv88e6390_serdes_get_stats, + .serdes_get_regs_len = mv88e6390_serdes_get_regs_len, + .serdes_get_regs = mv88e6390_serdes_get_regs, .phylink_validate = mv88e6390_phylink_validate, .gpio_ops = &mv88e6352_gpio_ops, .phylink_validate = mv88e6390_phylink_validate, @@@ -3903,6 -3968,8 +3970,8 @@@ static const struct mv88e6xxx_ops mv88e .serdes_irq_status = mv88e6390_serdes_irq_status, .serdes_get_strings = mv88e6390_serdes_get_strings, .serdes_get_stats = mv88e6390_serdes_get_stats, + .serdes_get_regs_len = mv88e6390_serdes_get_regs_len, + .serdes_get_regs = mv88e6390_serdes_get_regs, .phylink_validate = mv88e6390_phylink_validate, .gpio_ops = &mv88e6352_gpio_ops, .phylink_validate = mv88e6390x_phylink_validate, @@@ -3956,6 -4023,8 +4025,8 @@@ static const struct mv88e6xxx_ops mv88e .serdes_irq_status = mv88e6390_serdes_irq_status, .serdes_get_strings = mv88e6390_serdes_get_strings, .serdes_get_stats = mv88e6390_serdes_get_stats, + .serdes_get_regs_len = mv88e6390_serdes_get_regs_len, + .serdes_get_regs = mv88e6390_serdes_get_regs, .phylink_validate = mv88e6390_phylink_validate, .avb_ops = &mv88e6390_avb_ops, .ptp_ops = &mv88e6352_ptp_ops, @@@ -4010,6 -4079,8 +4081,8 @@@ static const struct mv88e6xxx_ops mv88e .serdes_irq_mapping = mv88e6352_serdes_irq_mapping, .serdes_irq_enable = mv88e6352_serdes_irq_enable, .serdes_irq_status = mv88e6352_serdes_irq_status, + .serdes_get_regs_len = mv88e6352_serdes_get_regs_len, + .serdes_get_regs = mv88e6352_serdes_get_regs, .gpio_ops = &mv88e6352_gpio_ops, .avb_ops = &mv88e6352_avb_ops, .ptp_ops = &mv88e6352_ptp_ops, @@@ -4105,6 -4176,8 +4178,8 @@@ static const struct mv88e6xxx_ops mv88e .serdes_irq_status = mv88e6390_serdes_irq_status, .serdes_get_strings = mv88e6390_serdes_get_strings, .serdes_get_stats = mv88e6390_serdes_get_stats, + .serdes_get_regs_len = mv88e6390_serdes_get_regs_len, + .serdes_get_regs = mv88e6390_serdes_get_regs, .phylink_validate = mv88e6390_phylink_validate, .gpio_ops = &mv88e6352_gpio_ops, .avb_ops = &mv88e6390_avb_ops, @@@ -4390,6 -4463,8 +4465,8 @@@ static const struct mv88e6xxx_ops mv88e .serdes_get_sset_count = mv88e6352_serdes_get_sset_count, .serdes_get_strings = mv88e6352_serdes_get_strings, .serdes_get_stats = mv88e6352_serdes_get_stats, + .serdes_get_regs_len = mv88e6352_serdes_get_regs_len, + .serdes_get_regs = mv88e6352_serdes_get_regs, .phylink_validate = mv88e6352_phylink_validate, };
@@@ -4448,6 -4523,8 +4525,8 @@@ static const struct mv88e6xxx_ops mv88e .serdes_get_sset_count = mv88e6390_serdes_get_sset_count, .serdes_get_strings = mv88e6390_serdes_get_strings, .serdes_get_stats = mv88e6390_serdes_get_stats, + .serdes_get_regs_len = mv88e6390_serdes_get_regs_len, + .serdes_get_regs = mv88e6390_serdes_get_regs, .phylink_validate = mv88e6390_phylink_validate, };
@@@ -4503,6 -4580,8 +4582,8 @@@ static const struct mv88e6xxx_ops mv88e .serdes_get_sset_count = mv88e6390_serdes_get_sset_count, .serdes_get_strings = mv88e6390_serdes_get_strings, .serdes_get_stats = mv88e6390_serdes_get_stats, + .serdes_get_regs_len = mv88e6390_serdes_get_regs_len, + .serdes_get_regs = mv88e6390_serdes_get_regs, .gpio_ops = &mv88e6352_gpio_ops, .avb_ops = &mv88e6390_avb_ops, .ptp_ops = &mv88e6352_ptp_ops, diff --combined drivers/net/dsa/sja1105/sja1105_main.c index 7edea5741a5f,6fe679143216..d42f085d4272 --- a/drivers/net/dsa/sja1105/sja1105_main.c +++ b/drivers/net/dsa/sja1105/sja1105_main.c @@@ -786,7 -786,9 +786,9 @@@ static void sja1105_mac_link_down(struc static void sja1105_mac_link_up(struct dsa_switch *ds, int port, unsigned int mode, phy_interface_t interface, - struct phy_device *phydev) + struct phy_device *phydev, + int speed, int duplex, + bool tx_pause, bool rx_pause) { sja1105_inhibit_tx(ds->priv, BIT(port), false); } @@@ -822,6 -824,7 +824,7 @@@ static void sja1105_phylink_validate(st phylink_set(mask, MII); phylink_set(mask, 10baseT_Full); phylink_set(mask, 100baseT_Full); + phylink_set(mask, 100baseT1_Full); if (mii->xmii_mode[port] == XMII_MODE_RGMII) phylink_set(mask, 1000baseT_Full);
@@@ -1741,8 -1744,7 +1744,8 @@@ static void sja1105_teardown(struct dsa if (!dsa_is_user_port(ds, port)) continue;
- kthread_destroy_worker(sp->xmit_worker); + if (sp->xmit_worker) + kthread_destroy_worker(sp->xmit_worker); }
sja1105_tas_teardown(ds); diff --combined drivers/net/ethernet/broadcom/bnxt/bnxt.c index c5c8effc0139,4c9696a3978a..663dcf614004 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@@ -70,12 -70,8 +70,8 @@@
#define BNXT_TX_TIMEOUT (5 * HZ)
- static const char version[] = - "Broadcom NetXtreme-C/E driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION "\n"; - MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("Broadcom BCM573xx network driver"); - MODULE_VERSION(DRV_MODULE_VERSION);
#define BNXT_RX_OFFSET (NET_SKB_PAD + NET_IP_ALIGN) #define BNXT_RX_DMA_OFFSET NET_SKB_PAD @@@ -2166,6 -2162,7 +2162,7 @@@ static int __bnxt_poll_work(struct bnx struct tx_cmp *txcmp;
cpr->has_more_work = 0; + cpr->had_work_done = 1; while (1) { int rc;
@@@ -2179,7 -2176,6 +2176,6 @@@ * reading any further. */ dma_rmb(); - cpr->had_work_done = 1; if (TX_CMP_TYPE(txcmp) == CMP_TYPE_TX_L2_CMP) { tx_pkts++; /* return full budget so NAPI will complete. */ @@@ -2396,7 -2392,7 +2392,7 @@@ static int __bnxt_poll_cqs(struct bnxt }
static void __bnxt_poll_cqs_done(struct bnxt *bp, struct bnxt_napi *bnapi, - u64 dbr_type, bool all) + u64 dbr_type) { struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; int i; @@@ -2405,7 -2401,7 +2401,7 @@@ struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[i]; struct bnxt_db_info *db;
- if (cpr2 && (all || cpr2->had_work_done)) { + if (cpr2 && cpr2->had_work_done) { db = &cpr2->cp_db; writeq(db->db_key64 | dbr_type | RING_CMP(cpr2->cp_raw_cons), db->doorbell); @@@ -2428,22 -2424,16 +2424,16 @@@ static int bnxt_poll_p5(struct napi_str if (cpr->has_more_work) { cpr->has_more_work = 0; work_done = __bnxt_poll_cqs(bp, bnapi, budget); - if (cpr->has_more_work) { - __bnxt_poll_cqs_done(bp, bnapi, DBR_TYPE_CQ, false); - return work_done; - } - __bnxt_poll_cqs_done(bp, bnapi, DBR_TYPE_CQ_ARMALL, true); - if (napi_complete_done(napi, work_done)) - BNXT_DB_NQ_ARM_P5(&cpr->cp_db, cpr->cp_raw_cons); - return work_done; } while (1) { cons = RING_CMP(raw_cons); nqcmp = &cpr->nq_desc_ring[CP_RING(cons)][CP_IDX(cons)];
if (!NQ_CMP_VALID(nqcmp, raw_cons)) { - __bnxt_poll_cqs_done(bp, bnapi, DBR_TYPE_CQ_ARMALL, - false); + if (cpr->has_more_work) + break; + + __bnxt_poll_cqs_done(bp, bnapi, DBR_TYPE_CQ_ARMALL); cpr->cp_raw_cons = raw_cons; if (napi_complete_done(napi, work_done)) BNXT_DB_NQ_ARM_P5(&cpr->cp_db, @@@ -2463,16 -2453,17 +2453,17 @@@ cpr2 = cpr->cp_ring_arr[idx]; work_done += __bnxt_poll_work(bp, cpr2, budget - work_done); - cpr->has_more_work = cpr2->has_more_work; + cpr->has_more_work |= cpr2->has_more_work; } else { bnxt_hwrm_handler(bp, (struct tx_cmp *)nqcmp); } raw_cons = NEXT_RAW_CMP(raw_cons); - if (cpr->has_more_work) - break; } - __bnxt_poll_cqs_done(bp, bnapi, DBR_TYPE_CQ, true); - cpr->cp_raw_cons = raw_cons; + __bnxt_poll_cqs_done(bp, bnapi, DBR_TYPE_CQ); + if (raw_cons != cpr->cp_raw_cons) { + cpr->cp_raw_cons = raw_cons; + BNXT_DB_NQ_P5(&cpr->cp_db, raw_cons); + } return work_done; }
@@@ -4170,6 -4161,7 +4161,7 @@@ static int bnxt_hwrm_to_stderr(u32 hwrm case HWRM_ERR_CODE_NO_BUFFER: return -ENOMEM; case HWRM_ERR_CODE_HOT_RESET_PROGRESS: + case HWRM_ERR_CODE_BUSY: return -EAGAIN; case HWRM_ERR_CODE_CMD_NOT_SUPPORTED: return -EOPNOTSUPP; @@@ -5069,10 -5061,8 +5061,8 @@@ vnic_mru return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); }
- static int bnxt_hwrm_vnic_free_one(struct bnxt *bp, u16 vnic_id) + static void bnxt_hwrm_vnic_free_one(struct bnxt *bp, u16 vnic_id) { - u32 rc = 0; - if (bp->vnic_info[vnic_id].fw_vnic_id != INVALID_HW_RING_ID) { struct hwrm_vnic_free_input req = {0};
@@@ -5080,10 -5070,9 +5070,9 @@@ req.vnic_id = cpu_to_le32(bp->vnic_info[vnic_id].fw_vnic_id);
- rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); bp->vnic_info[vnic_id].fw_vnic_id = INVALID_HW_RING_ID; } - return rc; }
static void bnxt_hwrm_vnic_free(struct bnxt *bp) @@@ -5200,14 -5189,13 +5189,13 @@@ static int bnxt_hwrm_ring_grp_alloc(str return rc; }
- static int bnxt_hwrm_ring_grp_free(struct bnxt *bp) + static void bnxt_hwrm_ring_grp_free(struct bnxt *bp) { u16 i; - u32 rc = 0; struct hwrm_ring_grp_free_input req = {0};
if (!bp->grp_info || (bp->flags & BNXT_FLAG_CHIP_P5)) - return 0; + return;
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_GRP_FREE, -1, -1);
@@@ -5218,12 -5206,10 +5206,10 @@@ req.ring_group_id = cpu_to_le32(bp->grp_info[i].fw_grp_id);
- rc = _hwrm_send_message(bp, &req, sizeof(req), - HWRM_CMD_TIMEOUT); + _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); bp->grp_info[i].fw_grp_id = INVALID_HW_RING_ID; } mutex_unlock(&bp->hwrm_cmd_lock); - return rc; }
static int hwrm_ring_alloc_send_msg(struct bnxt *bp, @@@ -5847,8 -5833,7 +5833,7 @@@ bnxt_hwrm_reserve_pf_rings(struct bnxt if (bp->hwrm_spec_code < 0x10601) bp->hw_resc.resv_tx_rings = tx_rings;
- rc = bnxt_hwrm_get_rings(bp); - return rc; + return bnxt_hwrm_get_rings(bp); }
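This hunk is the first of a long run of identical bnxt cleanups below: a local rc that only staged the return value is dropped in favour of returning the call directly. The pattern, lifted from the surrounding hunks:

	/* before */
	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	return rc;

	/* after */
	return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);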
static int @@@ -5869,8 -5854,7 +5854,7 @@@ bnxt_hwrm_reserve_vf_rings(struct bnxt if (rc) return rc;
- rc = bnxt_hwrm_get_rings(bp); - return rc; + return bnxt_hwrm_get_rings(bp); }
static int bnxt_hwrm_reserve_rings(struct bnxt *bp, int tx, int rx, int grp, @@@ -6030,7 -6014,6 +6014,6 @@@ static int bnxt_hwrm_check_vf_rings(str { struct hwrm_func_vf_cfg_input req = {0}; u32 flags; - int rc;
if (!BNXT_NEW_RM(bp)) return 0; @@@ -6047,8 -6030,8 +6030,8 @@@ flags |= FUNC_VF_CFG_REQ_FLAGS_RING_GRP_ASSETS_TEST;
req.flags = cpu_to_le32(flags); - rc = hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); - return rc; + return hwrm_send_message_silent(bp, &req, sizeof(req), + HWRM_CMD_TIMEOUT); }
static int bnxt_hwrm_check_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings, @@@ -6057,7 -6040,6 +6040,6 @@@ { struct hwrm_func_cfg_input req = {0}; u32 flags; - int rc;
__bnxt_hwrm_reserve_pf_rings(bp, &req, tx_rings, rx_rings, ring_grps, cp_rings, stats, vnics); @@@ -6075,8 -6057,8 +6057,8 @@@ }
req.flags = cpu_to_le32(flags); - rc = hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); - return rc; + return hwrm_send_message_silent(bp, &req, sizeof(req), + HWRM_CMD_TIMEOUT); }
static int bnxt_hwrm_check_rings(struct bnxt *bp, int tx_rings, int rx_rings, @@@ -6315,16 -6297,16 +6297,16 @@@ int bnxt_hwrm_set_coal(struct bnxt *bp return rc; }
- static int bnxt_hwrm_stat_ctx_free(struct bnxt *bp) + static void bnxt_hwrm_stat_ctx_free(struct bnxt *bp) { - int rc = 0, i; struct hwrm_stat_ctx_free_input req = {0}; + int i;
if (!bp->bnapi) - return 0; + return;
if (BNXT_CHIP_TYPE_NITRO_A0(bp)) - return 0; + return;
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_STAT_CTX_FREE, -1, -1);
@@@ -6336,14 -6318,13 +6318,13 @@@ if (cpr->hw_stats_ctx_id != INVALID_STATS_CTX_ID) { req.stat_ctx_id = cpu_to_le32(cpr->hw_stats_ctx_id);
- rc = _hwrm_send_message(bp, &req, sizeof(req), - HWRM_CMD_TIMEOUT); + _hwrm_send_message(bp, &req, sizeof(req), + HWRM_CMD_TIMEOUT);
cpr->hw_stats_ctx_id = INVALID_STATS_CTX_ID; } } mutex_unlock(&bp->hwrm_cmd_lock); - return rc; }
static int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp) @@@ -6548,8 -6529,8 +6529,8 @@@ static int bnxt_hwrm_func_backing_store __le64 *pg_dir; u32 flags = 0; u8 *pg_attr; - int i, rc; u32 ena; + int i;
if (!ctx) return 0; @@@ -6636,8 -6617,7 +6617,7 @@@ bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, pg_attr, pg_dir); } req.flags = cpu_to_le32(flags); - rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); - return rc; + return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); }
static int bnxt_alloc_ctx_mem_blk(struct bnxt *bp, @@@ -7341,7 -7321,6 +7321,6 @@@ int bnxt_hwrm_fw_set_time(struct bnxt *
static int bnxt_hwrm_port_qstats(struct bnxt *bp) { - int rc; struct bnxt_pf_info *pf = &bp->pf; struct hwrm_port_qstats_input req = {0};
@@@ -7352,8 -7331,7 +7331,7 @@@ req.port_id = cpu_to_le16(pf->port_id); req.tx_stat_host_addr = cpu_to_le64(bp->hw_tx_port_stats_map); req.rx_stat_host_addr = cpu_to_le64(bp->hw_rx_port_stats_map); - rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); - return rc; + return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); }
static int bnxt_hwrm_port_qstats_ext(struct bnxt *bp) @@@ -7507,7 -7485,6 +7485,6 @@@ static void bnxt_hwrm_resource_free(str static int bnxt_hwrm_set_br_mode(struct bnxt *bp, u16 br_mode) { struct hwrm_func_cfg_input req = {0}; - int rc;
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1); req.fid = cpu_to_le16(0xffff); @@@ -7518,14 -7495,12 +7495,12 @@@ req.evb_mode = FUNC_CFG_REQ_EVB_MODE_VEPA; else return -EINVAL; - rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); - return rc; + return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); }
static int bnxt_hwrm_set_cache_line_size(struct bnxt *bp, int size) { struct hwrm_func_cfg_input req = {0}; - int rc;
if (BNXT_VF(bp) || bp->hwrm_spec_code < 0x10803) return 0; @@@ -7537,8 -7512,7 +7512,7 @@@ if (size == 128) req.options = FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SIZE_128;
- rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); - return rc; + return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); }
static int __bnxt_setup_vnic(struct bnxt *bp, u16 vnic_id) @@@ -8796,6 -8770,7 +8770,7 @@@ static int bnxt_hwrm_if_change(struct b bnxt_free_ctx_mem(bp); kfree(bp->ctx); bp->ctx = NULL; + bnxt_dcb_free(bp); rc = bnxt_fw_init_one(bp); if (rc) { set_bit(BNXT_STATE_ABORT_ERR, &bp->state); @@@ -8891,14 -8866,12 +8866,12 @@@ int bnxt_hwrm_alloc_wol_fltr(struct bnx int bnxt_hwrm_free_wol_fltr(struct bnxt *bp) { struct hwrm_wol_filter_free_input req = {0}; - int rc;
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_WOL_FILTER_FREE, -1, -1); req.port_id = cpu_to_le16(bp->pf.port_id); req.enables = cpu_to_le32(WOL_FILTER_FREE_REQ_ENABLES_WOL_FILTER_ID); req.wol_filter_id = bp->wol_filter_id; - rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); - return rc; + return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); }
static u16 bnxt_hwrm_get_wol_fltrs(struct bnxt *bp, u16 handle) @@@ -10982,13 -10955,13 +10955,13 @@@ static int bnxt_change_mtu(struct net_d struct bnxt *bp = netdev_priv(dev);
if (netif_running(dev)) - bnxt_close_nic(bp, false, false); + bnxt_close_nic(bp, true, false);
dev->mtu = new_mtu; bnxt_set_ring_params(bp);
if (netif_running(dev)) - return bnxt_open_nic(bp, false, false); + return bnxt_open_nic(bp, true, false);
return 0; } @@@ -11456,6 -11429,8 +11429,8 @@@ static void bnxt_remove_one(struct pci_ bnxt_sriov_disable(bp);
bnxt_dl_fw_reporters_destroy(bp, true); + if (BNXT_PF(bp)) + devlink_port_type_clear(&bp->dl_port); pci_disable_pcie_error_reporting(pdev); unregister_netdev(dev); bnxt_dl_unregister(bp); @@@ -11755,27 -11730,22 +11730,22 @@@ static int bnxt_init_mac_addr(struct bn static int bnxt_pcie_dsn_get(struct bnxt *bp, u8 dsn[]) { struct pci_dev *pdev = bp->pdev; - int pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_DSN); - u32 dw; + u64 qword;
- if (!pos) { - netdev_info(bp->dev, "Unable do read adapter's DSN\n"); + qword = pci_get_dsn(pdev); + if (!qword) { + netdev_info(bp->dev, "Unable to read adapter's DSN\n"); return -EOPNOTSUPP; }
- /* DSN (two dw) is at an offset of 4 from the cap pos */ - pos += 4; - pci_read_config_dword(pdev, pos, &dw); - put_unaligned_le32(dw, &dsn[0]); - pci_read_config_dword(pdev, pos + 4, &dw); - put_unaligned_le32(dw, &dsn[4]); + put_unaligned_le64(qword, dsn); + bp->flags |= BNXT_FLAG_DSN_VALID; return 0; }
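pci_get_dsn() is the new PCI core helper this hunk switches to; it folds the find-capability-then-read-two-dwords dance into one call and returns 0 when the Device Serial Number capability is absent. From memory, its body is roughly the following (a sketch; check drivers/pci for the authoritative version):

	u64 pci_get_dsn(struct pci_dev *dev)
	{
		u32 dword;
		u64 dsn;
		int pos;

		pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_DSN);
		if (!pos)
			return 0;

		/* The DSN is two dwords, 4 bytes past the capability. */
		pos += 4;
		pci_read_config_dword(dev, pos, &dword);
		dsn = (u64)dword;
		pci_read_config_dword(dev, pos + 4, &dword);
		dsn |= ((u64)dword) << 32;

		return dsn;
	}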
static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) { - static int version_printed; struct net_device *dev; struct bnxt *bp; int rc, max_irqs; @@@ -11783,9 -11753,6 +11753,6 @@@ if (pci_is_bridge(pdev)) return -ENODEV;
- if (version_printed++ == 0) - pr_info("%s", version); - /* Clear any pending DMA transactions from crash kernel * while loading driver in capture kernel. */ diff --combined drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c index 1f67e6729a2c,cc807ba6f163..677bab95b937 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c @@@ -1236,7 -1236,6 +1236,6 @@@ static void bnxt_get_drvinfo(struct net struct bnxt *bp = netdev_priv(dev);
strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver)); - strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version)); strlcpy(info->fw_version, bp->fw_ver_str, sizeof(info->fw_version)); strlcpy(info->bus_info, pci_name(bp->pdev), sizeof(info->bus_info)); info->n_stats = bnxt_get_num_stats(bp); @@@ -2007,8 -2006,8 +2006,8 @@@ int bnxt_flash_package_from_file(struc struct hwrm_nvm_install_update_output *resp = bp->hwrm_cmd_resp_addr; struct hwrm_nvm_install_update_input install = {0}; const struct firmware *fw; - int rc, hwrm_err = 0; u32 item_len; + int rc = 0; u16 index;
bnxt_hwrm_fw_set_time(bp); @@@ -2052,14 -2051,15 +2051,14 @@@ memcpy(kmem, fw->data, fw->size); modify.host_src_addr = cpu_to_le64(dma_handle);
- hwrm_err = hwrm_send_message(bp, &modify, - sizeof(modify), - FLASH_PACKAGE_TIMEOUT); + rc = hwrm_send_message(bp, &modify, sizeof(modify), + FLASH_PACKAGE_TIMEOUT); dma_free_coherent(&bp->pdev->dev, fw->size, kmem, dma_handle); } } release_firmware(fw); - if (rc || hwrm_err) + if (rc) goto err_exit;
if ((install_type & 0xffff) == 0) @@@ -2068,19 -2068,20 +2067,19 @@@ install.install_type = cpu_to_le32(install_type);
mutex_lock(&bp->hwrm_cmd_lock); - hwrm_err = _hwrm_send_message(bp, &install, sizeof(install), - INSTALL_PACKAGE_TIMEOUT); - if (hwrm_err) { + rc = _hwrm_send_message(bp, &install, sizeof(install), + INSTALL_PACKAGE_TIMEOUT); + if (rc) { u8 error_code = ((struct hwrm_err_output *)resp)->cmd_err;
if (resp->error_code && error_code == NVM_INSTALL_UPDATE_CMD_ERR_CODE_FRAG_ERR) { install.flags |= cpu_to_le16( NVM_INSTALL_UPDATE_REQ_FLAGS_ALLOWED_TO_DEFRAG); - hwrm_err = _hwrm_send_message(bp, &install, - sizeof(install), - INSTALL_PACKAGE_TIMEOUT); + rc = _hwrm_send_message(bp, &install, sizeof(install), + INSTALL_PACKAGE_TIMEOUT); } - if (hwrm_err) + if (rc) goto flash_pkg_exit; }
@@@ -2092,7 -2093,7 +2091,7 @@@ flash_pkg_exit: mutex_unlock(&bp->hwrm_cmd_lock); err_exit: - if (hwrm_err == -EACCES) + if (rc == -EACCES) bnxt_print_admin_err(bp); return rc; } @@@ -2605,7 -2606,7 +2604,7 @@@ static int bnxt_set_phys_id(struct net_ struct bnxt_led_cfg *led_cfg; u8 led_state; __le16 duration; - int i, rc; + int i;
if (!bp->num_leds || BNXT_VF(bp)) return -EOPNOTSUPP; @@@ -2631,8 -2632,7 +2630,7 @@@ led_cfg->led_blink_off = duration; led_cfg->led_group_id = bp->leds[i].led_group_id; } - rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); - return rc; + return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); }
static int bnxt_hwrm_selftest_irq(struct bnxt *bp, u16 cmpl_ring) @@@ -3471,6 -3471,12 +3469,12 @@@ void bnxt_ethtool_free(struct bnxt *bp }
const struct ethtool_ops bnxt_ethtool_ops = { + .supported_coalesce_params = ETHTOOL_COALESCE_USECS | + ETHTOOL_COALESCE_MAX_FRAMES | + ETHTOOL_COALESCE_USECS_IRQ | + ETHTOOL_COALESCE_MAX_FRAMES_IRQ | + ETHTOOL_COALESCE_STATS_BLOCK_USECS | + ETHTOOL_COALESCE_USE_ADAPTIVE_RX, .get_link_ksettings = bnxt_get_link_ksettings, .set_link_ksettings = bnxt_set_link_ksettings, .get_pauseparam = bnxt_get_pauseparam, diff --combined drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c index 97f90edbc068,3da25a2b5cc7..75fde0d4d493 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c @@@ -90,11 -90,6 +90,6 @@@
char cxgb4_driver_name[] = KBUILD_MODNAME;
- #ifdef DRV_VERSION - #undef DRV_VERSION - #endif - #define DRV_VERSION "2.0.0-ko" - const char cxgb4_driver_version[] = DRV_VERSION; #define DRV_DESC "Chelsio T4/T5/T6 Network Driver"
#define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \ @@@ -137,7 -132,6 +132,6 @@@ MODULE_DESCRIPTION(DRV_DESC); MODULE_AUTHOR("Chelsio Communications"); MODULE_LICENSE("Dual BSD/GPL"); - MODULE_VERSION(DRV_VERSION); MODULE_DEVICE_TABLE(pci, cxgb4_pci_tbl); MODULE_FIRMWARE(FW4_FNAME); MODULE_FIRMWARE(FW5_FNAME); @@@ -3626,8 -3620,6 +3620,6 @@@ static void cxgb4_mgmt_get_drvinfo(stru struct adapter *adapter = netdev2adap(dev);
strlcpy(info->driver, cxgb4_driver_name, sizeof(info->driver)); - strlcpy(info->version, cxgb4_driver_version, - sizeof(info->version)); strlcpy(info->bus_info, pci_name(adapter->pdev), sizeof(info->bus_info)); } @@@ -5381,11 -5373,12 +5373,11 @@@ static inline bool is_x_10g_port(const static int cfg_queues(struct adapter *adap) { u32 avail_qsets, avail_eth_qsets, avail_uld_qsets; + u32 i, n10g = 0, qidx = 0, n1g = 0; + u32 ncpus = num_online_cpus(); u32 niqflint, neq, num_ulds; struct sge *s = &adap->sge; - u32 i, n10g = 0, qidx = 0; -#ifndef CONFIG_CHELSIO_T4_DCB - int q10g = 0; -#endif + u32 q10g = 0, q1g;
/* Reduce memory usage in kdump environment, disable all offload. */ if (is_kdump_kernel() || (is_uld(adap) && t4_uld_mem_alloc(adap))) { @@@ -5423,50 -5416,44 +5415,50 @@@ n10g += is_x_10g_port(&adap2pinfo(adap, i)->link_cfg);
avail_eth_qsets = min_t(u32, avail_qsets, MAX_ETH_QSETS); + + /* We default to 1 queue per non-10G port and up to # of cores queues + * per 10G port. + */ + if (n10g) + q10g = (avail_eth_qsets - (adap->params.nports - n10g)) / n10g; + + n1g = adap->params.nports - n10g; #ifdef CONFIG_CHELSIO_T4_DCB /* For Data Center Bridging support we need to be able to support up * to 8 Traffic Priorities; each of which will be assigned to its * own TX Queue in order to prevent Head-Of-Line Blocking. */ + q1g = 8; if (adap->params.nports * 8 > avail_eth_qsets) { dev_err(adap->pdev_dev, "DCB avail_eth_qsets=%d < %d!\n", avail_eth_qsets, adap->params.nports * 8); return -ENOMEM; }
- for_each_port(adap, i) { - struct port_info *pi = adap2pinfo(adap, i); + if (adap->params.nports * ncpus < avail_eth_qsets) + q10g = max(8U, ncpus); + else + q10g = max(8U, q10g);
- pi->first_qset = qidx; - pi->nqsets = is_kdump_kernel() ? 1 : 8; - qidx += pi->nqsets; - } -#else /* !CONFIG_CHELSIO_T4_DCB */ - /* We default to 1 queue per non-10G port and up to # of cores queues - * per 10G port. - */ - if (n10g) - q10g = (avail_eth_qsets - (adap->params.nports - n10g)) / n10g; - if (q10g > netif_get_num_default_rss_queues()) - q10g = netif_get_num_default_rss_queues(); + while ((q10g * n10g) > (avail_eth_qsets - n1g * q1g)) + q10g--;
- if (is_kdump_kernel()) +#else /* !CONFIG_CHELSIO_T4_DCB */ + q1g = 1; + q10g = min(q10g, ncpus); +#endif /* !CONFIG_CHELSIO_T4_DCB */ + if (is_kdump_kernel()) { q10g = 1; + q1g = 1; + }
for_each_port(adap, i) { struct port_info *pi = adap2pinfo(adap, i);
pi->first_qset = qidx; - pi->nqsets = is_x_10g_port(&pi->link_cfg) ? q10g : 1; + pi->nqsets = is_x_10g_port(&pi->link_cfg) ? q10g : q1g; qidx += pi->nqsets; } -#endif /* !CONFIG_CHELSIO_T4_DCB */
s->ethqsets = qidx; s->max_ethqsets = qidx; /* MSI-X may lower it later */ @@@ -5478,7 -5465,7 +5470,7 @@@ * capped by the number of available cores. */ num_ulds = adap->num_uld + adap->num_ofld_uld; - i = min_t(u32, MAX_OFLD_QSETS, num_online_cpus()); + i = min_t(u32, MAX_OFLD_QSETS, ncpus); avail_uld_qsets = roundup(i, adap->params.nports); if (avail_qsets < num_ulds * adap->params.nports) { adap->params.offload = 0; @@@ -6086,8 -6073,6 +6078,6 @@@ static int init_one(struct pci_dev *pde int i, err; u32 whoami;
- printk_once(KERN_INFO "%s - version %s\n", DRV_DESC, DRV_VERSION); - err = pci_request_regions(pdev, KBUILD_MODNAME); if (err) { /* Just info, some other driver may have claimed the device. */ diff --combined drivers/net/ethernet/freescale/dpaa/dpaa_eth.c index ca74a684a904,c0fe305b9d16..46039d80bb43 --- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c +++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c @@@ -1,5 -1,4 +1,5 @@@ /* Copyright 2008 - 2016 Freescale Semiconductor Inc. + * Copyright 2020 NXP * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: @@@ -124,22 -123,7 +124,22 @@@ MODULE_PARM_DESC(tx_timeout, "The Tx ti #define FSL_QMAN_MAX_OAL 127
/* Default alignment for start of data in an Rx FD */ +#ifdef CONFIG_DPAA_ERRATUM_A050385 +/* aligning data start to 64 avoids DMA transaction splits, unless the buffer + * is crossing a 4k page boundary + */ +#define DPAA_FD_DATA_ALIGNMENT (fman_has_errata_a050385() ? 64 : 16) +/* aligning to 256 avoids DMA transaction splits caused by 4k page boundary + * crossings; also, all SG fragments except the last must have a size multiple + * of 256 to avoid DMA transaction splits + */ +#define DPAA_A050385_ALIGN 256 +#define DPAA_FD_RX_DATA_ALIGNMENT (fman_has_errata_a050385() ? \ + DPAA_A050385_ALIGN : 16) +#else #define DPAA_FD_DATA_ALIGNMENT 16 +#define DPAA_FD_RX_DATA_ALIGNMENT DPAA_FD_DATA_ALIGNMENT +#endif
/* The DPAA requires 256 bytes reserved and mapped for the SGT */ #define DPAA_SGT_SIZE 256 @@@ -174,13 -158,8 +174,13 @@@ #define DPAA_PARSE_RESULTS_SIZE sizeof(struct fman_prs_result) #define DPAA_TIME_STAMP_SIZE 8 #define DPAA_HASH_RESULTS_SIZE 8 +#ifdef CONFIG_DPAA_ERRATUM_A050385 +#define DPAA_RX_PRIV_DATA_SIZE (DPAA_A050385_ALIGN - (DPAA_PARSE_RESULTS_SIZE\ + + DPAA_TIME_STAMP_SIZE + DPAA_HASH_RESULTS_SIZE)) +#else #define DPAA_RX_PRIV_DATA_SIZE (u16)(DPAA_TX_PRIV_DATA_SIZE + \ dpaa_rx_extra_headroom) +#endif
#define DPAA_ETH_PCD_RXQ_NUM 128
@@@ -201,12 -180,7 +201,12 @@@ static struct dpaa_bp *dpaa_bp_array[BM
#define DPAA_BP_RAW_SIZE 4096
+#ifdef CONFIG_DPAA_ERRATUM_A050385 +#define dpaa_bp_size(raw_size) (SKB_WITH_OVERHEAD(raw_size) & \ + ~(DPAA_A050385_ALIGN - 1)) +#else #define dpaa_bp_size(raw_size) SKB_WITH_OVERHEAD(raw_size) +#endif
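To make the A050385 constraints above concrete: per the comments in this hunk, a Tx buffer dodges the erratum when its start address is 256-byte aligned, and for scatter/gather frames every fragment except the last must additionally be a multiple of 256 bytes, so that no DMA transaction straddles a 4K page. A minimal predicate along those lines (helper name hypothetical; the driver's real check is the dpaa_a050385_wa() function further down):

	/* true if one fragment satisfies the erratum constraints */
	static bool a050385_frag_ok(unsigned long addr, size_t len,
				    bool is_last)
	{
		if (!IS_ALIGNED(addr, 256))
			return false;

		return is_last || IS_ALIGNED(len, 256);
	}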
static int dpaa_max_frm;
@@@ -259,8 -233,20 +259,20 @@@ static int dpaa_netdev_init(struct net_ net_dev->features |= net_dev->hw_features; net_dev->vlan_features = net_dev->features;
- memcpy(net_dev->perm_addr, mac_addr, net_dev->addr_len); - memcpy(net_dev->dev_addr, mac_addr, net_dev->addr_len); + if (is_valid_ether_addr(mac_addr)) { + memcpy(net_dev->perm_addr, mac_addr, net_dev->addr_len); + memcpy(net_dev->dev_addr, mac_addr, net_dev->addr_len); + } else { + eth_hw_addr_random(net_dev); + err = priv->mac_dev->change_addr(priv->mac_dev->fman_mac, + (enet_addr_t *)net_dev->dev_addr); + if (err) { + dev_err(dev, "Failed to set random MAC address\n"); + return -EINVAL; + } + dev_info(dev, "Using random MAC address: %pM\n", + net_dev->dev_addr); + }
net_dev->ethtool_ops = &dpaa_ethtool_ops;
@@@ -1218,7 -1204,7 +1230,7 @@@ static int dpaa_eth_init_rx_port(struc buf_prefix_content.pass_prs_result = true; buf_prefix_content.pass_hash_result = true; buf_prefix_content.pass_time_stamp = true; - buf_prefix_content.data_align = DPAA_FD_DATA_ALIGNMENT; + buf_prefix_content.data_align = DPAA_FD_RX_DATA_ALIGNMENT;
	rx_p = &params.specific_params.rx_params;
	rx_p->err_fqid = errq->fqid;
@@@ -1688,8 -1674,6 +1700,8 @@@ static u8 rx_csum_offload(const struct
 	return CHECKSUM_NONE;
 }
+#define PTR_IS_ALIGNED(x, a) (IS_ALIGNED((unsigned long)(x), (a))) + /* Build a linear skb around the received buffer. * We are guaranteed there is enough room at the end of the data buffer to * accommodate the shared info area of the skb. @@@ -1761,7 -1745,8 +1773,7 @@@ static struct sk_buff *sg_fd_to_skb(con
sg_addr = qm_sg_addr(&sgt[i]); sg_vaddr = phys_to_virt(sg_addr); - WARN_ON(!IS_ALIGNED((unsigned long)sg_vaddr, - SMP_CACHE_BYTES)); + WARN_ON(!PTR_IS_ALIGNED(sg_vaddr, SMP_CACHE_BYTES));
dma_unmap_page(priv->rx_dma_dev, sg_addr, DPAA_BP_RAW_SIZE, DMA_FROM_DEVICE); @@@ -2049,75 -2034,6 +2061,75 @@@ static inline int dpaa_xmit(struct dpaa return 0; }
+#ifdef CONFIG_DPAA_ERRATUM_A050385
+static int dpaa_a050385_wa(struct net_device *net_dev, struct sk_buff **s)
+{
+	struct dpaa_priv *priv = netdev_priv(net_dev);
+	struct sk_buff *new_skb, *skb = *s;
+	unsigned char *start, i;
+
+	/* check linear buffer alignment */
+	if (!PTR_IS_ALIGNED(skb->data, DPAA_A050385_ALIGN))
+		goto workaround;
+
+	/* linear buffers just need to have an aligned start */
+	if (!skb_is_nonlinear(skb))
+		return 0;
+
+	/* linear data size for nonlinear skbs needs to be aligned */
+	if (!IS_ALIGNED(skb_headlen(skb), DPAA_A050385_ALIGN))
+		goto workaround;
+
+	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+
+		/* all fragments need to have aligned start addresses */
+		if (!IS_ALIGNED(skb_frag_off(frag), DPAA_A050385_ALIGN))
+			goto workaround;
+
+		/* all but the last fragment need to have aligned sizes */
+		if (!IS_ALIGNED(skb_frag_size(frag), DPAA_A050385_ALIGN) &&
+		    (i < skb_shinfo(skb)->nr_frags - 1))
+			goto workaround;
+	}
+
+	return 0;
+
+workaround:
+	/* copy all the skb content into a new linear buffer */
+	new_skb = netdev_alloc_skb(net_dev, skb->len + DPAA_A050385_ALIGN - 1 +
+				   priv->tx_headroom);
+	if (!new_skb)
+		return -ENOMEM;
+
+	/* NET_SKB_PAD bytes already reserved, adding up to tx_headroom */
+	skb_reserve(new_skb, priv->tx_headroom - NET_SKB_PAD);
+
+	/* Workaround for DPAA_A050385 requires data start to be aligned */
+	start = PTR_ALIGN(new_skb->data, DPAA_A050385_ALIGN);
+	if (start - new_skb->data != 0)
+		skb_reserve(new_skb, start - new_skb->data);
+
+	skb_put(new_skb, skb->len);
+	skb_copy_bits(skb, 0, new_skb->data, skb->len);
+	skb_copy_header(new_skb, skb);
+	new_skb->dev = skb->dev;
+
+	/* We move the headroom when we align it so we have to reset the
+	 * network and transport header offsets relative to the new data
+	 * pointer. The checksum offload relies on these offsets.
+	 */
+	skb_set_network_header(new_skb, skb_network_offset(skb));
+	skb_set_transport_header(new_skb, skb_transport_offset(skb));
+
+	/* TODO: does timestamping need the result in the old skb? */
+	dev_kfree_skb(skb);
+	*s = new_skb;
+
+	return 0;
+}
+#endif
+
 static netdev_tx_t
 dpaa_start_xmit(struct sk_buff *skb, struct net_device *net_dev)
 {
@@@ -2164,14 -2080,6 +2176,14 @@@
 	nonlinear = skb_is_nonlinear(skb);
 }
+#ifdef CONFIG_DPAA_ERRATUM_A050385 + if (unlikely(fman_has_errata_a050385())) { + if (dpaa_a050385_wa(net_dev, &skb)) + goto enomem; + nonlinear = skb_is_nonlinear(skb); + } +#endif + if (nonlinear) { /* Just create a S/G fd based on the skb */ err = skb_to_sg_fd(priv, skb, &fd); @@@ -2845,7 -2753,9 +2857,7 @@@ static inline u16 dpaa_get_headroom(str headroom = (u16)(bl->priv_data_size + DPAA_PARSE_RESULTS_SIZE + DPAA_TIME_STAMP_SIZE + DPAA_HASH_RESULTS_SIZE);
- return DPAA_FD_DATA_ALIGNMENT ? ALIGN(headroom, - DPAA_FD_DATA_ALIGNMENT) : - headroom; + return ALIGN(headroom, DPAA_FD_DATA_ALIGNMENT); }
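A worked example of the simplified return above, with assumed sizes (a 24-byte private area and a 32-byte parse results struct, both purely illustrative):

/* headroom = 24 + 32 + 8 + 8 = 72 bytes;
 * ALIGN(72, 16) = 80 on unaffected parts, while on A050385-affected
 * parts DPAA_FD_DATA_ALIGNMENT evaluates to 64 and ALIGN(72, 64) = 128.
 */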
static int dpaa_eth_probe(struct platform_device *pdev) diff --combined drivers/net/ethernet/freescale/fec_main.c index 23c5fef2f1ad,ce154695f67c..c1c267b61647 --- a/drivers/net/ethernet/freescale/fec_main.c +++ b/drivers/net/ethernet/freescale/fec_main.c @@@ -2128,7 -2128,6 +2128,6 @@@ static void fec_enet_get_drvinfo(struc
strlcpy(info->driver, fep->pdev->dev.driver->name, sizeof(info->driver)); - strlcpy(info->version, "Revision: 1.0", sizeof(info->version)); strlcpy(info->bus_info, dev_name(&ndev->dev), sizeof(info->bus_info)); }
@@@ -2529,15 -2528,15 +2528,15 @@@ fec_enet_set_coalesce(struct net_devic return -EINVAL; }
- cycle = fec_enet_us_to_itr_clock(ndev, fep->rx_time_itr); + cycle = fec_enet_us_to_itr_clock(ndev, ec->rx_coalesce_usecs); if (cycle > 0xFFFF) { dev_err(dev, "Rx coalesced usec exceed hardware limitation\n"); return -EINVAL; }
- cycle = fec_enet_us_to_itr_clock(ndev, fep->tx_time_itr); + cycle = fec_enet_us_to_itr_clock(ndev, ec->tx_coalesce_usecs); if (cycle > 0xFFFF) { - dev_err(dev, "Rx coalesced usec exceed hardware limitation\n"); + dev_err(dev, "Tx coalesced usec exceed hardware limitation\n"); return -EINVAL; }
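Both range checks feed the same 16-bit hardware field; a sketch of the conversion they guard, assuming the coalescing timer ticks at itr_clk_rate / 64 (the helper name here is hypothetical):

#include <errno.h>

/* Sketch only: microseconds -> ITR timer cycles, rejecting values that
 * would overflow the 16-bit register checked above.
 */
static int us_to_itr_cycles(unsigned long itr_clk_rate, unsigned int usecs)
{
	unsigned long long cycles;

	cycles = (unsigned long long)usecs * (itr_clk_rate / 64000) / 1000;
	return cycles > 0xFFFF ? -EINVAL : (int)cycles;
}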
@@@ -2642,6 -2641,8 +2641,8 @@@ fec_enet_set_wol(struct net_device *nde }
static const struct ethtool_ops fec_enet_ethtool_ops = { + .supported_coalesce_params = ETHTOOL_COALESCE_USECS | + ETHTOOL_COALESCE_MAX_FRAMES, .get_drvinfo = fec_enet_get_drvinfo, .get_regs_len = fec_enet_get_regs_len, .get_regs = fec_enet_get_regs, @@@ -3793,6 -3794,7 +3794,7 @@@ static struct platform_driver fec_drive .name = DRIVER_NAME, .pm = &fec_pm_ops, .of_match_table = fec_dt_ids, + .suppress_bind_attrs = true, }, .id_table = fec_devtype, .probe = fec_probe, diff --combined drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c index acf0c29fcbcd,cdf7f4bdef86..839c2e5ab86d --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c @@@ -824,6 -824,8 +824,8 @@@ static void hclge_get_mac_stat(struct h static int hclge_parse_func_status(struct hclge_dev *hdev, struct hclge_func_status_cmd *status) { + #define HCLGE_MAC_ID_MASK 0xF + if (!(status->pf_state & HCLGE_PF_STATE_DONE)) return -EINVAL;
@@@ -833,6 -835,7 +835,7 @@@ else hdev->flag &= ~HCLGE_FLAG_MAIN;
+ hdev->hw.mac.mac_id = status->mac_id & HCLGE_MAC_ID_MASK; return 0; }
@@@ -2446,12 -2449,10 +2449,12 @@@ static int hclge_cfg_mac_speed_dup_hw(s
int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex) { + struct hclge_mac *mac = &hdev->hw.mac; int ret;
duplex = hclge_check_speed_dup(duplex, speed); - if (hdev->hw.mac.speed == speed && hdev->hw.mac.duplex == duplex) + if (!mac->support_autoneg && mac->speed == speed && + mac->duplex == duplex) return 0;
ret = hclge_cfg_mac_speed_dup_hw(hdev, speed, duplex); @@@ -3441,7 -3442,7 +3444,7 @@@ static void hclge_do_reset(struct hclge u32 val;
	if (hclge_get_hw_reset_stat(handle)) {
-		dev_info(&pdev->dev, "Hardware reset not finish\n");
+		dev_info(&pdev->dev, "hardware reset not finished\n");
 		dev_info(&pdev->dev, "func_rst_reg:0x%x, global_rst_reg:0x%x\n",
 			 hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING),
 			 hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG));
@@@ -3450,20 -3451,20 +3453,20 @@@
switch (hdev->reset_type) { case HNAE3_GLOBAL_RESET: + dev_info(&pdev->dev, "global reset requested\n"); val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG); hnae3_set_bit(val, HCLGE_GLOBAL_RESET_BIT, 1); hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val); - dev_info(&pdev->dev, "Global Reset requested\n"); break; case HNAE3_FUNC_RESET: - dev_info(&pdev->dev, "PF Reset requested\n"); + dev_info(&pdev->dev, "PF reset requested\n"); /* schedule again to check later */ set_bit(HNAE3_FUNC_RESET, &hdev->reset_pending); hclge_reset_task_schedule(hdev); break; default: dev_warn(&pdev->dev, - "Unsupported reset type: %d\n", hdev->reset_type); + "unsupported reset type: %d\n", hdev->reset_type); break; } } @@@ -7353,7 -7354,6 +7356,6 @@@ int hclge_add_mc_addr_common(struct hcl return -EINVAL; } memset(&req, 0, sizeof(req)); - hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0); hclge_prepare_mac_addr(&req, addr, true); status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true); if (status) { @@@ -7398,7 -7398,6 +7400,6 @@@ int hclge_rm_mc_addr_common(struct hclg }
memset(&req, 0, sizeof(req)); - hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0); hclge_prepare_mac_addr(&req, addr, true); status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true); if (!status) { @@@ -7618,11 -7617,17 +7619,17 @@@ static int hclge_set_vf_mac(struct hnae }
ether_addr_copy(vport->vf_info.mac, mac_addr); - dev_info(&hdev->pdev->dev, - "MAC of VF %d has been set to %pM, and it will be reinitialized!\n", - vf, mac_addr);
- return hclge_inform_reset_assert_to_vf(vport); + if (test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state)) { + dev_info(&hdev->pdev->dev, + "MAC of VF %d has been set to %pM, and it will be reinitialized!\n", + vf, mac_addr); + return hclge_inform_reset_assert_to_vf(vport); + } + + dev_info(&hdev->pdev->dev, "MAC of VF %d has been set to %pM\n", + vf, mac_addr); + return 0; }
static int hclge_add_mgr_tbl(struct hclge_dev *hdev, @@@ -9075,8 -9080,8 +9082,8 @@@ init_nic_err static int hclge_init_roce_client_instance(struct hnae3_ae_dev *ae_dev, struct hclge_vport *vport) { - struct hnae3_client *client = vport->roce.client; struct hclge_dev *hdev = ae_dev->priv; + struct hnae3_client *client; int rst_cnt; int ret;
@@@ -10251,8 -10256,9 +10258,9 @@@ static int hclge_dfx_reg_fetch_data(str static int hclge_get_dfx_reg_len(struct hclge_dev *hdev, int *len) { u32 dfx_reg_type_num = ARRAY_SIZE(hclge_dfx_bd_offset_list); - int data_len_per_desc, data_len, bd_num, i; + int data_len_per_desc, bd_num, i; int bd_num_list[BD_LIST_MAX_NUM]; + u32 data_len; int ret;
ret = hclge_get_dfx_reg_bd_num(hdev, bd_num_list, dfx_reg_type_num); diff --combined drivers/net/ethernet/intel/e1000e/netdev.c index 0f02c7a5ee9b,1e1625122596..177c6da80c57 --- a/drivers/net/ethernet/intel/e1000e/netdev.c +++ b/drivers/net/ethernet/intel/e1000e/netdev.c @@@ -3280,10 -3280,10 +3280,10 @@@ static void e1000_configure_rx(struct e
dev_info(&adapter->pdev->dev, "Some CPU C-states have been disabled in order to enable jumbo frames\n"); - pm_qos_update_request(&adapter->pm_qos_req, lat); + cpu_latency_qos_update_request(&adapter->pm_qos_req, lat); } else { - pm_qos_update_request(&adapter->pm_qos_req, - PM_QOS_DEFAULT_VALUE); + cpu_latency_qos_update_request(&adapter->pm_qos_req, + PM_QOS_DEFAULT_VALUE); }
/* Enable Receives */ @@@ -3536,6 -3536,7 +3536,7 @@@ s32 e1000e_get_base_timinca(struct e100 break; case e1000_pch_cnp: case e1000_pch_tgp: + case e1000_pch_adp: if (er32(TSYNCRXCTL) & E1000_TSYNCRXCTL_SYSCFI) { /* Stable 24MHz frequency */ incperiod = INCPERIOD_24MHZ; @@@ -3807,7 -3808,7 +3808,7 @@@ static void e1000_flush_tx_ring(struct tdt = er32(TDT(0)); BUG_ON(tdt != tx_ring->next_to_use); tx_desc = E1000_TX_DESC(*tx_ring, tx_ring->next_to_use); - tx_desc->buffer_addr = tx_ring->dma; + tx_desc->buffer_addr = cpu_to_le64(tx_ring->dma);
tx_desc->lower.data = cpu_to_le32(txd_lower | size); tx_desc->upper.data = 0; @@@ -4049,6 -4050,7 +4050,7 @@@ void e1000e_reset(struct e1000_adapter case e1000_pch_cnp: /* fall-through */ case e1000_pch_tgp: + case e1000_pch_adp: fc->refresh_time = 0xFFFF; fc->pause_time = 0xFFFF;
@@@ -4636,7 -4638,8 +4638,7 @@@ int e1000e_open(struct net_device *netd e1000_update_mng_vlan(adapter);
/* DMA latency requirement to workaround jumbo issue */ - pm_qos_add_request(&adapter->pm_qos_req, PM_QOS_CPU_DMA_LATENCY, - PM_QOS_DEFAULT_VALUE); + cpu_latency_qos_add_request(&adapter->pm_qos_req, PM_QOS_DEFAULT_VALUE);
/* before we allocate an interrupt, we must be ready to handle it. * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt @@@ -4678,7 -4681,7 +4680,7 @@@ return 0;
err_req_irq: - pm_qos_remove_request(&adapter->pm_qos_req); + cpu_latency_qos_remove_request(&adapter->pm_qos_req); e1000e_release_hw_control(adapter); e1000_power_down_phy(adapter); e1000e_free_rx_resources(adapter->rx_ring); @@@ -4742,7 -4745,7 +4744,7 @@@ int e1000e_close(struct net_device *net !test_bit(__E1000_TESTING, &adapter->state)) e1000e_release_hw_control(adapter);
- pm_qos_remove_request(&adapter->pm_qos_req); + cpu_latency_qos_remove_request(&adapter->pm_qos_req);
pm_runtime_put_sync(&pdev->dev);
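The conversions above follow the lifecycle of the consolidated CPU latency QoS API that replaces the PM_QOS_CPU_DMA_LATENCY requests. A condensed sketch of the pattern (kernel context assumed, latency value illustrative):

#include <linux/pm_qos.h>

static struct pm_qos_request pm_req;

static void cpu_latency_qos_example(void)
{
	/* open: register with no constraint yet */
	cpu_latency_qos_add_request(&pm_req, PM_QOS_DEFAULT_VALUE);
	/* e.g. jumbo frames enabled: cap C-state exit latency at 55 us */
	cpu_latency_qos_update_request(&pm_req, 55);
	/* close or error path: drop the constraint */
	cpu_latency_qos_remove_request(&pm_req);
}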
@@@ -5461,10 -5464,7 +5463,7 @@@ static int e1000_tso(struct e1000_ring cmd_length = E1000_TXD_CMD_IP; ipcse = skb_transport_offset(skb) - 1; } else if (skb_is_gso_v6(skb)) { - ipv6_hdr(skb)->payload_len = 0; - tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr, - &ipv6_hdr(skb)->daddr, - 0, IPPROTO_TCP, 0); + tcp_v6_gso_csum_prep(skb); ipcse = 0; } ipcss = skb_network_offset(skb); @@@ -7759,6 -7759,11 +7758,11 @@@ static const struct pci_device_id e1000 { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_TGP_I219_LM14), board_pch_cnp }, { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_TGP_I219_V14), board_pch_cnp }, { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_TGP_I219_LM15), board_pch_cnp }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_TGP_I219_V15), board_pch_cnp }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_ADP_I219_LM16), board_pch_cnp }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_ADP_I219_V16), board_pch_cnp }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_ADP_I219_LM17), board_pch_cnp }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_ADP_I219_V17), board_pch_cnp },
{ 0, 0, 0, 0, 0, 0, 0 } /* terminate list */ }; diff --combined drivers/net/ethernet/mscc/ocelot.c index d3b7373c5961,06f9d013f807..dc0e27328661 --- a/drivers/net/ethernet/mscc/ocelot.c +++ b/drivers/net/ethernet/mscc/ocelot.c @@@ -442,8 -442,23 +442,23 @@@ void ocelot_adjust_link(struct ocelot * ocelot_port_writel(ocelot_port, DEV_MAC_MODE_CFG_FDX_ENA | mode, DEV_MAC_MODE_CFG);
- if (ocelot->ops->pcs_init) - ocelot->ops->pcs_init(ocelot, port); + /* Disable HDX fast control */ + ocelot_port_writel(ocelot_port, DEV_PORT_MISC_HDX_FAST_DIS, + DEV_PORT_MISC); + + /* SGMII only for now */ + ocelot_port_writel(ocelot_port, PCS1G_MODE_CFG_SGMII_MODE_ENA, + PCS1G_MODE_CFG); + ocelot_port_writel(ocelot_port, PCS1G_SD_CFG_SD_SEL, PCS1G_SD_CFG); + + /* Enable PCS */ + ocelot_port_writel(ocelot_port, PCS1G_CFG_PCS_ENA, PCS1G_CFG); + + /* No aneg on SGMII */ + ocelot_port_writel(ocelot_port, 0, PCS1G_ANEG_CFG); + + /* No loopback */ + ocelot_port_writel(ocelot_port, 0, PCS1G_LB_CFG);
/* Enable MAC module */ ocelot_port_writel(ocelot_port, DEV_MAC_ENA_CFG_RX_ENA | @@@ -1398,7 -1413,7 +1413,7 @@@ void ocelot_bridge_stp_state_set(struc * a source for the other ports. */ for (p = 0; p < ocelot->num_phys_ports; p++) { - if (p == ocelot->cpu || (ocelot->bridge_fwd_mask & BIT(p))) { + if (ocelot->bridge_fwd_mask & BIT(p)) { unsigned long mask = ocelot->bridge_fwd_mask & ~BIT(p);
for (i = 0; i < ocelot->num_phys_ports; i++) { @@@ -1413,18 -1428,10 +1428,10 @@@ } }
- /* Avoid the NPI port from looping back to itself */ - if (p != ocelot->cpu) - mask |= BIT(ocelot->cpu); - ocelot_write_rix(ocelot, mask, ANA_PGID_PGID, PGID_SRC + p); } else { - /* Only the CPU port, this is compatible with link - * aggregation. - */ - ocelot_write_rix(ocelot, - BIT(ocelot->cpu), + ocelot_write_rix(ocelot, 0, ANA_PGID_PGID, PGID_SRC + p); } } @@@ -2176,29 -2183,24 +2183,29 @@@ static int ocelot_init_timestamp(struc return 0; }
-static void ocelot_port_set_mtu(struct ocelot *ocelot, int port, size_t mtu) +/* Configure the maximum SDU (L2 payload) on RX to the value specified in @sdu. + * The length of VLAN tags is accounted for automatically via DEV_MAC_TAGS_CFG. + */ +static void ocelot_port_set_maxlen(struct ocelot *ocelot, int port, size_t sdu) { struct ocelot_port *ocelot_port = ocelot->ports[port]; + int maxlen = sdu + ETH_HLEN + ETH_FCS_LEN; int atop_wm;
- ocelot_port_writel(ocelot_port, mtu, DEV_MAC_MAXLEN_CFG); + ocelot_port_writel(ocelot_port, maxlen, DEV_MAC_MAXLEN_CFG);
/* Set Pause WM hysteresis - * 152 = 6 * mtu / OCELOT_BUFFER_CELL_SZ - * 101 = 4 * mtu / OCELOT_BUFFER_CELL_SZ + * 152 = 6 * maxlen / OCELOT_BUFFER_CELL_SZ + * 101 = 4 * maxlen / OCELOT_BUFFER_CELL_SZ */ ocelot_write_rix(ocelot, SYS_PAUSE_CFG_PAUSE_ENA | SYS_PAUSE_CFG_PAUSE_STOP(101) | SYS_PAUSE_CFG_PAUSE_START(152), SYS_PAUSE_CFG, port);
/* Tail dropping watermark */ - atop_wm = (ocelot->shared_queue_sz - 9 * mtu) / OCELOT_BUFFER_CELL_SZ; - ocelot_write_rix(ocelot, ocelot_wm_enc(9 * mtu), + atop_wm = (ocelot->shared_queue_sz - 9 * maxlen) / + OCELOT_BUFFER_CELL_SZ; + ocelot_write_rix(ocelot, ocelot_wm_enc(9 * maxlen), SYS_ATOP, port); ocelot_write(ocelot, ocelot_wm_enc(atop_wm), SYS_ATOP_TOT_CFG); } @@@ -2227,10 -2229,9 +2234,10 @@@ void ocelot_init_port(struct ocelot *oc DEV_MAC_HDX_CFG);
/* Set Max Length and maximum tags allowed */ - ocelot_port_set_mtu(ocelot, port, VLAN_ETH_FRAME_LEN); + ocelot_port_set_maxlen(ocelot, port, ETH_DATA_LEN); ocelot_port_writel(ocelot_port, DEV_MAC_TAGS_CFG_TAG_ID(ETH_P_8021AD) | DEV_MAC_TAGS_CFG_VLAN_AWR_ENA | + DEV_MAC_TAGS_CFG_VLAN_DBL_AWR_ENA | DEV_MAC_TAGS_CFG_VLAN_LEN_AWR_ENA, DEV_MAC_TAGS_CFG);
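The numbers behind the call above are easy to verify by hand:

/* ETH_DATA_LEN (1500) + ETH_HLEN (14) + ETH_FCS_LEN (4) = 1518 bytes
 * written to DEV_MAC_MAXLEN_CFG; VLAN tags are deliberately left out
 * of this budget because DEV_MAC_TAGS_CFG makes the MAC length-aware
 * of single and double tags separately.
 */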
@@@ -2299,42 -2300,62 +2306,62 @@@ int ocelot_probe_port(struct ocelot *oc } EXPORT_SYMBOL(ocelot_probe_port);
- void ocelot_set_cpu_port(struct ocelot *ocelot, int cpu, - enum ocelot_tag_prefix injection, - enum ocelot_tag_prefix extraction) + /* Configure and enable the CPU port module, which is a set of queues. + * If @npi contains a valid port index, the CPU port module is connected + * to the Node Processor Interface (NPI). This is the mode through which + * frames can be injected from and extracted to an external CPU, + * over Ethernet. + */ + void ocelot_configure_cpu(struct ocelot *ocelot, int npi, + enum ocelot_tag_prefix injection, + enum ocelot_tag_prefix extraction) { - /* Configure and enable the CPU port. */ + int cpu = ocelot->num_phys_ports; + + /* The unicast destination PGID for the CPU port module is unused */ ocelot_write_rix(ocelot, 0, ANA_PGID_PGID, cpu); + /* Instead set up a multicast destination PGID for traffic copied to + * the CPU. Whitelisted MAC addresses like the port netdevice MAC + * addresses will be copied to the CPU via this PGID. + */ ocelot_write_rix(ocelot, BIT(cpu), ANA_PGID_PGID, PGID_CPU); ocelot_write_gix(ocelot, ANA_PORT_PORT_CFG_RECV_ENA | ANA_PORT_PORT_CFG_PORTID_VAL(cpu), ANA_PORT_PORT_CFG, cpu);
- /* If the CPU port is a physical port, set up the port in Node - * Processor Interface (NPI) mode. This is the mode through which - * frames can be injected from and extracted to an external CPU. - * Only one port can be an NPI at the same time. - */ - if (cpu < ocelot->num_phys_ports) { + if (npi >= 0 && npi < ocelot->num_phys_ports) { - int mtu = VLAN_ETH_FRAME_LEN + OCELOT_TAG_LEN; + int sdu = ETH_DATA_LEN + OCELOT_TAG_LEN;
ocelot_write(ocelot, QSYS_EXT_CPU_CFG_EXT_CPUQ_MSK_M | - QSYS_EXT_CPU_CFG_EXT_CPU_PORT(cpu), + QSYS_EXT_CPU_CFG_EXT_CPU_PORT(npi), QSYS_EXT_CPU_CFG);
if (injection == OCELOT_TAG_PREFIX_SHORT) - mtu += OCELOT_SHORT_PREFIX_LEN; + sdu += OCELOT_SHORT_PREFIX_LEN; else if (injection == OCELOT_TAG_PREFIX_LONG) - mtu += OCELOT_LONG_PREFIX_LEN; + sdu += OCELOT_LONG_PREFIX_LEN;
- ocelot_port_set_maxlen(ocelot, cpu, sdu); - ocelot_port_set_mtu(ocelot, npi, mtu); ++ ocelot_port_set_maxlen(ocelot, npi, sdu); + + /* Enable NPI port */ + ocelot_write_rix(ocelot, + QSYS_SWITCH_PORT_MODE_INGRESS_DROP_MODE | + QSYS_SWITCH_PORT_MODE_SCH_NEXT_CFG(1) | + QSYS_SWITCH_PORT_MODE_PORT_ENA, + QSYS_SWITCH_PORT_MODE, npi); + /* NPI port Injection/Extraction configuration */ + ocelot_write_rix(ocelot, + SYS_PORT_MODE_INCL_XTR_HDR(extraction) | + SYS_PORT_MODE_INCL_INJ_HDR(injection), + SYS_PORT_MODE, npi); }
- /* CPU port Injection/Extraction configuration */ + /* Enable CPU port module */ ocelot_write_rix(ocelot, QSYS_SWITCH_PORT_MODE_INGRESS_DROP_MODE | QSYS_SWITCH_PORT_MODE_SCH_NEXT_CFG(1) | QSYS_SWITCH_PORT_MODE_PORT_ENA, QSYS_SWITCH_PORT_MODE, cpu); + /* CPU port Injection/Extraction configuration */ ocelot_write_rix(ocelot, SYS_PORT_MODE_INCL_XTR_HDR(extraction) | SYS_PORT_MODE_INCL_INJ_HDR(injection), SYS_PORT_MODE, cpu); @@@ -2344,10 -2365,8 +2371,8 @@@ ANA_PORT_VLAN_CFG_VLAN_AWARE_ENA | ANA_PORT_VLAN_CFG_VLAN_POP_CNT(1), ANA_PORT_VLAN_CFG, cpu); - - ocelot->cpu = cpu; } - EXPORT_SYMBOL(ocelot_set_cpu_port); + EXPORT_SYMBOL(ocelot_configure_cpu);
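A hypothetical caller of the renamed helper, for flavor; using an out-of-range index for the no-NPI case follows from the npi >= 0 && npi < num_phys_ports check above:

static void example_cpu_setup(struct ocelot *ocelot, bool have_npi)
{
	/* Illustrative only: -1 leaves the NPI disconnected, 4 wires
	 * port 4 to an external CPU over Ethernet, with short
	 * injection/extraction prefixes in both cases.
	 */
	ocelot_configure_cpu(ocelot, have_npi ? 4 : -1,
			     OCELOT_TAG_PREFIX_SHORT,
			     OCELOT_TAG_PREFIX_SHORT);
}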
int ocelot_init(struct ocelot *ocelot) { @@@ -2499,7 -2518,6 +2524,6 @@@ void ocelot_deinit(struct ocelot *ocelo cancel_delayed_work(&ocelot->stats_work); destroy_workqueue(ocelot->stats_queue); mutex_destroy(&ocelot->stats_lock); - ocelot_ace_deinit(); if (ocelot->ptp_clock) ptp_clock_unregister(ocelot->ptp_clock);
diff --combined drivers/net/ethernet/pensando/ionic/ionic_lif.c index c2f5b691e0fa,aaf4a40fa98b..b903016193df --- a/drivers/net/ethernet/pensando/ionic/ionic_lif.c +++ b/drivers/net/ethernet/pensando/ionic/ionic_lif.c @@@ -84,7 -84,7 +84,7 @@@ static void ionic_link_status_check(str netdev_info(netdev, "Link up - %d Gbps\n", le32_to_cpu(lif->info->status.link_speed) / 1000);
- if (test_bit(IONIC_LIF_UP, lif->state)) { + if (test_bit(IONIC_LIF_F_UP, lif->state)) { netif_tx_wake_all_queues(lif->netdev); netif_carrier_on(netdev); } @@@ -93,12 -93,12 +93,12 @@@
/* carrier off first to avoid watchdog timeout */ netif_carrier_off(netdev); - if (test_bit(IONIC_LIF_UP, lif->state)) + if (test_bit(IONIC_LIF_F_UP, lif->state)) netif_tx_stop_all_queues(netdev); }
link_out: - clear_bit(IONIC_LIF_LINK_CHECK_REQUESTED, lif->state); + clear_bit(IONIC_LIF_F_LINK_CHECK_REQUESTED, lif->state); }
static void ionic_link_status_check_request(struct ionic_lif *lif) @@@ -106,7 -106,7 +106,7 @@@ struct ionic_deferred_work *work;
/* we only need one request outstanding at a time */ - if (test_and_set_bit(IONIC_LIF_LINK_CHECK_REQUESTED, lif->state)) + if (test_and_set_bit(IONIC_LIF_F_LINK_CHECK_REQUESTED, lif->state)) return;
if (in_interrupt()) { @@@ -424,8 -424,9 +424,9 @@@ static int ionic_qcq_alloc(struct ionic ionic_intr_mask_assert(idev->intr_ctrl, new->intr.index, IONIC_INTR_MASK_SET);
- new->intr.cpu = new->intr.index % num_online_cpus(); - if (cpu_online(new->intr.cpu)) + new->intr.cpu = cpumask_local_spread(new->intr.index, + dev_to_node(dev)); + if (new->intr.cpu != -1) cpumask_set_cpu(new->intr.cpu, &new->intr.affinity_mask); } else { @@@ -1093,6 -1094,7 +1094,7 @@@ static int ionic_set_nic_features(struc u64 vlan_flags = IONIC_ETH_HW_VLAN_TX_TAG | IONIC_ETH_HW_VLAN_RX_STRIP | IONIC_ETH_HW_VLAN_RX_FILTER; + u64 old_hw_features; int err;
ctx.cmd.lif_setattr.features = ionic_netdev_features_to_nic(features); @@@ -1100,9 -1102,13 +1102,13 @@@ if (err) return err;
+ old_hw_features = lif->hw_features; lif->hw_features = le64_to_cpu(ctx.cmd.lif_setattr.features & ctx.comp.lif_setattr.features);
+ if ((old_hw_features ^ lif->hw_features) & IONIC_ETH_HW_RX_HASH) + ionic_lif_rss_config(lif, lif->rss_types, NULL, NULL); + if ((vlan_flags & features) && !(vlan_flags & le64_to_cpu(ctx.comp.lif_setattr.features))) dev_info_once(lif->ionic->dev, "NIC is not supporting vlan offload, likely in SmartNIC mode\n"); @@@ -1149,6 -1155,10 +1155,10 @@@ static int ionic_init_nic_features(stru netdev_features_t features; int err;
+ /* no netdev features on the management device */ + if (lif->ionic->is_mgmt_nic) + return 0; + /* set up what we expect to support by default */ features = NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX | @@@ -1356,13 -1366,15 +1366,15 @@@ int ionic_lif_rss_config(struct ionic_l .cmd.lif_setattr = { .opcode = IONIC_CMD_LIF_SETATTR, .attr = IONIC_LIF_ATTR_RSS, - .rss.types = cpu_to_le16(types), .rss.addr = cpu_to_le64(lif->rss_ind_tbl_pa), }, }; unsigned int i, tbl_sz;
- lif->rss_types = types; + if (lif->hw_features & IONIC_ETH_HW_RX_HASH) { + lif->rss_types = types; + ctx.cmd.lif_setattr.rss.types = cpu_to_le16(types); + }
if (key) memcpy(lif->rss_hash_key, key, IONIC_RSS_HASH_KEY_SIZE); @@@ -1578,7 -1590,7 +1590,7 @@@ int ionic_open(struct net_device *netde netif_set_real_num_tx_queues(netdev, lif->nxqs); netif_set_real_num_rx_queues(netdev, lif->nxqs);
- set_bit(IONIC_LIF_UP, lif->state); + set_bit(IONIC_LIF_F_UP, lif->state);
ionic_link_status_check_request(lif); if (netif_carrier_ok(netdev)) @@@ -1598,13 -1610,13 +1610,13 @@@ int ionic_stop(struct net_device *netde struct ionic_lif *lif = netdev_priv(netdev); int err = 0;
- if (!test_bit(IONIC_LIF_UP, lif->state)) { + if (!test_bit(IONIC_LIF_F_UP, lif->state)) { dev_dbg(lif->ionic->dev, "%s: %s state=DOWN\n", __func__, lif->name); return 0; } dev_dbg(lif->ionic->dev, "%s: %s state=UP\n", __func__, lif->name); - clear_bit(IONIC_LIF_UP, lif->state); + clear_bit(IONIC_LIF_F_UP, lif->state);
/* carrier off before disabling queues to avoid watchdog timeout */ netif_carrier_off(netdev); @@@ -1688,7 -1700,7 +1700,7 @@@ static int ionic_set_vf_mac(struct net_ if (!(is_zero_ether_addr(mac) || is_valid_ether_addr(mac))) return -EINVAL;
- down_read(&ionic->vf_op_lock); + down_write(&ionic->vf_op_lock);
if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) { ret = -EINVAL; @@@ -1698,7 -1710,7 +1710,7 @@@ ether_addr_copy(ionic->vfs[vf].macaddr, mac); }
- up_read(&ionic->vf_op_lock); + up_write(&ionic->vf_op_lock); return ret; }
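The read-to-write flip above applies the usual rw-semaphore discipline: lookups may share the lock, but mutating the VF table needs exclusive ownership. A minimal sketch of the split:

#include <linux/rwsem.h>

static void vf_table_update(struct rw_semaphore *lock)
{
	down_write(lock);	/* exclusive: about to mutate shared state */
	/* ... modify VF attributes ... */
	up_write(lock);
}

static void vf_table_lookup(struct rw_semaphore *lock)
{
	down_read(lock);	/* shared: concurrent readers are fine */
	/* ... read VF attributes ... */
	up_read(lock);
}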
@@@ -1719,7 -1731,7 +1731,7 @@@ static int ionic_set_vf_vlan(struct net if (proto != htons(ETH_P_8021Q)) return -EPROTONOSUPPORT;
- down_read(&ionic->vf_op_lock); + down_write(&ionic->vf_op_lock);
if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) { ret = -EINVAL; @@@ -1730,7 -1742,7 +1742,7 @@@ ionic->vfs[vf].vlanid = vlan; }
- up_read(&ionic->vf_op_lock); + up_write(&ionic->vf_op_lock); return ret; }
@@@ -1871,7 -1883,7 +1883,7 @@@ int ionic_reset_queues(struct ionic_li /* Put off the next watchdog timeout */ netif_trans_update(lif->netdev);
- err = ionic_wait_for_bit(lif, IONIC_LIF_QUEUE_RESET); + err = ionic_wait_for_bit(lif, IONIC_LIF_F_QUEUE_RESET); if (err) return err;
@@@ -1881,7 -1893,7 +1893,7 @@@ if (!err && running) ionic_open(lif->netdev);
- clear_bit(IONIC_LIF_QUEUE_RESET, lif->state); + clear_bit(IONIC_LIF_F_QUEUE_RESET, lif->state);
return err; } @@@ -2048,10 -2060,10 +2060,10 @@@ void ionic_lifs_free(struct ionic *ioni
static void ionic_lif_deinit(struct ionic_lif *lif) { - if (!test_bit(IONIC_LIF_INITED, lif->state)) + if (!test_bit(IONIC_LIF_F_INITED, lif->state)) return;
- clear_bit(IONIC_LIF_INITED, lif->state); + clear_bit(IONIC_LIF_F_INITED, lif->state);
ionic_rx_filters_deinit(lif); ionic_lif_rss_deinit(lif); @@@ -2287,7 -2299,7 +2299,7 @@@ static int ionic_lif_init(struct ionic_
lif->rx_copybreak = IONIC_RX_COPYBREAK_DEFAULT;
- set_bit(IONIC_LIF_INITED, lif->state); + set_bit(IONIC_LIF_F_INITED, lif->state);
INIT_WORK(&lif->tx_timeout_work, ionic_tx_timeout_work);
@@@ -2375,6 -2387,12 +2387,12 @@@ int ionic_lifs_register(struct ionic *i { int err;
+	/* the netdev is not registered on the management device; it is
+	 * only used as a vehicle for napi operations on the adminq
+	 */
+	if (ionic->is_mgmt_nic)
+		return 0;
+
 	INIT_WORK(&ionic->nb_work, ionic_lif_notify_work);
ionic->nb.notifier_call = ionic_lif_notify; @@@ -2408,6 -2426,9 +2426,9 @@@ void ionic_lifs_unregister(struct ioni * current model, so don't bother searching the * ionic->lif for candidates to unregister */ + if (!ionic->master_lif) + return; + cancel_work_sync(&ionic->master_lif->deferred.work); cancel_work_sync(&ionic->master_lif->tx_timeout_work); if (ionic->master_lif->netdev->reg_state == NETREG_REGISTERED) diff --combined drivers/net/ethernet/sfc/efx.h index 95395d67ea2d,da54afaa3c44..66dcab140449 --- a/drivers/net/ethernet/sfc/efx.h +++ b/drivers/net/ethernet/sfc/efx.h @@@ -20,7 -20,6 +20,7 @@@ netdev_tx_t efx_hard_start_xmit(struct struct net_device *net_dev); netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb); void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index); +void efx_xmit_done_single(struct efx_tx_queue *tx_queue); int efx_setup_tc(struct net_device *net_dev, enum tc_setup_type type, void *type_data); extern unsigned int efx_piobuf_size; @@@ -151,24 -150,6 +151,6 @@@ static inline s32 efx_filter_get_rx_ids int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb, u16 rxq_index, u32 flow_id); bool __efx_filter_rfs_expire(struct efx_channel *channel, unsigned int quota); - static inline void efx_filter_rfs_expire(struct work_struct *data) - { - struct delayed_work *dwork = to_delayed_work(data); - struct efx_channel *channel; - unsigned int time, quota; - - channel = container_of(dwork, struct efx_channel, filter_work); - time = jiffies - channel->rfs_last_expiry; - quota = channel->rfs_filter_count * time / (30 * HZ); - if (quota > 20 && __efx_filter_rfs_expire(channel, min(channel->rfs_filter_count, quota))) - channel->rfs_last_expiry += time; - /* Ensure we do more work eventually even if NAPI poll is not happening */ - schedule_delayed_work(dwork, 30 * HZ); - } - #define efx_filter_rfs_enabled() 1 - #else - static inline void efx_filter_rfs_expire(struct work_struct *data) {} - #define efx_filter_rfs_enabled() 0 #endif
/* RSS contexts */ diff --combined drivers/net/ethernet/sfc/efx_channels.c index 73d4e39b5b16,d2d738314c50..c492523b986c --- a/drivers/net/ethernet/sfc/efx_channels.c +++ b/drivers/net/ethernet/sfc/efx_channels.c @@@ -485,6 -485,23 +485,23 @@@ void efx_remove_eventq(struct efx_chann * *************************************************************************/
+ #ifdef CONFIG_RFS_ACCEL + static void efx_filter_rfs_expire(struct work_struct *data) + { + struct delayed_work *dwork = to_delayed_work(data); + struct efx_channel *channel; + unsigned int time, quota; + + channel = container_of(dwork, struct efx_channel, filter_work); + time = jiffies - channel->rfs_last_expiry; + quota = channel->rfs_filter_count * time / (30 * HZ); + if (quota >= 20 && __efx_filter_rfs_expire(channel, min(channel->rfs_filter_count, quota))) + channel->rfs_last_expiry += time; + /* Ensure we do more work eventually even if NAPI poll is not happening */ + schedule_delayed_work(dwork, 30 * HZ); + } + #endif + /* Allocate and initialise a channel structure. */ struct efx_channel * efx_alloc_channel(struct efx_nic *efx, int i, struct efx_channel *old_channel) @@@ -583,7 -600,6 +600,7 @@@ struct efx_channel *efx_copy_channel(co if (tx_queue->channel) tx_queue->channel = channel; tx_queue->buffer = NULL; + tx_queue->cb_page = NULL; memset(&tx_queue->txd, 0, sizeof(tx_queue->txd)); }
@@@ -1167,6 -1183,9 +1184,9 @@@ static int efx_poll(struct napi_struct struct efx_channel *channel = container_of(napi, struct efx_channel, napi_str); struct efx_nic *efx = channel->efx; + #ifdef CONFIG_RFS_ACCEL + unsigned int time; + #endif int spent;
netif_vdbg(efx, intr, efx->net_dev, @@@ -1186,7 -1205,10 +1206,10 @@@
#ifdef CONFIG_RFS_ACCEL /* Perhaps expire some ARFS filters */ - mod_delayed_work(system_wq, &channel->filter_work, 0); + time = jiffies - channel->rfs_last_expiry; + /* Would our quota be >= 20? */ + if (channel->rfs_filter_count * time >= 600 * HZ) + mod_delayed_work(system_wq, &channel->filter_work, 0); #endif
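The 600 * HZ constant is the expiry quota test rearranged so the NAPI path avoids a division:

/* quota = rfs_filter_count * time / (30 * HZ) >= 20
 *   <=>  rfs_filter_count * time >= 20 * 30 * HZ
 *   <=>  rfs_filter_count * time >= 600 * HZ
 */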
/* There is no race here; although napi_disable() will diff --combined drivers/net/ethernet/sfc/net_driver.h index 8164f0edcbf0,392bd5b7017e..b836315bac87 --- a/drivers/net/ethernet/sfc/net_driver.h +++ b/drivers/net/ethernet/sfc/net_driver.h @@@ -208,6 -208,8 +208,6 @@@ struct efx_tx_buffer * avoid cache-line ping-pong between the xmit path and the * completion path. * @merge_events: Number of TX merged completion events - * @completed_desc_ptr: Most recent completed pointer - only used with - * timestamping. * @completed_timestamp_major: Top part of the most recent tx timestamp. * @completed_timestamp_minor: Low part of the most recent tx timestamp. * @insert_count: Current insert pointer @@@ -267,6 -269,7 +267,6 @@@ struct efx_tx_queue unsigned int merge_events; unsigned int bytes_compl; unsigned int pkts_compl; - unsigned int completed_desc_ptr; u32 completed_timestamp_major; u32 completed_timestamp_minor;
@@@ -333,7 -336,7 +333,7 @@@ struct efx_rx_buffer struct efx_rx_page_state { dma_addr_t dma_addr;
- unsigned int __pad[0] ____cacheline_aligned; + unsigned int __pad[] ____cacheline_aligned; };
/** diff --combined drivers/net/ethernet/sfc/tx.c index 8aafc54a4684,696a77c20cb7..19b58563cb78 --- a/drivers/net/ethernet/sfc/tx.c +++ b/drivers/net/ethernet/sfc/tx.c @@@ -287,9 -287,8 +287,8 @@@ static int efx_tx_tso_fallback(struct e return PTR_ERR(segments);
dev_consume_skb_any(skb); - skb = segments;
- skb_list_walk_safe(skb, skb, next) { + skb_list_walk_safe(segments, skb, next) { skb_mark_not_on_list(skb); efx_enqueue_skb(tx_queue, skb); } @@@ -535,44 -534,6 +534,44 @@@ netdev_tx_t efx_hard_start_xmit(struct return efx_enqueue_skb(tx_queue, skb); }
+void efx_xmit_done_single(struct efx_tx_queue *tx_queue) +{ + unsigned int pkts_compl = 0, bytes_compl = 0; + unsigned int read_ptr; + bool finished = false; + + read_ptr = tx_queue->read_count & tx_queue->ptr_mask; + + while (!finished) { + struct efx_tx_buffer *buffer = &tx_queue->buffer[read_ptr]; + + if (!efx_tx_buffer_in_use(buffer)) { + struct efx_nic *efx = tx_queue->efx; + + netif_err(efx, hw, efx->net_dev, + "TX queue %d spurious single TX completion\n", + tx_queue->queue); + efx_schedule_reset(efx, RESET_TYPE_TX_SKIP); + return; + } + + /* Need to check the flag before dequeueing. */ + if (buffer->flags & EFX_TX_BUF_SKB) + finished = true; + efx_dequeue_buffer(tx_queue, buffer, &pkts_compl, &bytes_compl); + + ++tx_queue->read_count; + read_ptr = tx_queue->read_count & tx_queue->ptr_mask; + } + + tx_queue->pkts_compl += pkts_compl; + tx_queue->bytes_compl += bytes_compl; + + EFX_WARN_ON_PARANOID(pkts_compl != 1); + + efx_xmit_done_check_empty(tx_queue); +} + void efx_init_tx_queue_core_txq(struct efx_tx_queue *tx_queue) { struct efx_nic *efx = tx_queue->efx; diff --combined drivers/net/phy/phylink.c index 6e66b8e77ec7,19db68d74cb4..a8eeaabb2d18 --- a/drivers/net/phy/phylink.c +++ b/drivers/net/phy/phylink.c @@@ -181,9 -181,11 +181,11 @@@ static int phylink_parse_fixedlink(stru /* We treat the "pause" and "asym-pause" terminology as * defining the link partner's ability. */ if (fwnode_property_read_bool(fixed_node, "pause")) - pl->link_config.pause |= MLO_PAUSE_SYM; + __set_bit(ETHTOOL_LINK_MODE_Pause_BIT, + pl->link_config.lp_advertising); if (fwnode_property_read_bool(fixed_node, "asym-pause")) - pl->link_config.pause |= MLO_PAUSE_ASYM; + __set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, + pl->link_config.lp_advertising);
if (ret == 0) { desc = fwnode_gpiod_get_index(fixed_node, "link", 0, @@@ -215,9 -217,11 +217,11 @@@ DUPLEX_FULL : DUPLEX_HALF; pl->link_config.speed = prop[2]; if (prop[3]) - pl->link_config.pause |= MLO_PAUSE_SYM; + __set_bit(ETHTOOL_LINK_MODE_Pause_BIT, + pl->link_config.lp_advertising); if (prop[4]) - pl->link_config.pause |= MLO_PAUSE_ASYM; + __set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, + pl->link_config.lp_advertising); } }
@@@ -308,11 -312,13 +312,13 @@@ static int phylink_parse_mode(struct ph phylink_set(pl->supported, 1000baseT_Half); phylink_set(pl->supported, 1000baseT_Full); phylink_set(pl->supported, 1000baseX_Full); + phylink_set(pl->supported, 1000baseKX_Full); phylink_set(pl->supported, 2500baseT_Full); phylink_set(pl->supported, 2500baseX_Full); phylink_set(pl->supported, 5000baseT_Full); phylink_set(pl->supported, 10000baseT_Full); phylink_set(pl->supported, 10000baseKR_Full); + phylink_set(pl->supported, 10000baseKX4_Full); phylink_set(pl->supported, 10000baseCR_Full); phylink_set(pl->supported, 10000baseSR_Full); phylink_set(pl->supported, 10000baseLR_Full); @@@ -334,11 -340,42 +340,42 @@@ "failed to validate link configuration for in-band status\n"); return -EINVAL; } + + /* Check if MAC/PCS also supports Autoneg. */ + pl->link_config.an_enabled = phylink_test(pl->supported, Autoneg); }
return 0; }
+ static void phylink_apply_manual_flow(struct phylink *pl, + struct phylink_link_state *state) + { + /* If autoneg is disabled, pause AN is also disabled */ + if (!state->an_enabled) + state->pause &= ~MLO_PAUSE_AN; + + /* Manual configuration of pause modes */ + if (!(pl->link_config.pause & MLO_PAUSE_AN)) + state->pause = pl->link_config.pause; + } + + static void phylink_resolve_flow(struct phylink_link_state *state) + { + bool tx_pause, rx_pause; + + state->pause = MLO_PAUSE_NONE; + if (state->duplex == DUPLEX_FULL) { + linkmode_resolve_pause(state->advertising, + state->lp_advertising, + &tx_pause, &rx_pause); + if (tx_pause) + state->pause |= MLO_PAUSE_TX; + if (rx_pause) + state->pause |= MLO_PAUSE_RX; + } + } + static void phylink_mac_config(struct phylink *pl, const struct phylink_link_state *state) { @@@ -387,49 -424,45 +424,45 @@@ static void phylink_mac_pcs_get_state(s /* The fixed state is... fixed except for the link state, * which may be determined by a GPIO or a callback. */ - static void phylink_get_fixed_state(struct phylink *pl, struct phylink_link_state *state) + static void phylink_get_fixed_state(struct phylink *pl, + struct phylink_link_state *state) { *state = pl->link_config; if (pl->get_fixed_state) pl->get_fixed_state(pl->netdev, state); else if (pl->link_gpio) state->link = !!gpiod_get_value_cansleep(pl->link_gpio); + + phylink_resolve_flow(state); }
- /* Flow control is resolved according to our and the link partners - * advertisements using the following drawn from the 802.3 specs: - * Local device Link partner - * Pause AsymDir Pause AsymDir Result - * 1 X 1 X TX+RX - * 0 1 1 1 TX - * 1 1 0 1 RX - */ - static void phylink_resolve_flow(struct phylink *pl, - struct phylink_link_state *state) + static void phylink_mac_initial_config(struct phylink *pl) { - int new_pause = 0; + struct phylink_link_state link_state;
- if (pl->link_config.pause & MLO_PAUSE_AN) { - int pause = 0; + switch (pl->cur_link_an_mode) { + case MLO_AN_PHY: + link_state = pl->phy_state; + break;
- if (phylink_test(pl->link_config.advertising, Pause)) - pause |= MLO_PAUSE_SYM; - if (phylink_test(pl->link_config.advertising, Asym_Pause)) - pause |= MLO_PAUSE_ASYM; + case MLO_AN_FIXED: + phylink_get_fixed_state(pl, &link_state); + break;
- pause &= state->pause; + case MLO_AN_INBAND: + link_state = pl->link_config; + if (link_state.interface == PHY_INTERFACE_MODE_SGMII) + link_state.pause = MLO_PAUSE_NONE; + break;
- if (pause & MLO_PAUSE_SYM) - new_pause = MLO_PAUSE_TX | MLO_PAUSE_RX; - else if (pause & MLO_PAUSE_ASYM) - new_pause = state->pause & MLO_PAUSE_SYM ? - MLO_PAUSE_TX : MLO_PAUSE_RX; - } else { - new_pause = pl->link_config.pause & MLO_PAUSE_TXRX_MASK; + default: /* can't happen */ + return; }
- state->pause &= ~MLO_PAUSE_TXRX_MASK; - state->pause |= new_pause; + link_state.link = false; + + phylink_apply_manual_flow(pl, &link_state); + phylink_mac_config(pl, &link_state); }
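For reference, the 802.3 resolution that linkmode_resolve_pause() now performs on the advertisement masks is the same truth table the removed helper documented:

/* Local device     Link partner
 * Pause AsymDir    Pause AsymDir    Result
 *   1     X          1     X        TX+RX
 *   0     1          1     1        TX
 *   1     1          0     1        RX
 */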
static const char *phylink_pause_to_str(int pause) @@@ -452,8 -485,11 +485,11 @@@ static void phylink_mac_link_up(struct struct net_device *ndev = pl->netdev;
pl->cur_interface = link_state.interface; - pl->ops->mac_link_up(pl->config, pl->cur_link_an_mode, - pl->cur_interface, pl->phydev); + pl->ops->mac_link_up(pl->config, pl->phydev, + pl->cur_link_an_mode, pl->cur_interface, + link_state.speed, link_state.duplex, + !!(link_state.pause & MLO_PAUSE_TX), + !!(link_state.pause & MLO_PAUSE_RX));
if (ndev) netif_carrier_on(ndev); @@@ -493,7 -529,7 +529,7 @@@ static void phylink_resolve(struct work switch (pl->cur_link_an_mode) { case MLO_AN_PHY: link_state = pl->phy_state; - phylink_resolve_flow(pl, &link_state); + phylink_apply_manual_flow(pl, &link_state); phylink_mac_config_up(pl, &link_state); break;
@@@ -515,10 -551,12 +551,12 @@@ link_state.interface = pl->phy_state.interface;
/* If we have a PHY, we need to update with - * the pause mode bits. */ - link_state.pause |= pl->phy_state.pause; - phylink_resolve_flow(pl, &link_state); + * the PHY flow control bits. */ + link_state.pause = pl->phy_state.pause; + phylink_apply_manual_flow(pl, &link_state); phylink_mac_config(pl, &link_state); + } else { + phylink_apply_manual_flow(pl, &link_state); } break; } @@@ -705,15 -743,18 +743,18 @@@ static void phylink_phy_change(struct p bool do_carrier) { struct phylink *pl = phydev->phylink; + bool tx_pause, rx_pause; + + phy_get_pause(phydev, &tx_pause, &rx_pause);
mutex_lock(&pl->state_mutex); pl->phy_state.speed = phydev->speed; pl->phy_state.duplex = phydev->duplex; pl->phy_state.pause = MLO_PAUSE_NONE; - if (phydev->pause) - pl->phy_state.pause |= MLO_PAUSE_SYM; - if (phydev->asym_pause) - pl->phy_state.pause |= MLO_PAUSE_ASYM; + if (tx_pause) + pl->phy_state.pause |= MLO_PAUSE_TX; + if (rx_pause) + pl->phy_state.pause |= MLO_PAUSE_RX; pl->phy_state.interface = phydev->interface; pl->phy_state.link = up; mutex_unlock(&pl->state_mutex); @@@ -761,14 -802,8 +802,14 @@@ static int phylink_bringup_phy(struct p config.interface = interface;
ret = phylink_validate(pl, supported, &config); - if (ret) + if (ret) { + phylink_warn(pl, "validation of %s with support %*pb and advertisement %*pb failed: %d\n", + phy_modes(config.interface), + __ETHTOOL_LINK_MODE_MASK_NBITS, phy->supported, + __ETHTOOL_LINK_MODE_MASK_NBITS, config.advertising, + ret); return ret; + }
phy->phylink = pl; phy->phy_link_change = phylink_phy_change; @@@ -783,6 -818,9 +824,9 @@@ mutex_lock(&pl->state_mutex); pl->phydev = phy; pl->phy_state.interface = interface; + pl->phy_state.pause = MLO_PAUSE_NONE; + pl->phy_state.speed = SPEED_UNKNOWN; + pl->phy_state.duplex = DUPLEX_UNKNOWN; linkmode_copy(pl->supported, supported); linkmode_copy(pl->link_config.advertising, config.advertising);
@@@ -1012,8 -1050,7 +1056,7 @@@ void phylink_start(struct phylink *pl * a fixed-link to start with the correct parameters, and also * ensures that we set the appropriate advertisement for Serdes links. */ - phylink_resolve_flow(pl, &pl->link_config); - phylink_mac_config(pl, &pl->link_config); + phylink_mac_initial_config(pl);
/* Restart autonegotiation if using 802.3z to ensure that the link * parameters are properly negotiated. This is necessary for DSA @@@ -1379,6 -1416,9 +1422,9 @@@ int phylink_ethtool_set_pauseparam(stru
ASSERT_RTNL();
+ if (pl->cur_link_an_mode == MLO_AN_FIXED) + return -EOPNOTSUPP; + if (!phylink_test(pl->supported, Pause) && !phylink_test(pl->supported, Asym_Pause)) return -EOPNOTSUPP; @@@ -1387,8 -1427,8 +1433,8 @@@ !pause->autoneg && pause->rx_pause != pause->tx_pause) return -EINVAL;
- config->pause &= ~(MLO_PAUSE_AN | MLO_PAUSE_TXRX_MASK); - + mutex_lock(&pl->state_mutex); + config->pause = 0; if (pause->autoneg) config->pause |= MLO_PAUSE_AN; if (pause->rx_pause) @@@ -1396,6 -1436,22 +1442,22 @@@ if (pause->tx_pause) config->pause |= MLO_PAUSE_TX;
+ /* + * See the comments for linkmode_set_pause(), wrt the deficiencies + * with the current implementation. A solution to this issue would + * be: + * ethtool Local device + * rx tx Pause AsymDir + * 0 0 0 0 + * 1 0 1 1 + * 0 1 0 1 + * 1 1 1 1 + * and then use the ethtool rx/tx enablement status to mask the + * rx/tx pause resolution. + */ + linkmode_set_pause(config->advertising, pause->tx_pause, + pause->rx_pause); + /* If we have a PHY, phylib will call our link state function if the * mode has changed, which will trigger a resolve and update the MAC * configuration. @@@ -1405,19 -1461,10 +1467,10 @@@ pause->tx_pause); } else if (!test_bit(PHYLINK_DISABLE_STOPPED, &pl->phylink_disable_state)) { - switch (pl->cur_link_an_mode) { - case MLO_AN_FIXED: - /* Should we allow fixed links to change against the config? */ - phylink_resolve_flow(pl, config); - phylink_mac_config(pl, config); - break; - - case MLO_AN_INBAND: - phylink_mac_config(pl, config); - phylink_mac_an_restart(pl); - break; - } + phylink_mac_config(pl, &pl->link_config); + phylink_mac_an_restart(pl); } + mutex_unlock(&pl->state_mutex);
return 0; } @@@ -1509,13 -1556,14 +1562,14 @@@ static int phylink_mii_emul_read(unsign struct phylink_link_state *state) { struct fixed_phy_status fs; + unsigned long *lpa = state->lp_advertising; int val;
fs.link = state->link; fs.speed = state->speed; fs.duplex = state->duplex; - fs.pause = state->pause & MLO_PAUSE_SYM; - fs.asym_pause = state->pause & MLO_PAUSE_ASYM; + fs.pause = test_bit(ETHTOOL_LINK_MODE_Pause_BIT, lpa); + fs.asym_pause = test_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, lpa);
val = swphy_read_reg(reg, &fs); if (reg == MII_BMSR) { @@@ -1820,7 -1868,7 +1874,7 @@@ static int phylink_sfp_config(struct ph
if (changed && !test_bit(PHYLINK_DISABLE_STOPPED, &pl->phylink_disable_state)) - phylink_mac_config(pl, &pl->link_config); + phylink_mac_initial_config(pl);
return ret; } diff --combined drivers/net/usb/r8152.c index 95b19ce96513,f27fdd6ab86f..8f8d9883d363 --- a/drivers/net/usb/r8152.c +++ b/drivers/net/usb/r8152.c @@@ -891,7 -891,7 +891,7 @@@ struct fw_block struct fw_header { u8 checksum[32]; char version[RTL_VER_SIZE]; - struct fw_block blocks[0]; + struct fw_block blocks[]; } __packed;
/** @@@ -930,7 -930,7 +930,7 @@@ struct fw_mac __le32 reserved; __le16 fw_ver_reg; u8 fw_ver_data; - char info[0]; + char info[]; } __packed;
/** @@@ -982,7 -982,7 +982,7 @@@ struct fw_phy_nc __le16 bp_start; __le16 bp_num; __le16 bp[4]; - char info[0]; + char info[]; } __packed;
enum rtl_fw_type { @@@ -1948,29 -1948,6 +1948,6 @@@ drop } }
- /* msdn_giant_send_check() - * According to the document of microsoft, the TCP Pseudo Header excludes the - * packet length for IPv6 TCP large packets. - */ - static int msdn_giant_send_check(struct sk_buff *skb) - { - const struct ipv6hdr *ipv6h; - struct tcphdr *th; - int ret; - - ret = skb_cow_head(skb, 0); - if (ret) - return ret; - - ipv6h = ipv6_hdr(skb); - th = tcp_hdr(skb); - - th->check = 0; - th->check = ~tcp_v6_check(0, &ipv6h->saddr, &ipv6h->daddr, 0); - - return ret; - } - static inline void rtl_tx_vlan_tag(struct tx_desc *desc, struct sk_buff *skb) { if (skb_vlan_tag_present(skb)) { @@@ -2016,10 -1993,11 +1993,11 @@@ static int r8152_tx_csum(struct r8152 * break;
case htons(ETH_P_IPV6): - if (msdn_giant_send_check(skb)) { + if (skb_cow_head(skb, 0)) { ret = TX_CSUM_TSO; goto unavailable; } + tcp_v6_gso_csum_prep(skb); opts1 |= GTSENDV6; break;
@@@ -3221,8 -3199,6 +3199,8 @@@ static u16 r8153_phy_status(struct r815 }
msleep(20); + if (test_bit(RTL8152_UNPLUG, &tp->flags)) + break; }
return data; @@@ -5404,10 -5380,7 +5382,10 @@@ static void r8153_init(struct r8152 *tp if (ocp_read_word(tp, MCU_TYPE_PLA, PLA_BOOT_CTRL) & AUTOLOAD_DONE) break; + msleep(20); + if (test_bit(RTL8152_UNPLUG, &tp->flags)) + break; }
data = r8153_phy_status(tp, 0); @@@ -5544,10 -5517,7 +5522,10 @@@ static void r8153b_init(struct r8152 *t if (ocp_read_word(tp, MCU_TYPE_PLA, PLA_BOOT_CTRL) & AUTOLOAD_DONE) break; + msleep(20); + if (test_bit(RTL8152_UNPLUG, &tp->flags)) + break; }
data = r8153_phy_status(tp, 0); @@@ -6375,6 -6345,7 +6353,7 @@@ static int rtl8152_set_ringparam(struc }
static const struct ethtool_ops ops = { + .supported_coalesce_params = ETHTOOL_COALESCE_USECS, .get_drvinfo = rtl8152_get_drvinfo, .get_link = ethtool_op_get_link, .nway_reset = rtl8152_nway_reset, diff --combined drivers/net/wireless/mediatek/mt76/dma.c index 1847f55e199b,e5dd7080e88e..75e659774e07 --- a/drivers/net/wireless/mediatek/mt76/dma.c +++ b/drivers/net/wireless/mediatek/mt76/dma.c @@@ -132,6 -132,11 +132,11 @@@ mt76_dma_sync_idx(struct mt76_dev *dev writel(q->ndesc, &q->regs->ring_size); q->head = readl(&q->regs->dma_idx); q->tail = q->head; + } + + static void + mt76_dma_kick_queue(struct mt76_dev *dev, struct mt76_queue *q) + { writel(q->head, &q->regs->cpu_idx); }
@@@ -141,7 -146,7 +146,7 @@@ mt76_dma_tx_cleanup(struct mt76_dev *de struct mt76_sw_queue *sq = &dev->q_tx[qid]; struct mt76_queue *q = sq->q; struct mt76_queue_entry entry; - unsigned int n_swq_queued[4] = {}; + unsigned int n_swq_queued[8] = {}; unsigned int n_queued = 0; bool wake = false; int i, last; @@@ -178,15 -183,25 +183,25 @@@ spin_lock_bh(&q->lock);
q->queued -= n_queued; - for (i = 0; i < ARRAY_SIZE(n_swq_queued); i++) { + for (i = 0; i < 4; i++) { if (!n_swq_queued[i]) continue;
dev->q_tx[i].swq_queued -= n_swq_queued[i]; }
- if (flush) + /* ext PHY */ + for (i = 0; i < 4; i++) { + if (!n_swq_queued[i]) + continue; + + dev->q_tx[__MT_TXQ_MAX + i].swq_queued -= n_swq_queued[4 + i]; + } + + if (flush) { mt76_dma_sync_idx(dev, q); + mt76_dma_kick_queue(dev, q); + }
wake = wake && q->stopped && qid < IEEE80211_NUM_ACS && q->queued < q->ndesc - 8; @@@ -238,7 -253,9 +253,9 @@@ mt76_dma_dequeue(struct mt76_dev *dev, if (!q->queued) return NULL;
- if (!flush && !(q->desc[idx].ctrl & cpu_to_le32(MT_DMA_CTL_DMA_DONE))) + if (flush) + q->desc[idx].ctrl |= cpu_to_le32(MT_DMA_CTL_DMA_DONE); + else if (!(q->desc[idx].ctrl & cpu_to_le32(MT_DMA_CTL_DMA_DONE))) return NULL;
q->tail = (q->tail + 1) % q->ndesc; @@@ -247,12 -264,6 +264,6 @@@ return mt76_dma_get_buf(dev, q, idx, len, info, more); }
- static void - mt76_dma_kick_queue(struct mt76_dev *dev, struct mt76_queue *q) - { - writel(q->head, &q->regs->cpu_idx); - } - static int mt76_dma_tx_queue_skb_raw(struct mt76_dev *dev, enum mt76_txq_id qid, struct sk_buff *skb, u32 tx_info) @@@ -261,10 -272,13 +272,13 @@@ struct mt76_queue_buf buf; dma_addr_t addr;
+ if (q->queued + 1 >= q->ndesc - 1) + goto error; + addr = dma_map_single(dev->dev, skb->data, skb->len, DMA_TO_DEVICE); if (unlikely(dma_mapping_error(dev->dev, addr))) - return -ENOMEM; + goto error;
buf.addr = addr; buf.len = skb->len; @@@ -275,6 -289,10 +289,10 @@@ spin_unlock_bh(&q->lock);
return 0; + + error: + dev_kfree_skb(skb); + return -ENOMEM; }
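Why the new guard leaves slack in the ring, since the off-by-two is easy to misread:

/* With head/tail indexing, a completely full ring is indistinguishable
 * from an empty one (head == tail in both cases), so enqueue paths
 * conventionally refuse the last slot; here the skb is freed instead
 * of overrunning the descriptor ring.
 */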
static int @@@ -286,6 -304,7 +304,7 @@@ mt76_dma_tx_queue_skb(struct mt76_dev * struct mt76_tx_info tx_info = { .skb = skb, }; + struct ieee80211_hw *hw; int len, n = 0, ret = -ENOMEM; struct mt76_queue_entry e; struct mt76_txwi_cache *t; @@@ -295,7 -314,8 +314,8 @@@
t = mt76_get_txwi(dev); if (!t) { - ieee80211_free_txskb(dev->hw, skb); + hw = mt76_tx_status_get_hw(dev, skb); + ieee80211_free_txskb(hw, skb); return -ENOMEM; } txwi = mt76_get_txwi_ptr(dev, t); @@@ -427,7 -447,7 +447,7 @@@ mt76_dma_rx_reset(struct mt76_dev *dev int i;
for (i = 0; i < q->ndesc; i++) - q->desc[i].ctrl &= ~cpu_to_le32(MT_DMA_CTL_DMA_DONE); + q->desc[i].ctrl = cpu_to_le32(MT_DMA_CTL_DMA_DONE);
mt76_dma_rx_cleanup(dev, q); mt76_dma_sync_idx(dev, q); @@@ -447,13 -467,10 +467,13 @@@ mt76_add_fragment(struct mt76_dev *dev struct page *page = virt_to_head_page(data); int offset = data - page_address(page); struct sk_buff *skb = q->rx_head; + struct skb_shared_info *shinfo = skb_shinfo(skb);
- offset += q->buf_offset; - skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, offset, len, - q->buf_size); + if (shinfo->nr_frags < ARRAY_SIZE(shinfo->frags)) { + offset += q->buf_offset; + skb_add_rx_frag(skb, shinfo->nr_frags, page, offset, len, + q->buf_size); + }
if (more) return; @@@ -531,6 -548,7 +551,7 @@@ mt76_dma_rx_poll(struct napi_struct *na dev = container_of(napi->dev, struct mt76_dev, napi_dev); qid = napi - dev->napi;
+ local_bh_disable(); rcu_read_lock();
do { @@@ -540,6 -558,7 +561,7 @@@ } while (cur && done < budget);
rcu_read_unlock(); + local_bh_enable();
if (done < budget && napi_complete(napi)) dev->drv->rx_poll_complete(dev, qid); @@@ -558,7 -577,6 +580,6 @@@ mt76_dma_init(struct mt76_dev *dev netif_napi_add(&dev->napi_dev, &dev->napi[i], mt76_dma_rx_poll, 64); mt76_dma_rx_fill(dev, &dev->q_rx[i]); - skb_queue_head_init(&dev->rx_skb[i]); napi_enable(&dev->napi[i]); }
diff --combined drivers/pci/pci.c index e79cccbbdd39,86821313c007..0faa389deee0 --- a/drivers/pci/pci.c +++ b/drivers/pci/pci.c @@@ -173,6 -173,29 +173,29 @@@ unsigned char pci_bus_max_busnr(struct } EXPORT_SYMBOL_GPL(pci_bus_max_busnr);
+ /** + * pci_status_get_and_clear_errors - return and clear error bits in PCI_STATUS + * @pdev: the PCI device + * + * Returns error bits set in PCI_STATUS and clears them. + */ + int pci_status_get_and_clear_errors(struct pci_dev *pdev) + { + u16 status; + int ret; + + ret = pci_read_config_word(pdev, PCI_STATUS, &status); + if (ret != PCIBIOS_SUCCESSFUL) + return -EIO; + + status &= PCI_STATUS_ERROR_BITS; + if (status) + pci_write_config_word(pdev, PCI_STATUS, status); + + return status; + } + EXPORT_SYMBOL_GPL(pci_status_get_and_clear_errors); + #ifdef CONFIG_HAS_IOMEM void __iomem *pci_ioremap_bar(struct pci_dev *pdev, int bar) { @@@ -557,6 -580,40 +580,40 @@@ int pci_find_ext_capability(struct pci_ } EXPORT_SYMBOL_GPL(pci_find_ext_capability);
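A hypothetical caller of the helper added above, clearing latched error bits and logging whatever was set:

static void check_and_clear_errors(struct pci_dev *pdev)
{
	int status = pci_status_get_and_clear_errors(pdev);

	if (status < 0)
		pci_warn(pdev, "PCI_STATUS read failed\n");
	else if (status)
		pci_warn(pdev, "PCI status errors: %#06x\n", status);
}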
+ /** + * pci_get_dsn - Read and return the 8-byte Device Serial Number + * @dev: PCI device to query + * + * Looks up the PCI_EXT_CAP_ID_DSN and reads the 8 bytes of the Device Serial + * Number. + * + * Returns the DSN, or zero if the capability does not exist. + */ + u64 pci_get_dsn(struct pci_dev *dev) + { + u32 dword; + u64 dsn; + int pos; + + pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_DSN); + if (!pos) + return 0; + + /* + * The Device Serial Number is two dwords offset 4 bytes from the + * capability position. The specification says that the first dword is + * the lower half, and the second dword is the upper half. + */ + pos += 4; + pci_read_config_dword(dev, pos, &dword); + dsn = (u64)dword; + pci_read_config_dword(dev, pos + 4, &dword); + dsn |= ((u64)dword) << 32; + + return dsn; + } + EXPORT_SYMBOL_GPL(pci_get_dsn); + static int __pci_find_next_ht_cap(struct pci_dev *dev, int pos, int ht_cap) { int rc, ttl = PCI_FIND_CAP_TTL; @@@ -5784,10 -5841,19 +5841,10 @@@ enum pci_bus_speed pcie_get_speed_cap(s * where only 2.5 GT/s and 5.0 GT/s speeds were defined. */ pcie_capability_read_dword(dev, PCI_EXP_LNKCAP2, &lnkcap2); - if (lnkcap2) { /* PCIe r3.0-compliant */ - if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_32_0GB) - return PCIE_SPEED_32_0GT; - else if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_16_0GB) - return PCIE_SPEED_16_0GT; - else if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_8_0GB) - return PCIE_SPEED_8_0GT; - else if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_5_0GB) - return PCIE_SPEED_5_0GT; - else if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_2_5GB) - return PCIE_SPEED_2_5GT; - return PCI_SPEED_UNKNOWN; - } + + /* PCIe r3.0-compliant */ + if (lnkcap2) + return PCIE_LNKCAP2_SLS2SPEED(lnkcap2);
pcie_capability_read_dword(dev, PCI_EXP_LNKCAP, &lnkcap); if ((lnkcap & PCI_EXP_LNKCAP_SLS) == PCI_EXP_LNKCAP_SLS_5_0GB) @@@ -5863,14 -5929,14 +5920,14 @@@ void __pcie_print_link_status(struct pc if (bw_avail >= bw_cap && verbose) pci_info(dev, "%u.%03u Gb/s available PCIe bandwidth (%s x%d link)\n", bw_cap / 1000, bw_cap % 1000, - PCIE_SPEED2STR(speed_cap), width_cap); + pci_speed_string(speed_cap), width_cap); else if (bw_avail < bw_cap) pci_info(dev, "%u.%03u Gb/s available PCIe bandwidth, limited by %s x%d link at %s (capable of %u.%03u Gb/s with %s x%d link)\n", bw_avail / 1000, bw_avail % 1000, - PCIE_SPEED2STR(speed), width, + pci_speed_string(speed), width, limiting_dev ? pci_name(limiting_dev) : "<unknown>", bw_cap / 1000, bw_cap % 1000, - PCIE_SPEED2STR(speed_cap), width_cap); + pci_speed_string(speed_cap), width_cap); }
/** diff --combined drivers/s390/cio/qdio_setup.c index 973786fce9c1,66e4bdca9d89..23295f2c8e04 --- a/drivers/s390/cio/qdio_setup.c +++ b/drivers/s390/cio/qdio_setup.c @@@ -224,8 -224,15 +224,15 @@@ static void setup_queues(struct qdio_ir setup_queues_misc(q, irq_ptr, qdio_init->input_handler, i);
q->is_input_q = 1; - q->u.in.queue_start_poll = qdio_init->queue_start_poll_array ? - qdio_init->queue_start_poll_array[i] : NULL; + if (qdio_init->queue_start_poll_array && + qdio_init->queue_start_poll_array[i]) { + q->u.in.queue_start_poll = + qdio_init->queue_start_poll_array[i]; + set_bit(QDIO_QUEUE_IRQS_DISABLED, + &q->u.in.queue_irq_state); + } else { + q->u.in.queue_start_poll = NULL; + }
setup_storage_lists(q, irq_ptr, input_sbal_array, i); input_sbal_array += QDIO_MAX_BUFFERS_PER_Q; @@@ -451,10 -458,10 +458,10 @@@ static void setup_qib(struct qdio_irq * memcpy(irq_ptr->qib.ebcnam, init_data->adapter_name, 8); }
-int qdio_setup_irq(struct qdio_initialize *init_data) +int qdio_setup_irq(struct qdio_irq *irq_ptr, struct qdio_initialize *init_data) { struct ciw *ciw; - struct qdio_irq *irq_ptr = init_data->cdev->private->qdio_data; + struct ccw_device *cdev = init_data->cdev;
memset(&irq_ptr->qib, 0, sizeof(irq_ptr->qib)); memset(&irq_ptr->siga_flag, 0, sizeof(irq_ptr->siga_flag)); @@@ -462,9 -469,8 +469,9 @@@ memset(&irq_ptr->ssqd_desc, 0, sizeof(irq_ptr->ssqd_desc)); memset(&irq_ptr->perf_stat, 0, sizeof(irq_ptr->perf_stat));
- irq_ptr->debugfs_dev = irq_ptr->debugfs_perf = NULL; - irq_ptr->sch_token = irq_ptr->state = irq_ptr->perf_stat_enabled = 0; + irq_ptr->debugfs_dev = NULL; + irq_ptr->sch_token = irq_ptr->perf_stat_enabled = 0; + irq_ptr->state = QDIO_IRQ_STATE_INACTIVE;
/* wipes qib.ac, required by ar7063 */ memset(irq_ptr->qdr, 0, sizeof(struct qdr)); @@@ -472,9 -478,9 +479,9 @@@ irq_ptr->int_parm = init_data->int_parm; irq_ptr->nr_input_qs = init_data->no_input_qs; irq_ptr->nr_output_qs = init_data->no_output_qs; - irq_ptr->cdev = init_data->cdev; + irq_ptr->cdev = cdev; irq_ptr->scan_threshold = init_data->scan_threshold; - ccw_device_get_schid(irq_ptr->cdev, &irq_ptr->schid); + ccw_device_get_schid(cdev, &irq_ptr->schid); setup_queues(irq_ptr, init_data);
setup_qib(irq_ptr, init_data); @@@ -490,14 -496,14 +497,14 @@@ /* qdr, qib, sls, slsbs, slibs, sbales are filled now */
/* get qdio commands */ - ciw = ccw_device_get_ciw(init_data->cdev, CIW_TYPE_EQUEUE); + ciw = ccw_device_get_ciw(cdev, CIW_TYPE_EQUEUE); if (!ciw) { DBF_ERROR("%4x NO EQ", irq_ptr->schid.sch_no); return -EINVAL; } irq_ptr->equeue = *ciw;
- ciw = ccw_device_get_ciw(init_data->cdev, CIW_TYPE_AQUEUE); + ciw = ccw_device_get_ciw(cdev, CIW_TYPE_AQUEUE); if (!ciw) { DBF_ERROR("%4x NO AQ", irq_ptr->schid.sch_no); return -EINVAL; @@@ -505,10 -511,10 +512,10 @@@ irq_ptr->aqueue = *ciw;
/* set new interrupt handler */ - spin_lock_irq(get_ccwdev_lock(irq_ptr->cdev)); - irq_ptr->orig_handler = init_data->cdev->handler; - init_data->cdev->handler = qdio_int_handler; - spin_unlock_irq(get_ccwdev_lock(irq_ptr->cdev)); + spin_lock_irq(get_ccwdev_lock(cdev)); + irq_ptr->orig_handler = cdev->handler; + cdev->handler = qdio_int_handler; + spin_unlock_irq(get_ccwdev_lock(cdev)); return 0; }
diff --combined drivers/s390/net/qeth_core.h index 468cada49e72,b7d64690ea38..962be94ed3ca --- a/drivers/s390/net/qeth_core.h +++ b/drivers/s390/net/qeth_core.h @@@ -189,6 -189,8 +189,8 @@@ struct qeth_vnicc_info #define QETH_IQD_MIN_TXQ 2 /* One for ucast, one for mcast. */ #define QETH_IQD_MCAST_TXQ 0 #define QETH_IQD_MIN_UCAST_TXQ 1 + + #define QETH_RX_COPYBREAK (PAGE_SIZE >> 1) #define QETH_IN_BUF_SIZE_DEFAULT 65536 #define QETH_IN_BUF_COUNT_DEFAULT 64 #define QETH_IN_BUF_COUNT_HSDEFAULT 128 @@@ -219,9 -221,6 +221,6 @@@ #define QETH_HIGH_WATERMARK_PACK 5 #define QETH_WATERMARK_PACK_FUZZ 1
- /* large receive scatter gather copy break */ - #define QETH_RX_SG_CB (PAGE_SIZE >> 1) - struct qeth_hdr_layer3 { __u8 id; __u8 flags; @@@ -369,7 -368,7 +368,7 @@@ enum qeth_qdio_info_states struct qeth_buffer_pool_entry { struct list_head list; struct list_head init_list; - void *elements[QDIO_MAX_ELEMENTS_PER_BUFFER]; + struct page *elements[QDIO_MAX_ELEMENTS_PER_BUFFER]; };
struct qeth_qdio_buffer_pool { @@@ -711,7 -710,6 +710,6 @@@ struct qeth_card_options struct qeth_vnicc_info vnicc; /* VNICC options */ int fake_broadcast; enum qeth_discipline_id layer; - int rx_sg_cb; enum qeth_ipa_isolation_modes isolation; enum qeth_ipa_isolation_modes prev_isolation; int sniffer; @@@ -770,6 -768,10 +768,10 @@@ struct qeth_switch_info __u32 settings; };
+ struct qeth_priv { + unsigned int rx_copybreak; + }; + #define QETH_NAPI_WEIGHT NAPI_POLL_WEIGHT
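Since qeth_priv sits in the netdev private area, the RX copybreak becomes per-interface state reachable from anywhere that holds the net_device; the qeth_core_main.c hunks below allocate it and consult it on the RX path. An illustrative accessor, not from the patch:

/* Hedged sketch: read the copybreak the same way the RX path does. */
static u32 foo_get_rx_copybreak(struct net_device *dev)
{
	struct qeth_priv *priv = netdev_priv(dev);

	return READ_ONCE(priv->rx_copybreak);
}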
struct qeth_card { @@@ -983,7 -985,7 +985,7 @@@ extern const struct attribute_group qet extern const struct device_type qeth_generic_devtype;
const char *qeth_get_cardname_short(struct qeth_card *); -int qeth_realloc_buffer_pool(struct qeth_card *, int); +int qeth_resize_buffer_pool(struct qeth_card *card, unsigned int count); int qeth_core_load_discipline(struct qeth_card *, enum qeth_discipline_id); void qeth_core_free_discipline(struct qeth_card *);
diff --combined drivers/s390/net/qeth_core_main.c index 6d3f2f14b414,37c17ad8ee25..6caa78d51bd1 --- a/drivers/s390/net/qeth_core_main.c +++ b/drivers/s390/net/qeth_core_main.c @@@ -65,6 -65,7 +65,6 @@@ static struct lock_class_key qdio_out_s static void qeth_issue_next_read_cb(struct qeth_card *card, struct qeth_cmd_buffer *iob, unsigned int data_length); -static void qeth_free_buffer_pool(struct qeth_card *); static int qeth_qdio_establish(struct qeth_card *); static void qeth_free_qdio_queues(struct qeth_card *card); static void qeth_notify_skbs(struct qeth_qdio_out_q *queue, @@@ -211,121 -212,49 +211,121 @@@ void qeth_clear_working_pool_list(struc } EXPORT_SYMBOL_GPL(qeth_clear_working_pool_list);
+static void qeth_free_pool_entry(struct qeth_buffer_pool_entry *entry) +{ + unsigned int i; + + for (i = 0; i < ARRAY_SIZE(entry->elements); i++) { + if (entry->elements[i]) + __free_page(entry->elements[i]); + } + + kfree(entry); +} + +static void qeth_free_buffer_pool(struct qeth_card *card) +{ + struct qeth_buffer_pool_entry *entry, *tmp; + + list_for_each_entry_safe(entry, tmp, &card->qdio.init_pool.entry_list, + init_list) { + list_del(&entry->init_list); + qeth_free_pool_entry(entry); + } +} + +static struct qeth_buffer_pool_entry *qeth_alloc_pool_entry(unsigned int pages) +{ + struct qeth_buffer_pool_entry *entry; + unsigned int i; + + entry = kzalloc(sizeof(*entry), GFP_KERNEL); + if (!entry) + return NULL; + + for (i = 0; i < pages; i++) { + entry->elements[i] = alloc_page(GFP_KERNEL); + + if (!entry->elements[i]) { + qeth_free_pool_entry(entry); + return NULL; + } + } + + return entry; +} + static int qeth_alloc_buffer_pool(struct qeth_card *card) { - struct qeth_buffer_pool_entry *pool_entry; - void *ptr; - int i, j; + unsigned int buf_elements = QETH_MAX_BUFFER_ELEMENTS(card); + unsigned int i;
QETH_CARD_TEXT(card, 5, "alocpool"); for (i = 0; i < card->qdio.init_pool.buf_count; ++i) { - pool_entry = kzalloc(sizeof(*pool_entry), GFP_KERNEL); - if (!pool_entry) { + struct qeth_buffer_pool_entry *entry; + + entry = qeth_alloc_pool_entry(buf_elements); + if (!entry) { qeth_free_buffer_pool(card); return -ENOMEM; } - for (j = 0; j < QETH_MAX_BUFFER_ELEMENTS(card); ++j) { - ptr = (void *) __get_free_page(GFP_KERNEL); - if (!ptr) { - while (j > 0) - free_page((unsigned long) - pool_entry->elements[--j]); - kfree(pool_entry); - qeth_free_buffer_pool(card); - return -ENOMEM; - } - pool_entry->elements[j] = ptr; - } - list_add(&pool_entry->init_list, - &card->qdio.init_pool.entry_list); + + list_add(&entry->init_list, &card->qdio.init_pool.entry_list); } return 0; }
-int qeth_realloc_buffer_pool(struct qeth_card *card, int bufcnt) +int qeth_resize_buffer_pool(struct qeth_card *card, unsigned int count) { + unsigned int buf_elements = QETH_MAX_BUFFER_ELEMENTS(card); + struct qeth_qdio_buffer_pool *pool = &card->qdio.init_pool; + struct qeth_buffer_pool_entry *entry, *tmp; + int delta = count - pool->buf_count; + LIST_HEAD(entries); + QETH_CARD_TEXT(card, 2, "realcbp");
- /* TODO: steel/add buffers from/to a running card's buffer pool (?) */ - qeth_clear_working_pool_list(card); - qeth_free_buffer_pool(card); - card->qdio.in_buf_pool.buf_count = bufcnt; - card->qdio.init_pool.buf_count = bufcnt; - return qeth_alloc_buffer_pool(card); + /* Defer until queue is allocated: */ + if (!card->qdio.in_q) + goto out; + + /* Remove entries from the pool: */ + while (delta < 0) { + entry = list_first_entry(&pool->entry_list, + struct qeth_buffer_pool_entry, + init_list); + list_del(&entry->init_list); + qeth_free_pool_entry(entry); + + delta++; + } + + /* Allocate additional entries: */ + while (delta > 0) { + entry = qeth_alloc_pool_entry(buf_elements); + if (!entry) { + list_for_each_entry_safe(entry, tmp, &entries, + init_list) { + list_del(&entry->init_list); + qeth_free_pool_entry(entry); + } + + return -ENOMEM; + } + + list_add(&entry->init_list, &entries); + + delta--; + } + + list_splice(&entries, &pool->entry_list); + +out: + card->qdio.in_buf_pool.buf_count = count; + pool->buf_count = count; + return 0; } -EXPORT_SYMBOL_GPL(qeth_realloc_buffer_pool); +EXPORT_SYMBOL_GPL(qeth_resize_buffer_pool);
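qeth_resize_buffer_pool() adjusts a live pool by the delta instead of tearing everything down and reallocating. A hedged sketch of a caller, e.g. a sysfs store handler; the bounds check is illustrative:

/* Hypothetical caller: validate the count, let the helper do the delta. */
static int foo_set_inbuf_count(struct qeth_card *card, unsigned int count)
{
	if (count < QETH_IN_BUF_COUNT_MIN || count > QETH_IN_BUF_COUNT_MAX)
		return -EINVAL;		/* bounds illustrative */

	return qeth_resize_buffer_pool(card, count);
}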
static void qeth_free_qdio_queue(struct qeth_qdio_q *q) { @@@ -813,7 -742,7 +813,7 @@@ static void qeth_issue_next_read_cb(str /* fall through */ default: qeth_clear_ipacmd_list(card); - goto out; + goto err_idx; }
cmd = __ipa_reply(iob); @@@ -866,8 -795,9 +866,9 @@@ out memcpy(&card->seqno.pdu_hdr_ack, QETH_PDU_HEADER_SEQ_NO(iob->data), QETH_SEQ_NO_LENGTH); - qeth_put_cmd(iob); __qeth_issue_next_read(card); + err_idx: + qeth_put_cmd(iob); }
static int qeth_set_thread_start_bit(struct qeth_card *card, @@@ -1241,6 -1171,19 +1242,6 @@@ void qeth_drain_output_queues(struct qe } EXPORT_SYMBOL_GPL(qeth_drain_output_queues);
-static void qeth_free_buffer_pool(struct qeth_card *card) -{ - struct qeth_buffer_pool_entry *pool_entry, *tmp; - int i = 0; - list_for_each_entry_safe(pool_entry, tmp, - &card->qdio.init_pool.entry_list, init_list){ - for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(card); ++i) - free_page((unsigned long)pool_entry->elements[i]); - list_del(&pool_entry->init_list); - kfree(pool_entry); - } -} - static int qeth_osa_set_output_queues(struct qeth_card *card, bool single) { unsigned int count = single ? 1 : card->dev->num_tx_queues; @@@ -1262,6 -1205,7 +1263,6 @@@ if (count == 1) dev_info(&card->gdev->dev, "Priority Queueing not supported\n");
- card->qdio.default_out_queue = single ? 0 : QETH_DEFAULT_QUEUE; card->qdio.no_out_queues = count; return 0; } @@@ -1314,7 -1258,6 +1315,6 @@@ static void qeth_set_initial_options(st { card->options.route4.type = NO_ROUTER; card->options.route6.type = NO_ROUTER; - card->options.rx_sg_cb = QETH_RX_SG_CB; card->options.isolation = ISOLATION_MODE_NONE; card->options.cq = QETH_CQ_DISABLED; card->options.layer = QETH_DISCIPLINE_UNDETERMINED; @@@ -1682,17 -1625,16 +1682,16 @@@ static void qeth_set_blkt_defaults(stru } }
- static void qeth_init_tokens(struct qeth_card *card) + static void qeth_idx_init(struct qeth_card *card) { + memset(&card->seqno, 0, sizeof(card->seqno)); + card->token.issuer_rm_w = 0x00010103UL; card->token.cm_filter_w = 0x00010108UL; card->token.cm_connection_w = 0x0001010aUL; card->token.ulp_filter_w = 0x0001010bUL; card->token.ulp_connection_w = 0x0001010dUL; - }
- static void qeth_init_func_level(struct qeth_card *card) - { switch (card->info.type) { case QETH_CARD_TYPE_IQD: card->info.func_level = QETH_IDX_FUNC_LEVEL_IQD; @@@ -2450,6 -2392,7 +2449,6 @@@ static void qeth_free_qdio_queues(struc return;
qeth_free_cq(card); - cancel_delayed_work_sync(&card->buffer_reclaim_work); for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j) { if (card->qdio.in_q->bufs[j].rx_skb) dev_kfree_skb_any(card->qdio.in_q->bufs[j].rx_skb); @@@ -2631,6 -2574,7 +2630,6 @@@ static struct qeth_buffer_pool_entry *q struct list_head *plh; struct qeth_buffer_pool_entry *entry; int i, free; - struct page *page;
if (list_empty(&card->qdio.in_buf_pool.entry_list)) return NULL; @@@ -2639,7 -2583,7 +2638,7 @@@ entry = list_entry(plh, struct qeth_buffer_pool_entry, list); free = 1; for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(card); ++i) { - if (page_count(virt_to_page(entry->elements[i])) > 1) { + if (page_count(entry->elements[i]) > 1) { free = 0; break; } @@@ -2654,15 -2598,15 +2653,15 @@@ entry = list_entry(card->qdio.in_buf_pool.entry_list.next, struct qeth_buffer_pool_entry, list); for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(card); ++i) { - if (page_count(virt_to_page(entry->elements[i])) > 1) { - page = alloc_page(GFP_ATOMIC); - if (!page) { + if (page_count(entry->elements[i]) > 1) { + struct page *page = alloc_page(GFP_ATOMIC); + + if (!page) return NULL; - } else { - free_page((unsigned long)entry->elements[i]); - entry->elements[i] = page_address(page); - QETH_CARD_STAT_INC(card, rx_sg_alloc_page); - } + + __free_page(entry->elements[i]); + entry->elements[i] = page; + QETH_CARD_STAT_INC(card, rx_sg_alloc_page); } } list_del_init(&entry->list); @@@ -2680,12 -2624,12 +2679,12 @@@ static int qeth_init_input_buffer(struc ETH_HLEN + sizeof(struct ipv6hdr)); if (!buf->rx_skb) - return 1; + return -ENOMEM; }
pool_entry = qeth_find_free_buffer_pool_entry(card); if (!pool_entry) - return 1; + return -ENOBUFS;
/* * since the buffer is accessed only from the input_tasklet @@@ -2698,7 -2642,7 +2697,7 @@@ for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(card); ++i) { buf->buffer->element[i].length = PAGE_SIZE; buf->buffer->element[i].addr = - virt_to_phys(pool_entry->elements[i]); + page_to_phys(pool_entry->elements[i]); if (i == QETH_MAX_BUFFER_ELEMENTS(card) - 1) buf->buffer->element[i].eflags = SBAL_EFLAGS_LAST_ENTRY; else @@@ -2730,15 -2674,10 +2729,15 @@@ static int qeth_init_qdio_queues(struc /* inbound queue */ qdio_reset_buffers(card->qdio.in_q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q); memset(&card->rx, 0, sizeof(struct qeth_rx)); + qeth_initialize_working_pool_list(card); /*give only as many buffers to hardware as we have buffer pool entries*/ - for (i = 0; i < card->qdio.in_buf_pool.buf_count - 1; ++i) - qeth_init_input_buffer(card, &card->qdio.in_q->bufs[i]); + for (i = 0; i < card->qdio.in_buf_pool.buf_count - 1; i++) { + rc = qeth_init_input_buffer(card, &card->qdio.in_q->bufs[i]); + if (rc) + return rc; + } + card->qdio.in_q->next_buf_to_init = card->qdio.in_buf_pool.buf_count - 1; rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT, 0, 0, @@@ -3467,8 -3406,7 +3466,7 @@@ static void qeth_qdio_start_poll(struc { struct qeth_card *card = (struct qeth_card *)card_ptr;
- if (card->dev->flags & IFF_UP) - napi_schedule_irqoff(&card->napi); + napi_schedule_irqoff(&card->napi); }
int qeth_configure_cq(struct qeth_card *card, enum qeth_cq cq) @@@ -5012,9 -4950,9 +5010,9 @@@ retriable else goto retry; } + qeth_determine_capabilities(card); - qeth_init_tokens(card); - qeth_init_func_level(card); + qeth_idx_init(card);
rc = qeth_idx_activate_read_channel(card); if (rc == -EINTR) { @@@ -5329,6 -5267,7 +5327,7 @@@ static int qeth_extract_skb(struct qeth int *__offset) { struct qdio_buffer_element *element = *__element; + struct qeth_priv *priv = netdev_priv(card->dev); struct qdio_buffer *buffer = qethbuffer->buffer; struct napi_struct *napi = &card->napi; unsigned int linear_len = 0; @@@ -5404,7 -5343,7 +5403,7 @@@ next_packet }
use_rx_sg = (card->options.cq == QETH_CQ_ENABLED) || - (skb_len > card->options.rx_sg_cb && + (skb_len > READ_ONCE(priv->rx_copybreak) && !atomic_read(&card->force_alloc_skb) && !IS_OSN(card));
@@@ -5953,25 -5892,30 +5952,30 @@@ static void qeth_clear_dbf_list(void static struct net_device *qeth_alloc_netdev(struct qeth_card *card) { struct net_device *dev; + struct qeth_priv *priv;
switch (card->info.type) { case QETH_CARD_TYPE_IQD: - dev = alloc_netdev_mqs(0, "hsi%d", NET_NAME_UNKNOWN, + dev = alloc_netdev_mqs(sizeof(*priv), "hsi%d", NET_NAME_UNKNOWN, ether_setup, QETH_MAX_QUEUES, 1); break; case QETH_CARD_TYPE_OSM: - dev = alloc_etherdev(0); + dev = alloc_etherdev(sizeof(*priv)); break; case QETH_CARD_TYPE_OSN: - dev = alloc_netdev(0, "osn%d", NET_NAME_UNKNOWN, ether_setup); + dev = alloc_netdev(sizeof(*priv), "osn%d", NET_NAME_UNKNOWN, + ether_setup); break; default: - dev = alloc_etherdev_mqs(0, QETH_MAX_QUEUES, 1); + dev = alloc_etherdev_mqs(sizeof(*priv), QETH_MAX_QUEUES, 1); }
if (!dev) return NULL;
+ priv = netdev_priv(dev); + priv->rx_copybreak = QETH_RX_COPYBREAK; + dev->ml_priv = card; dev->watchdog_timeo = QETH_TX_TIMEOUT; dev->min_mtu = IS_OSN(card) ? 64 : 576; @@@ -6638,9 -6582,6 +6642,6 @@@ int qeth_open(struct net_device *dev
QETH_CARD_TEXT(card, 4, "qethopen");
- if (qdio_stop_irq(CARD_DDEV(card), 0) < 0) - return -EIO; - card->data.state = CH_STATE_UP; netif_tx_start_all_queues(dev);
@@@ -6690,6 -6631,8 +6691,8 @@@ int qeth_stop(struct net_device *dev }
napi_disable(&card->napi); + qdio_stop_irq(CARD_DDEV(card), 0); + return 0; } EXPORT_SYMBOL_GPL(qeth_stop); diff --combined drivers/s390/net/qeth_l2_main.c index 8fb29371788b,0bf5e7133229..4c8e93132e08 --- a/drivers/s390/net/qeth_l2_main.c +++ b/drivers/s390/net/qeth_l2_main.c @@@ -284,7 -284,6 +284,7 @@@ static void qeth_l2_stop_card(struct qe if (card->state == CARD_STATE_SOFTSETUP) { qeth_clear_ipacmd_list(card); qeth_drain_output_queues(card); + cancel_delayed_work_sync(&card->buffer_reclaim_work); card->state = CARD_STATE_DOWN; }
@@@ -1568,23 -1567,11 +1568,11 @@@ static int qeth_l2_vnicc_makerc(struct return rc; }
- /* generic VNICC request call back control */ - struct _qeth_l2_vnicc_request_cbctl { - struct { - union{ - u32 *sup_cmds; - u32 *timeout; - }; - } result; - }; - /* generic VNICC request call back */ static int qeth_l2_vnicc_request_cb(struct qeth_card *card, struct qeth_reply *reply, unsigned long data) { - struct _qeth_l2_vnicc_request_cbctl *cbctl = - (struct _qeth_l2_vnicc_request_cbctl *) reply->param; struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data; struct qeth_ipacmd_vnicc *rep = &cmd->data.vnicc; u32 sub_cmd = cmd->data.vnicc.hdr.sub_command; @@@ -1597,9 -1584,9 +1585,9 @@@ card->options.vnicc.cur_chars = rep->vnicc_cmds.enabled;
if (sub_cmd == IPA_VNICC_QUERY_CMDS) - *cbctl->result.sup_cmds = rep->data.query_cmds.sup_cmds; + *(u32 *)reply->param = rep->data.query_cmds.sup_cmds; else if (sub_cmd == IPA_VNICC_GET_TIMEOUT) - *cbctl->result.timeout = rep->data.getset_timeout.timeout; + *(u32 *)reply->param = rep->data.getset_timeout.timeout;
return 0; } @@@ -1640,7 -1627,6 +1628,6 @@@ static int qeth_l2_vnicc_query_chars(st static int qeth_l2_vnicc_query_cmds(struct qeth_card *card, u32 vnic_char, u32 *sup_cmds) { - struct _qeth_l2_vnicc_request_cbctl cbctl; struct qeth_cmd_buffer *iob;
QETH_CARD_TEXT(card, 2, "vniccqcm"); @@@ -1651,10 -1637,7 +1638,7 @@@
__ipa_cmd(iob)->data.vnicc.data.query_cmds.vnic_char = vnic_char;
- /* prepare callback control */ - cbctl.result.sup_cmds = sup_cmds; - - return qeth_send_ipa_cmd(card, iob, qeth_l2_vnicc_request_cb, &cbctl); + return qeth_send_ipa_cmd(card, iob, qeth_l2_vnicc_request_cb, sup_cmds); }
/* VNICC enable/disable characteristic request */ @@@ -1678,7 -1661,6 +1662,6 @@@ static int qeth_l2_vnicc_getset_timeout u32 cmd, u32 *timeout) { struct qeth_vnicc_getset_timeout *getset_timeout; - struct _qeth_l2_vnicc_request_cbctl cbctl; struct qeth_cmd_buffer *iob;
QETH_CARD_TEXT(card, 2, "vniccgst"); @@@ -1693,11 -1675,7 +1676,7 @@@ if (cmd == IPA_VNICC_SET_TIMEOUT) getset_timeout->timeout = *timeout;
- /* prepare callback control */ - if (cmd == IPA_VNICC_GET_TIMEOUT) - cbctl.result.timeout = timeout; - - return qeth_send_ipa_cmd(card, iob, qeth_l2_vnicc_request_cb, &cbctl); + return qeth_send_ipa_cmd(card, iob, qeth_l2_vnicc_request_cb, timeout); }
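With the cbctl wrapper gone, qeth_l2_vnicc_request_cb() writes its u32 result straight through reply->param, so callers simply hand in the destination pointer. A hypothetical caller:

/* Illustrative only: the result lands directly in sup_cmds. */
static int foo_vnicc_supported(struct qeth_card *card, u32 vnic_char)
{
	u32 sup_cmds = 0;
	int rc;

	rc = qeth_l2_vnicc_query_cmds(card, vnic_char, &sup_cmds);
	if (rc)
		return rc;

	return sup_cmds ? 0 : -EOPNOTSUPP;	/* check is illustrative */
}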
/* set current VNICC flag state; called from sysfs store function */ diff --combined drivers/s390/net/qeth_l3_main.c index 82f800d1d7b3,1000e18c1090..8a803d6c9357 --- a/drivers/s390/net/qeth_l3_main.c +++ b/drivers/s390/net/qeth_l3_main.c @@@ -920,9 -920,11 +920,11 @@@ static int qeth_l3_iqd_read_initial_mac
if (cmd->hdr.return_code) return -EIO; + if (!is_valid_ether_addr(cmd->data.create_destroy_addr.mac_addr)) + return -EADDRNOTAVAIL;
ether_addr_copy(card->dev->dev_addr, - cmd->data.create_destroy_addr.unique_id); + cmd->data.create_destroy_addr.mac_addr); return 0; }
@@@ -930,7 -932,6 +932,6 @@@ static int qeth_l3_iqd_read_initial_mac { int rc = 0; struct qeth_cmd_buffer *iob; - struct qeth_ipa_cmd *cmd;
QETH_CARD_TEXT(card, 2, "hsrmac");
@@@ -938,9 -939,6 +939,6 @@@ IPA_DATA_SIZEOF(create_destroy_addr)); if (!iob) return -ENOMEM; - cmd = __ipa_cmd(iob); - *((__u16 *) &cmd->data.create_destroy_addr.unique_id[6]) = - card->info.unique_id;
rc = qeth_send_ipa_cmd(card, iob, qeth_l3_iqd_read_initial_mac_cb, NULL); @@@ -953,8 -951,7 +951,7 @@@ static int qeth_l3_get_unique_id_cb(str struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
if (cmd->hdr.return_code == 0) { - card->info.unique_id = *((__u16 *) - &cmd->data.create_destroy_addr.unique_id[6]); + card->info.unique_id = cmd->data.create_destroy_addr.uid; return 0; }
@@@ -968,7 -965,6 +965,6 @@@ static int qeth_l3_get_unique_id(struc { int rc = 0; struct qeth_cmd_buffer *iob; - struct qeth_ipa_cmd *cmd;
QETH_CARD_TEXT(card, 2, "guniqeid");
@@@ -982,10 -978,8 +978,8 @@@ IPA_DATA_SIZEOF(create_destroy_addr)); if (!iob) return -ENOMEM; - cmd = __ipa_cmd(iob); - *((__u16 *) &cmd->data.create_destroy_addr.unique_id[6]) = - card->info.unique_id;
+ __ipa_cmd(iob)->data.create_destroy_addr.uid = card->info.unique_id; rc = qeth_send_ipa_cmd(card, iob, qeth_l3_get_unique_id_cb, NULL); return rc; } @@@ -1178,7 -1172,6 +1172,7 @@@ static void qeth_l3_stop_card(struct qe qeth_l3_clear_ip_htable(card, 1); qeth_clear_ipacmd_list(card); qeth_drain_output_queues(card); + cancel_delayed_work_sync(&card->buffer_reclaim_work); card->state = CARD_STATE_DOWN; }
diff --combined fs/sysfs/group.c index 1e2a096057bc,5afe0e7ff7cd..fbb117757c52 --- a/fs/sysfs/group.c +++ b/fs/sysfs/group.c @@@ -13,6 -13,7 +13,7 @@@ #include <linux/dcache.h> #include <linux/namei.h> #include <linux/err.h> + #include <linux/fs.h> #include "sysfs.h"
@@@ -424,25 -425,6 +425,25 @@@ EXPORT_SYMBOL_GPL(sysfs_remove_link_fro int __compat_only_sysfs_link_entry_to_kobj(struct kobject *kobj, struct kobject *target_kobj, const char *target_name) +{ + return compat_only_sysfs_link_entry_to_kobj(kobj, target_kobj, + target_name, NULL); +} +EXPORT_SYMBOL_GPL(__compat_only_sysfs_link_entry_to_kobj); + +/** + * compat_only_sysfs_link_entry_to_kobj - add a symlink to a kobject pointing + * to a group or an attribute + * @kobj: The kobject containing the group. + * @target_kobj: The target kobject. + * @target_name: The name of the target group or attribute. + * @symlink_name: The name of the symlink file (target_name will be + * considered if symlink_name is NULL). + */ +int compat_only_sysfs_link_entry_to_kobj(struct kobject *kobj, + struct kobject *target_kobj, + const char *target_name, + const char *symlink_name) { struct kernfs_node *target; struct kernfs_node *entry; @@@ -467,15 -449,126 +468,129 @@@ return -ENOENT; }
- link = kernfs_create_link(kobj->sd, target_name, entry); + if (!symlink_name) + symlink_name = target_name; + + link = kernfs_create_link(kobj->sd, symlink_name, entry); if (PTR_ERR(link) == -EEXIST) - sysfs_warn_dup(kobj->sd, target_name); + sysfs_warn_dup(kobj->sd, symlink_name);
kernfs_put(entry); kernfs_put(target); return PTR_ERR_OR_ZERO(link); } -EXPORT_SYMBOL_GPL(__compat_only_sysfs_link_entry_to_kobj); +EXPORT_SYMBOL_GPL(compat_only_sysfs_link_entry_to_kobj); + + static int sysfs_group_attrs_change_owner(struct kernfs_node *grp_kn, + const struct attribute_group *grp, + struct iattr *newattrs) + { + struct kernfs_node *kn; + int error; + + if (grp->attrs) { + struct attribute *const *attr; + + for (attr = grp->attrs; *attr; attr++) { + kn = kernfs_find_and_get(grp_kn, (*attr)->name); + if (!kn) + return -ENOENT; + + error = kernfs_setattr(kn, newattrs); + kernfs_put(kn); + if (error) + return error; + } + } + + if (grp->bin_attrs) { + struct bin_attribute *const *bin_attr; + + for (bin_attr = grp->bin_attrs; *bin_attr; bin_attr++) { + kn = kernfs_find_and_get(grp_kn, (*bin_attr)->attr.name); + if (!kn) + return -ENOENT; + + error = kernfs_setattr(kn, newattrs); + kernfs_put(kn); + if (error) + return error; + } + } + + return 0; + } + + /** + * sysfs_group_change_owner - change owner of an attribute group. + * @kobj: The kobject containing the group. + * @grp: The attribute group. + * @kuid: new owner's kuid + * @kgid: new owner's kgid + * + * Returns 0 on success or error code on failure. + */ + int sysfs_group_change_owner(struct kobject *kobj, + const struct attribute_group *grp, kuid_t kuid, + kgid_t kgid) + { + struct kernfs_node *grp_kn; + int error; + struct iattr newattrs = { + .ia_valid = ATTR_UID | ATTR_GID, + .ia_uid = kuid, + .ia_gid = kgid, + }; + + if (!kobj->state_in_sysfs) + return -EINVAL; + + if (grp->name) { + grp_kn = kernfs_find_and_get(kobj->sd, grp->name); + } else { + kernfs_get(kobj->sd); + grp_kn = kobj->sd; + } + if (!grp_kn) + return -ENOENT; + + error = kernfs_setattr(grp_kn, &newattrs); + if (!error) + error = sysfs_group_attrs_change_owner(grp_kn, grp, &newattrs); + + kernfs_put(grp_kn); + + return error; + } + EXPORT_SYMBOL_GPL(sysfs_group_change_owner); + + /** + * sysfs_groups_change_owner - change owner of a set of attribute groups. + * @kobj: The kobject containing the groups. + * @groups: The attribute groups. + * @kuid: new owner's kuid + * @kgid: new owner's kgid + * + * Returns 0 on success or error code on failure. + */ + int sysfs_groups_change_owner(struct kobject *kobj, + const struct attribute_group **groups, + kuid_t kuid, kgid_t kgid) + { + int error = 0, i; + + if (!kobj->state_in_sysfs) + return -EINVAL; + + if (!groups) + return 0; + + for (i = 0; groups[i]; i++) { + error = sysfs_group_change_owner(kobj, groups[i], kuid, kgid); + if (error) + break; + } + + return error; + } + EXPORT_SYMBOL_GPL(sysfs_groups_change_owner); diff --combined include/linux/device.h index fa04dfd22bbc,3e40533d2037..1311f276f533 --- a/include/linux/device.h +++ b/include/linux/device.h @@@ -798,17 -798,6 +798,17 @@@ static inline struct device_node *dev_o return dev->of_node; }
+static inline bool dev_has_sync_state(struct device *dev) +{ + if (!dev) + return false; + if (dev->driver && dev->driver->sync_state) + return true; + if (dev->bus && dev->bus->sync_state) + return true; + return false; +} + /* * High level routines for use by the bus drivers */ @@@ -828,6 -817,7 +828,7 @@@ extern struct device *device_find_child extern int device_rename(struct device *dev, const char *new_name); extern int device_move(struct device *dev, struct device *new_parent, enum dpm_order dpm_order); + extern int device_change_owner(struct device *dev, kuid_t kuid, kgid_t kgid); extern const char *device_get_devnode(struct device *dev, umode_t *mode, kuid_t *uid, kgid_t *gid, const char **tmp); diff --combined include/linux/inet_diag.h index c91cf2dee12a,e4ba25d63913..ce9ed1c0602f --- a/include/linux/inet_diag.h +++ b/include/linux/inet_diag.h @@@ -2,19 -2,22 +2,17 @@@ #ifndef _INET_DIAG_H_ #define _INET_DIAG_H_ 1
+#include <net/netlink.h> #include <uapi/linux/inet_diag.h>
-struct net; -struct sock; struct inet_hashinfo; -struct nlattr; -struct nlmsghdr; -struct sk_buff; -struct netlink_callback;
struct inet_diag_handler { void (*dump)(struct sk_buff *skb, struct netlink_callback *cb, - const struct inet_diag_req_v2 *r, - struct nlattr *bc); + const struct inet_diag_req_v2 *r);
- int (*dump_one)(struct sk_buff *in_skb, - const struct nlmsghdr *nlh, + int (*dump_one)(struct netlink_callback *cb, const struct inet_diag_req_v2 *req);
void (*idiag_get_info)(struct sock *sk, @@@ -35,18 -38,25 +33,25 @@@ __u16 idiag_info_size; };
+ struct bpf_sk_storage_diag; + struct inet_diag_dump_data { + struct nlattr *req_nlas[__INET_DIAG_REQ_MAX]; + #define inet_diag_nla_bc req_nlas[INET_DIAG_REQ_BYTECODE] + #define inet_diag_nla_bpf_stgs req_nlas[INET_DIAG_REQ_SK_BPF_STORAGES] + + struct bpf_sk_storage_diag *bpf_stg_diag; + }; + struct inet_connection_sock; int inet_sk_diag_fill(struct sock *sk, struct inet_connection_sock *icsk, - struct sk_buff *skb, const struct inet_diag_req_v2 *req, - struct user_namespace *user_ns, - u32 pid, u32 seq, u16 nlmsg_flags, - const struct nlmsghdr *unlh, bool net_admin); + struct sk_buff *skb, struct netlink_callback *cb, + const struct inet_diag_req_v2 *req, + u16 nlmsg_flags, bool net_admin); void inet_diag_dump_icsk(struct inet_hashinfo *h, struct sk_buff *skb, struct netlink_callback *cb, - const struct inet_diag_req_v2 *r, - struct nlattr *bc); + const struct inet_diag_req_v2 *r); int inet_diag_dump_one_icsk(struct inet_hashinfo *hashinfo, - struct sk_buff *in_skb, const struct nlmsghdr *nlh, + struct netlink_callback *cb, const struct inet_diag_req_v2 *req);
struct sock *inet_diag_find_one_icsk(struct net *net, @@@ -57,17 -67,6 +62,17 @@@ int inet_diag_bc_sk(const struct nlatt
void inet_diag_msg_common_fill(struct inet_diag_msg *r, struct sock *sk);
+static inline size_t inet_diag_msg_attrs_size(void) +{ + return nla_total_size(1) /* INET_DIAG_SHUTDOWN */ + + nla_total_size(1) /* INET_DIAG_TOS */ +#if IS_ENABLED(CONFIG_IPV6) + + nla_total_size(1) /* INET_DIAG_TCLASS */ + + nla_total_size(1) /* INET_DIAG_SKV6ONLY */ +#endif + + nla_total_size(4) /* INET_DIAG_MARK */ + + nla_total_size(4); /* INET_DIAG_CLASS_ID */ +} int inet_diag_msg_attrs_fill(struct sock *sk, struct sk_buff *skb, struct inet_diag_msg *r, int ext, struct user_namespace *user_ns, bool net_admin); diff --combined include/linux/pci.h index 76f4806a154c,fc54b8922e66..6c9eb5923598 --- a/include/linux/pci.h +++ b/include/linux/pci.h @@@ -42,6 -42,13 +42,13 @@@
#include <linux/pci_ids.h>
+ #define PCI_STATUS_ERROR_BITS (PCI_STATUS_DETECTED_PARITY | \ + PCI_STATUS_SIG_SYSTEM_ERROR | \ + PCI_STATUS_REC_MASTER_ABORT | \ + PCI_STATUS_REC_TARGET_ABORT | \ + PCI_STATUS_SIG_TARGET_ABORT | \ + PCI_STATUS_PARITY) + /* * The PCI interface treats multi-function devices as independent * devices. The slot/function address of each device is encoded @@@ -236,7 -243,7 +243,7 @@@ enum pcie_link_width PCIE_LNK_WIDTH_UNKNOWN = 0xff, };
-/* Based on the PCI Hotplug Spec, but some values are made up by us */ +/* See matching string table in pci_speed_string() */ enum pci_bus_speed { PCI_SPEED_33MHz = 0x00, PCI_SPEED_66MHz = 0x01, @@@ -1045,6 -1052,8 +1052,8 @@@ int pci_find_ht_capability(struct pci_d int pci_find_next_ht_capability(struct pci_dev *dev, int pos, int ht_cap); struct pci_bus *pci_find_next_bus(const struct pci_bus *from);
+ u64 pci_get_dsn(struct pci_dev *dev); + struct pci_dev *pci_get_device(unsigned int vendor, unsigned int device, struct pci_dev *from); struct pci_dev *pci_get_subsys(unsigned int vendor, unsigned int device, @@@ -1203,6 -1212,7 +1212,7 @@@ int pci_select_bars(struct pci_dev *dev bool pci_device_is_present(struct pci_dev *pdev); void pci_ignore_hotplug(struct pci_dev *dev); struct pci_dev *pci_real_dma_dev(struct pci_dev *dev); + int pci_status_get_and_clear_errors(struct pci_dev *pdev);
int __printf(6, 7) pci_request_irq(struct pci_dev *dev, unsigned int nr, irq_handler_t handler, irq_handler_t thread_fn, void *dev_id, @@@ -1699,6 -1709,9 +1709,9 @@@ static inline int pci_find_next_capabil static inline int pci_find_ext_capability(struct pci_dev *dev, int cap) { return 0; }
+ static inline u64 pci_get_dsn(struct pci_dev *dev) + { return 0; } + /* Power management related routines */ static inline int pci_save_state(struct pci_dev *dev) { return 0; } static inline void pci_restore_state(struct pci_dev *dev) { } diff --combined include/linux/phy.h index 22f5e763e894,e72dbd0d2d6a..082741198c18 --- a/include/linux/phy.h +++ b/include/linux/phy.h @@@ -289,6 -289,7 +289,7 @@@ static inline struct mii_bus *devm_mdio return devm_mdiobus_alloc_size(dev, 0); }
+ struct mii_bus *mdio_find_bus(const char *mdio_name); void devm_mdiobus_free(struct device *dev, struct mii_bus *bus); struct phy_device *mdiobus_scan(struct mii_bus *bus, int addr);
@@@ -557,7 -558,6 +558,7 @@@ struct phy_driver /* * Checks if the PHY generated an interrupt. * For multi-PHY devices with shared PHY interrupt pin + * Any set interrupt bits have to be cleared. */ int (*did_interrupt)(struct phy_device *phydev);
@@@ -1258,6 -1258,9 +1259,9 @@@ void phy_set_sym_pause(struct phy_devic void phy_set_asym_pause(struct phy_device *phydev, bool rx, bool tx); bool phy_validate_pause(struct phy_device *phydev, struct ethtool_pauseparam *pp); + void phy_get_pause(struct phy_device *phydev, bool *tx_pause, bool *rx_pause); + void phy_resolve_pause(unsigned long *local_adv, unsigned long *partner_adv, + bool *tx_pause, bool *rx_pause);
int phy_register_fixup(const char *bus_id, u32 phy_uid, u32 phy_uid_mask, int (*run)(struct phy_device *)); diff --combined include/linux/sysfs.h index 7462315a643b,9e531ec76274..aae751521d3a --- a/include/linux/sysfs.h +++ b/include/linux/sysfs.h @@@ -300,10 -300,6 +300,10 @@@ void sysfs_remove_link_from_group(struc int __compat_only_sysfs_link_entry_to_kobj(struct kobject *kobj, struct kobject *target_kobj, const char *target_name); +int compat_only_sysfs_link_entry_to_kobj(struct kobject *kobj, + struct kobject *target_kobj, + const char *target_name, + const char *symlink_name);
void sysfs_notify(struct kobject *kobj, const char *dir, const char *attr);
@@@ -314,6 -310,18 +314,18 @@@ static inline void sysfs_enable_ns(stru return kernfs_enable_ns(kn); }
+ int sysfs_file_change_owner(struct kobject *kobj, const char *name, kuid_t kuid, + kgid_t kgid); + int sysfs_change_owner(struct kobject *kobj, kuid_t kuid, kgid_t kgid); + int sysfs_link_change_owner(struct kobject *kobj, struct kobject *targ, + const char *name, kuid_t kuid, kgid_t kgid); + int sysfs_groups_change_owner(struct kobject *kobj, + const struct attribute_group **groups, + kuid_t kuid, kgid_t kgid); + int sysfs_group_change_owner(struct kobject *kobj, + const struct attribute_group *groups, kuid_t kuid, + kgid_t kgid); + #else /* CONFIG_SYSFS */
static inline int sysfs_create_dir_ns(struct kobject *kobj, const void *ns) @@@ -512,14 -520,6 +524,14 @@@ static inline int __compat_only_sysfs_l return 0; }
+static inline int compat_only_sysfs_link_entry_to_kobj(struct kobject *kobj, + struct kobject *target_kobj, + const char *target_name, + const char *symlink_name) +{ + return 0; +} + static inline void sysfs_notify(struct kobject *kobj, const char *dir, const char *attr) { @@@ -534,6 -534,40 +546,40 @@@ static inline void sysfs_enable_ns(stru { }
+ static inline int sysfs_file_change_owner(struct kobject *kobj, + const char *name, kuid_t kuid, + kgid_t kgid) + { + return 0; + } + + static inline int sysfs_link_change_owner(struct kobject *kobj, + struct kobject *targ, + const char *name, kuid_t kuid, + kgid_t kgid) + { + return 0; + } + + static inline int sysfs_change_owner(struct kobject *kobj, kuid_t kuid, kgid_t kgid) + { + return 0; + } + + static inline int sysfs_groups_change_owner(struct kobject *kobj, + const struct attribute_group **groups, + kuid_t kuid, kgid_t kgid) + { + return 0; + } + + static inline int sysfs_group_change_owner(struct kobject *kobj, + const struct attribute_group *groups, + kuid_t kuid, + kgid_t kgid) + { + return 0; + } + #endif /* CONFIG_SYSFS */
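The owner-change helpers are meant to compose: a caller that wants to chown a kobject's directory and then every attribute group registered on it can stack them. A sketch, assuming the caller supplies the groups array (as the device core does for device types):

#include <linux/sysfs.h>

/* Hedged sketch: directory first, then all registered groups. */
static int foo_change_sysfs_owner(struct kobject *kobj,
				  const struct attribute_group **groups,
				  kuid_t kuid, kgid_t kgid)
{
	int error;

	error = sysfs_change_owner(kobj, kuid, kgid);
	if (error)
		return error;

	return sysfs_groups_change_owner(kobj, groups, kuid, kgid);
}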
static inline int __must_check sysfs_create_file(struct kobject *kobj, diff --combined include/net/drop_monitor.h index f68bc373544a,ddd441a60e03..3f5b6ddb3179 --- a/include/net/drop_monitor.h +++ b/include/net/drop_monitor.h @@@ -6,20 -6,23 +6,23 @@@ #include <linux/ktime.h> #include <linux/netdevice.h> #include <linux/skbuff.h> + #include <net/flow_offload.h>
/** * struct net_dm_hw_metadata - Hardware-supplied packet metadata. * @trap_group_name: Hardware trap group name. * @trap_name: Hardware trap name. * @input_dev: Input netdevice. + * @fa_cookie: Flow action user cookie. */ struct net_dm_hw_metadata { const char *trap_group_name; const char *trap_name; struct net_device *input_dev; + const struct flow_action_cookie *fa_cookie; };
-#if IS_ENABLED(CONFIG_NET_DROP_MONITOR) +#if IS_REACHABLE(CONFIG_NET_DROP_MONITOR) void net_dm_hw_report(struct sk_buff *skb, const struct net_dm_hw_metadata *hw_metadata); #else diff --combined kernel/bpf/bpf_struct_ops.c index 68a89a9f7ccd,c498f0fffb40..77f271b305f5 --- a/kernel/bpf/bpf_struct_ops.c +++ b/kernel/bpf/bpf_struct_ops.c @@@ -23,7 -23,7 +23,7 @@@ enum bpf_struct_ops_state
struct bpf_struct_ops_value { BPF_STRUCT_OPS_COMMON_VALUE; - char data[0] ____cacheline_aligned_in_smp; + char data[] ____cacheline_aligned_in_smp; };
struct bpf_struct_ops_map { @@@ -482,21 -482,13 +482,21 @@@ static int bpf_struct_ops_map_delete_el prev_state = cmpxchg(&st_map->kvalue.state, BPF_STRUCT_OPS_STATE_INUSE, BPF_STRUCT_OPS_STATE_TOBEFREE); - if (prev_state == BPF_STRUCT_OPS_STATE_INUSE) { + switch (prev_state) { + case BPF_STRUCT_OPS_STATE_INUSE: st_map->st_ops->unreg(&st_map->kvalue.data); if (refcount_dec_and_test(&st_map->kvalue.refcnt)) bpf_map_put(map); + return 0; + case BPF_STRUCT_OPS_STATE_TOBEFREE: + return -EINPROGRESS; + case BPF_STRUCT_OPS_STATE_INIT: + return -ENOENT; + default: + WARN_ON_ONCE(1); + /* Should never happen. Treat it as not found. */ + return -ENOENT; } - - return 0; }
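Seen from userspace, the new switch gives each prior state a distinct errno when deleting the (single) element of a struct_ops map. A hypothetical probe using bpf(2) directly; the interpretation comments are assumptions based on the hunk above:

#include <errno.h>
#include <linux/bpf.h>
#include <sys/syscall.h>
#include <unistd.h>

static int foo_unregister_struct_ops(int map_fd)
{
	union bpf_attr attr = { .map_fd = map_fd };
	__u32 key = 0;

	attr.key = (__u64)(unsigned long)&key;
	if (syscall(__NR_bpf, BPF_MAP_DELETE_ELEM, &attr, sizeof(attr)) == 0)
		return 0;		/* was INUSE, now unregistered */
	if (errno == EINPROGRESS)
		return 0;		/* teardown already under way */
	return -errno;			/* ENOENT: was never registered */
}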
static void bpf_struct_ops_map_seq_show_elem(struct bpf_map *map, void *key, diff --combined kernel/bpf/syscall.c index 0c7fb0d4836d,c536c65256ad..7ff410430f5d --- a/kernel/bpf/syscall.c +++ b/kernel/bpf/syscall.c @@@ -171,11 -171,7 +171,7 @@@ static int bpf_map_update_value(struct flags); }
- /* must increment bpf_prog_active to avoid kprobe+bpf triggering from - * inside bpf map update or delete otherwise deadlocks are possible - */ - preempt_disable(); - __this_cpu_inc(bpf_prog_active); + bpf_disable_instrumentation(); if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH || map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) { err = bpf_percpu_hash_update(map, key, value, flags); @@@ -206,8 -202,7 +202,7 @@@ err = map->ops->map_update_elem(map, key, value, flags); rcu_read_unlock(); } - __this_cpu_dec(bpf_prog_active); - preempt_enable(); + bpf_enable_instrumentation(); maybe_wait_bpf_programs(map);
return err; @@@ -222,8 -217,7 +217,7 @@@ static int bpf_map_copy_value(struct bp if (bpf_map_is_dev_bound(map)) return bpf_map_offload_lookup_elem(map, key, value);
- preempt_disable(); - this_cpu_inc(bpf_prog_active); + bpf_disable_instrumentation(); if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH || map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) { err = bpf_percpu_hash_copy(map, key, value); @@@ -268,8 -262,7 +262,7 @@@ rcu_read_unlock(); }
- this_cpu_dec(bpf_prog_active); - preempt_enable(); + bpf_enable_instrumentation(); maybe_wait_bpf_programs(map);
return err; @@@ -909,6 -902,21 +902,21 @@@ void bpf_map_inc_with_uref(struct bpf_m } EXPORT_SYMBOL_GPL(bpf_map_inc_with_uref);
+ struct bpf_map *bpf_map_get(u32 ufd) + { + struct fd f = fdget(ufd); + struct bpf_map *map; + + map = __bpf_map_get(f); + if (IS_ERR(map)) + return map; + + bpf_map_inc(map); + fdput(f); + + return map; + } + struct bpf_map *bpf_map_get_with_uref(u32 ufd) { struct fd f = fdget(ufd); @@@ -1136,13 -1144,11 +1144,11 @@@ static int map_delete_elem(union bpf_at goto out; }
- preempt_disable(); - __this_cpu_inc(bpf_prog_active); + bpf_disable_instrumentation(); rcu_read_lock(); err = map->ops->map_delete_elem(map, key); rcu_read_unlock(); - __this_cpu_dec(bpf_prog_active); - preempt_enable(); + bpf_enable_instrumentation(); maybe_wait_bpf_programs(map); out: kfree(key); @@@ -1254,13 -1260,11 +1260,11 @@@ int generic_map_delete_batch(struct bpf break; }
- preempt_disable(); - __this_cpu_inc(bpf_prog_active); + bpf_disable_instrumentation(); rcu_read_lock(); err = map->ops->map_delete_elem(map, key); rcu_read_unlock(); - __this_cpu_dec(bpf_prog_active); - preempt_enable(); + bpf_enable_instrumentation(); maybe_wait_bpf_programs(map); if (err) break; @@@ -1510,11 -1514,6 +1514,11 @@@ static int map_freeze(const union bpf_a if (IS_ERR(map)) return PTR_ERR(map);
+ if (map->map_type == BPF_MAP_TYPE_STRUCT_OPS) { + fdput(f); + return -ENOTSUPP; + } + mutex_lock(&map->freeze_mutex);
if (map->writecnt) { diff --combined kernel/trace/bpf_trace.c index 68250d433bd7,07764c761073..8eebc30ae7e2 --- a/kernel/trace/bpf_trace.c +++ b/kernel/trace/bpf_trace.c @@@ -83,7 -83,7 +83,7 @@@ unsigned int trace_call_bpf(struct trac if (in_nmi()) /* not supported yet */ return 1;
- preempt_disable(); + cant_sleep();
if (unlikely(__this_cpu_inc_return(bpf_prog_active) != 1)) { /* @@@ -115,11 -115,9 +115,9 @@@
out: __this_cpu_dec(bpf_prog_active); - preempt_enable();
return ret; } - EXPORT_SYMBOL_GPL(trace_call_bpf);
#ifdef CONFIG_BPF_KPROBE_OVERRIDE BPF_CALL_2(bpf_override_return, struct pt_regs *, regs, unsigned long, rc) @@@ -732,7 -730,7 +730,7 @@@ static int bpf_send_signal_common(u32 s if (unlikely(!nmi_uaccess_okay())) return -EPERM;
- if (in_nmi()) { + if (irqs_disabled()) { /* Do an early check on signal validity. Otherwise, * the error is lost in deferred irq_work. */ @@@ -843,6 -841,8 +841,8 @@@ tracing_func_proto(enum bpf_func_id fun return &bpf_send_signal_proto; case BPF_FUNC_send_signal_thread: return &bpf_send_signal_thread_proto; + case BPF_FUNC_perf_event_read_value: + return &bpf_perf_event_read_value_proto; default: return NULL; } @@@ -858,8 -858,6 +858,6 @@@ kprobe_prog_func_proto(enum bpf_func_i return &bpf_get_stackid_proto; case BPF_FUNC_get_stack: return &bpf_get_stack_proto; - case BPF_FUNC_perf_event_read_value: - return &bpf_perf_event_read_value_proto; #ifdef CONFIG_BPF_KPROBE_OVERRIDE case BPF_FUNC_override_return: return &bpf_override_return_proto; @@@ -1028,6 -1026,45 +1026,45 @@@ static const struct bpf_func_proto bpf_ .arg3_type = ARG_CONST_SIZE, };
+ BPF_CALL_4(bpf_read_branch_records, struct bpf_perf_event_data_kern *, ctx, + void *, buf, u32, size, u64, flags) + { + #ifndef CONFIG_X86 + return -ENOENT; + #else + static const u32 br_entry_size = sizeof(struct perf_branch_entry); + struct perf_branch_stack *br_stack = ctx->data->br_stack; + u32 to_copy; + + if (unlikely(flags & ~BPF_F_GET_BRANCH_RECORDS_SIZE)) + return -EINVAL; + + if (unlikely(!br_stack)) + return -EINVAL; + + if (flags & BPF_F_GET_BRANCH_RECORDS_SIZE) + return br_stack->nr * br_entry_size; + + if (!buf || (size % br_entry_size != 0)) + return -EINVAL; + + to_copy = min_t(u32, br_stack->nr * br_entry_size, size); + memcpy(buf, br_stack->entries, to_copy); + + return to_copy; + #endif + } + + static const struct bpf_func_proto bpf_read_branch_records_proto = { + .func = bpf_read_branch_records, + .gpl_only = true, + .ret_type = RET_INTEGER, + .arg1_type = ARG_PTR_TO_CTX, + .arg2_type = ARG_PTR_TO_MEM_OR_NULL, + .arg3_type = ARG_CONST_SIZE_OR_ZERO, + .arg4_type = ARG_ANYTHING, + }; + static const struct bpf_func_proto * pe_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) { @@@ -1040,6 -1077,8 +1077,8 @@@ return &bpf_get_stack_proto_tp; case BPF_FUNC_perf_prog_read_value: return &bpf_perf_prog_read_value_proto; + case BPF_FUNC_read_branch_records: + return &bpf_read_branch_records_proto; default: return tracing_func_proto(func_id, prog); } @@@ -1475,10 -1514,9 +1514,9 @@@ void bpf_put_raw_tracepoint(struct bpf_ static __always_inline void __bpf_trace_run(struct bpf_prog *prog, u64 *args) { + cant_sleep(); rcu_read_lock(); - preempt_disable(); (void) BPF_PROG_RUN(prog, args); - preempt_enable(); rcu_read_unlock(); }
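A BPF-side view of the new helper: query the required size with BPF_F_GET_BRANCH_RECORDS_SIZE, then copy whole records (the size must be a multiple of sizeof(struct perf_branch_entry)). A hedged sketch of a perf_event program; helper availability in the generated helper headers is assumed from this series:

#include <linux/bpf.h>
#include <linux/perf_event.h>
#include <linux/bpf_perf_event.h>
#include <bpf/bpf_helpers.h>

SEC("perf_event")
int dump_branches(struct bpf_perf_event_data *ctx)
{
	struct perf_branch_entry entries[16] = {};
	int sz;

	sz = bpf_read_branch_records(ctx, NULL, 0,
				     BPF_F_GET_BRANCH_RECORDS_SIZE);
	if (sz <= 0)
		return 0;	/* no br_stack, or not x86 */

	/* copy at most 16 records; size is a multiple of the entry size */
	bpf_read_branch_records(ctx, entries, sizeof(entries), 0);
	return 0;
}

char _license[] SEC("license") = "GPL";	/* helper is gpl_only */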
diff --combined net/core/devlink.c index b831c5545d6a,e8ccea9035c8..f51bebc8c33f --- a/net/core/devlink.c +++ b/net/core/devlink.c @@@ -545,6 -545,7 +545,7 @@@ static int devlink_nl_port_attrs_put(st case DEVLINK_PORT_FLAVOUR_PHYSICAL: case DEVLINK_PORT_FLAVOUR_CPU: case DEVLINK_PORT_FLAVOUR_DSA: + case DEVLINK_PORT_FLAVOUR_VIRTUAL: if (nla_put_u32(msg, DEVLINK_ATTR_PORT_NUMBER, attrs->phys.port_number)) return -EMSGSIZE; @@@ -3352,41 -3353,34 +3353,41 @@@ devlink_param_value_get_from_info(cons struct genl_info *info, union devlink_param_value *value) { + struct nlattr *param_data; int len;
- if (param->type != DEVLINK_PARAM_TYPE_BOOL && - !info->attrs[DEVLINK_ATTR_PARAM_VALUE_DATA]) + param_data = info->attrs[DEVLINK_ATTR_PARAM_VALUE_DATA]; + + if (param->type != DEVLINK_PARAM_TYPE_BOOL && !param_data) return -EINVAL;
switch (param->type) { case DEVLINK_PARAM_TYPE_U8: - value->vu8 = nla_get_u8(info->attrs[DEVLINK_ATTR_PARAM_VALUE_DATA]); + if (nla_len(param_data) != sizeof(u8)) + return -EINVAL; + value->vu8 = nla_get_u8(param_data); break; case DEVLINK_PARAM_TYPE_U16: - value->vu16 = nla_get_u16(info->attrs[DEVLINK_ATTR_PARAM_VALUE_DATA]); + if (nla_len(param_data) != sizeof(u16)) + return -EINVAL; + value->vu16 = nla_get_u16(param_data); break; case DEVLINK_PARAM_TYPE_U32: - value->vu32 = nla_get_u32(info->attrs[DEVLINK_ATTR_PARAM_VALUE_DATA]); + if (nla_len(param_data) != sizeof(u32)) + return -EINVAL; + value->vu32 = nla_get_u32(param_data); break; case DEVLINK_PARAM_TYPE_STRING: - len = strnlen(nla_data(info->attrs[DEVLINK_ATTR_PARAM_VALUE_DATA]), - nla_len(info->attrs[DEVLINK_ATTR_PARAM_VALUE_DATA])); - if (len == nla_len(info->attrs[DEVLINK_ATTR_PARAM_VALUE_DATA]) || + len = strnlen(nla_data(param_data), nla_len(param_data)); + if (len == nla_len(param_data) || len >= __DEVLINK_PARAM_MAX_STRING_VALUE) return -EINVAL; - strcpy(value->vstr, - nla_data(info->attrs[DEVLINK_ATTR_PARAM_VALUE_DATA])); + strcpy(value->vstr, nla_data(param_data)); break; case DEVLINK_PARAM_TYPE_BOOL: - value->vbool = info->attrs[DEVLINK_ATTR_PARAM_VALUE_DATA] ? - true : false; + if (param_data && nla_len(param_data)) + return -EINVAL; + value->vbool = nla_get_flag(param_data); break; } return 0; @@@ -4239,11 -4233,17 +4240,17 @@@ struct devlink_fmsg_item int attrtype; u8 nla_type; u16 len; - int value[0]; + int value[]; };
struct devlink_fmsg { struct list_head item_list; + bool putting_binary; /* This flag forces enclosing of binary data + * in array brackets. It forces use of the + * designated API: + * devlink_fmsg_binary_pair_nest_start() + * devlink_fmsg_binary_pair_nest_end() + */ };
static struct devlink_fmsg *devlink_fmsg_alloc(void) @@@ -4287,17 -4287,26 +4294,26 @@@ static int devlink_fmsg_nest_common(str
int devlink_fmsg_obj_nest_start(struct devlink_fmsg *fmsg) { + if (fmsg->putting_binary) + return -EINVAL; + return devlink_fmsg_nest_common(fmsg, DEVLINK_ATTR_FMSG_OBJ_NEST_START); } EXPORT_SYMBOL_GPL(devlink_fmsg_obj_nest_start);
static int devlink_fmsg_nest_end(struct devlink_fmsg *fmsg) { + if (fmsg->putting_binary) + return -EINVAL; + return devlink_fmsg_nest_common(fmsg, DEVLINK_ATTR_FMSG_NEST_END); }
int devlink_fmsg_obj_nest_end(struct devlink_fmsg *fmsg) { + if (fmsg->putting_binary) + return -EINVAL; + return devlink_fmsg_nest_end(fmsg); } EXPORT_SYMBOL_GPL(devlink_fmsg_obj_nest_end); @@@ -4308,6 -4317,9 +4324,9 @@@ static int devlink_fmsg_put_name(struc { struct devlink_fmsg_item *item;
+ if (fmsg->putting_binary) + return -EINVAL; + if (strlen(name) + 1 > DEVLINK_FMSG_MAX_SIZE) return -EMSGSIZE;
@@@ -4328,6 -4340,9 +4347,9 @@@ int devlink_fmsg_pair_nest_start(struc { int err;
+ if (fmsg->putting_binary) + return -EINVAL; + err = devlink_fmsg_nest_common(fmsg, DEVLINK_ATTR_FMSG_PAIR_NEST_START); if (err) return err; @@@ -4342,6 -4357,9 +4364,9 @@@ EXPORT_SYMBOL_GPL(devlink_fmsg_pair_nes
int devlink_fmsg_pair_nest_end(struct devlink_fmsg *fmsg) { + if (fmsg->putting_binary) + return -EINVAL; + return devlink_fmsg_nest_end(fmsg); } EXPORT_SYMBOL_GPL(devlink_fmsg_pair_nest_end); @@@ -4351,6 -4369,9 +4376,9 @@@ int devlink_fmsg_arr_pair_nest_start(st { int err;
+ if (fmsg->putting_binary) + return -EINVAL; + err = devlink_fmsg_pair_nest_start(fmsg, name); if (err) return err; @@@ -4367,6 -4388,9 +4395,9 @@@ int devlink_fmsg_arr_pair_nest_end(stru { int err;
+ if (fmsg->putting_binary) + return -EINVAL; + err = devlink_fmsg_nest_end(fmsg); if (err) return err; @@@ -4379,6 -4403,30 +4410,30 @@@ } EXPORT_SYMBOL_GPL(devlink_fmsg_arr_pair_nest_end);
+ int devlink_fmsg_binary_pair_nest_start(struct devlink_fmsg *fmsg, + const char *name) + { + int err; + + err = devlink_fmsg_arr_pair_nest_start(fmsg, name); + if (err) + return err; + + fmsg->putting_binary = true; + return err; + } + EXPORT_SYMBOL_GPL(devlink_fmsg_binary_pair_nest_start); + + int devlink_fmsg_binary_pair_nest_end(struct devlink_fmsg *fmsg) + { + if (!fmsg->putting_binary) + return -EINVAL; + + fmsg->putting_binary = false; + return devlink_fmsg_arr_pair_nest_end(fmsg); + } + EXPORT_SYMBOL_GPL(devlink_fmsg_binary_pair_nest_end); + static int devlink_fmsg_put_value(struct devlink_fmsg *fmsg, const void *value, u16 value_len, u8 value_nla_type) @@@ -4403,40 -4451,59 +4458,59 @@@
int devlink_fmsg_bool_put(struct devlink_fmsg *fmsg, bool value) { + if (fmsg->putting_binary) + return -EINVAL; + return devlink_fmsg_put_value(fmsg, &value, sizeof(value), NLA_FLAG); } EXPORT_SYMBOL_GPL(devlink_fmsg_bool_put);
int devlink_fmsg_u8_put(struct devlink_fmsg *fmsg, u8 value) { + if (fmsg->putting_binary) + return -EINVAL; + return devlink_fmsg_put_value(fmsg, &value, sizeof(value), NLA_U8); } EXPORT_SYMBOL_GPL(devlink_fmsg_u8_put);
int devlink_fmsg_u32_put(struct devlink_fmsg *fmsg, u32 value) { + if (fmsg->putting_binary) + return -EINVAL; + return devlink_fmsg_put_value(fmsg, &value, sizeof(value), NLA_U32); } EXPORT_SYMBOL_GPL(devlink_fmsg_u32_put);
int devlink_fmsg_u64_put(struct devlink_fmsg *fmsg, u64 value) { + if (fmsg->putting_binary) + return -EINVAL; + return devlink_fmsg_put_value(fmsg, &value, sizeof(value), NLA_U64); } EXPORT_SYMBOL_GPL(devlink_fmsg_u64_put);
int devlink_fmsg_string_put(struct devlink_fmsg *fmsg, const char *value) { + if (fmsg->putting_binary) + return -EINVAL; + return devlink_fmsg_put_value(fmsg, value, strlen(value) + 1, NLA_NUL_STRING); } EXPORT_SYMBOL_GPL(devlink_fmsg_string_put);
- static int devlink_fmsg_binary_put(struct devlink_fmsg *fmsg, const void *value, - u16 value_len) + int devlink_fmsg_binary_put(struct devlink_fmsg *fmsg, const void *value, + u16 value_len) { + if (!fmsg->putting_binary) + return -EINVAL; + return devlink_fmsg_put_value(fmsg, value, value_len, NLA_BINARY); } + EXPORT_SYMBOL_GPL(devlink_fmsg_binary_put);
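The nest-start/put/nest-end triple is the calling convention the putting_binary flag enforces for chunked binary dumps, e.g. from a health reporter's dump callback. A sketch that mirrors devlink_fmsg_binary_pair_put() below; the pair name and chunk size are illustrative:

/* Hedged sketch: stream a blob in chunks inside one binary pair nest. */
static int foo_fmsg_dump_blob(struct devlink_fmsg *fmsg,
			      const void *data, u32 len)
{
	const u8 *p = data;
	u32 off, chunk;
	int err, end_err;

	err = devlink_fmsg_binary_pair_nest_start(fmsg, "blob");
	if (err)
		return err;

	for (off = 0; off < len; off += chunk) {
		chunk = min_t(u32, len - off, 256);	/* chunk size illustrative */
		err = devlink_fmsg_binary_put(fmsg, p + off, chunk);
		if (err)
			break;	/* fall through so putting_binary is cleared */
	}

	end_err = devlink_fmsg_binary_pair_nest_end(fmsg);
	if (end_err)
		err = end_err;

	return err;
}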
int devlink_fmsg_bool_pair_put(struct devlink_fmsg *fmsg, const char *name, bool value) @@@ -4547,10 -4614,11 +4621,11 @@@ int devlink_fmsg_binary_pair_put(struc const void *value, u32 value_len) { u32 data_size; + int end_err; u32 offset; int err;
- err = devlink_fmsg_arr_pair_nest_start(fmsg, name); + err = devlink_fmsg_binary_pair_nest_start(fmsg, name); if (err) return err;
@@@ -4560,14 -4628,18 +4635,18 @@@ data_size = DEVLINK_FMSG_MAX_SIZE; err = devlink_fmsg_binary_put(fmsg, value + offset, data_size); if (err) - return err; + break; + /* Exit from loop with a break (instead of + * return) to make sure putting_binary is turned off in + * devlink_fmsg_binary_pair_nest_end + */ }
- err = devlink_fmsg_arr_pair_nest_end(fmsg); - if (err) - return err; + end_err = devlink_fmsg_binary_pair_nest_end(fmsg); + if (end_err) + err = end_err;
- return 0; + return err; } EXPORT_SYMBOL_GPL(devlink_fmsg_binary_pair_put);
@@@ -5469,6 -5541,9 +5548,9 @@@ static int devlink_trap_metadata_put(st if ((trap->metadata_cap & DEVLINK_TRAP_METADATA_TYPE_F_IN_PORT) && nla_put_flag(msg, DEVLINK_ATTR_TRAP_METADATA_TYPE_IN_PORT)) goto nla_put_failure; + if ((trap->metadata_cap & DEVLINK_TRAP_METADATA_TYPE_F_FA_COOKIE) && + nla_put_flag(msg, DEVLINK_ATTR_TRAP_METADATA_TYPE_FA_COOKIE)) + goto nla_put_failure;
nla_nest_end(msg, attr);
@@@ -5958,8 -6033,6 +6040,8 @@@ static const struct nla_policy devlink_ [DEVLINK_ATTR_PARAM_VALUE_CMODE] = { .type = NLA_U8 }, [DEVLINK_ATTR_REGION_NAME] = { .type = NLA_NUL_STRING }, [DEVLINK_ATTR_REGION_SNAPSHOT_ID] = { .type = NLA_U32 }, + [DEVLINK_ATTR_REGION_CHUNK_ADDR] = { .type = NLA_U64 }, + [DEVLINK_ATTR_REGION_CHUNK_LEN] = { .type = NLA_U64 }, [DEVLINK_ATTR_HEALTH_REPORTER_NAME] = { .type = NLA_NUL_STRING }, [DEVLINK_ATTR_HEALTH_REPORTER_GRACEFUL_PERIOD] = { .type = NLA_U64 }, [DEVLINK_ATTR_HEALTH_REPORTER_AUTO_RECOVER] = { .type = NLA_U8 }, @@@ -6734,6 -6807,7 +6816,7 @@@ static int __devlink_port_phys_port_nam
switch (attrs->flavour) { case DEVLINK_PORT_FLAVOUR_PHYSICAL: + case DEVLINK_PORT_FLAVOUR_VIRTUAL: if (!attrs->split) n = snprintf(name, len, "p%u", attrs->phys.port_number); else @@@ -7734,6 -7808,8 +7817,8 @@@ static const struct devlink_trap devlin DEVLINK_TRAP(NON_ROUTABLE, DROP), DEVLINK_TRAP(DECAP_ERROR, EXCEPTION), DEVLINK_TRAP(OVERLAY_SMAC_MC, DROP), + DEVLINK_TRAP(INGRESS_FLOW_ACTION_DROP, DROP), + DEVLINK_TRAP(EGRESS_FLOW_ACTION_DROP, DROP), };
#define DEVLINK_TRAP_GROUP(_id) \ @@@ -7747,6 -7823,7 +7832,7 @@@ static const struct devlink_trap_group DEVLINK_TRAP_GROUP(L3_DROPS), DEVLINK_TRAP_GROUP(BUFFER_DROPS), DEVLINK_TRAP_GROUP(TUNNEL_DROPS), + DEVLINK_TRAP_GROUP(ACL_DROPS), };
static int devlink_trap_generic_verify(const struct devlink_trap *trap) @@@ -8138,12 -8215,14 +8224,14 @@@ devlink_trap_stats_update(struct devlin static void devlink_trap_report_metadata_fill(struct net_dm_hw_metadata *hw_metadata, const struct devlink_trap_item *trap_item, - struct devlink_port *in_devlink_port) + struct devlink_port *in_devlink_port, + const struct flow_action_cookie *fa_cookie) { struct devlink_trap_group_item *group_item = trap_item->group_item;
hw_metadata->trap_group_name = group_item->group->name; hw_metadata->trap_name = trap_item->trap->name; + hw_metadata->fa_cookie = fa_cookie;
spin_lock(&in_devlink_port->type_lock); if (in_devlink_port->type == DEVLINK_PORT_TYPE_ETH) @@@ -8157,9 -8236,12 +8245,12 @@@ * @skb: Trapped packet. * @trap_ctx: Trap context. * @in_devlink_port: Input devlink port. + * @fa_cookie: Flow action cookie. Could be NULL. */ void devlink_trap_report(struct devlink *devlink, struct sk_buff *skb, - void *trap_ctx, struct devlink_port *in_devlink_port) + void *trap_ctx, struct devlink_port *in_devlink_port, + const struct flow_action_cookie *fa_cookie) + { struct devlink_trap_item *trap_item = trap_ctx; struct net_dm_hw_metadata hw_metadata = {}; @@@ -8168,7 -8250,7 +8259,7 @@@ devlink_trap_stats_update(trap_item->group_item->stats, skb->len);
devlink_trap_report_metadata_fill(&hw_metadata, trap_item, - in_devlink_port); + in_devlink_port, fa_cookie); net_dm_hw_report(skb, &hw_metadata); } EXPORT_SYMBOL_GPL(devlink_trap_report); diff --combined net/core/sock.c index 8f71684305c3,e4af4dbc1c9e..0fc8937a7ff4 --- a/net/core/sock.c +++ b/net/core/sock.c @@@ -1572,13 -1572,14 +1572,14 @@@ static inline void sock_lock_init(struc */ static void sock_copy(struct sock *nsk, const struct sock *osk) { + const struct proto *prot = READ_ONCE(osk->sk_prot); #ifdef CONFIG_SECURITY_NETWORK void *sptr = nsk->sk_security; #endif memcpy(nsk, osk, offsetof(struct sock, sk_dontcopy_begin));
memcpy(&nsk->sk_dontcopy_end, &osk->sk_dontcopy_end, - osk->sk_prot->obj_size - offsetof(struct sock, sk_dontcopy_end)); + prot->obj_size - offsetof(struct sock, sk_dontcopy_end));
#ifdef CONFIG_SECURITY_NETWORK nsk->sk_security = sptr; @@@ -1792,16 -1793,17 +1793,17 @@@ static void sk_init_common(struct sock */ struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority) { + struct proto *prot = READ_ONCE(sk->sk_prot); struct sock *newsk; bool is_charged = true;
- newsk = sk_prot_alloc(sk->sk_prot, priority, sk->sk_family); + newsk = sk_prot_alloc(prot, priority, sk->sk_family); if (newsk != NULL) { struct sk_filter *filter;
sock_copy(newsk, sk);
- newsk->sk_prot_creator = sk->sk_prot; + newsk->sk_prot_creator = prot;
/* SANITY */ if (likely(newsk->sk_net_refcnt)) @@@ -1830,10 -1832,7 +1832,10 @@@ atomic_set(&newsk->sk_zckey, 0);
sock_reset_flag(newsk, SOCK_DONE); - mem_cgroup_sk_alloc(newsk); + + /* sk->sk_memcg will be populated at accept() time */ + newsk->sk_memcg = NULL; + cgroup_sk_alloc(&newsk->sk_cgrp_data);
rcu_read_lock(); @@@ -1866,6 -1865,12 +1868,12 @@@ goto out; }
+ /* Clear sk_user_data if parent had the pointer tagged + * as not suitable for copying when cloning. + */ + if (sk_user_data_is_nocopy(newsk)) + RCU_INIT_POINTER(newsk->sk_user_data, NULL); + newsk->sk_err = 0; newsk->sk_err_soft = 0; newsk->sk_priority = 0; diff --combined net/core/sock_map.c index b70c844a88ec,2e0f465295c3..0b762e43eda9 --- a/net/core/sock_map.c +++ b/net/core/sock_map.c @@@ -10,6 -10,7 +10,7 @@@ #include <linux/skmsg.h> #include <linux/list.h> #include <linux/jhash.h> + #include <linux/sock_diag.h>
struct bpf_stab { struct bpf_map map; @@@ -31,7 -32,8 +32,8 @@@ static struct bpf_map *sock_map_alloc(u return ERR_PTR(-EPERM); if (attr->max_entries == 0 || attr->key_size != 4 || - attr->value_size != 4 || + (attr->value_size != sizeof(u32) && + attr->value_size != sizeof(u64)) || attr->map_flags & ~SOCK_CREATE_FLAG_MASK) return ERR_PTR(-EINVAL);
@@@ -228,16 -230,37 +230,40 @@@ out return ret; }
+ static int sock_map_link_no_progs(struct bpf_map *map, struct sock *sk) + { + struct sk_psock *psock; + int ret; + + psock = sk_psock_get_checked(sk); + if (IS_ERR(psock)) + return PTR_ERR(psock); + + if (psock) { + tcp_bpf_reinit(sk); + return 0; + } + + psock = sk_psock_init(sk, map->numa_node); + if (!psock) + return -ENOMEM; + + ret = tcp_bpf_init(sk); + if (ret < 0) + sk_psock_put(sk, psock); + return ret; + } + static void sock_map_free(struct bpf_map *map) { struct bpf_stab *stab = container_of(map, struct bpf_stab, map); int i;
+ /* After the sync no updates or deletes will be in-flight so it + * is safe to walk the map and remove entries without risking a race + * in the EEXIST update case. + */ synchronize_rcu(); - raw_spin_lock_bh(&stab->lock); for (i = 0; i < stab->map.max_entries; i++) { struct sock **psk = &stab->sks[i]; struct sock *sk; @@@ -251,6 -274,7 +277,6 @@@ release_sock(sk); } } - raw_spin_unlock_bh(&stab->lock);
/* wait for psock readers accessing its map link */ synchronize_rcu(); @@@ -277,7 -301,22 +303,22 @@@ static struct sock *__sock_map_lookup_e
static void *sock_map_lookup(struct bpf_map *map, void *key) { - return ERR_PTR(-EOPNOTSUPP); + return __sock_map_lookup_elem(map, *(u32 *)key); + } + + static void *sock_map_lookup_sys(struct bpf_map *map, void *key) + { + struct sock *sk; + + if (map->value_size != sizeof(u64)) + return ERR_PTR(-ENOSPC); + + sk = __sock_map_lookup_elem(map, *(u32 *)key); + if (!sk) + return ERR_PTR(-ENOENT); + + sock_gen_cookie(sk); + return &sk->sk_cookie; }
static int __sock_map_delete(struct bpf_stab *stab, struct sock *sk_test, @@@ -336,6 -375,11 +377,11 @@@ static int sock_map_get_next_key(struc return 0; }
+ static bool sock_map_redirect_allowed(const struct sock *sk) + { + return sk->sk_state != TCP_LISTEN; + } + static int sock_map_update_common(struct bpf_map *map, u32 idx, struct sock *sk, u64 flags) { @@@ -358,7 -402,14 +404,14 @@@ if (!link) return -ENOMEM;
- ret = sock_map_link(map, &stab->progs, sk); + /* Only sockets we can redirect into/from in BPF need to hold + * refs to parser/verdict progs and have their sk_data_ready + * and sk_write_space callbacks overridden. + */ + if (sock_map_redirect_allowed(sk)) + ret = sock_map_link(map, &stab->progs, sk); + else + ret = sock_map_link_no_progs(map, sk); if (ret < 0) goto out_free;
@@@ -393,7 -444,8 +446,8 @@@ out_free static bool sock_map_op_okay(const struct bpf_sock_ops_kern *ops) { return ops->op == BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB || - ops->op == BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB; + ops->op == BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB || + ops->op == BPF_SOCK_OPS_TCP_LISTEN_CB; }
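Accepting BPF_SOCK_OPS_TCP_LISTEN_CB means a sockops program can now park listening sockets in a sockmap the moment listen() succeeds. A minimal sketch of that hook (map name and slot index are hypothetical):

    #include <linux/bpf.h>
    #include <bpf/bpf_helpers.h>

    struct {
        __uint(type, BPF_MAP_TYPE_SOCKMAP);
        __uint(max_entries, 1);
        __type(key, __u32);
        __type(value, __u64);
    } listen_map SEC(".maps");

    SEC("sockops")
    int add_listener(struct bpf_sock_ops *skops)
    {
        __u32 idx = 0;

        /* Newly allowed by sock_map_op_okay(): react to listen() */
        if (skops->op == BPF_SOCK_OPS_TCP_LISTEN_CB)
            bpf_sock_map_update(skops, &listen_map, &idx, BPF_ANY);
        return 0;
    }

    char _license[] SEC("license") = "GPL";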
static bool sock_map_sk_is_suitable(const struct sock *sk) @@@ -402,14 -454,26 +456,26 @@@ sk->sk_protocol == IPPROTO_TCP; }
+ static bool sock_map_sk_state_allowed(const struct sock *sk) + { + return (1 << sk->sk_state) & (TCPF_ESTABLISHED | TCPF_LISTEN); + } + static int sock_map_update_elem(struct bpf_map *map, void *key, void *value, u64 flags) { - u32 ufd = *(u32 *)value; u32 idx = *(u32 *)key; struct socket *sock; struct sock *sk; int ret; + u64 ufd; + + if (map->value_size == sizeof(u64)) + ufd = *(u64 *)value; + else + ufd = *(u32 *)value; + if (ufd > S32_MAX) + return -EINVAL;
sock = sockfd_lookup(ufd, &ret); if (!sock) @@@ -425,7 -489,7 +491,7 @@@ }
sock_map_sk_acquire(sk); - if (sk->sk_state != TCP_ESTABLISHED) + if (!sock_map_sk_state_allowed(sk)) ret = -EOPNOTSUPP; else ret = sock_map_update_common(map, idx, sk, flags); @@@ -462,13 -526,17 +528,17 @@@ BPF_CALL_4(bpf_sk_redirect_map, struct struct bpf_map *, map, u32, key, u64, flags) { struct tcp_skb_cb *tcb = TCP_SKB_CB(skb); + struct sock *sk;
if (unlikely(flags & ~(BPF_F_INGRESS))) return SK_DROP; - tcb->bpf.flags = flags; - tcb->bpf.sk_redir = __sock_map_lookup_elem(map, key); - if (!tcb->bpf.sk_redir) + + sk = __sock_map_lookup_elem(map, key); + if (unlikely(!sk || !sock_map_redirect_allowed(sk))) return SK_DROP; + + tcb->bpf.flags = flags; + tcb->bpf.sk_redir = sk; return SK_PASS; }
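On the fast path, the lookup result is now screened by sock_map_redirect_allowed(), so a map slot holding a TCP_LISTEN socket yields SK_DROP rather than a redirect into a socket that cannot receive stream data. A minimal stream-verdict program exercising the helper (sketch; map layout hypothetical):

    #include <linux/bpf.h>
    #include <bpf/bpf_helpers.h>

    struct {
        __uint(type, BPF_MAP_TYPE_SOCKMAP);
        __uint(max_entries, 2);
        __type(key, __u32);
        __type(value, __u64);
    } redir_map SEC(".maps");

    SEC("sk_skb/stream_verdict")
    int prog_verdict(struct __sk_buff *skb)
    {
        /* If slot 0 holds a listening socket, the kernel now returns
         * SK_DROP from the helper instead of redirecting into it.
         */
        return bpf_sk_redirect_map(skb, &redir_map, 0, 0);
    }

    char _license[] SEC("license") = "GPL";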
@@@ -485,12 -553,17 +555,17 @@@ const struct bpf_func_proto bpf_sk_redi BPF_CALL_4(bpf_msg_redirect_map, struct sk_msg *, msg, struct bpf_map *, map, u32, key, u64, flags) { + struct sock *sk; + if (unlikely(flags & ~(BPF_F_INGRESS))) return SK_DROP; - msg->flags = flags; - msg->sk_redir = __sock_map_lookup_elem(map, key); - if (!msg->sk_redir) + + sk = __sock_map_lookup_elem(map, key); + if (unlikely(!sk || !sock_map_redirect_allowed(sk))) return SK_DROP; + + msg->flags = flags; + msg->sk_redir = sk; return SK_PASS; }
@@@ -508,6 -581,7 +583,7 @@@ const struct bpf_map_ops sock_map_ops .map_alloc = sock_map_alloc, .map_free = sock_map_free, .map_get_next_key = sock_map_get_next_key, + .map_lookup_elem_sys_only = sock_map_lookup_sys, .map_update_elem = sock_map_update_elem, .map_delete_elem = sock_map_delete_elem, .map_lookup_elem = sock_map_lookup, @@@ -520,7 -594,7 +596,7 @@@ struct bpf_htab_elem u32 hash; struct sock *sk; struct hlist_node node; - u8 key[0]; + u8 key[]; };
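The [0] to [] change here (and in the listeners and seg6_lwt structs further down) is the tree-wide move from GCC zero-length arrays to C99 flexible array members; the allocation arithmetic is unchanged, the compiler just gains the ability to diagnose misuse of the member. The pattern in isolation, as a standalone sketch:

    #include <stdlib.h>
    #include <string.h>

    struct elem {
        unsigned int hash;
        unsigned char key[];   /* flexible array member: adds no size */
    };

    static struct elem *elem_alloc(const void *key, size_t key_size)
    {
        /* sizeof(struct elem) excludes key[], so the trailing storage
         * is requested explicitly -- the same arithmetic as with
         * key[0], but now with compiler-visible semantics.
         */
        struct elem *e = malloc(sizeof(*e) + key_size);

        if (e)
            memcpy(e->key, key, key_size);
        return e;
    }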
struct bpf_htab_bucket { @@@ -682,7 -756,14 +758,14 @@@ static int sock_hash_update_common(stru if (!link) return -ENOMEM;
- ret = sock_map_link(map, &htab->progs, sk); + /* Only sockets we can redirect into/from in BPF need to hold + * refs to parser/verdict progs and have their sk_data_ready + * and sk_write_space callbacks overridden. + */ + if (sock_map_redirect_allowed(sk)) + ret = sock_map_link(map, &htab->progs, sk); + else + ret = sock_map_link_no_progs(map, sk); if (ret < 0) goto out_free;
@@@ -731,10 -812,17 +814,17 @@@ out_free static int sock_hash_update_elem(struct bpf_map *map, void *key, void *value, u64 flags) { - u32 ufd = *(u32 *)value; struct socket *sock; struct sock *sk; int ret; + u64 ufd; + + if (map->value_size == sizeof(u64)) + ufd = *(u64 *)value; + else + ufd = *(u32 *)value; + if (ufd > S32_MAX) + return -EINVAL;
sock = sockfd_lookup(ufd, &ret); if (!sock) @@@ -750,7 -838,7 +840,7 @@@ }
sock_map_sk_acquire(sk); - if (sk->sk_state != TCP_ESTABLISHED) + if (!sock_map_sk_state_allowed(sk)) ret = -EOPNOTSUPP; else ret = sock_hash_update_common(map, key, sk, flags); @@@ -810,7 -898,8 +900,8 @@@ static struct bpf_map *sock_hash_alloc( return ERR_PTR(-EPERM); if (attr->max_entries == 0 || attr->key_size == 0 || - attr->value_size != 4 || + (attr->value_size != sizeof(u32) && + attr->value_size != sizeof(u64)) || attr->map_flags & ~SOCK_CREATE_FLAG_MASK) return ERR_PTR(-EINVAL); if (attr->key_size > MAX_BPF_STACK) @@@ -865,13 -954,10 +956,13 @@@ static void sock_hash_free(struct bpf_m struct hlist_node *node; int i;
+ /* After the sync no updates or deletes will be in-flight so it + * is safe to walk the map and remove entries without risking a race + * in the EEXIST update case. + */ synchronize_rcu(); for (i = 0; i < htab->buckets_num; i++) { bucket = sock_hash_select_bucket(htab, i); - raw_spin_lock_bh(&bucket->lock); hlist_for_each_entry_safe(elem, node, &bucket->head, node) { hlist_del_rcu(&elem->node); lock_sock(elem->sk); @@@ -880,6 -966,7 +971,6 @@@ rcu_read_unlock(); release_sock(elem->sk); } - raw_spin_unlock_bh(&bucket->lock); }
/* wait for psock readers accessing its map link */ @@@ -889,6 -976,26 +980,26 @@@ kfree(htab); }
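Both free paths now share one argument: once the map's refcount hits zero no new update or delete can start, and synchronize_rcu() waits out any still in flight, so the walk that follows needs no stab or bucket lock. The teardown pattern in the abstract (kernel-style sketch; the table/entry types and entry_release() are hypothetical):

    struct entry;
    void entry_release(struct entry *e);   /* hypothetical */

    struct table {
        int nr_slots;
        struct entry *slots[];
    };

    static void table_free(struct table *t)
    {
        int i;

        /* Refcount already zero: no new writer can start, and after
         * synchronize_rcu() none is still running (writers execute
         * under rcu_read_lock()), so no per-slot lock is needed.
         */
        synchronize_rcu();

        for (i = 0; i < t->nr_slots; i++)
            if (t->slots[i])
                entry_release(t->slots[i]);   /* lock-free walk */

        synchronize_rcu();   /* wait for readers of the map links */
        kfree(t);
    }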
+ static void *sock_hash_lookup_sys(struct bpf_map *map, void *key) + { + struct sock *sk; + + if (map->value_size != sizeof(u64)) + return ERR_PTR(-ENOSPC); + + sk = __sock_hash_lookup_elem(map, key); + if (!sk) + return ERR_PTR(-ENOENT); + + sock_gen_cookie(sk); + return &sk->sk_cookie; + } + + static void *sock_hash_lookup(struct bpf_map *map, void *key) + { + return __sock_hash_lookup_elem(map, key); + } + static void sock_hash_release_progs(struct bpf_map *map) { psock_progs_drop(&container_of(map, struct bpf_htab, map)->progs); @@@ -920,13 -1027,17 +1031,17 @@@ BPF_CALL_4(bpf_sk_redirect_hash, struc struct bpf_map *, map, void *, key, u64, flags) { struct tcp_skb_cb *tcb = TCP_SKB_CB(skb); + struct sock *sk;
if (unlikely(flags & ~(BPF_F_INGRESS))) return SK_DROP; - tcb->bpf.flags = flags; - tcb->bpf.sk_redir = __sock_hash_lookup_elem(map, key); - if (!tcb->bpf.sk_redir) + + sk = __sock_hash_lookup_elem(map, key); + if (unlikely(!sk || !sock_map_redirect_allowed(sk))) return SK_DROP; + + tcb->bpf.flags = flags; + tcb->bpf.sk_redir = sk; return SK_PASS; }
@@@ -943,12 -1054,17 +1058,17 @@@ const struct bpf_func_proto bpf_sk_redi BPF_CALL_4(bpf_msg_redirect_hash, struct sk_msg *, msg, struct bpf_map *, map, void *, key, u64, flags) { + struct sock *sk; + if (unlikely(flags & ~(BPF_F_INGRESS))) return SK_DROP; - msg->flags = flags; - msg->sk_redir = __sock_hash_lookup_elem(map, key); - if (!msg->sk_redir) + + sk = __sock_hash_lookup_elem(map, key); + if (unlikely(!sk || !sock_map_redirect_allowed(sk))) return SK_DROP; + + msg->flags = flags; + msg->sk_redir = sk; return SK_PASS; }
@@@ -968,7 -1084,8 +1088,8 @@@ const struct bpf_map_ops sock_hash_ops .map_get_next_key = sock_hash_get_next_key, .map_update_elem = sock_hash_update_elem, .map_delete_elem = sock_hash_delete_elem, - .map_lookup_elem = sock_map_lookup, + .map_lookup_elem = sock_hash_lookup, + .map_lookup_elem_sys_only = sock_hash_lookup_sys, .map_release_uref = sock_hash_release_progs, .map_check_btf = map_check_no_btf, }; diff --combined net/dsa/port.c index ec13dc666788,d4450a454249..e6875d8f944d --- a/net/dsa/port.c +++ b/net/dsa/port.c @@@ -63,7 -63,7 +63,7 @@@ static void dsa_port_set_state_now(stru pr_err("DSA: failed to set STP state %u (%d)\n", state, err); }
-int dsa_port_enable(struct dsa_port *dp, struct phy_device *phy) +int dsa_port_enable_rt(struct dsa_port *dp, struct phy_device *phy) { struct dsa_switch *ds = dp->ds; int port = dp->index; @@@ -78,31 -78,14 +78,31 @@@ if (!dp->bridge_dev) dsa_port_set_state_now(dp, BR_STATE_FORWARDING);
+ if (dp->pl) + phylink_start(dp->pl); + return 0; }
-void dsa_port_disable(struct dsa_port *dp) +int dsa_port_enable(struct dsa_port *dp, struct phy_device *phy) +{ + int err; + + rtnl_lock(); + err = dsa_port_enable_rt(dp, phy); + rtnl_unlock(); + + return err; +} + +void dsa_port_disable_rt(struct dsa_port *dp) { struct dsa_switch *ds = dp->ds; int port = dp->index;
+ if (dp->pl) + phylink_stop(dp->pl); + if (!dp->bridge_dev) dsa_port_set_state_now(dp, BR_STATE_DISABLED);
@@@ -110,13 -93,6 +110,13 @@@ ds->ops->port_disable(ds, port); }
+void dsa_port_disable(struct dsa_port *dp) +{ + rtnl_lock(); + dsa_port_disable_rt(dp); + rtnl_unlock(); +} + int dsa_port_bridge_join(struct dsa_port *dp, struct net_device *br) { struct dsa_notifier_bridge_info info = { @@@ -513,9 -489,11 +513,11 @@@ static void dsa_port_phylink_mac_link_d }
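The enable/disable split follows a common locking idiom: the _rt variants assume the caller already holds RTNL (phylink_start()/phylink_stop() require it), while the original names become thin self-locking wrappers for standalone callers such as the slave open/close paths below. The shape of the idiom, as a sketch with a hypothetical port type:

    /* _rt body: caller holds rtnl_lock() */
    int port_enable_rt(struct port *p)
    {
        /* ... work that must run under RTNL ... */
        return 0;
    }

    /* plain name: takes and drops the lock for outside callers */
    int port_enable(struct port *p)
    {
        int err;

        rtnl_lock();
        err = port_enable_rt(p);
        rtnl_unlock();
        return err;
    }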
static void dsa_port_phylink_mac_link_up(struct phylink_config *config, + struct phy_device *phydev, unsigned int mode, phy_interface_t interface, - struct phy_device *phydev) + int speed, int duplex, + bool tx_pause, bool rx_pause) { struct dsa_port *dp = container_of(config, struct dsa_port, pl_config); struct dsa_switch *ds = dp->ds; @@@ -526,7 -504,8 +528,8 @@@ return; }
- ds->ops->phylink_mac_link_up(ds, dp->index, mode, interface, phydev); + ds->ops->phylink_mac_link_up(ds, dp->index, mode, interface, phydev, + speed, duplex, tx_pause, rx_pause); }
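The widened mac_link_up signature hands drivers the fully resolved link state, so they no longer have to dig speed, duplex, and pause settings back out of phydev. A sketch of what a switch driver's callback now receives (driver name hypothetical; parameter list as in the hunk above):

    /* Sketch of a DSA driver callback under the new signature.  The
     * parameters arrive already resolved by phylink: mode/interface
     * say how the MAC is attached, the rest describe the final link.
     */
    static void mydrv_phylink_mac_link_up(struct dsa_switch *ds, int port,
                                          unsigned int mode,
                                          phy_interface_t interface,
                                          struct phy_device *phydev,
                                          int speed, int duplex,
                                          bool tx_pause, bool rx_pause)
    {
        /* program MAC speed/duplex registers from speed/duplex, enable
         * pause frames per tx_pause/rx_pause, then unmask the port --
         * no phydev poking required.
         */
    }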
const struct phylink_mac_ops dsa_port_phylink_mac_ops = { @@@ -638,6 -617,10 +641,6 @@@ static int dsa_port_phylink_register(st goto err_phy_connect; }
- rtnl_lock(); - phylink_start(dp->pl); - rtnl_unlock(); - return 0;
err_phy_connect: @@@ -648,14 -631,9 +651,14 @@@ int dsa_port_link_register_of(struct dsa_port *dp) { struct dsa_switch *ds = dp->ds; + struct device_node *phy_np;
- if (!ds->ops->adjust_link) - return dsa_port_phylink_register(dp); + if (!ds->ops->adjust_link) { + phy_np = of_parse_phandle(dp->dn, "phy-handle", 0); + if (of_phy_is_fixed_link(dp->dn) || phy_np) + return dsa_port_phylink_register(dp); + return 0; + }
dev_warn(ds->dev, "Using legacy PHYLIB callbacks. Please migrate to PHYLINK!\n"); @@@ -670,12 -648,11 +673,12 @@@ void dsa_port_link_unregister_of(struc { struct dsa_switch *ds = dp->ds;
- if (!ds->ops->adjust_link) { + if (!ds->ops->adjust_link && dp->pl) { rtnl_lock(); phylink_disconnect_phy(dp->pl); rtnl_unlock(); phylink_destroy(dp->pl); + dp->pl = NULL; return; }
diff --combined net/dsa/slave.c index ddc0f9236928,fca9bfa8437e..c5beb3031a72 --- a/net/dsa/slave.c +++ b/net/dsa/slave.c @@@ -88,10 -88,12 +88,10 @@@ static int dsa_slave_open(struct net_de goto clear_allmulti; }
- err = dsa_port_enable(dp, dev->phydev); + err = dsa_port_enable_rt(dp, dev->phydev); if (err) goto clear_promisc;
- phylink_start(dp->pl); - return 0;
clear_promisc: @@@ -112,7 -114,9 +112,7 @@@ static int dsa_slave_close(struct net_d struct net_device *master = dsa_slave_to_master(dev); struct dsa_port *dp = dsa_slave_to_port(dev);
- phylink_stop(dp->pl); - - dsa_port_disable(dp); + dsa_port_disable_rt(dp);
dev_mc_unsync(master, dev); dev_uc_unsync(master, dev); @@@ -861,6 -865,10 +861,10 @@@ static int dsa_slave_add_cls_matchall(s if (!flow_offload_has_one_action(&cls->rule->action)) return err;
+ if (!flow_action_basic_hw_stats_types_check(&cls->rule->action, + cls->common.extack)) + return err; + act = &cls->rule->action.entries[0];
if (act->id == FLOW_ACTION_MIRRED && protocol == htons(ETH_P_ALL)) { @@@ -942,6 -950,64 +946,64 @@@ static int dsa_slave_setup_tc_cls_match } }
+ static int dsa_slave_add_cls_flower(struct net_device *dev, + struct flow_cls_offload *cls, + bool ingress) + { + struct dsa_port *dp = dsa_slave_to_port(dev); + struct dsa_switch *ds = dp->ds; + int port = dp->index; + + if (!ds->ops->cls_flower_add) + return -EOPNOTSUPP; + + return ds->ops->cls_flower_add(ds, port, cls, ingress); + } + + static int dsa_slave_del_cls_flower(struct net_device *dev, + struct flow_cls_offload *cls, + bool ingress) + { + struct dsa_port *dp = dsa_slave_to_port(dev); + struct dsa_switch *ds = dp->ds; + int port = dp->index; + + if (!ds->ops->cls_flower_del) + return -EOPNOTSUPP; + + return ds->ops->cls_flower_del(ds, port, cls, ingress); + } + + static int dsa_slave_stats_cls_flower(struct net_device *dev, + struct flow_cls_offload *cls, + bool ingress) + { + struct dsa_port *dp = dsa_slave_to_port(dev); + struct dsa_switch *ds = dp->ds; + int port = dp->index; + + if (!ds->ops->cls_flower_stats) + return -EOPNOTSUPP; + + return ds->ops->cls_flower_stats(ds, port, cls, ingress); + } + + static int dsa_slave_setup_tc_cls_flower(struct net_device *dev, + struct flow_cls_offload *cls, + bool ingress) + { + switch (cls->command) { + case FLOW_CLS_REPLACE: + return dsa_slave_add_cls_flower(dev, cls, ingress); + case FLOW_CLS_DESTROY: + return dsa_slave_del_cls_flower(dev, cls, ingress); + case FLOW_CLS_STATS: + return dsa_slave_stats_cls_flower(dev, cls, ingress); + default: + return -EOPNOTSUPP; + } + } + static int dsa_slave_setup_tc_block_cb(enum tc_setup_type type, void *type_data, void *cb_priv, bool ingress) { @@@ -953,6 -1019,8 +1015,8 @@@ switch (type) { case TC_SETUP_CLSMATCHALL: return dsa_slave_setup_tc_cls_matchall(dev, type_data, ingress); + case TC_SETUP_CLSFLOWER: + return dsa_slave_setup_tc_cls_flower(dev, type_data, ingress); default: return -EOPNOTSUPP; } diff --combined net/ipv4/inet_connection_sock.c index d545fb99a8a1,3b4f81790e3e..5f34eb951627 --- a/net/ipv4/inet_connection_sock.c +++ b/net/ipv4/inet_connection_sock.c @@@ -131,7 -131,7 +131,7 @@@ static int inet_csk_bind_conflict(cons { struct sock *sk2; bool reuse = sk->sk_reuse; - bool reuseport = !!sk->sk_reuseport && reuseport_ok; + bool reuseport = !!sk->sk_reuseport; kuid_t uid = sock_i_uid((struct sock *)sk);
/* @@@ -146,17 -146,21 +146,21 @@@ (!sk->sk_bound_dev_if || !sk2->sk_bound_dev_if || sk->sk_bound_dev_if == sk2->sk_bound_dev_if)) { - if ((!reuse || !sk2->sk_reuse || - sk2->sk_state == TCP_LISTEN) && - (!reuseport || !sk2->sk_reuseport || - rcu_access_pointer(sk->sk_reuseport_cb) || - (sk2->sk_state != TCP_TIME_WAIT && - !uid_eq(uid, sock_i_uid(sk2))))) { - if (inet_rcv_saddr_equal(sk, sk2, true)) - break; - } - if (!relax && reuse && sk2->sk_reuse && + if (reuse && sk2->sk_reuse && sk2->sk_state != TCP_LISTEN) { + if ((!relax || + (!reuseport_ok && + reuseport && sk2->sk_reuseport && + !rcu_access_pointer(sk->sk_reuseport_cb) && + (sk2->sk_state == TCP_TIME_WAIT || + uid_eq(uid, sock_i_uid(sk2))))) && + inet_rcv_saddr_equal(sk, sk2, true)) + break; + } else if (!reuseport_ok || + !reuseport || !sk2->sk_reuseport || + rcu_access_pointer(sk->sk_reuseport_cb) || + (sk2->sk_state != TCP_TIME_WAIT && + !uid_eq(uid, sock_i_uid(sk2)))) { if (inet_rcv_saddr_equal(sk, sk2, true)) break; } @@@ -176,12 -180,14 +180,14 @@@ inet_csk_find_open_port(struct sock *sk int port = 0; struct inet_bind_hashbucket *head; struct net *net = sock_net(sk); + bool relax = false; int i, low, high, attempt_half; struct inet_bind_bucket *tb; u32 remaining, offset; int l3mdev;
l3mdev = inet_sk_bound_l3mdev(sk); + ports_exhausted: attempt_half = (sk->sk_reuse == SK_CAN_REUSE) ? 1 : 0; other_half_scan: inet_get_local_port_range(net, &low, &high); @@@ -219,7 -225,7 +225,7 @@@ other_parity_scan inet_bind_bucket_for_each(tb, &head->chain) if (net_eq(ib_net(tb), net) && tb->l3mdev == l3mdev && tb->port == port) { - if (!inet_csk_bind_conflict(sk, tb, false, false)) + if (!inet_csk_bind_conflict(sk, tb, relax, false)) goto success; goto next_port; } @@@ -239,6 -245,12 +245,12 @@@ next_port attempt_half = 2; goto other_half_scan; } + + if (net->ipv4.sysctl_ip_autobind_reuse && !relax) { + /* We still have a chance to connect to different destinations */ + relax = true; + goto ports_exhausted; + } return NULL; success: *port_ret = port; @@@ -482,28 -494,8 +494,28 @@@ struct sock *inet_csk_accept(struct soc } spin_unlock_bh(&queue->fastopenq.lock); } + out: release_sock(sk); + if (newsk && mem_cgroup_sockets_enabled) { + int amt; + + /* atomically get the memory usage, set and charge the + * newsk->sk_memcg. + */ + lock_sock(newsk); + + /* The socket has not been accepted yet, no need to look at + * newsk->sk_wmem_queued. + */ + amt = sk_mem_pages(newsk->sk_forward_alloc + + atomic_read(&newsk->sk_rmem_alloc)); + mem_cgroup_sk_alloc(newsk); + if (newsk->sk_memcg && amt) + mem_cgroup_charge_skmem(newsk->sk_memcg, amt); + + release_sock(newsk); + } if (req) reqsk_put(req); return newsk; diff --combined net/ipv4/inet_diag.c index 8c8377568a78,e1cad25909df..5d50aad3cdbf --- a/net/ipv4/inet_diag.c +++ b/net/ipv4/inet_diag.c @@@ -23,6 -23,7 +23,7 @@@ #include <net/inet_hashtables.h> #include <net/inet_timewait_sock.h> #include <net/inet6_hashtables.h> + #include <net/bpf_sk_storage.h> #include <net/netlink.h>
#include <linux/inet.h> @@@ -100,9 -101,13 +101,9 @@@ static size_t inet_sk_attr_size(struct aux = handler->idiag_get_aux_size(sk, net_admin);
return nla_total_size(sizeof(struct tcp_info)) - + nla_total_size(1) /* INET_DIAG_SHUTDOWN */ - + nla_total_size(1) /* INET_DIAG_TOS */ - + nla_total_size(1) /* INET_DIAG_TCLASS */ - + nla_total_size(4) /* INET_DIAG_MARK */ - + nla_total_size(4) /* INET_DIAG_CLASS_ID */ - + nla_total_size(sizeof(struct inet_diag_meminfo)) + nla_total_size(sizeof(struct inet_diag_msg)) + + inet_diag_msg_attrs_size() + + nla_total_size(sizeof(struct inet_diag_meminfo)) + nla_total_size(SK_MEMINFO_VARS * sizeof(u32)) + nla_total_size(TCP_CA_NAME_MAX) + nla_total_size(sizeof(struct tcpvegas_info)) @@@ -143,24 -148,6 +144,24 @@@ int inet_diag_msg_attrs_fill(struct soc if (net_admin && nla_put_u32(skb, INET_DIAG_MARK, sk->sk_mark)) goto errout;
+ if (ext & (1 << (INET_DIAG_CLASS_ID - 1)) || + ext & (1 << (INET_DIAG_TCLASS - 1))) { + u32 classid = 0; + +#ifdef CONFIG_SOCK_CGROUP_DATA + classid = sock_cgroup_classid(&sk->sk_cgrp_data); +#endif + /* Fallback to socket priority if class id isn't set. + * Classful qdiscs use it as direct reference to class. + * For cgroup2 classid is always zero. + */ + if (!classid) + classid = sk->sk_priority; + + if (nla_put_u32(skb, INET_DIAG_CLASS_ID, classid)) + goto errout; + } + r->idiag_uid = from_kuid_munged(user_ns, sock_i_uid(sk)); r->idiag_inode = sock_i_ino(sk);
@@@ -170,26 -157,28 +171,28 @@@ errout } EXPORT_SYMBOL_GPL(inet_diag_msg_attrs_fill);
+ #define MAX_DUMP_ALLOC_SIZE (KMALLOC_MAX_SIZE - SKB_DATA_ALIGN(sizeof(struct skb_shared_info))) + int inet_sk_diag_fill(struct sock *sk, struct inet_connection_sock *icsk, - struct sk_buff *skb, const struct inet_diag_req_v2 *req, - struct user_namespace *user_ns, - u32 portid, u32 seq, u16 nlmsg_flags, - const struct nlmsghdr *unlh, - bool net_admin) + struct sk_buff *skb, struct netlink_callback *cb, + const struct inet_diag_req_v2 *req, + u16 nlmsg_flags, bool net_admin) { const struct tcp_congestion_ops *ca_ops; const struct inet_diag_handler *handler; + struct inet_diag_dump_data *cb_data; int ext = req->idiag_ext; struct inet_diag_msg *r; struct nlmsghdr *nlh; struct nlattr *attr; void *info = NULL;
+ cb_data = cb->data; handler = inet_diag_table[req->sdiag_protocol]; BUG_ON(!handler);
- nlh = nlmsg_put(skb, portid, seq, unlh->nlmsg_type, sizeof(*r), - nlmsg_flags); + nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq, + cb->nlh->nlmsg_type, sizeof(*r), nlmsg_flags); if (!nlh) return -EMSGSIZE;
@@@ -201,7 -190,9 +204,9 @@@ r->idiag_timer = 0; r->idiag_retrans = 0;
- if (inet_diag_msg_attrs_fill(sk, skb, r, ext, user_ns, net_admin)) + if (inet_diag_msg_attrs_fill(sk, skb, r, ext, + sk_user_ns(NETLINK_CB(cb->skb).sk), + net_admin)) goto errout;
if (ext & (1 << (INET_DIAG_MEMINFO - 1))) { @@@ -298,6 -289,66 +303,48 @@@ goto errout; }
- if (ext & (1 << (INET_DIAG_CLASS_ID - 1)) || - ext & (1 << (INET_DIAG_TCLASS - 1))) { - u32 classid = 0; - -#ifdef CONFIG_SOCK_CGROUP_DATA - classid = sock_cgroup_classid(&sk->sk_cgrp_data); -#endif - /* Fallback to socket priority if class id isn't set. - * Classful qdiscs use it as direct reference to class. - * For cgroup2 classid is always zero. - */ - if (!classid) - classid = sk->sk_priority; - - if (nla_put_u32(skb, INET_DIAG_CLASS_ID, classid)) - goto errout; - } - + /* Keep it at the end for potential retry with a larger skb, + * or else do best-effort fitting, which is only done for the + * first_nlmsg. + */ + if (cb_data->bpf_stg_diag) { + bool first_nlmsg = ((unsigned char *)nlh == skb->data); + unsigned int prev_min_dump_alloc; + unsigned int total_nla_size = 0; + unsigned int msg_len; + int err; + + msg_len = skb_tail_pointer(skb) - (unsigned char *)nlh; + err = bpf_sk_storage_diag_put(cb_data->bpf_stg_diag, sk, skb, + INET_DIAG_SK_BPF_STORAGES, + &total_nla_size); + + if (!err) + goto out; + + total_nla_size += msg_len; + prev_min_dump_alloc = cb->min_dump_alloc; + if (total_nla_size > prev_min_dump_alloc) + cb->min_dump_alloc = min_t(u32, total_nla_size, + MAX_DUMP_ALLOC_SIZE); + + if (!first_nlmsg) + goto errout; + + if (cb->min_dump_alloc > prev_min_dump_alloc) + /* Retry with pskb_expand_head() with + * __GFP_DIRECT_RECLAIM + */ + goto errout; + + WARN_ON_ONCE(total_nla_size <= prev_min_dump_alloc); + + /* Send what we have for this sk + * and move on to the next sk in the following + * dump() + */ + } + out: nlmsg_end(skb, nlh); return 0; @@@ -308,30 -359,19 +355,19 @@@ errout } EXPORT_SYMBOL_GPL(inet_sk_diag_fill);
- static int inet_csk_diag_fill(struct sock *sk, - struct sk_buff *skb, - const struct inet_diag_req_v2 *req, - struct user_namespace *user_ns, - u32 portid, u32 seq, u16 nlmsg_flags, - const struct nlmsghdr *unlh, - bool net_admin) - { - return inet_sk_diag_fill(sk, inet_csk(sk), skb, req, user_ns, - portid, seq, nlmsg_flags, unlh, net_admin); - } - static int inet_twsk_diag_fill(struct sock *sk, struct sk_buff *skb, - u32 portid, u32 seq, u16 nlmsg_flags, - const struct nlmsghdr *unlh) + struct netlink_callback *cb, + u16 nlmsg_flags) { struct inet_timewait_sock *tw = inet_twsk(sk); struct inet_diag_msg *r; struct nlmsghdr *nlh; long tmo;
- nlh = nlmsg_put(skb, portid, seq, unlh->nlmsg_type, sizeof(*r), - nlmsg_flags); + nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid, + cb->nlh->nlmsg_seq, cb->nlh->nlmsg_type, + sizeof(*r), nlmsg_flags); if (!nlh) return -EMSGSIZE;
@@@ -355,16 -395,16 +391,16 @@@ }
static int inet_req_diag_fill(struct sock *sk, struct sk_buff *skb, - u32 portid, u32 seq, u16 nlmsg_flags, - const struct nlmsghdr *unlh, bool net_admin) + struct netlink_callback *cb, + u16 nlmsg_flags, bool net_admin) { struct request_sock *reqsk = inet_reqsk(sk); struct inet_diag_msg *r; struct nlmsghdr *nlh; long tmo;
- nlh = nlmsg_put(skb, portid, seq, unlh->nlmsg_type, sizeof(*r), - nlmsg_flags); + nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq, + cb->nlh->nlmsg_type, sizeof(*r), nlmsg_flags); if (!nlh) return -EMSGSIZE;
@@@ -393,21 -433,18 +429,18 @@@ }
static int sk_diag_fill(struct sock *sk, struct sk_buff *skb, + struct netlink_callback *cb, const struct inet_diag_req_v2 *r, - struct user_namespace *user_ns, - u32 portid, u32 seq, u16 nlmsg_flags, - const struct nlmsghdr *unlh, bool net_admin) + u16 nlmsg_flags, bool net_admin) { if (sk->sk_state == TCP_TIME_WAIT) - return inet_twsk_diag_fill(sk, skb, portid, seq, - nlmsg_flags, unlh); + return inet_twsk_diag_fill(sk, skb, cb, nlmsg_flags);
if (sk->sk_state == TCP_NEW_SYN_RECV) - return inet_req_diag_fill(sk, skb, portid, seq, - nlmsg_flags, unlh, net_admin); + return inet_req_diag_fill(sk, skb, cb, nlmsg_flags, net_admin);
- return inet_csk_diag_fill(sk, skb, r, user_ns, portid, seq, - nlmsg_flags, unlh, net_admin); + return inet_sk_diag_fill(sk, inet_csk(sk), skb, cb, r, nlmsg_flags, + net_admin); }
struct sock *inet_diag_find_one_icsk(struct net *net, @@@ -455,10 -492,10 +488,10 @@@ EXPORT_SYMBOL_GPL(inet_diag_find_one_icsk);
int inet_diag_dump_one_icsk(struct inet_hashinfo *hashinfo, - struct sk_buff *in_skb, - const struct nlmsghdr *nlh, + struct netlink_callback *cb, const struct inet_diag_req_v2 *req) { + struct sk_buff *in_skb = cb->skb; bool net_admin = netlink_net_capable(in_skb, CAP_NET_ADMIN); struct net *net = sock_net(in_skb->sk); struct sk_buff *rep; @@@ -475,10 -512,7 +508,7 @@@ goto out; }
- err = sk_diag_fill(sk, rep, req, - sk_user_ns(NETLINK_CB(in_skb).sk), - NETLINK_CB(in_skb).portid, - nlh->nlmsg_seq, 0, nlh, net_admin); + err = sk_diag_fill(sk, rep, cb, req, 0, net_admin); if (err < 0) { WARN_ON(err == -EMSGSIZE); nlmsg_free(rep); @@@ -505,14 -539,21 +535,21 @@@ static int inet_diag_cmd_exact(int cmd int err;
handler = inet_diag_lock_handler(req->sdiag_protocol); - if (IS_ERR(handler)) + if (IS_ERR(handler)) { err = PTR_ERR(handler); - else if (cmd == SOCK_DIAG_BY_FAMILY) - err = handler->dump_one(in_skb, nlh, req); - else if (cmd == SOCK_DESTROY && handler->destroy) + } else if (cmd == SOCK_DIAG_BY_FAMILY) { + struct inet_diag_dump_data empty_dump_data = {}; + struct netlink_callback cb = { + .nlh = nlh, + .skb = in_skb, + .data = &empty_dump_data, + }; + err = handler->dump_one(&cb, req); + } else if (cmd == SOCK_DESTROY && handler->destroy) { err = handler->destroy(in_skb, req); - else + } else { err = -EOPNOTSUPP; + } inet_diag_unlock_handler(handler);
return err; @@@ -843,23 -884,6 +880,6 @@@ static int inet_diag_bc_audit(const str return len == 0 ? 0 : -EINVAL; }
- static int inet_csk_diag_dump(struct sock *sk, - struct sk_buff *skb, - struct netlink_callback *cb, - const struct inet_diag_req_v2 *r, - const struct nlattr *bc, - bool net_admin) - { - if (!inet_diag_bc_sk(bc, sk)) - return 0; - - return inet_csk_diag_fill(sk, skb, r, - sk_user_ns(NETLINK_CB(cb->skb).sk), - NETLINK_CB(cb->skb).portid, - cb->nlh->nlmsg_seq, NLM_F_MULTI, cb->nlh, - net_admin); - } - static void twsk_build_assert(void) { BUILD_BUG_ON(offsetof(struct inet_timewait_sock, tw_family) != @@@ -888,14 -912,17 +908,17 @@@
void inet_diag_dump_icsk(struct inet_hashinfo *hashinfo, struct sk_buff *skb, struct netlink_callback *cb, - const struct inet_diag_req_v2 *r, struct nlattr *bc) + const struct inet_diag_req_v2 *r) { bool net_admin = netlink_net_capable(cb->skb, CAP_NET_ADMIN); + struct inet_diag_dump_data *cb_data = cb->data; struct net *net = sock_net(skb->sk); u32 idiag_states = r->idiag_states; int i, num, s_i, s_num; + struct nlattr *bc; struct sock *sk;
+ bc = cb_data->inet_diag_nla_bc; if (idiag_states & TCPF_SYN_RECV) idiag_states |= TCPF_NEW_SYN_RECV; s_i = cb->args[1]; @@@ -931,8 -958,12 +954,12 @@@ r->id.idiag_sport) goto next_listen;
- if (inet_csk_diag_dump(sk, skb, cb, r, - bc, net_admin) < 0) { + if (!inet_diag_bc_sk(bc, sk)) + goto next_listen; + + if (inet_sk_diag_fill(sk, inet_csk(sk), skb, + cb, r, NLM_F_MULTI, + net_admin) < 0) { spin_unlock(&ilb->lock); goto done; } @@@ -1010,11 -1041,8 +1037,8 @@@ next_normal res = 0; for (idx = 0; idx < accum; idx++) { if (res >= 0) { - res = sk_diag_fill(sk_arr[idx], skb, r, - sk_user_ns(NETLINK_CB(cb->skb).sk), - NETLINK_CB(cb->skb).portid, - cb->nlh->nlmsg_seq, NLM_F_MULTI, - cb->nlh, net_admin); + res = sk_diag_fill(sk_arr[idx], skb, cb, r, + NLM_F_MULTI, net_admin); if (res < 0) num = num_arr[idx]; } @@@ -1038,31 -1066,101 +1062,101 @@@ out EXPORT_SYMBOL_GPL(inet_diag_dump_icsk);
static int __inet_diag_dump(struct sk_buff *skb, struct netlink_callback *cb, - const struct inet_diag_req_v2 *r, - struct nlattr *bc) + const struct inet_diag_req_v2 *r) { const struct inet_diag_handler *handler; + u32 prev_min_dump_alloc; int err = 0;
+ again: + prev_min_dump_alloc = cb->min_dump_alloc; handler = inet_diag_lock_handler(r->sdiag_protocol); if (!IS_ERR(handler)) - handler->dump(skb, cb, r, bc); + handler->dump(skb, cb, r); else err = PTR_ERR(handler); inet_diag_unlock_handler(handler);
+ /* The skb is not large enough to fit one sk info and + * inet_sk_diag_fill() has requested a larger skb. + */ + if (!skb->len && cb->min_dump_alloc > prev_min_dump_alloc) { + err = pskb_expand_head(skb, 0, cb->min_dump_alloc, GFP_KERNEL); + if (!err) + goto again; + } + return err ? : skb->len; }
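The contract between inet_sk_diag_fill() and this loop: when a single record cannot fit even an empty skb, the fill side raises cb->min_dump_alloc instead of emitting a truncated message, and the dump side grows the buffer once and retries. Reduced to its shape as a standalone sketch (stub producer, hypothetical sizes):

    #include <stdlib.h>

    /* Stub producer: "one record needs 128 bytes". */
    static int fill(char *buf, size_t len, size_t *want)
    {
        if (len < 128) {
            *want = 128;     /* ask for a bigger buffer */
            return -1;
        }
        buf[0] = 1;          /* record written */
        return 0;
    }

    static int dump(void)
    {
        size_t len, want = 64;
        int err;

        do {
            char *buf = malloc(len = want);

            if (!buf)
                return -1;
            err = fill(buf, len, &want);
            free(buf);
        } while (err && want > len);   /* request grew: retry once */

        return err;
    }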
static int inet_diag_dump(struct sk_buff *skb, struct netlink_callback *cb) { - int hdrlen = sizeof(struct inet_diag_req_v2); - struct nlattr *bc = NULL; + return __inet_diag_dump(skb, cb, nlmsg_data(cb->nlh)); + }
- if (nlmsg_attrlen(cb->nlh, hdrlen)) - bc = nlmsg_find_attr(cb->nlh, hdrlen, INET_DIAG_REQ_BYTECODE); + static int __inet_diag_dump_start(struct netlink_callback *cb, int hdrlen) + { + const struct nlmsghdr *nlh = cb->nlh; + struct inet_diag_dump_data *cb_data; + struct sk_buff *skb = cb->skb; + struct nlattr *nla; + int rem, err; + + cb_data = kzalloc(sizeof(*cb_data), GFP_KERNEL); + if (!cb_data) + return -ENOMEM; + + nla_for_each_attr(nla, nlmsg_attrdata(nlh, hdrlen), + nlmsg_attrlen(nlh, hdrlen), rem) { + int type = nla_type(nla); + + if (type < __INET_DIAG_REQ_MAX) + cb_data->req_nlas[type] = nla; + } + + nla = cb_data->inet_diag_nla_bc; + if (nla) { + err = inet_diag_bc_audit(nla, skb); + if (err) { + kfree(cb_data); + return err; + } + } + + nla = cb_data->inet_diag_nla_bpf_stgs; + if (nla) { + struct bpf_sk_storage_diag *bpf_stg_diag; + + bpf_stg_diag = bpf_sk_storage_diag_alloc(nla); + if (IS_ERR(bpf_stg_diag)) { + kfree(cb_data); + return PTR_ERR(bpf_stg_diag); + } + cb_data->bpf_stg_diag = bpf_stg_diag; + } + + cb->data = cb_data; + return 0; + } + + static int inet_diag_dump_start(struct netlink_callback *cb) + { + return __inet_diag_dump_start(cb, sizeof(struct inet_diag_req_v2)); + }
- return __inet_diag_dump(skb, cb, nlmsg_data(cb->nlh), bc); + static int inet_diag_dump_start_compat(struct netlink_callback *cb) + { + return __inet_diag_dump_start(cb, sizeof(struct inet_diag_req)); + } + + static int inet_diag_dump_done(struct netlink_callback *cb) + { + struct inet_diag_dump_data *cb_data = cb->data; + + bpf_sk_storage_diag_free(cb_data->bpf_stg_diag); + kfree(cb->data); + + return 0; }
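All of the per-dump request state now lives behind the netlink .start/.dump/.done triple: .start validates and parses the attributes once into kzalloc'ed storage hung off cb->data, every .dump batch reads it from there, and .done frees it, including the optional bpf_sk_storage diag state. The lifecycle in the abstract (sketch; the state type is hypothetical):

    struct state {
        int cursor;   /* parse results, resume position, ... */
    };

    static int my_start(struct netlink_callback *cb)
    {
        struct state *s = kzalloc(sizeof(*s), GFP_KERNEL);

        if (!s)
            return -ENOMEM;
        /* parse/validate request attrs once; failing here aborts the
         * dump before any reply is generated */
        cb->data = s;
        return 0;
    }

    static int my_dump(struct sk_buff *skb, struct netlink_callback *cb)
    {
        struct state *s = cb->data;   /* same object for every batch */

        /* fill skb with records, resuming from s->cursor */
        return skb->len;
    }

    static int my_done(struct netlink_callback *cb)
    {
        kfree(cb->data);
        return 0;
    }

    /* wired up exactly like the hunks below:
     *   struct netlink_dump_control c = {
     *       .start = my_start, .dump = my_dump, .done = my_done,
     *   };
     *   netlink_dump_start(net->diag_nlsk, skb, nlh, &c);
     */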
static int inet_diag_type2proto(int type) @@@ -1081,9 -1179,7 +1175,7 @@@ static int inet_diag_dump_compat(struc struct netlink_callback *cb) { struct inet_diag_req *rc = nlmsg_data(cb->nlh); - int hdrlen = sizeof(struct inet_diag_req); struct inet_diag_req_v2 req; - struct nlattr *bc = NULL;
req.sdiag_family = AF_UNSPEC; /* compatibility */ req.sdiag_protocol = inet_diag_type2proto(cb->nlh->nlmsg_type); @@@ -1091,10 -1187,7 +1183,7 @@@ req.idiag_states = rc->idiag_states; req.id = rc->id;
- if (nlmsg_attrlen(cb->nlh, hdrlen)) - bc = nlmsg_find_attr(cb->nlh, hdrlen, INET_DIAG_REQ_BYTECODE); - - return __inet_diag_dump(skb, cb, &req, bc); + return __inet_diag_dump(skb, cb, &req); }
static int inet_diag_get_exact_compat(struct sk_buff *in_skb, @@@ -1122,22 -1215,12 +1211,12 @@@ static int inet_diag_rcv_msg_compat(str return -EINVAL;
if (nlh->nlmsg_flags & NLM_F_DUMP) { - if (nlmsg_attrlen(nlh, hdrlen)) { - struct nlattr *attr; - int err; - - attr = nlmsg_find_attr(nlh, hdrlen, - INET_DIAG_REQ_BYTECODE); - err = inet_diag_bc_audit(attr, skb); - if (err) - return err; - } - { - struct netlink_dump_control c = { - .dump = inet_diag_dump_compat, - }; - return netlink_dump_start(net->diag_nlsk, skb, nlh, &c); - } + struct netlink_dump_control c = { + .start = inet_diag_dump_start_compat, + .done = inet_diag_dump_done, + .dump = inet_diag_dump_compat, + }; + return netlink_dump_start(net->diag_nlsk, skb, nlh, &c); }
return inet_diag_get_exact_compat(skb, nlh); @@@ -1153,22 -1236,12 +1232,12 @@@ static int inet_diag_handler_cmd(struc
if (h->nlmsg_type == SOCK_DIAG_BY_FAMILY && h->nlmsg_flags & NLM_F_DUMP) { - if (nlmsg_attrlen(h, hdrlen)) { - struct nlattr *attr; - int err; - - attr = nlmsg_find_attr(h, hdrlen, - INET_DIAG_REQ_BYTECODE); - err = inet_diag_bc_audit(attr, skb); - if (err) - return err; - } - { - struct netlink_dump_control c = { - .dump = inet_diag_dump, - }; - return netlink_dump_start(net->diag_nlsk, skb, h, &c); - } + struct netlink_dump_control c = { + .start = inet_diag_dump_start, + .done = inet_diag_dump_done, + .dump = inet_diag_dump, + }; + return netlink_dump_start(net->diag_nlsk, skb, h, &c); }
return inet_diag_cmd_exact(h->nlmsg_type, skb, h, nlmsg_data(h)); diff --combined net/ipv4/raw_diag.c index a93e7d1e1251,d19cce39be1b..1b5b8af27aaf --- a/net/ipv4/raw_diag.c +++ b/net/ipv4/raw_diag.c @@@ -87,32 -87,29 +87,30 @@@ out_unlock return sk ? sk : ERR_PTR(-ENOENT); }
- static int raw_diag_dump_one(struct sk_buff *in_skb, - const struct nlmsghdr *nlh, + static int raw_diag_dump_one(struct netlink_callback *cb, const struct inet_diag_req_v2 *r) { - struct net *net = sock_net(in_skb->sk); + struct sk_buff *in_skb = cb->skb; struct sk_buff *rep; struct sock *sk; + struct net *net; int err;
+ net = sock_net(in_skb->sk); sk = raw_sock_get(net, r); if (IS_ERR(sk)) return PTR_ERR(sk);
- rep = nlmsg_new(sizeof(struct inet_diag_msg) + - sizeof(struct inet_diag_meminfo) + 64, + rep = nlmsg_new(nla_total_size(sizeof(struct inet_diag_msg)) + + inet_diag_msg_attrs_size() + + nla_total_size(sizeof(struct inet_diag_meminfo)) + 64, GFP_KERNEL); if (!rep) { sock_put(sk); return -ENOMEM; }
- err = inet_sk_diag_fill(sk, NULL, rep, r, - sk_user_ns(NETLINK_CB(in_skb).sk), - NETLINK_CB(in_skb).portid, - nlh->nlmsg_seq, 0, nlh, + err = inet_sk_diag_fill(sk, NULL, rep, cb, r, 0, netlink_net_capable(in_skb, CAP_NET_ADMIN)); sock_put(sk);
@@@ -137,25 -134,25 +135,25 @@@ static int sk_diag_dump(struct sock *sk if (!inet_diag_bc_sk(bc, sk)) return 0;
- return inet_sk_diag_fill(sk, NULL, skb, r, - sk_user_ns(NETLINK_CB(cb->skb).sk), - NETLINK_CB(cb->skb).portid, - cb->nlh->nlmsg_seq, NLM_F_MULTI, - cb->nlh, net_admin); + return inet_sk_diag_fill(sk, NULL, skb, cb, r, NLM_F_MULTI, net_admin); }
static void raw_diag_dump(struct sk_buff *skb, struct netlink_callback *cb, - const struct inet_diag_req_v2 *r, struct nlattr *bc) + const struct inet_diag_req_v2 *r) { bool net_admin = netlink_net_capable(cb->skb, CAP_NET_ADMIN); struct raw_hashinfo *hashinfo = raw_get_hashinfo(r); struct net *net = sock_net(skb->sk); + struct inet_diag_dump_data *cb_data; int num, s_num, slot, s_slot; struct sock *sk = NULL; + struct nlattr *bc;
if (IS_ERR(hashinfo)) return;
+ cb_data = cb->data; + bc = cb_data->inet_diag_nla_bc; s_slot = cb->args[0]; num = s_num = cb->args[1];
diff --combined net/ipv4/udp_diag.c index dccd2286bc28,93884696abdd..1dbece34496e --- a/net/ipv4/udp_diag.c +++ b/net/ipv4/udp_diag.c @@@ -21,16 -21,15 +21,15 @@@ static int sk_diag_dump(struct sock *sk if (!inet_diag_bc_sk(bc, sk)) return 0;
- return inet_sk_diag_fill(sk, NULL, skb, req, - sk_user_ns(NETLINK_CB(cb->skb).sk), - NETLINK_CB(cb->skb).portid, - cb->nlh->nlmsg_seq, NLM_F_MULTI, cb->nlh, net_admin); + return inet_sk_diag_fill(sk, NULL, skb, cb, req, NLM_F_MULTI, + net_admin); }
- static int udp_dump_one(struct udp_table *tbl, struct sk_buff *in_skb, - const struct nlmsghdr *nlh, + static int udp_dump_one(struct udp_table *tbl, + struct netlink_callback *cb, const struct inet_diag_req_v2 *req) { + struct sk_buff *in_skb = cb->skb; int err = -EINVAL; struct sock *sk = NULL; struct sk_buff *rep; @@@ -64,18 -63,14 +63,15 @@@ goto out;
err = -ENOMEM; - rep = nlmsg_new(sizeof(struct inet_diag_msg) + - sizeof(struct inet_diag_meminfo) + 64, + rep = nlmsg_new(nla_total_size(sizeof(struct inet_diag_msg)) + + inet_diag_msg_attrs_size() + + nla_total_size(sizeof(struct inet_diag_meminfo)) + 64, GFP_KERNEL); if (!rep) goto out;
- err = inet_sk_diag_fill(sk, NULL, rep, req, - sk_user_ns(NETLINK_CB(in_skb).sk), - NETLINK_CB(in_skb).portid, - nlh->nlmsg_seq, 0, nlh, - netlink_net_capable(in_skb, CAP_NET_ADMIN)); + err = inet_sk_diag_fill(sk, NULL, rep, cb, req, 0, + netlink_net_capable(in_skb, CAP_NET_ADMIN)); if (err < 0) { WARN_ON(err == -EMSGSIZE); kfree_skb(rep); @@@ -94,12 -89,16 +90,16 @@@ out_nosk
static void udp_dump(struct udp_table *table, struct sk_buff *skb, struct netlink_callback *cb, - const struct inet_diag_req_v2 *r, struct nlattr *bc) + const struct inet_diag_req_v2 *r) { bool net_admin = netlink_net_capable(cb->skb, CAP_NET_ADMIN); struct net *net = sock_net(skb->sk); + struct inet_diag_dump_data *cb_data; int num, s_num, slot, s_slot; + struct nlattr *bc;
+ cb_data = cb->data; + bc = cb_data->inet_diag_nla_bc; s_slot = cb->args[0]; num = s_num = cb->args[1];
@@@ -147,15 -146,15 +147,15 @@@ done }
static void udp_diag_dump(struct sk_buff *skb, struct netlink_callback *cb, - const struct inet_diag_req_v2 *r, struct nlattr *bc) + const struct inet_diag_req_v2 *r) { - udp_dump(&udp_table, skb, cb, r, bc); + udp_dump(&udp_table, skb, cb, r); }
- static int udp_diag_dump_one(struct sk_buff *in_skb, const struct nlmsghdr *nlh, + static int udp_diag_dump_one(struct netlink_callback *cb, const struct inet_diag_req_v2 *req) { - return udp_dump_one(&udp_table, in_skb, nlh, req); + return udp_dump_one(&udp_table, cb, req); }
static void udp_diag_get_info(struct sock *sk, struct inet_diag_msg *r, @@@ -250,16 -249,15 +250,15 @@@ static const struct inet_diag_handler u };
static void udplite_diag_dump(struct sk_buff *skb, struct netlink_callback *cb, - const struct inet_diag_req_v2 *r, - struct nlattr *bc) + const struct inet_diag_req_v2 *r) { - udp_dump(&udplite_table, skb, cb, r, bc); + udp_dump(&udplite_table, skb, cb, r); }
- static int udplite_diag_dump_one(struct sk_buff *in_skb, const struct nlmsghdr *nlh, + static int udplite_diag_dump_one(struct netlink_callback *cb, const struct inet_diag_req_v2 *req) { - return udp_dump_one(&udplite_table, in_skb, nlh, req); + return udp_dump_one(&udplite_table, cb, req); }
static const struct inet_diag_handler udplite_diag_handler = { diff --combined net/ipv6/addrconf.c index 46d614b611db,c614249dfb7d..5b9de773ce73 --- a/net/ipv6/addrconf.c +++ b/net/ipv6/addrconf.c @@@ -1226,13 -1226,11 +1226,13 @@@ check_cleanup_prefix_route(struct inet6 }
static void -cleanup_prefix_route(struct inet6_ifaddr *ifp, unsigned long expires, bool del_rt) +cleanup_prefix_route(struct inet6_ifaddr *ifp, unsigned long expires, + bool del_rt, bool del_peer) { struct fib6_info *f6i;
- f6i = addrconf_get_prefix_route(&ifp->addr, ifp->prefix_len, + f6i = addrconf_get_prefix_route(del_peer ? &ifp->peer_addr : &ifp->addr, + ifp->prefix_len, ifp->idev->dev, 0, RTF_DEFAULT, true); if (f6i) { if (del_rt) @@@ -1295,7 -1293,7 +1295,7 @@@ static void ipv6_del_addr(struct inet6_
if (action != CLEANUP_PREFIX_RT_NOP) { cleanup_prefix_route(ifp, expires, - action == CLEANUP_PREFIX_RT_DEL); + action == CLEANUP_PREFIX_RT_DEL, false); }
/* clean up prefsrc entries */ @@@ -3301,7 -3299,7 +3301,7 @@@ static void addrconf_addr_gen(struct in switch (idev->cnf.addr_gen_mode) { case IN6_ADDR_GEN_MODE_RANDOM: ipv6_gen_mode_random_init(idev); - /* fallthrough */ + fallthrough; case IN6_ADDR_GEN_MODE_STABLE_PRIVACY: if (!ipv6_generate_stable_address(&addr, 0, idev)) addrconf_add_linklocal(idev, &addr, @@@ -3347,10 -3345,6 +3347,10 @@@ static void addrconf_dev_config(struct (dev->type != ARPHRD_NONE) && (dev->type != ARPHRD_RAWIP)) { /* Alas, we support only Ethernet autoconfiguration. */ + idev = __in6_dev_get(dev); + if (!IS_ERR_OR_NULL(idev) && dev->flags & IFF_UP && + dev->flags & IFF_MULTICAST) + ipv6_mc_up(idev); return; }
@@@ -3523,9 -3517,7 +3523,7 @@@ static int addrconf_notify(struct notif break;
run_pending = 1; - - /* fall through */ - + fallthrough; case NETDEV_UP: case NETDEV_CHANGE: if (dev->flags & IFF_SLAVE) @@@ -4592,14 -4584,12 +4590,14 @@@ inet6_rtm_deladdr(struct sk_buff *skb, }
static int modify_prefix_route(struct inet6_ifaddr *ifp, - unsigned long expires, u32 flags) + unsigned long expires, u32 flags, + bool modify_peer) { struct fib6_info *f6i; u32 prio;
- f6i = addrconf_get_prefix_route(&ifp->addr, ifp->prefix_len, + f6i = addrconf_get_prefix_route(modify_peer ? &ifp->peer_addr : &ifp->addr, + ifp->prefix_len, ifp->idev->dev, 0, RTF_DEFAULT, true); if (!f6i) return -ENOENT; @@@ -4610,8 -4600,7 +4608,8 @@@ ip6_del_rt(dev_net(ifp->idev->dev), f6i);
/* add new one */ - addrconf_prefix_route(&ifp->addr, ifp->prefix_len, + addrconf_prefix_route(modify_peer ? &ifp->peer_addr : &ifp->addr, + ifp->prefix_len, ifp->rt_priority, ifp->idev->dev, expires, flags, GFP_KERNEL); } else { @@@ -4633,7 -4622,6 +4631,7 @@@ static int inet6_addr_modify(struct ine unsigned long timeout; bool was_managetempaddr; bool had_prefixroute; + bool new_peer = false;
ASSERT_RTNL();
@@@ -4665,13 -4653,6 +4663,13 @@@ cfg->preferred_lft = timeout; }
+ if (cfg->peer_pfx && + memcmp(&ifp->peer_addr, cfg->peer_pfx, sizeof(struct in6_addr))) { + if (!ipv6_addr_any(&ifp->peer_addr)) + cleanup_prefix_route(ifp, expires, true, true); + new_peer = true; + } + spin_lock_bh(&ifp->lock); was_managetempaddr = ifp->flags & IFA_F_MANAGETEMPADDR; had_prefixroute = ifp->flags & IFA_F_PERMANENT && @@@ -4687,9 -4668,6 +4685,9 @@@ if (cfg->rt_priority && cfg->rt_priority != ifp->rt_priority) ifp->rt_priority = cfg->rt_priority;
+ if (new_peer) + ifp->peer_addr = *cfg->peer_pfx; + spin_unlock_bh(&ifp->lock); if (!(ifp->flags&IFA_F_TENTATIVE)) ipv6_ifa_notify(0, ifp); @@@ -4698,7 -4676,7 +4696,7 @@@ int rc = -ENOENT;
if (had_prefixroute) - rc = modify_prefix_route(ifp, expires, flags); + rc = modify_prefix_route(ifp, expires, flags, false);
/* prefix route could have been deleted; if so restore it */ if (rc == -ENOENT) { @@@ -4706,15 -4684,6 +4704,15 @@@ ifp->rt_priority, ifp->idev->dev, expires, flags, GFP_KERNEL); } + + if (had_prefixroute && !ipv6_addr_any(&ifp->peer_addr)) + rc = modify_prefix_route(ifp, expires, flags, true); + + if (rc == -ENOENT && !ipv6_addr_any(&ifp->peer_addr)) { + addrconf_prefix_route(&ifp->peer_addr, ifp->prefix_len, + ifp->rt_priority, ifp->idev->dev, + expires, flags, GFP_KERNEL); + } } else if (had_prefixroute) { enum cleanup_prefix_rt_t action; unsigned long rt_expires; @@@ -4725,7 -4694,7 +4723,7 @@@
if (action != CLEANUP_PREFIX_RT_NOP) { cleanup_prefix_route(ifp, rt_expires, - action == CLEANUP_PREFIX_RT_DEL); + action == CLEANUP_PREFIX_RT_DEL, false); } }
@@@ -6012,9 -5981,9 +6010,9 @@@ static void __ipv6_ifa_notify(int event if (ifp->idev->cnf.forwarding) addrconf_join_anycast(ifp); if (!ipv6_addr_any(&ifp->peer_addr)) - addrconf_prefix_route(&ifp->peer_addr, 128, 0, - ifp->idev->dev, 0, 0, - GFP_ATOMIC); + addrconf_prefix_route(&ifp->peer_addr, 128, + ifp->rt_priority, ifp->idev->dev, + 0, 0, GFP_ATOMIC); break; case RTM_DELADDR: if (ifp->idev->cnf.forwarding) diff --combined net/ipv6/seg6_iptunnel.c index 8c52efe299cc,d8afe7290de8..ac837afb9040 --- a/net/ipv6/seg6_iptunnel.c +++ b/net/ipv6/seg6_iptunnel.c @@@ -29,7 -29,7 +29,7 @@@
struct seg6_lwt { struct dst_cache cache; - struct seg6_iptunnel_encap tuninfo[0]; + struct seg6_iptunnel_encap tuninfo[]; };
static inline struct seg6_lwt *seg6_lwt_lwtunnel(struct lwtunnel_state *lwt) @@@ -268,7 -268,7 +268,7 @@@ static int seg6_do_srh(struct sk_buff * skb_mac_header_rebuild(skb); skb_push(skb, skb->mac_len);
- err = seg6_do_srh_encap(skb, tinfo->srh, NEXTHDR_NONE); + err = seg6_do_srh_encap(skb, tinfo->srh, IPPROTO_ETHERNET); if (err) return err;
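The one-liner matters for decapsulation: NEXTHDR_NONE (59) declares that nothing follows the SRH, so tagging an L2 inner frame with it left receivers nothing to dispatch on, whereas IPPROTO_ETHERNET (143, the IANA protocol number assigned to Ethernet for SRv6 L2 encapsulation) names the payload. A receiver-side classification sketch (constants and helper local to the example):

    #include <netinet/in.h>   /* IPPROTO_IPV6 */

    #ifndef IPPROTO_ETHERNET
    #define IPPROTO_ETHERNET 143   /* IANA: Ethernet (SRv6 L2 encap) */
    #endif

    /* Classify the inner payload of an SRv6 tunnel by the protocol
     * value the encapsulator recorded after the SRH.
     */
    static const char *inner_payload_kind(int proto)
    {
        switch (proto) {
        case IPPROTO_ETHERNET:
            return "ethernet frame (L2 encap)";
        case IPPROTO_IPV6:
            return "ipv6 packet (L3 encap)";
        case 59:   /* NEXTHDR_NONE: the old, undispatchable value */
            return "no payload";
        default:
            return "other";
        }
    }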
diff --combined net/mptcp/options.c index fd2c3150e591,b9a8305bd934..9c71f427e6e3 --- a/net/mptcp/options.c +++ b/net/mptcp/options.c @@@ -304,21 -304,22 +304,22 @@@ static bool mptcp_established_options_m static void mptcp_write_data_fin(struct mptcp_subflow_context *subflow, struct mptcp_ext *ext) { - ext->data_fin = 1; - if (!ext->use_map) { /* RFC6824 requires a DSS mapping with specific values * if DATA_FIN is set but no data payload is mapped */ + ext->data_fin = 1; ext->use_map = 1; ext->dsn64 = 1; - ext->data_seq = mptcp_sk(subflow->conn)->write_seq; + ext->data_seq = subflow->data_fin_tx_seq; ext->subflow_seq = 0; ext->data_len = 1; - } else { - /* If there's an existing DSS mapping, DATA_FIN consumes - * 1 additional byte of mapping space. + } else if (ext->data_seq + ext->data_len == subflow->data_fin_tx_seq) { + /* If there's an existing DSS mapping and it is the + * final mapping, DATA_FIN consumes 1 additional byte of + * mapping space. */ + ext->data_fin = 1; ext->data_len++; } } @@@ -334,8 -335,6 +335,8 @@@ static bool mptcp_established_options_d struct mptcp_sock *msk; unsigned int ack_size; bool ret = false; + bool can_ack; + u64 ack_seq; u8 tcp_fin;
if (skb) { @@@ -356,28 -355,14 +357,27 @@@ if (mpext) opts->ext_copy = *mpext;
- if (skb && tcp_fin && - subflow->conn->sk_state != TCP_ESTABLISHED) + if (skb && tcp_fin && subflow->data_fin_tx_enable) mptcp_write_data_fin(subflow, &opts->ext_copy); ret = true; }
+ /* passive sockets msk will set the 'can_ack' after accept(), even + * if the first subflow may already have the remote key handy + */ + can_ack = true; opts->ext_copy.use_ack = 0; msk = mptcp_sk(subflow->conn); - if (!msk || !READ_ONCE(msk->can_ack)) { + if (likely(msk && READ_ONCE(msk->can_ack))) { + ack_seq = msk->ack_seq; + } else if (subflow->can_ack) { + mptcp_crypto_key_sha(subflow->remote_key, NULL, &ack_seq); + ack_seq++; + } else { + can_ack = false; + } + + if (unlikely(!can_ack)) { *size = ALIGN(dss_size, 4); return ret; } @@@ -390,7 -375,7 +390,7 @@@
dss_size += ack_size;
- opts->ext_copy.data_ack = msk->ack_seq; + opts->ext_copy.data_ack = ack_seq; opts->ext_copy.ack64 = 1; opts->ext_copy.use_ack = 1;
diff --combined net/netlink/af_netlink.c index 5313f1cec170,19df49a6ad15..ed77c75bf63f --- a/net/netlink/af_netlink.c +++ b/net/netlink/af_netlink.c @@@ -71,7 -71,7 +71,7 @@@
struct listeners { struct rcu_head rcu; - unsigned long masks[0]; + unsigned long masks[]; };
/* state bits */ @@@ -2434,7 -2434,7 +2434,7 @@@ void netlink_ack(struct sk_buff *in_skb in_skb->len)) WARN_ON(nla_put_u32(skb, NLMSGERR_ATTR_OFFS, (u8 *)extack->bad_attr - - in_skb->data)); + (u8 *)nlh)); } else { if (extack->cookie_len) WARN_ON(nla_put(skb, NLMSGERR_ATTR_COOKIE, @@@ -2583,6 -2583,7 +2583,7 @@@ static void *__netlink_seq_next(struct }
static void *netlink_seq_start(struct seq_file *seq, loff_t *posp) + __acquires(RCU) { struct nl_seq_iter *iter = seq->private; void *obj = SEQ_START_TOKEN; diff --combined net/sctp/diag.c index 1069d7af3672,69743a6aaf6f..493fc01e5d2b --- a/net/sctp/diag.c +++ b/net/sctp/diag.c @@@ -237,11 -237,15 +237,11 @@@ static size_t inet_assoc_attr_size(stru addrcnt++;
return nla_total_size(sizeof(struct sctp_info)) - + nla_total_size(1) /* INET_DIAG_SHUTDOWN */ - + nla_total_size(1) /* INET_DIAG_TOS */ - + nla_total_size(1) /* INET_DIAG_TCLASS */ - + nla_total_size(4) /* INET_DIAG_MARK */ - + nla_total_size(4) /* INET_DIAG_CLASS_ID */ + nla_total_size(addrlen * asoc->peer.transport_count) + nla_total_size(addrlen * addrcnt) - + nla_total_size(sizeof(struct inet_diag_meminfo)) + nla_total_size(sizeof(struct inet_diag_msg)) + + inet_diag_msg_attrs_size() + + nla_total_size(sizeof(struct inet_diag_meminfo)) + 64; }
@@@ -428,11 -432,12 +428,12 @@@ static void sctp_diag_get_info(struct s sctp_get_sctp_info(sk, infox->asoc, infox->sctpinfo); }
- static int sctp_diag_dump_one(struct sk_buff *in_skb, - const struct nlmsghdr *nlh, + static int sctp_diag_dump_one(struct netlink_callback *cb, const struct inet_diag_req_v2 *req) { + struct sk_buff *in_skb = cb->skb; struct net *net = sock_net(in_skb->sk); + const struct nlmsghdr *nlh = cb->nlh; union sctp_addr laddr, paddr; struct sctp_comm_param commp = { .skb = in_skb, @@@ -466,7 -471,7 +467,7 @@@ }
static void sctp_diag_dump(struct sk_buff *skb, struct netlink_callback *cb, - const struct inet_diag_req_v2 *r, struct nlattr *bc) + const struct inet_diag_req_v2 *r) { u32 idiag_states = r->idiag_states; struct net *net = sock_net(skb->sk); diff --combined net/smc/smc_ib.c index 05b825b3cfa4,9239cf881f21..04b6fefb8bce --- a/net/smc/smc_ib.c +++ b/net/smc/smc_ib.c @@@ -37,11 -37,7 +37,7 @@@ struct smc_ib_devices smc_ib_devices = .list = LIST_HEAD_INIT(smc_ib_devices.list), };
- #define SMC_LOCAL_SYSTEMID_RESET "%%%%%%%" - - u8 local_systemid[SMC_SYSTEMID_LEN] = SMC_LOCAL_SYSTEMID_RESET; /* unique system - * identifier - */ + u8 local_systemid[SMC_SYSTEMID_LEN]; /* unique system identifier */
static int smc_ib_modify_qp_init(struct smc_link *lnk) { @@@ -168,6 -164,15 +164,15 @@@ static inline void smc_ib_define_local_ { memcpy(&local_systemid[2], &smcibdev->mac[ibport - 1], sizeof(smcibdev->mac[ibport - 1])); + } + + bool smc_ib_is_valid_local_systemid(void) + { + return !is_zero_ether_addr(&local_systemid[2]); + } + + static void smc_ib_init_local_systemid(void) + { get_random_bytes(&local_systemid[0], 2); }
@@@ -224,8 -229,7 +229,7 @@@ static int smc_ib_remember_port_attr(st rc = smc_ib_fill_mac(smcibdev, ibport); if (rc) goto out; - if (!strncmp(local_systemid, SMC_LOCAL_SYSTEMID_RESET, - sizeof(local_systemid)) && + if (!smc_ib_is_valid_local_systemid() && smc_ib_port_active(smcibdev, ibport)) /* create unique system identifier */ smc_ib_define_local_systemid(smcibdev, ibport); @@@ -257,6 -261,7 +261,7 @@@ static void smc_ib_global_event_handler struct ib_event *ibevent) { struct smc_ib_device *smcibdev; + bool schedule = false; u8 port_idx;
smcibdev = container_of(handler, struct smc_ib_device, event_handler); @@@ -266,22 -271,35 +271,35 @@@ /* terminate all ports on device */ for (port_idx = 0; port_idx < SMC_MAX_PORTS; port_idx++) { set_bit(port_idx, &smcibdev->port_event_mask); - set_bit(port_idx, smcibdev->ports_going_away); + if (!test_and_set_bit(port_idx, + smcibdev->ports_going_away)) + schedule = true; } - schedule_work(&smcibdev->port_event_work); + if (schedule) + schedule_work(&smcibdev->port_event_work); break; - case IB_EVENT_PORT_ERR: case IB_EVENT_PORT_ACTIVE: - case IB_EVENT_GID_CHANGE: port_idx = ibevent->element.port_num - 1; - if (port_idx < SMC_MAX_PORTS) { - set_bit(port_idx, &smcibdev->port_event_mask); - if (ibevent->event == IB_EVENT_PORT_ERR) - set_bit(port_idx, smcibdev->ports_going_away); - else if (ibevent->event == IB_EVENT_PORT_ACTIVE) - clear_bit(port_idx, smcibdev->ports_going_away); + if (port_idx >= SMC_MAX_PORTS) + break; + set_bit(port_idx, &smcibdev->port_event_mask); + if (test_and_clear_bit(port_idx, smcibdev->ports_going_away)) schedule_work(&smcibdev->port_event_work); - } + break; + case IB_EVENT_PORT_ERR: + port_idx = ibevent->element.port_num - 1; + if (port_idx >= SMC_MAX_PORTS) + break; + set_bit(port_idx, &smcibdev->port_event_mask); + if (!test_and_set_bit(port_idx, smcibdev->ports_going_away)) + schedule_work(&smcibdev->port_event_work); + break; + case IB_EVENT_GID_CHANGE: + port_idx = ibevent->element.port_num - 1; + if (port_idx >= SMC_MAX_PORTS) + break; + set_bit(port_idx, &smcibdev->port_event_mask); + schedule_work(&smcibdev->port_event_work); break; default: break; @@@ -316,11 -334,11 +334,11 @@@ static void smc_ib_qp_event_handler(str case IB_EVENT_QP_FATAL: case IB_EVENT_QP_ACCESS_ERR: port_idx = ibevent->element.qp->port - 1; - if (port_idx < SMC_MAX_PORTS) { - set_bit(port_idx, &smcibdev->port_event_mask); - set_bit(port_idx, smcibdev->ports_going_away); + if (port_idx >= SMC_MAX_PORTS) + break; + set_bit(port_idx, &smcibdev->port_event_mask); + if (!test_and_set_bit(port_idx, smcibdev->ports_going_away)) schedule_work(&smcibdev->port_event_work); - } break; default: break; @@@ -582,7 -600,6 +600,7 @@@ static void smc_ib_remove_dev(struct ib smc_smcr_terminate_all(smcibdev); smc_ib_cleanup_per_ibdev(smcibdev); ib_unregister_event_handler(&smcibdev->event_handler); + cancel_work_sync(&smcibdev->port_event_work); kfree(smcibdev); }
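All three rewritten event cases converge on one idiom: schedule the port worker only on an actual bit transition, so a burst of identical IB events collapses into a single queued work item. The idiom in isolation (fragment, kernel-style, names as in the hunks above):

    /* test_and_set_bit() returns the previous value, so only the
     * 0 -> 1 transition schedules; repeated PORT_ERR events are free.
     */
    if (!test_and_set_bit(port_idx, smcibdev->ports_going_away))
        schedule_work(&smcibdev->port_event_work);

    /* and the complementary edge when the port comes back up: */
    if (test_and_clear_bit(port_idx, smcibdev->ports_going_away))
        schedule_work(&smcibdev->port_event_work);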
@@@ -594,6 -611,7 +612,7 @@@ static struct ib_client smc_ib_client
int __init smc_ib_register_client(void) { + smc_ib_init_local_systemid(); return ib_register_client(&smc_ib_client); }
diff --combined net/wireless/nl80211.c index ec5d67794aab,15000275b32d..750b73a52fd8 --- a/net/wireless/nl80211.c +++ b/net/wireless/nl80211.c @@@ -322,6 -322,29 +322,29 @@@ he_obss_pd_policy[NL80211_HE_OBSS_PD_AT NLA_POLICY_RANGE(NLA_U8, 1, 20), };
+ static const struct nla_policy + he_bss_color_policy[NL80211_HE_BSS_COLOR_ATTR_MAX + 1] = { + [NL80211_HE_BSS_COLOR_ATTR_COLOR] = NLA_POLICY_RANGE(NLA_U8, 1, 63), + [NL80211_HE_BSS_COLOR_ATTR_DISABLED] = { .type = NLA_FLAG }, + [NL80211_HE_BSS_COLOR_ATTR_PARTIAL] = { .type = NLA_FLAG }, + }; + + static const struct nla_policy + nl80211_tid_config_attr_policy[NL80211_TID_CONFIG_ATTR_MAX + 1] = { + [NL80211_TID_CONFIG_ATTR_VIF_SUPP] = { .type = NLA_U64 }, + [NL80211_TID_CONFIG_ATTR_PEER_SUPP] = { .type = NLA_U64 }, + [NL80211_TID_CONFIG_ATTR_OVERRIDE] = { .type = NLA_FLAG }, + [NL80211_TID_CONFIG_ATTR_TIDS] = NLA_POLICY_RANGE(NLA_U16, 1, 0xff), + [NL80211_TID_CONFIG_ATTR_NOACK] = + NLA_POLICY_MAX(NLA_U8, NL80211_TID_CONFIG_DISABLE), + [NL80211_TID_CONFIG_ATTR_RETRY_SHORT] = NLA_POLICY_MIN(NLA_U8, 1), + [NL80211_TID_CONFIG_ATTR_RETRY_LONG] = NLA_POLICY_MIN(NLA_U8, 1), + [NL80211_TID_CONFIG_ATTR_AMPDU_CTRL] = + NLA_POLICY_MAX(NLA_U8, NL80211_TID_CONFIG_DISABLE), + [NL80211_TID_CONFIG_ATTR_RTSCTS_CTRL] = + NLA_POLICY_MAX(NLA_U8, NL80211_TID_CONFIG_DISABLE), + }; + const struct nla_policy nl80211_policy[NUM_NL80211_ATTR] = { [0] = { .strict_start_type = NL80211_ATTR_HE_OBSS_PD }, [NL80211_ATTR_WIPHY] = { .type = NLA_U32 }, @@@ -362,7 -385,7 +385,7 @@@ [NL80211_ATTR_KEY] = { .type = NLA_NESTED, }, [NL80211_ATTR_KEY_DATA] = { .type = NLA_BINARY, .len = WLAN_MAX_KEY_LEN }, - [NL80211_ATTR_KEY_IDX] = NLA_POLICY_MAX(NLA_U8, 5), + [NL80211_ATTR_KEY_IDX] = NLA_POLICY_MAX(NLA_U8, 7), [NL80211_ATTR_KEY_CIPHER] = { .type = NLA_U32 }, [NL80211_ATTR_KEY_DEFAULT] = { .type = NLA_FLAG }, [NL80211_ATTR_KEY_SEQ] = { .type = NLA_BINARY, .len = 16 }, @@@ -470,8 -493,6 +493,8 @@@ [NL80211_ATTR_WOWLAN_TRIGGERS] = { .type = NLA_NESTED }, [NL80211_ATTR_STA_PLINK_STATE] = NLA_POLICY_MAX(NLA_U8, NUM_NL80211_PLINK_STATES - 1), + [NL80211_ATTR_MEASUREMENT_DURATION] = { .type = NLA_U16 }, + [NL80211_ATTR_MEASUREMENT_DURATION_MANDATORY] = { .type = NLA_FLAG }, [NL80211_ATTR_MESH_PEER_AID] = NLA_POLICY_RANGE(NLA_U16, 1, IEEE80211_MAX_AID), [NL80211_ATTR_SCHED_SCAN_INTERVAL] = { .type = NLA_U32 }, @@@ -533,8 -554,6 +556,8 @@@ [NL80211_ATTR_MDID] = { .type = NLA_U16 }, [NL80211_ATTR_IE_RIC] = { .type = NLA_BINARY, .len = IEEE80211_MAX_DATA_LEN }, + [NL80211_ATTR_CRIT_PROT_ID] = { .type = NLA_U16 }, + [NL80211_ATTR_MAX_CRIT_PROT_DURATION] = { .type = NLA_U16 }, [NL80211_ATTR_PEER_AID] = NLA_POLICY_RANGE(NLA_U16, 1, IEEE80211_MAX_AID), [NL80211_ATTR_CH_SWITCH_COUNT] = { .type = NLA_U32 }, @@@ -565,7 -584,6 +588,7 @@@ NLA_POLICY_MAX(NLA_U8, IEEE80211_NUM_UPS - 1), [NL80211_ATTR_ADMITTED_TIME] = { .type = NLA_U16 }, [NL80211_ATTR_SMPS_MODE] = { .type = NLA_U8 }, + [NL80211_ATTR_OPER_CLASS] = { .type = NLA_U8 }, [NL80211_ATTR_MAC_MASK] = { .type = NLA_EXACT_LEN_WARN, .len = ETH_ALEN @@@ -632,6 -650,9 +655,9 @@@ [NL80211_ATTR_TWT_RESPONDER] = { .type = NLA_FLAG }, [NL80211_ATTR_HE_OBSS_PD] = NLA_POLICY_NESTED(he_obss_pd_policy), [NL80211_ATTR_VLAN_ID] = NLA_POLICY_RANGE(NLA_U16, 1, VLAN_N_VID - 2), + [NL80211_ATTR_HE_BSS_COLOR] = NLA_POLICY_NESTED(he_bss_color_policy), + [NL80211_ATTR_TID_CONFIG] = + NLA_POLICY_NESTED_ARRAY(nl80211_tid_config_attr_policy), };
/* policy for the key attributes */ @@@ -971,6 -992,9 +997,9 @@@ static int nl80211_msg_put_channel(stru if ((chan->flags & IEEE80211_CHAN_NO_10MHZ) && nla_put_flag(msg, NL80211_FREQUENCY_ATTR_NO_10MHZ)) goto nla_put_failure; + if ((chan->flags & IEEE80211_CHAN_NO_HE) && + nla_put_flag(msg, NL80211_FREQUENCY_ATTR_NO_HE)) + goto nla_put_failure; }
if (nla_put_u32(msg, NL80211_FREQUENCY_ATTR_MAX_TX_POWER, @@@ -1032,7 -1056,7 +1061,7 @@@ struct key_parse struct key_params p; int idx; int type; - bool def, defmgmt; + bool def, defmgmt, defbeacon; bool def_uni, def_multi; };
@@@ -1048,12 -1072,13 +1077,13 @@@ static int nl80211_parse_key_new(struc
k->def = !!tb[NL80211_KEY_DEFAULT]; k->defmgmt = !!tb[NL80211_KEY_DEFAULT_MGMT]; + k->defbeacon = !!tb[NL80211_KEY_DEFAULT_BEACON];
if (k->def) { k->def_uni = true; k->def_multi = true; } - if (k->defmgmt) + if (k->defmgmt || k->defbeacon) k->def_multi = true;
if (tb[NL80211_KEY_IDX]) @@@ -1160,14 -1185,17 +1190,17 @@@ static int nl80211_parse_key(struct gen if (err) return err;
- if (k->def && k->defmgmt) { - GENL_SET_ERR_MSG(info, "key with def && defmgmt is invalid"); + if ((k->def ? 1 : 0) + (k->defmgmt ? 1 : 0) + + (k->defbeacon ? 1 : 0) > 1) { + GENL_SET_ERR_MSG(info, + "key with multiple default flags is invalid"); return -EINVAL; }
- if (k->defmgmt) { + if (k->defmgmt || k->defbeacon) { if (k->def_uni || !k->def_multi) { - GENL_SET_ERR_MSG(info, "defmgmt key must be mcast"); + GENL_SET_ERR_MSG(info, + "defmgmt/defbeacon key must be mcast"); return -EINVAL; } } @@@ -1179,14 -1207,20 +1212,20 @@@ "defmgmt key idx not 4 or 5"); return -EINVAL; } + } else if (k->defbeacon) { + if (k->idx < 6 || k->idx > 7) { + GENL_SET_ERR_MSG(info, + "defbeacon key idx not 6 or 7"); + return -EINVAL; + } } else if (k->def) { if (k->idx < 0 || k->idx > 3) { GENL_SET_ERR_MSG(info, "def key idx not 0-3"); return -EINVAL; } } else { - if (k->idx < 0 || k->idx > 5) { - GENL_SET_ERR_MSG(info, "key idx not 0-5"); + if (k->idx < 0 || k->idx > 7) { + GENL_SET_ERR_MSG(info, "key idx not 0-7"); return -EINVAL; } } @@@ -1892,6 -1926,88 +1931,88 @@@ static int nl80211_send_pmsr_capa(struc return 0; }
+ static int + nl80211_put_iftype_akm_suites(struct cfg80211_registered_device *rdev, + struct sk_buff *msg) + { + int i; + struct nlattr *nested, *nested_akms; + const struct wiphy_iftype_akm_suites *iftype_akms; + + if (!rdev->wiphy.num_iftype_akm_suites || + !rdev->wiphy.iftype_akm_suites) + return 0; + + nested = nla_nest_start(msg, NL80211_ATTR_IFTYPE_AKM_SUITES); + if (!nested) + return -ENOBUFS; + + for (i = 0; i < rdev->wiphy.num_iftype_akm_suites; i++) { + nested_akms = nla_nest_start(msg, i + 1); + if (!nested_akms) + return -ENOBUFS; + + iftype_akms = &rdev->wiphy.iftype_akm_suites[i]; + + if (nl80211_put_iftypes(msg, NL80211_IFTYPE_AKM_ATTR_IFTYPES, + iftype_akms->iftypes_mask)) + return -ENOBUFS; + + if (nla_put(msg, NL80211_IFTYPE_AKM_ATTR_SUITES, + sizeof(u32) * iftype_akms->n_akm_suites, + iftype_akms->akm_suites)) { + return -ENOBUFS; + } + nla_nest_end(msg, nested_akms); + } + + nla_nest_end(msg, nested); + + return 0; + } + + static int + nl80211_put_tid_config_support(struct cfg80211_registered_device *rdev, + struct sk_buff *msg) + { + struct nlattr *supp; + + if (!rdev->wiphy.tid_config_support.vif && + !rdev->wiphy.tid_config_support.peer) + return 0; + + supp = nla_nest_start(msg, NL80211_ATTR_TID_CONFIG); + if (!supp) + return -ENOSPC; + + if (rdev->wiphy.tid_config_support.vif && + nla_put_u64_64bit(msg, NL80211_TID_CONFIG_ATTR_VIF_SUPP, + rdev->wiphy.tid_config_support.vif, + NL80211_TID_CONFIG_ATTR_PAD)) + goto fail; + + if (rdev->wiphy.tid_config_support.peer && + nla_put_u64_64bit(msg, NL80211_TID_CONFIG_ATTR_PEER_SUPP, + rdev->wiphy.tid_config_support.peer, + NL80211_TID_CONFIG_ATTR_PAD)) + goto fail; + + /* for now we just use the same value ... makes more sense */ + if (nla_put_u8(msg, NL80211_TID_CONFIG_ATTR_RETRY_SHORT, + rdev->wiphy.tid_config_support.max_retry)) + goto fail; + if (nla_put_u8(msg, NL80211_TID_CONFIG_ATTR_RETRY_LONG, + rdev->wiphy.tid_config_support.max_retry)) + goto fail; + + nla_nest_end(msg, supp); + + return 0; + fail: + nla_nest_cancel(msg, supp); + return -ENOBUFS; + } + struct nl80211_dump_wiphy_state { s64 filter_wiphy; long start; @@@ -2450,6 -2566,12 +2571,12 @@@ static int nl80211_send_wiphy(struct cf rdev->wiphy.akm_suites)) goto nla_put_failure;
+		if (nl80211_put_iftype_akm_suites(rdev, msg))
+			goto nla_put_failure;
+
+		if (nl80211_put_tid_config_support(rdev, msg))
+			goto nla_put_failure;
+
 		/* done */
 		state->split_start = 0;
 		break;
@@@ -3481,7 -3603,7 +3608,7 @@@ static int nl80211_valid_4addr(struct c
 			  enum nl80211_iftype iftype)
 {
 	if (!use_4addr) {
-		if (netdev && (netdev->priv_flags & IFF_BRIDGE_PORT))
+		if (netdev && netif_is_bridge_port(netdev))
 			return -EBUSY;
 		return 0;
 	}
@@@ -3769,8 -3891,14 +3896,14 @@@ static int nl80211_get_key(struct sk_bu
 	void *hdr;
 	struct sk_buff *msg;
-	if (info->attrs[NL80211_ATTR_KEY_IDX])
+	if (info->attrs[NL80211_ATTR_KEY_IDX]) {
 		key_idx = nla_get_u8(info->attrs[NL80211_ATTR_KEY_IDX]);
+		if (key_idx > 5 &&
+		    !wiphy_ext_feature_isset(
+			    &rdev->wiphy,
+			    NL80211_EXT_FEATURE_BEACON_PROTECTION))
+			return -EINVAL;
+	}
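
[The key-index widening in nl80211_get_key() is gated on a new extended feature flag, so a driver that actually implements beacon protection would be expected to advertise it at registration time; sketch:

	/* during driver/wiphy setup */
	wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_BEACON_PROTECTION);
]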
 	if (info->attrs[NL80211_ATTR_MAC])
 		mac_addr = nla_data(info->attrs[NL80211_ATTR_MAC]);
@@@ -3846,7 -3974,7 +3979,7 @@@ static int nl80211_set_key(struct sk_bu
 	/* Only support setting default key and
 	 * Extended Key ID action NL80211_KEY_SET_TX.
 	 */
-	if (!key.def && !key.defmgmt &&
+	if (!key.def && !key.defmgmt && !key.defbeacon &&
 	    !(key.p.mode == NL80211_KEY_SET_TX))
 		return -EINVAL;
@@@ -3893,6 -4021,24 +4026,24 @@@
 #ifdef CONFIG_CFG80211_WEXT
 		dev->ieee80211_ptr->wext.default_mgmt_key = key.idx;
 #endif
+	} else if (key.defbeacon) {
+		if (key.def_uni || !key.def_multi) {
+			err = -EINVAL;
+			goto out;
+		}
+
+		if (!rdev->ops->set_default_beacon_key) {
+			err = -EOPNOTSUPP;
+			goto out;
+		}
+
+		err = nl80211_key_allowed(dev->ieee80211_ptr);
+		if (err)
+			goto out;
+
+		err = rdev_set_default_beacon_key(rdev, dev, key.idx);
+		if (err)
+			goto out;
 	} else if (key.p.mode == NL80211_KEY_SET_TX &&
 		   wiphy_ext_feature_isset(&rdev->wiphy,
 					   NL80211_EXT_FEATURE_EXT_KEY_ID)) {
@@@ -3930,8 -4076,10 +4081,10 @@@ static int nl80211_new_key(struct sk_bu
 	if (err)
 		return err;
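
[The defbeacon branch above ends in rdev_set_default_beacon_key(), i.e. a new cfg80211 op. A minimal driver-side stub, assuming the (wiphy, netdev, key_index) signature used around this series; the my_* names and the empty body are illustrative:

static int my_set_default_beacon_key(struct wiphy *wiphy,
				     struct net_device *dev, u8 key_index)
{
	/* program key_index (6 or 7) as the BIGTK protecting our beacons */
	return 0;
}

static const struct cfg80211_ops my_cfg80211_ops = {
	/* ... */
	.set_default_beacon_key = my_set_default_beacon_key,
};
]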
-	if (!key.p.key)
+	if (!key.p.key) {
+		GENL_SET_ERR_MSG(info, "no key");
 		return -EINVAL;
+	}
 	if (info->attrs[NL80211_ATTR_MAC])
 		mac_addr = nla_data(info->attrs[NL80211_ATTR_MAC]);
@@@ -3945,8 -4093,10 +4098,10 @@@
 	/* for now */
 	if (key.type != NL80211_KEYTYPE_PAIRWISE &&
-	    key.type != NL80211_KEYTYPE_GROUP)
+	    key.type != NL80211_KEYTYPE_GROUP) {
+		GENL_SET_ERR_MSG(info, "key type not pairwise or group");
 		return -EINVAL;
+	}
 	if (key.type == NL80211_KEYTYPE_GROUP &&
 	    info->attrs[NL80211_ATTR_VLAN_ID])
@@@ -3957,15 -4107,22 +4112,22 @@@
 	if (cfg80211_validate_key_settings(rdev, &key.p, key.idx,
 					   key.type == NL80211_KEYTYPE_PAIRWISE,
-					   mac_addr))
+					   mac_addr)) {
+		GENL_SET_ERR_MSG(info, "key setting validation failed");
 		return -EINVAL;
+	}
 	wdev_lock(dev->ieee80211_ptr);
 	err = nl80211_key_allowed(dev->ieee80211_ptr);
-	if (!err)
+	if (err)
+		GENL_SET_ERR_MSG(info, "key not allowed");
+	if (!err) {
 		err = rdev_add_key(rdev, dev, key.idx,
 				   key.type == NL80211_KEYTYPE_PAIRWISE,
 				   mac_addr, &key.p);
+		if (err)
+			GENL_SET_ERR_MSG(info, "key addition failed");
+	}
 	wdev_unlock(dev->ieee80211_ptr);
 	return err;
@@@ -4518,6 -4675,30 +4680,30 @@@ static int nl80211_parse_he_obss_pd(str
 	return 0;
 }
+static int nl80211_parse_he_bss_color(struct nlattr *attrs,
+				      struct cfg80211_he_bss_color *he_bss_color)
+{
+	struct nlattr *tb[NL80211_HE_BSS_COLOR_ATTR_MAX + 1];
+	int err;
+
+	err = nla_parse_nested(tb, NL80211_HE_BSS_COLOR_ATTR_MAX, attrs,
+			       he_bss_color_policy, NULL);
+	if (err)
+		return err;
+
+	if (!tb[NL80211_HE_BSS_COLOR_ATTR_COLOR])
+		return -EINVAL;
+
+	he_bss_color->color =
+		nla_get_u8(tb[NL80211_HE_BSS_COLOR_ATTR_COLOR]);
+	he_bss_color->disabled =
+		nla_get_flag(tb[NL80211_HE_BSS_COLOR_ATTR_DISABLED]);
+	he_bss_color->partial =
+		nla_get_flag(tb[NL80211_HE_BSS_COLOR_ATTR_PARTIAL]);
+
+	return 0;
+}
+
 static void nl80211_check_ap_rate_selectors(struct cfg80211_ap_settings *params,
 					    const u8 *rates)
 {
@@@ -4809,6 -4990,14 +4995,14 @@@ static int nl80211_start_ap(struct sk_b
 		goto out;
 	}
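
[nl80211_parse_he_bss_color() fills a three-field struct (color, disabled, partial) that the next hunk attaches to the AP settings. On the driver side, ->start_ap() might consume the parsed values along these lines -- a sketch only; my_hw_set_bss_color() is a made-up hardware hook:

static int my_start_ap(struct wiphy *wiphy, struct net_device *dev,
		       struct cfg80211_ap_settings *params)
{
	const struct cfg80211_he_bss_color *c = &params->he_bss_color;

	/* only program the color if userspace didn't disable it */
	if (!c->disabled)
		my_hw_set_bss_color(dev, c->color, c->partial);
	return 0;
}
]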
+	if (info->attrs[NL80211_ATTR_HE_BSS_COLOR]) {
+		err = nl80211_parse_he_bss_color(
+			info->attrs[NL80211_ATTR_HE_BSS_COLOR],
+			&params.he_bss_color);
+		if (err)
+			return err;
+	}
+
 	nl80211_calculate_ap_params(&params);
 	if (info->attrs[NL80211_ATTR_EXTERNAL_AUTH_SUPPORT])
@@@ -10545,8 -10734,9 +10739,9 @@@ static int nl80211_register_mgmt(struc
 		return -EOPNOTSUPP;
 	return cfg80211_mlme_register_mgmt(wdev, info->snd_portid, frame_type,
-			nla_data(info->attrs[NL80211_ATTR_FRAME_MATCH]),
-			nla_len(info->attrs[NL80211_ATTR_FRAME_MATCH]));
+					   nla_data(info->attrs[NL80211_ATTR_FRAME_MATCH]),
+					   nla_len(info->attrs[NL80211_ATTR_FRAME_MATCH]),
+					   info->extack);
 }
 static int nl80211_tx_mgmt(struct sk_buff *skb, struct genl_info *info)
@@@ -13808,6 -13998,141 +14003,141 @@@ static int nl80211_probe_mesh_link(stru
 	return rdev_probe_mesh_link(rdev, dev, dest, buf, len);
 }
+static int parse_tid_conf(struct cfg80211_registered_device *rdev,
+			  struct nlattr *attrs[], struct net_device *dev,
+			  struct cfg80211_tid_cfg *tid_conf,
+			  struct genl_info *info, const u8 *peer)
+{
+	struct netlink_ext_ack *extack = info->extack;
+	u64 mask;
+	int err;
+
+	if (!attrs[NL80211_TID_CONFIG_ATTR_TIDS])
+		return -EINVAL;
+
+	tid_conf->config_override =
+		nla_get_flag(attrs[NL80211_TID_CONFIG_ATTR_OVERRIDE]);
+	tid_conf->tids = nla_get_u16(attrs[NL80211_TID_CONFIG_ATTR_TIDS]);
+
+	if (tid_conf->config_override) {
+		if (rdev->ops->reset_tid_config) {
+			err = rdev_reset_tid_config(rdev, dev, peer,
+						    tid_conf->tids);
+			/* If peer is there no other configuration will be
+			 * allowed
+			 */
+			if (err || peer)
+				return err;
+		} else {
+			return -EINVAL;
+		}
+	}
+
+	if (attrs[NL80211_TID_CONFIG_ATTR_NOACK]) {
+		tid_conf->mask |= BIT(NL80211_TID_CONFIG_ATTR_NOACK);
+		tid_conf->noack =
+			nla_get_u8(attrs[NL80211_TID_CONFIG_ATTR_NOACK]);
+	}
+
+	if (attrs[NL80211_TID_CONFIG_ATTR_RETRY_SHORT]) {
+		tid_conf->mask |= BIT(NL80211_TID_CONFIG_ATTR_RETRY_SHORT);
+		tid_conf->retry_short =
+			nla_get_u8(attrs[NL80211_TID_CONFIG_ATTR_RETRY_SHORT]);
+
+		if (tid_conf->retry_short > rdev->wiphy.max_data_retry_count)
+			return -EINVAL;
+	}
+
+	if (attrs[NL80211_TID_CONFIG_ATTR_RETRY_LONG]) {
+		tid_conf->mask |= BIT(NL80211_TID_CONFIG_ATTR_RETRY_LONG);
+		tid_conf->retry_long =
+			nla_get_u8(attrs[NL80211_TID_CONFIG_ATTR_RETRY_LONG]);
+
+		if (tid_conf->retry_long > rdev->wiphy.max_data_retry_count)
+			return -EINVAL;
+	}
+
+	if (attrs[NL80211_TID_CONFIG_ATTR_AMPDU_CTRL]) {
+		tid_conf->mask |= BIT(NL80211_TID_CONFIG_ATTR_AMPDU_CTRL);
+		tid_conf->ampdu =
+			nla_get_u8(attrs[NL80211_TID_CONFIG_ATTR_AMPDU_CTRL]);
+	}
+
+	if (attrs[NL80211_TID_CONFIG_ATTR_RTSCTS_CTRL]) {
+		tid_conf->mask |= BIT(NL80211_TID_CONFIG_ATTR_RTSCTS_CTRL);
+		tid_conf->rtscts =
+			nla_get_u8(attrs[NL80211_TID_CONFIG_ATTR_RTSCTS_CTRL]);
+	}
+
+	if (peer)
+		mask = rdev->wiphy.tid_config_support.peer;
+	else
+		mask = rdev->wiphy.tid_config_support.vif;
+
+	if (tid_conf->mask & ~mask) {
+		NL_SET_ERR_MSG(extack, "unsupported TID configuration");
+		return -ENOTSUPP;
+	}
+
+	return 0;
+}
+
+static int nl80211_set_tid_config(struct sk_buff *skb,
+				  struct genl_info *info)
+{
+	struct cfg80211_registered_device *rdev = info->user_ptr[0];
+	struct nlattr *attrs[NL80211_TID_CONFIG_ATTR_MAX + 1];
+	struct net_device *dev = info->user_ptr[1];
+	struct cfg80211_tid_config *tid_config;
+	struct nlattr *tid;
+	int conf_idx = 0, rem_conf;
+	int ret = -EINVAL;
+	u32 num_conf = 0;
+
+	if (!info->attrs[NL80211_ATTR_TID_CONFIG])
+		return -EINVAL;
+
+	if (!rdev->ops->set_tid_config)
+		return -EOPNOTSUPP;
+
+	nla_for_each_nested(tid, info->attrs[NL80211_ATTR_TID_CONFIG],
+			    rem_conf)
+		num_conf++;
+
+	tid_config = kzalloc(struct_size(tid_config, tid_conf, num_conf),
+			     GFP_KERNEL);
+	if (!tid_config)
+		return -ENOMEM;
+
+	tid_config->n_tid_conf = num_conf;
+
+	if (info->attrs[NL80211_ATTR_MAC])
+		tid_config->peer = nla_data(info->attrs[NL80211_ATTR_MAC]);
+
+	nla_for_each_nested(tid, info->attrs[NL80211_ATTR_TID_CONFIG],
+			    rem_conf) {
+		ret = nla_parse_nested(attrs, NL80211_TID_CONFIG_ATTR_MAX,
+				       tid, NULL, NULL);
+
+		if (ret)
+			goto bad_tid_conf;
+
+		ret = parse_tid_conf(rdev, attrs, dev,
+				     &tid_config->tid_conf[conf_idx],
+				     info, tid_config->peer);
+		if (ret)
+			goto bad_tid_conf;
+
+		conf_idx++;
+	}
+
+	ret = rdev_set_tid_config(rdev, dev, tid_config);
+
+bad_tid_conf:
+	kfree(tid_config);
+	return ret;
+}
+
 #define NL80211_FLAG_NEED_WIPHY		0x01
 #define NL80211_FLAG_NEED_NETDEV	0x02
 #define NL80211_FLAG_NEED_RTNL		0x04
@@@ -14762,6 -15087,13 +15092,13 @@@ static const struct genl_ops nl80211_op
 		.internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
 				  NL80211_FLAG_NEED_RTNL,
 	},
+	{
+		.cmd = NL80211_CMD_SET_TID_CONFIG,
+		.doit = nl80211_set_tid_config,
+		.flags = GENL_UNS_ADMIN_PERM,
+		.internal_flags = NL80211_FLAG_NEED_NETDEV |
+				  NL80211_FLAG_NEED_RTNL,
+	},
 };
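
[The SET_TID_CONFIG plumbing above hands the driver a single cfg80211_tid_config with n_tid_conf entries, each carrying a TID bitmap plus a mask of which knobs userspace set. A hedged sketch of the receiving op, assuming the (wiphy, dev, tid_config) signature; the field names come from the hunks, while the my_hw_* hooks are illustrative assumptions:

static int my_set_tid_config(struct wiphy *wiphy, struct net_device *dev,
			     struct cfg80211_tid_config *tid_config)
{
	int i;

	for (i = 0; i < tid_config->n_tid_conf; i++) {
		struct cfg80211_tid_cfg *cfg = &tid_config->tid_conf[i];

		/* apply only the knobs flagged in cfg->mask */
		if (cfg->mask & BIT(NL80211_TID_CONFIG_ATTR_NOACK))
			my_hw_set_noack(dev, tid_config->peer, cfg->tids,
					cfg->noack);
		if (cfg->mask & BIT(NL80211_TID_CONFIG_ATTR_RETRY_SHORT))
			my_hw_set_retry_short(dev, tid_config->peer,
					      cfg->tids, cfg->retry_short);
	}
	return 0;
}
]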
static struct genl_family nl80211_fam __ro_after_init = {