The following commit has been merged in the master branch:

commit bfacd0c0a1399d3151b65f8ccf406dede1c129cd
Merge: b1dc171716cdcb42b9e6aff46b1b9859ae2af2ed e3b3a87967cef1fa157d93fd726960b1b812401d
Author: Stephen Rothwell <sfr@canb.auug.org.au>
Date:   Thu Aug 24 10:49:14 2023 +1000

    Merge branch 'main' of git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net-next.git

    # Conflicts:
    #	drivers/net/ethernet/freescale/fs_enet/fs_enet.h
    #	include/net/inet_sock.h
diff --combined Documentation/devicetree/bindings/net/can/bosch,m_can.yaml
index 76c5024b6423,bb518c831f7b..f9ffb963d6b1
--- a/Documentation/devicetree/bindings/net/can/bosch,m_can.yaml
+++ b/Documentation/devicetree/bindings/net/can/bosch,m_can.yaml
@@@ -122,16 -122,15 +122,15 @@@ required
   - compatible
   - reg
   - reg-names
- - interrupts
- - interrupt-names
   - clocks
   - clock-names
   - bosch,mram-cfg

-additionalProperties: false
+unevaluatedProperties: false

 examples:
   - |
+    // Example with interrupts
     #include <dt-bindings/clock/imx6sx-clock.h>

     can@20e8000 {
         compatible = "bosch,m_can";
@@@ -149,4 -148,21 +148,21 @@@
         };
     };

+  - |
+    // Example with timer polling
+    #include <dt-bindings/clock/imx6sx-clock.h>
+
+    can@20e8000 {
+        compatible = "bosch,m_can";
+        reg = <0x020e8000 0x4000>, <0x02298000 0x4000>;
+        reg-names = "m_can", "message_ram";
+        clocks = <&clks IMX6SX_CLK_CANFD>,
+                 <&clks IMX6SX_CLK_CANFD>;
+        clock-names = "hclk", "cclk";
+        bosch,mram-cfg = <0x0 0 0 32 0 0 0 1>;
+
+        can-transceiver {
+            max-bitrate = <5000000>;
+        };
+    };
+
 ...
diff --combined Documentation/netlink/genetlink-c.yaml
index 2627a384ae01,4c1f8c22627b..9806c44f604c
--- a/Documentation/netlink/genetlink-c.yaml
+++ b/Documentation/netlink/genetlink-c.yaml
@@@ -41,7 -41,7 +41,7 @@@ properties
        description: Name of the define for the family name.
        type: string
      c-version-name:
-       description: Name of the define for the verion of the family.
+       description: Name of the define for the version of the family.
        type: string
      max-by-define:
        description: Makes the number of attributes and commands be specified by a define, not an enum value.
@@@ -274,7 -274,7 +274,7 @@@
            description: Kernel attribute validation flags.
            type: array
            items:
-             enum: [ strict, dump ]
+             enum: [ strict, dump, dump-strict ]
        do: &subop-type
          description: Main command handler.
          type: object
diff --combined Documentation/netlink/genetlink-legacy.yaml
index 30803dc21123,196076dfa309..12a0a045605d
--- a/Documentation/netlink/genetlink-legacy.yaml
+++ b/Documentation/netlink/genetlink-legacy.yaml
@@@ -41,7 -41,7 +41,7 @@@ properties
        description: Name of the define for the family name.
        type: string
      c-version-name:
-       description: Name of the define for the verion of the family.
+       description: Name of the define for the version of the family.
        type: string
      max-by-define:
        description: Makes the number of attributes and commands be specified by a define, not an enum value.
@@@ -321,7 -321,7 +321,7 @@@
            description: Kernel attribute validation flags.
            type: array
            items:
-             enum: [ strict, dump ]
+             enum: [ strict, dump, dump-strict ]
        # Start genetlink-legacy
        fixed-header: *fixed-header
        # End genetlink-legacy
diff --combined MAINTAINERS
index e3af8fa764a0,9cc15c50c2c6..eb582e6bd6cb
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@@ -1074,6 -1074,7 +1074,6 @@@ F: include/soc/amlogic
 AMPHION VPU CODEC V4L2 DRIVER
 M:	Ming Qian <ming.qian@nxp.com>
-M:	Shijie Qin <shijie.qin@nxp.com>
 M:	Zhou Peng <eagle.zhou@nxp.com>
 L:	linux-media@vger.kernel.org
 S:	Maintained
@@@ -1557,10 -1558,9 +1557,10 @@@ M: Olof Johansson <olof@lixom.net
 M:	soc@kernel.org
 L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S:	Maintained
+P:	Documentation/process/maintainer-soc.rst
 C:	irc://irc.libera.chat/armlinux
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/soc/soc.git
-F:	Documentation/process/maintainer-soc.rst
+F:	Documentation/process/maintainer-soc*.rst
 F:	arch/arm/boot/dts/Makefile
 F:	arch/arm64/boot/dts/Makefile

@@@ -1843,7 -1843,6 +1843,7 @@@ F: Documentation/devicetree/bindings/ph
 F:	arch/arm/boot/dts/amlogic/
 F:	arch/arm/mach-meson/
 F:	arch/arm64/boot/dts/amlogic/
+F:	drivers/genpd/amlogic/
 F:	drivers/mmc/host/meson*
 F:	drivers/phy/amlogic/
 F:	drivers/pinctrl/meson/
@@@ -1906,12 -1905,12 +1906,12 @@@ F: drivers/bluetooth/hci_bcm4377.
 F:	drivers/clk/clk-apple-nco.c
 F:	drivers/cpufreq/apple-soc-cpufreq.c
 F:	drivers/dma/apple-admac.c
+F:	drivers/genpd/apple/
 F:	drivers/i2c/busses/i2c-pasemi-core.c
 F:	drivers/i2c/busses/i2c-pasemi-platform.c
 F:	drivers/iommu/apple-dart.c
 F:	drivers/iommu/io-pgtable-dart.c
 F:	drivers/irqchip/irq-apple-aic.c
-F:	drivers/mailbox/apple-mailbox.c
 F:	drivers/nvme/host/apple.c
 F:	drivers/nvmem/apple-efuses.c
 F:	drivers/pinctrl/pinctrl-apple-gpio.c
@@@ -1920,6 -1919,7 +1920,6 @@@ F: drivers/soc/apple/
 F:	drivers/watchdog/apple_wdt.c
 F:	include/dt-bindings/interrupt-controller/apple-aic.h
 F:	include/dt-bindings/pinctrl/apple.h
-F:	include/linux/apple-mailbox.h
 F:	include/linux/soc/apple/*

 ARM/ARTPEC MACHINE SUPPORT
@@@ -2421,7 -2421,6 +2421,7 @@@ F: arch/arm/mach-ux500
 F:	drivers/clk/clk-nomadik.c
 F:	drivers/clocksource/clksrc-dbx500-prcmu.c
 F:	drivers/dma/ste_dma40*
+F:	drivers/genpd/st/ste-ux500-pm-domain.c
 F:	drivers/hwspinlock/u8500_hsem.c
 F:	drivers/i2c/busses/i2c-nomadik.c
 F:	drivers/iio/adc/ab8500-gpadc.c
@@@ -2494,6 -2493,16 +2494,6 @@@ S: Maintaine
 W:	http://www.digriz.org.uk/ts78xx/kernel
 F:	arch/arm/mach-orion5x/ts78xx-*

-ARM/OXNAS platform support
-M:	Neil Armstrong <neil.armstrong@linaro.org>
-L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
-L:	linux-oxnas@groups.io (moderated for non-subscribers)
-S:	Maintained
-F:	arch/arm/boot/dts/ox8*.dts*
-F:	arch/arm/mach-oxnas/
-F:	drivers/power/reset/oxnas-restart.c
-N:	oxnas
-
 ARM/QUALCOMM CHROMEBOOK SUPPORT
 R:	cros-qcom-dts-watchers@chromium.org
 F:	arch/arm64/boot/dts/qcom/sc7180*
@@@ -2584,7 -2593,6 +2584,7 @@@ F: arch/arm/include/debug/renesas-scif.
 F:	arch/arm/mach-shmobile/
 F:	arch/arm64/boot/dts/renesas/
 F:	arch/riscv/boot/dts/renesas/
+F:	drivers/genpd/renesas/
 F:	drivers/soc/renesas/
 F:	include/linux/soc/renesas/
 K:	\brenesas,
@@@ -2627,7 -2635,6 +2627,7 @@@ R: Alim Akhtar <alim.akhtar@samsung.com
 L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 L:	linux-samsung-soc@vger.kernel.org
 S:	Maintained
+P:	Documentation/process/maintainer-soc-clean-dts.rst
 Q:	https://patchwork.kernel.org/project/linux-samsung-soc/list/
 B:	mailto:linux-samsung-soc@vger.kernel.org
 C:	irc://irc.libera.chat/linux-exynos
@@@ -2922,13 -2929,14 +2922,13 @@@ M: Sudeep Holla <sudeep.holla@arm.com
 M:	Lorenzo Pieralisi <lpieralisi@kernel.org>
 L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S:	Maintained
-F:	*/*/*/vexpress*
-F:	*/*/vexpress*
-F:	arch/arm/boot/dts/arm/vexpress*
+N:	mps2
+N:	vexpress
 F:	arch/arm/mach-versatile/
 F:	arch/arm64/boot/dts/arm/
-F:	drivers/clk/versatile/clk-vexpress-osc.c
 F:	drivers/clocksource/timer-versatile.c
-N:	mps2
+X:	drivers/cpufreq/vexpress-spc-cpufreq.c
+X:	Documentation/devicetree/bindings/arm/arm,vexpress-juno.yaml
 ARM/VFP SUPPORT
 M:	Russell King <linux@armlinux.org.uk>
@@@ -3677,6 -3685,7 +3677,7 @@@ F: include/linux/filter.
 F:	include/linux/tnum.h
 F:	kernel/bpf/core.c
 F:	kernel/bpf/dispatcher.c
+ F:	kernel/bpf/mprog.c
 F:	kernel/bpf/syscall.c
 F:	kernel/bpf/tnum.c
 F:	kernel/bpf/trampoline.c
@@@ -3687,7 -3696,7 +3688,7 @@@ R: David Vernet <void@manifault.com
 L:	bpf@vger.kernel.org
 L:	bpf@ietf.org
 S:	Maintained
- F:	Documentation/bpf/instruction-set.rst
+ F:	Documentation/bpf/standardization/

 BPF [GENERAL] (Safe Dynamic Programs and Tools)
 M:	Alexei Starovoitov <ast@kernel.org>
@@@ -3695,7 -3704,7 +3696,7 @@@ M: Daniel Borkmann <daniel@iogearbox.ne
 M:	Andrii Nakryiko <andrii@kernel.org>
 R:	Martin KaFai Lau <martin.lau@linux.dev>
 R:	Song Liu <song@kernel.org>
- R:	Yonghong Song <yhs@fb.com>
+ R:	Yonghong Song <yonghong.song@linux.dev>
 R:	John Fastabend <john.fastabend@gmail.com>
 R:	KP Singh <kpsingh@kernel.org>
 R:	Stanislav Fomichev <sdf@google.com>
@@@ -3734,7 -3743,7 +3735,7 @@@ F: tools/lib/bpf
 F:	tools/testing/selftests/bpf/

 BPF [ITERATOR]
- M:	Yonghong Song <yhs@fb.com>
+ M:	Yonghong Song <yonghong.song@linux.dev>
 L:	bpf@vger.kernel.org
 S:	Maintained
 F:	kernel/bpf/*iter.c
@@@ -3770,13 -3779,15 +3771,15 @@@ L: netdev@vger.kernel.or
 S:	Maintained
 F:	kernel/bpf/bpf_struct*

- BPF [NETWORKING] (tc BPF, sock_addr)
+ BPF [NETWORKING] (tcx & tc BPF, sock_addr)
 M:	Martin KaFai Lau <martin.lau@linux.dev>
 M:	Daniel Borkmann <daniel@iogearbox.net>
 R:	John Fastabend <john.fastabend@gmail.com>
 L:	bpf@vger.kernel.org
 L:	netdev@vger.kernel.org
 S:	Maintained
+ F:	include/net/tcx.h
+ F:	kernel/bpf/tcx.c
 F:	net/core/filter.c
 F:	net/sched/act_bpf.c
 F:	net/sched/cls_bpf.c
@@@ -3828,6 -3839,15 +3831,15 @@@ S: Maintaine
 F:	kernel/bpf/stackmap.c
 F:	kernel/trace/bpf_trace.c

+ BROADCOM ASP 2.0 ETHERNET DRIVER
+ M:	Justin Chen <justin.chen@broadcom.com>
+ M:	Florian Fainelli <florian.fainelli@broadcom.com>
+ L:	bcm-kernel-feedback-list@broadcom.com
+ L:	netdev@vger.kernel.org
+ S:	Supported
+ F:	Documentation/devicetree/bindings/net/brcm,asp-v2.0.yaml
+ F:	drivers/net/ethernet/broadcom/asp2/
+
 BROADCOM B44 10/100 ETHERNET DRIVER
 M:	Michael Chan <michael.chan@broadcom.com>
 L:	netdev@vger.kernel.org
@@@ -4000,7 -4020,7 +4012,7 @@@ F: arch/mips/kernel/*bmips
 F:	drivers/irqchip/irq-bcm63*
 F:	drivers/irqchip/irq-bcm7*
 F:	drivers/irqchip/irq-brcmstb*
-F:	drivers/soc/bcm/bcm63xx
+F:	drivers/genpd/bcm/bcm63xx-power.c
 F:	include/linux/bcm963xx_nvram.h
 F:	include/linux/bcm963xx_tag.h

@@@ -4222,7 -4242,7 +4234,7 @@@ R: Broadcom internal kernel review lis
 L:	linux-pm@vger.kernel.org
 S:	Maintained
 T:	git https://github.com/broadcom/stblinux.git
-F:	drivers/soc/bcm/bcm63xx/bcm-pmb.c
+F:	drivers/genpd/bcm/bcm-pmb.c
 F:	include/dt-bindings/soc/bcm-pmb.h
 BROADCOM SPECIFIC AMBA DRIVER (BCMA)
@@@ -4442,7 -4462,6 +4454,7 @@@ M: Maxime Ripard <mripard@kernel.org
 L:	linux-media@vger.kernel.org
 S:	Maintained
 F:	Documentation/devicetree/bindings/media/cdns,*.txt
+F:	Documentation/devicetree/bindings/media/cdns,csi2rx.yaml
 F:	drivers/media/platform/cadence/cdns-csi2*

 CADENCE NAND DRIVER
@@@ -5362,7 -5381,7 +5374,7 @@@ M: Kukjin Kim <kgene@kernel.org
 R:	Krzysztof Kozlowski <krzysztof.kozlowski@linaro.org>
 L:	linux-pm@vger.kernel.org
 L:	linux-samsung-soc@vger.kernel.org
-S:	Supported
+S:	Maintained
 F:	arch/arm/mach-exynos/pm.c
 F:	drivers/cpuidle/cpuidle-exynos.c
 F:	include/linux/platform_data/cpuidle-exynos.h
@@@ -6207,7 -6226,6 +6219,7 @@@ DOCUMENTATION PROCES
 M:	Jonathan Corbet <corbet@lwn.net>
 L:	workflows@vger.kernel.org
 S:	Maintained
+F:	Documentation/maintainer/
 F:	Documentation/process/

 DOCUMENTATION REPORTING ISSUES
@@@ -6245,17 -6263,11 +6257,17 @@@ T: git git://linuxtv.org/media_tree.gi
 F:	Documentation/devicetree/bindings/media/i2c/dongwoon,dw9714.yaml
 F:	drivers/media/i2c/dw9714.c

-DONGWOON DW9768 LENS VOICE COIL DRIVER
-M:	Dongchun Zhu <dongchun.zhu@mediatek.com>
+DONGWOON DW9719 LENS VOICE COIL DRIVER
+M:	Daniel Scally <djrscally@gmail.com>
 L:	linux-media@vger.kernel.org
 S:	Maintained
 T:	git git://linuxtv.org/media_tree.git
+F:	drivers/media/i2c/dw9719.c
+
+DONGWOON DW9768 LENS VOICE COIL DRIVER
+L:	linux-media@vger.kernel.org
+S:	Orphan
+T:	git git://linuxtv.org/media_tree.git
 F:	Documentation/devicetree/bindings/media/i2c/dongwoon,dw9768.yaml
 F:	drivers/media/i2c/dw9768.c

@@@ -7593,6 -7605,13 +7605,13 @@@ L: linux-mmc@vger.kernel.or
 S:	Supported
 F:	drivers/mmc/host/cqhci*

+ EMS CPC-PCI CAN DRIVER
+ M:	Gerhard Uttenthaler <uttenthaler@ems-wuensche.com>
+ M:	support@ems-wuensche.com
+ L:	linux-can@vger.kernel.org
+ S:	Maintained
+ F:	drivers/net/can/sja1000/ems_pci.c
+
 EMULEX 10Gbps iSCSI - OneConnect DRIVER
 M:	Ketan Mukadam <ketan.mukadam@broadcom.com>
 L:	linux-scsi@vger.kernel.org
@@@ -7732,6 -7751,7 +7751,7 @@@ F: include/linux/mii.
 F:	include/linux/of_net.h
 F:	include/linux/phy.h
 F:	include/linux/phy_fixed.h
+ F:	include/linux/phylib_stubs.h
 F:	include/linux/platform_data/mdio-bcm-unimac.h
 F:	include/linux/platform_data/mdio-gpio.h
 F:	include/trace/events/mdio.h
@@@ -8351,7 -8371,6 +8371,6 @@@ L: linuxppc-dev@lists.ozlabs.or
 L:	netdev@vger.kernel.org
 S:	Maintained
 F:	drivers/net/ethernet/freescale/fs_enet/
- F:	include/linux/fs_enet_pd.h
 FREESCALE SOC SOUND DRIVERS
 M:	Shengjiu Wang <shengjiu.wang@gmail.com>
@@@ -8402,6 -8421,13 +8421,6 @@@ F: Documentation/power/freezing-of-task
 F:	include/linux/freezer.h
 F:	kernel/freezer.c

-FRONTSWAP API
-M:	Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
-L:	linux-kernel@vger.kernel.org
-S:	Maintained
-F:	include/linux/frontswap.h
-F:	mm/frontswap.c
-
 FS-CACHE: LOCAL CACHING FOR NETWORK FILESYSTEMS
 M:	David Howells <dhowells@redhat.com>
 L:	linux-cachefs@redhat.com (moderated for non-subscribers)
@@@ -8666,13 -8692,6 +8685,13 @@@ F: Documentation/devicetree/bindings/po
 F:	drivers/base/power/domain*.c
 F:	include/linux/pm_domain.h

+GENERIC PM DOMAIN PROVIDERS
+M:	Ulf Hansson <ulf.hansson@linaro.org>
+L:	linux-pm@vger.kernel.org
+S:	Supported
+T:	git git://git.kernel.org/pub/scm/linux/kernel/git/ulfh/linux-pm.git
+F:	drivers/genpd/
+
 GENERIC RESISTIVE TOUCHSCREEN ADC DRIVER
 M:	Eugen Hristev <eugen.hristev@microchip.com>
 L:	linux-input@vger.kernel.org
@@@ -8812,7 -8831,6 +8831,7 @@@ R: Michael Walle <michael@walle.cc
 S:	Maintained
 F:	drivers/gpio/gpio-regmap.c
 F:	include/linux/gpio/regmap.h
+K:	(devm_)?gpio_regmap_(un)?register
 GPIO SUBSYSTEM
 M:	Linus Walleij <linus.walleij@linaro.org>
@@@ -9319,13 -9337,6 +9338,13 @@@ W: https://www.hisilicon.co
 F:	Documentation/devicetree/bindings/i2c/hisilicon,ascend910-i2c.yaml
 F:	drivers/i2c/busses/i2c-hisi.c

+HISILICON KUNPENG SOC HCCS DRIVER
+M:	Huisong Li <lihuisong@huawei.com>
+S:	Maintained
+F:	Documentation/ABI/testing/sysfs-devices-platform-kunpeng_hccs
+F:	drivers/soc/hisilicon/kunpeng_hccs.c
+F:	drivers/soc/hisilicon/kunpeng_hccs.h
+
 HISILICON LPC BUS DRIVER
 M:	Jay Fang <f.fangjian@huawei.com>
 S:	Maintained
@@@ -9502,12 -9513,6 +9521,12 @@@ S: Maintaine
 W:	http://artax.karlin.mff.cuni.cz/~mikulas/vyplody/hpfs/index-e.cgi
 F:	fs/hpfs/

+HS3001 Hardware Temperature and Humidity Sensor
+M:	Andre Werner <andre.werner@systec-electronic.com>
+L:	linux-hwmon@vger.kernel.org
+S:	Maintained
+F:	drivers/hwmon/hs3001.c
+
 HSI SUBSYSTEM
 M:	Sebastian Reichel <sre@kernel.org>
 S:	Maintained
@@@ -9611,7 -9616,7 +9630,7 @@@ S: Maintaine
 F:	arch/x86/kernel/cpu/hygon.c

 HYNIX HI556 SENSOR DRIVER
-M:	Shawn Tu <shawnx.tu@intel.com>
+M:	Sakari Ailus <sakari.ailus@linux.intel.com>
 L:	linux-media@vger.kernel.org
 S:	Maintained
 T:	git git://linuxtv.org/media_tree.git
@@@ -9624,7 -9629,7 +9643,7 @@@ S: Maintaine
 F:	drivers/media/i2c/hi846.c

 HYNIX HI847 SENSOR DRIVER
-M:	Shawn Tu <shawnx.tu@intel.com>
+M:	Sakari Ailus <sakari.ailus@linux.intel.com>
 L:	linux-media@vger.kernel.org
 S:	Maintained
 F:	drivers/media/i2c/hi847.c
@@@ -9695,14 -9700,6 +9714,14 @@@ L: linux-acpi@vger.kernel.or
 S:	Maintained
 F:	drivers/i2c/i2c-core-acpi.c
+I2C ADDRESS TRANSLATOR (ATR)
+M:	Tomi Valkeinen <tomi.valkeinen@ideasonboard.com>
+R:	Luca Ceresoli <luca.ceresoli@bootlin.com>
+L:	linux-i2c@vger.kernel.org
+S:	Maintained
+F:	drivers/i2c/i2c-atr.c
+F:	include/linux/i2c-atr.h
+
 I2C CONTROLLER DRIVER FOR NVIDIA GPU
 M:	Ajay Gupta <ajayg@nvidia.com>
 L:	linux-i2c@vger.kernel.org
@@@ -11317,7 -11314,6 +11336,7 @@@ F: scripts/dummy-tools
 F:	scripts/mk*
 F:	scripts/mod/
 F:	scripts/package/
+F:	usr/

 KERNEL HARDENING (not covered by other areas)
 M:	Kees Cook <keescook@chromium.org>
@@@ -12265,8 -12261,8 +12284,8 @@@ R: WANG Xuerui <kernel@xen0n.name
 L:	loongarch@lists.linux.dev
 S:	Maintained
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/chenhuacai/linux-loongson.git
-F:	Documentation/loongarch/
-F:	Documentation/translations/zh_CN/loongarch/
+F:	Documentation/arch/loongarch/
+F:	Documentation/translations/zh_CN/arch/loongarch/
 F:	arch/loongarch/
 F:	drivers/*/*loongarch*

@@@ -12299,13 -12295,6 +12318,13 @@@ S: Maintaine
 F:	Documentation/devicetree/bindings/hwinfo/loongson,ls2k-chipid.yaml
 F:	drivers/soc/loongson/loongson2_guts.c

+LOONGSON-2 SOC SERIES PM DRIVER
+M:	Yinbo Zhu <zhuyinbo@loongson.cn>
+L:	linux-pm@vger.kernel.org
+S:	Maintained
+F:	Documentation/devicetree/bindings/soc/loongson/loongson,ls2k-pmc.yaml
+F:	drivers/soc/loongson/loongson2_pm.c
+
 LOONGSON-2 SOC SERIES PINCTRL DRIVER
 M:	zhanghongchen <zhanghongchen@loongson.cn>
 M:	Yinbo Zhu <zhuyinbo@loongson.cn>
@@@ -12314,14 -12303,6 +12333,14 @@@ S: Maintaine
 F:	Documentation/devicetree/bindings/pinctrl/loongson,ls2k-pinctrl.yaml
 F:	drivers/pinctrl/pinctrl-loongson2.c

+LOONGSON-2 SOC SERIES THERMAL DRIVER
+M:	zhanghongchen <zhanghongchen@loongson.cn>
+M:	Yinbo Zhu <zhuyinbo@loongson.cn>
+L:	linux-pm@vger.kernel.org
+S:	Maintained
+F:	Documentation/devicetree/bindings/thermal/loongson,ls2k-thermal.yaml
+F:	drivers/thermal/loongson2_thermal.c
+
 LSILOGIC MPT FUSION DRIVERS (FC/SAS/SPI)
 M:	Sathya Prakash <sathya.prakash@broadcom.com>
 M:	Sreekanth Reddy <sreekanth.reddy@broadcom.com>
@@@ -12863,7 -12844,7 +12882,7 @@@ F: drivers/power/supply/max77976_charge
 MAXIM MUIC CHARGER DRIVERS FOR EXYNOS BASED BOARDS
 M:	Krzysztof Kozlowski <krzysztof.kozlowski@linaro.org>
 L:	linux-pm@vger.kernel.org
-S:	Supported
+S:	Maintained
 B:	mailto:linux-samsung-soc@vger.kernel.org
 F:	Documentation/devicetree/bindings/power/supply/maxim,max14577.yaml
 F:	Documentation/devicetree/bindings/power/supply/maxim,max77693.yaml
@@@ -12874,7 -12855,7 +12893,7 @@@ MAXIM PMIC AND MUIC DRIVERS FOR EXYNOS
 M:	Chanwoo Choi <cw00.choi@samsung.com>
 M:	Krzysztof Kozlowski <krzysztof.kozlowski@linaro.org>
 L:	linux-kernel@vger.kernel.org
-S:	Supported
+S:	Maintained
 B:	mailto:linux-samsung-soc@vger.kernel.org
 F:	Documentation/devicetree/bindings/*/maxim,max14577.yaml
 F:	Documentation/devicetree/bindings/*/maxim,max77686.yaml
@@@ -13042,21 -13023,17 +13061,21 @@@ F: drivers/staging/media/imx
 F:	include/linux/imx-media.h
 F:	include/media/imx.h

-MEDIA DRIVERS FOR FREESCALE IMX7
+MEDIA DRIVERS FOR FREESCALE IMX7/8
 M:	Rui Miguel Silva <rmfrfs@gmail.com>
 M:	Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+M:	Martin Kepplinger <martin.kepplinger@puri.sm>
+R:	Purism Kernel Team <kernel@puri.sm>
 L:	linux-media@vger.kernel.org
 S:	Maintained
 T:	git git://linuxtv.org/media_tree.git
 F:	Documentation/admin-guide/media/imx7.rst
 F:	Documentation/devicetree/bindings/media/nxp,imx-mipi-csi2.yaml
 F:	Documentation/devicetree/bindings/media/nxp,imx7-csi.yaml
+F:	Documentation/devicetree/bindings/media/nxp,imx8mq-mipi-csi2.yaml
 F:	drivers/media/platform/nxp/imx-mipi-csis.c
 F:	drivers/media/platform/nxp/imx7-media-csi.c
+F:	drivers/media/platform/nxp/imx8mq-mipi-csi2.c

 MEDIA DRIVERS FOR HELENE
 M:	Abylay Ospan <aospan@netup.ru>
@@@ -14210,7 -14187,7 +14229,7 @@@ W: http://www.linux-mips.or
 Q:	https://patchwork.kernel.org/project/linux-mips/list/
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/mips/linux.git
 F:	Documentation/devicetree/bindings/mips/
-F:	Documentation/mips/
+F:	Documentation/arch/mips/
 F:	arch/mips/
 F:	drivers/platform/mips/
 F:	include/dt-bindings/mips/
@@@ -14681,7 -14658,7 +14700,7 @@@ F: drivers/rtc/rtc-ntxec.
 F:	include/linux/mfd/ntxec.h
 NETRONOME ETHERNET DRIVERS
- M:	Simon Horman <simon.horman@corigine.com>
+ M:	Louis Peens <louis.peens@corigine.com>
 R:	Jakub Kicinski <kuba@kernel.org>
 L:	oss-drivers@corigine.com
 S:	Maintained
@@@ -14844,16 -14821,6 +14863,16 @@@ F: net/netfilter/xt_CONNSECMARK.
 F:	net/netfilter/xt_SECMARK.c
 F:	net/netlabel/

+NETWORKING [MACSEC]
+M:	Sabrina Dubroca <sd@queasysnail.net>
+L:	netdev@vger.kernel.org
+S:	Maintained
+F:	drivers/net/macsec.c
+F:	include/net/macsec.h
+F:	include/uapi/linux/if_macsec.h
+K:	macsec
+K:	\bmdo_
+
 NETWORKING [MPTCP]
 M:	Matthieu Baerts <matthieu.baerts@tessares.net>
 M:	Mat Martineau <martineau@kernel.org>
@@@ -14876,6 -14843,7 +14895,6 @@@ NETWORKING [TCP
 M:	Eric Dumazet <edumazet@google.com>
 L:	netdev@vger.kernel.org
 S:	Maintained
-F:	include/linux/net_mm.h
 F:	include/linux/tcp.h
 F:	include/net/tcp.h
 F:	include/trace/events/tcp.h
@@@ -15564,7 -15532,6 +15583,7 @@@ W: http://www.muru.com/linux/omap
 W:	http://linux.omap.com/
 Q:	http://patchwork.kernel.org/project/linux-omap/list/
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/tmlind/linux-omap.git
+F:	Documentation/devicetree/bindings/arm/ti/omap.yaml
 F:	arch/arm/configs/omap2plus_defconfig
 F:	arch/arm/mach-omap2/
 F:	drivers/bus/ti-sysc.c
@@@ -15601,7 -15568,7 +15620,7 @@@ F: Documentation/filesystems/omfs.rs
 F:	fs/omfs/

 OMNIVISION OG01A1B SENSOR DRIVER
-M:	Shawn Tu <shawnx.tu@intel.com>
+M:	Sakari Ailus <sakari.ailus@linux.intel.com>
 L:	linux-media@vger.kernel.org
 S:	Maintained
 F:	drivers/media/i2c/og01a1b.c
@@@ -15614,8 -15581,9 +15633,8 @@@ T: git git://linuxtv.org/media_tree.gi
 F:	drivers/media/i2c/ov01a10.c

 OMNIVISION OV02A10 SENSOR DRIVER
-M:	Dongchun Zhu <dongchun.zhu@mediatek.com>
 L:	linux-media@vger.kernel.org
-S:	Maintained
+S:	Orphan
 T:	git git://linuxtv.org/media_tree.git
 F:	Documentation/devicetree/bindings/media/i2c/ovti,ov02a10.yaml
 F:	drivers/media/i2c/ov02a10.c
@@@ -15650,7 -15618,6 +15669,7 @@@ F: drivers/media/i2c/ov13b10.

 OMNIVISION OV2680 SENSOR DRIVER
 M:	Rui Miguel Silva <rmfrfs@gmail.com>
+M:	Hans de Goede <hansg@kernel.org>
 L:	linux-media@vger.kernel.org
 S:	Maintained
 T:	git git://linuxtv.org/media_tree.git
@@@ -15667,7 -15634,7 +15686,7 @@@ F: drivers/media/i2c/ov2685.

 OMNIVISION OV2740 SENSOR DRIVER
 M:	Tianshu Qiu <tian.shu.qiu@intel.com>
-R:	Shawn Tu <shawnx.tu@intel.com>
+R:	Sakari Ailus <sakari.ailus@linux.intel.com>
 R:	Bingbu Cao <bingbu.cao@intel.com>
 L:	linux-media@vger.kernel.org
 S:	Maintained
@@@ -15699,7 -15666,7 +15718,7 @@@ F: Documentation/devicetree/bindings/me
 F:	drivers/media/i2c/ov5647.c

 OMNIVISION OV5670 SENSOR DRIVER
-M:	Chiranjeevi Rapolu <chiranjeevi.rapolu@intel.com>
+M:	Sakari Ailus <sakari.ailus@linux.intel.com>
 L:	linux-media@vger.kernel.org
 S:	Maintained
 T:	git git://linuxtv.org/media_tree.git
@@@ -15707,7 -15674,7 +15726,7 @@@ F: Documentation/devicetree/bindings/me
 F:	drivers/media/i2c/ov5670.c

 OMNIVISION OV5675 SENSOR DRIVER
-M:	Shawn Tu <shawnx.tu@intel.com>
+M:	Sakari Ailus <sakari.ailus@linux.intel.com>
 L:	linux-media@vger.kernel.org
 S:	Maintained
 T:	git git://linuxtv.org/media_tree.git
@@@ -15746,8 -15713,9 +15765,8 @@@ F: drivers/media/i2c/ov772x.
 F:	include/media/i2c/ov772x.h

 OMNIVISION OV7740 SENSOR DRIVER
-M:	Wenyou Yang <wenyou.yang@microchip.com>
 L:	linux-media@vger.kernel.org
-S:	Maintained
+S:	Orphan
 T:	git git://linuxtv.org/media_tree.git
 F:	Documentation/devicetree/bindings/media/i2c/ov7740.txt
 F:	drivers/media/i2c/ov7740.c
@@@ -16052,7 -16020,7 +16071,7 @@@ M: Ilias Apalodimas <ilias.apalodimas@l
 L:	netdev@vger.kernel.org
 S:	Supported
 F:	Documentation/networking/page_pool.rst
- F:	include/net/page_pool.h
+ F:	include/net/page_pool/
 F:	include/trace/events/page_pool.h
 F:	net/core/page_pool.c
@@@ -16679,8 -16647,6 +16698,8 @@@ L: linux-kernel@vger.kernel.or
 S:	Supported
 W:	https://perf.wiki.kernel.org/
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git perf/core
+T:	git git://git.kernel.org/pub/scm/linux/kernel/git/perf/perf-tools.git perf-tools
+T:	git git://git.kernel.org/pub/scm/linux/kernel/git/perf/perf-tools-next.git perf-tools-next
 F:	arch/*/events/*
 F:	arch/*/events/*/*
 F:	arch/*/include/asm/perf_event.h
@@@ -17105,9 -17071,9 +17124,9 @@@ F: kernel/sched/psi.

 PRINTK
 M:	Petr Mladek <pmladek@suse.com>
-M:	Sergey Senozhatsky <senozhatsky@chromium.org>
 R:	Steven Rostedt <rostedt@goodmis.org>
 R:	John Ogness <john.ogness@linutronix.de>
+R:	Sergey Senozhatsky <senozhatsky@chromium.org>
 S:	Maintained
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/printk/linux.git
 F:	include/linux/printk.h
@@@ -17206,6 -17172,13 +17225,13 @@@ F: drivers/ptp
 F:	include/linux/ptp_cl*
 K:	(?:\b|_)ptp(?:\b|_)

+ PTP MOCKUP CLOCK SUPPORT
+ M:	Vladimir Oltean <vladimir.oltean@nxp.com>
+ L:	netdev@vger.kernel.org
+ S:	Maintained
+ F:	drivers/ptp/ptp_mock.c
+ F:	include/linux/ptp_mock.h
+
 PTP VIRTUAL CLOCK SUPPORT
 M:	Yangbo Lu <yangbo.lu@nxp.com>
 L:	netdev@vger.kernel.org
@@@ -17580,7 -17553,7 +17606,7 @@@ L: linux-pm@vger.kernel.or
 L:	linux-arm-msm@vger.kernel.org
 S:	Maintained
 F:	Documentation/devicetree/bindings/power/avs/qcom,cpr.yaml
-F:	drivers/soc/qcom/cpr.c
+F:	drivers/genpd/qcom/cpr.c

 QUALCOMM CPUFREQ DRIVER MSM8996/APQ8096
 M:	Ilia Lin <ilia.lin@kernel.org>
@@@ -18064,7 -18037,7 +18090,7 @@@ F: include/linux/regmap.

 REISERFS FILE SYSTEM
 L:	reiserfs-devel@vger.kernel.org
-S:	Supported
+S:	Obsolete
 F:	fs/reiserfs/
 REMOTE PROCESSOR (REMOTEPROC) SUBSYSTEM
@@@ -18646,7 -18619,7 +18672,7 @@@ L: linux-s390@vger.kernel.or
 S:	Supported
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux.git
 F:	Documentation/driver-api/s390-drivers.rst
-F:	Documentation/s390/
+F:	Documentation/arch/s390/
 F:	arch/s390/
 F:	drivers/s390/
 F:	drivers/watchdog/diag288_wdt.c
@@@ -18707,7 -18680,7 +18733,7 @@@ M: Niklas Schnelle <schnelle@linux.ibm.
 M:	Gerald Schaefer <gerald.schaefer@linux.ibm.com>
 L:	linux-s390@vger.kernel.org
 S:	Supported
-F:	Documentation/s390/pci.rst
+F:	Documentation/arch/s390/pci.rst
 F:	arch/s390/pci/
 F:	drivers/pci/hotplug/s390_pci_hpc.c

@@@ -18724,7 -18697,7 +18750,7 @@@ M: Halil Pasic <pasic@linux.ibm.com
 M:	Jason Herne <jjherne@linux.ibm.com>
 L:	linux-s390@vger.kernel.org
 S:	Supported
-F:	Documentation/s390/vfio-ap*
+F:	Documentation/arch/s390/vfio-ap*
 F:	drivers/s390/crypto/vfio_ap*

 S390 VFIO-CCW DRIVER
@@@ -18734,7 -18707,7 +18760,7 @@@ R: Halil Pasic <pasic@linux.ibm.com
 L:	linux-s390@vger.kernel.org
 L:	kvm@vger.kernel.org
 S:	Supported
-F:	Documentation/s390/vfio-ccw.rst
+F:	Documentation/arch/s390/vfio-ccw.rst
 F:	drivers/s390/cio/vfio_ccw*
 F:	include/uapi/linux/vfio_ccw.h

@@@ -18840,7 -18813,7 +18866,7 @@@ SAMSUNG MULTIFUNCTION PMIC DEVICE DRIVE
 M:	Krzysztof Kozlowski <krzysztof.kozlowski@linaro.org>
 L:	linux-kernel@vger.kernel.org
 L:	linux-samsung-soc@vger.kernel.org
-S:	Supported
+S:	Maintained
 B:	mailto:linux-samsung-soc@vger.kernel.org
 F:	Documentation/devicetree/bindings/clock/samsung,s2mps11.yaml
 F:	Documentation/devicetree/bindings/mfd/samsung,s2m*.yaml
@@@ -18912,7 -18885,7 +18938,7 @@@ M: Tomasz Figa <tomasz.figa@gmail.com
 M:	Chanwoo Choi <cw00.choi@samsung.com>
 R:	Alim Akhtar <alim.akhtar@samsung.com>
 L:	linux-samsung-soc@vger.kernel.org
-S:	Supported
+S:	Maintained
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/krzk/linux.git
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/snawrocki/clk.git
 F:	Documentation/devicetree/bindings/clock/samsung,*.yaml
@@@ -19276,6 -19249,13 +19302,6 @@@ F: Documentation/devicetree/bindings/se
 F:	drivers/tty/serdev/
 F:	include/linux/serdev.h

-SERIAL DRIVERS
-M:	Greg Kroah-Hartman <gregkh@linuxfoundation.org>
-L:	linux-serial@vger.kernel.org
-S:	Maintained
-F:	Documentation/devicetree/bindings/serial/
-F:	drivers/tty/serial/
-
 SERIAL IR RECEIVER
 M:	Sean Young <sean@mess.org>
 L:	linux-media@vger.kernel.org
@@@ -19638,6 -19618,13 +19664,6 @@@ M: Nicolas Pitre <nico@fluxnic.net
 S:	Odd Fixes
 F:	drivers/net/ethernet/smsc/smc91x.*

-SMM665 HARDWARE MONITOR DRIVER
-M:	Guenter Roeck <linux@roeck-us.net>
-L:	linux-hwmon@vger.kernel.org
-S:	Maintained
-F:	Documentation/hwmon/smm665.rst
-F:	drivers/hwmon/smm665.c
-
 SMSC EMC2103 HARDWARE MONITOR DRIVER
 M:	Steve Glendinning <steve.glendinning@shawell.net>
 L:	linux-hwmon@vger.kernel.org
@@@ -20320,18 -20307,6 +20346,18 @@@ S: Supporte
 F:	Documentation/devicetree/bindings/mmc/starfive*
 F:	drivers/mmc/host/dw_mmc-starfive.c
+STARFIVE JH7110 PLL CLOCK DRIVER
+M:	Xingyu Wu <xingyu.wu@starfivetech.com>
+S:	Supported
+F:	Documentation/devicetree/bindings/clock/starfive,jh7110-pll.yaml
+F:	drivers/clk/starfive/clk-starfive-jh7110-pll.c
+
+STARFIVE JH7110 SYSCON
+M:	William Qiu <william.qiu@starfivetech.com>
+M:	Xingyu Wu <xingyu.wu@starfivetech.com>
+S:	Supported
+F:	Documentation/devicetree/bindings/soc/starfive/starfive,jh7110-syscon.yaml
+
 STARFIVE JH7110 TDM DRIVER
 M:	Walker Chen <walker.chen@starfivetech.com>
 S:	Maintained
@@@ -20372,17 -20347,15 +20398,17 @@@ F: drivers/usb/cdns3/cdns3-starfive.

 STARFIVE JH71XX PMU CONTROLLER DRIVER
 M:	Walker Chen <walker.chen@starfivetech.com>
+M:	Changhuang Liang <changhuang.liang@starfivetech.com>
 S:	Supported
 F:	Documentation/devicetree/bindings/power/starfive*
-F:	drivers/soc/starfive/jh71xx_pmu.c
+F:	drivers/genpd/starfive/jh71xx-pmu.c
 F:	include/dt-bindings/power/starfive,jh7110-pmu.h

 STARFIVE SOC DRIVERS
 M:	Conor Dooley <conor@kernel.org>
 S:	Maintained
 T:	git https://git.kernel.org/pub/scm/linux/kernel/git/conor/linux.git/
+F:	Documentation/devicetree/bindings/soc/starfive/
 F:	drivers/soc/starfive/
 STARFIVE TRNG DRIVER
@@@ -21111,39 -21084,6 +21137,39 @@@ S: Maintaine
 F:	Documentation/devicetree/bindings/sound/davinci-mcasp-audio.yaml
 F:	sound/soc/ti/

+TEXAS INSTRUMENTS AUDIO (ASoC/HDA) DRIVERS
+M:	Shenghao Ding <shenghao-ding@ti.com>
+M:	Kevin Lu <kevin-lu@ti.com>
+M:	Baojun Xu <x1077012@ti.com>
+L:	alsa-devel@alsa-project.org (moderated for non-subscribers)
+S:	Maintained
+F:	Documentation/devicetree/bindings/sound/tas2552.txt
+F:	Documentation/devicetree/bindings/sound/tas2562.yaml
+F:	Documentation/devicetree/bindings/sound/tas2770.yaml
+F:	Documentation/devicetree/bindings/sound/tas27xx.yaml
+F:	Documentation/devicetree/bindings/sound/ti,pcm1681.txt
+F:	Documentation/devicetree/bindings/sound/ti,pcm3168a.yaml
+F:	Documentation/devicetree/bindings/sound/ti,tlv320*.yaml
+F:	Documentation/devicetree/bindings/sound/tlv320adcx140.yaml
+F:	Documentation/devicetree/bindings/sound/tlv320aic31xx.txt
+F:	Documentation/devicetree/bindings/sound/tpa6130a2.txt
+F:	include/sound/tas2*.h
+F:	include/sound/tlv320*.h
+F:	include/sound/tpa6130a2-plat.h
+F:	sound/pci/hda/tas2781_hda_i2c.c
+F:	sound/soc/codecs/pcm1681.c
+F:	sound/soc/codecs/pcm1789*.*
+F:	sound/soc/codecs/pcm179x*.*
+F:	sound/soc/codecs/pcm186x*.*
+F:	sound/soc/codecs/pcm3008.*
+F:	sound/soc/codecs/pcm3060*.*
+F:	sound/soc/codecs/pcm3168a*.*
+F:	sound/soc/codecs/pcm5102a.c
+F:	sound/soc/codecs/pcm512x*.*
+F:	sound/soc/codecs/tas2*.*
+F:	sound/soc/codecs/tlv320*.*
+F:	sound/soc/codecs/tpa6130a2.*
+
 TEXAS INSTRUMENTS DMA DRIVERS
 M:	Peter Ujfalusi <peter.ujfalusi@gmail.com>
 L:	dmaengine@vger.kernel.org
@@@ -21192,7 -21132,7 +21218,7 @@@ F: drivers/irqchip/irq-ti-sci-inta.
 F:	drivers/irqchip/irq-ti-sci-intr.c
 F:	drivers/reset/reset-ti-sci.c
 F:	drivers/soc/ti/ti_sci_inta_msi.c
-F:	drivers/soc/ti/ti_sci_pm_domains.c
+F:	drivers/genpd/ti/ti_sci_pm_domains.c
 F:	include/dt-bindings/soc/ti,sci_pm_domain.h
 F:	include/linux/soc/ti/ti_sci_inta_msi.h
 F:	include/linux/soc/ti/ti_sci_protocol.h
@@@ -21419,14 -21359,6 +21445,14 @@@ F: drivers/misc/tifm
 F:	drivers/mmc/host/tifm_sd.c
 F:	include/linux/tifm.h

+TI FPD-LINK DRIVERS
+M:	Tomi Valkeinen <tomi.valkeinen@ideasonboard.com>
+L:	linux-media@vger.kernel.org
+S:	Maintained
+F:	Documentation/devicetree/bindings/media/i2c/ti,ds90*
+F:	drivers/media/i2c/ds90*
+F:	include/media/i2c/ds90*
+
 TI KEYSTONE MULTICORE NAVIGATOR DRIVERS
 M:	Nishanth Menon <nm@ti.com>
 M:	Santosh Shilimkar <ssantosh@kernel.org>
@@@ -21434,7 -21366,6 +21460,7 @@@ L: linux-kernel@vger.kernel.or
 L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S:	Maintained
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/ti/linux.git
+F:	drivers/genpd/ti/omap_prm.c
 F:	drivers/soc/ti/*

 TI LM49xxx FAMILY ASoC CODEC DRIVERS
@@@ -21722,22 -21653,27 +21748,22 @@@ F: kernel/trace/trace_osnoise.
 F:	kernel/trace/trace_sched_wakeup.c

 TRADITIONAL CHINESE DOCUMENTATION
-M:	Hu Haowen <src.res@email.cn>
-L:	linux-doc-tw-discuss@lists.sourceforge.net (moderated for non-subscribers)
+M:	Hu Haowen <src.res.211@gmail.com>
 S:	Maintained
 W:	https://github.com/srcres258/linux-doc
 T:	git git://github.com/srcres258/linux-doc.git doc-zh-tw
 F:	Documentation/translations/zh_TW/

-TTY LAYER
+TTY LAYER AND SERIAL DRIVERS
 M:	Greg Kroah-Hartman <gregkh@linuxfoundation.org>
 M:	Jiri Slaby <jirislaby@kernel.org>
 L:	linux-kernel@vger.kernel.org
 L:	linux-serial@vger.kernel.org
 S:	Supported
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/tty.git
+F:	Documentation/devicetree/bindings/serial/
 F:	Documentation/driver-api/serial/
 F:	drivers/tty/
-F:	drivers/tty/serial/serial_base.h
-F:	drivers/tty/serial/serial_base_bus.c
-F:	drivers/tty/serial/serial_core.c
-F:	drivers/tty/serial/serial_ctrl.c
-F:	drivers/tty/serial/serial_port.c
 F:	include/linux/selection.h
 F:	include/linux/serial.h
 F:	include/linux/serial_core.h
@@@ -21886,7 -21822,7 +21912,7 @@@ F: Documentation/admin-guide/ufs.rs
 F:	fs/ufs/

 UHID USERSPACE HID IO DRIVER
-M:	David Rheinsberg <david.rheinsberg@gmail.com>
+M:	David Rheinsberg <david@readahead.eu>
 L:	linux-input@vger.kernel.org
 S:	Maintained
 F:	drivers/hid/uhid.c
@@@ -22371,39 -22307,6 +22397,39 @@@ L: linux-arm-kernel@lists.infradead.or
 S:	Maintained
 F:	drivers/clk/ux500/
+V4L2 ASYNC AND FWNODE FRAMEWORKS
+M:	Sakari Ailus <sakari.ailus@linux.intel.com>
+L:	linux-media@vger.kernel.org
+S:	Maintained
+T:	git git://linuxtv.org/media_tree.git
+F:	drivers/media/v4l2-core/v4l2-async.c
+F:	drivers/media/v4l2-core/v4l2-fwnode.c
+F:	include/media/v4l2-async.h
+F:	include/media/v4l2-fwnode.h
+
+V4L2 LENS DRIVERS
+M:	Sakari Ailus <sakari.ailus@linux.intel.com>
+L:	linux-media@vger.kernel.org
+S:	Maintained
+F:	drivers/media/i2c/ak*
+F:	drivers/media/i2c/dw*
+F:	drivers/media/i2c/lm*
+
+V4L2 CAMERA SENSOR DRIVERS
+M:	Sakari Ailus <sakari.ailus@linux.intel.com>
+L:	linux-media@vger.kernel.org
+S:	Maintained
+F:	Documentation/driver-api/media/camera-sensor.rst
+F:	Documentation/driver-api/media/tx-rx.rst
+F:	drivers/media/i2c/ar*
+F:	drivers/media/i2c/hi*
+F:	drivers/media/i2c/imx*
+F:	drivers/media/i2c/mt*
+F:	drivers/media/i2c/og*
+F:	drivers/media/i2c/ov*
+F:	drivers/media/i2c/s5*
+F:	drivers/media/i2c/st-vgxy61.c
+
 VF610 NAND DRIVER
 M:	Stefan Agner <stefan@agner.ch>
 L:	linux-mtd@lists.infradead.org
@@@ -22946,9 -22849,9 +22972,9 @@@ F: drivers/net/vrf.

 VSPRINTF
 M:	Petr Mladek <pmladek@suse.com>
 M:	Steven Rostedt <rostedt@goodmis.org>
-M:	Sergey Senozhatsky <senozhatsky@chromium.org>
 R:	Andy Shevchenko <andriy.shevchenko@linux.intel.com>
 R:	Rasmus Villemoes <linux@rasmusvillemoes.dk>
+R:	Sergey Senozhatsky <senozhatsky@chromium.org>
 S:	Maintained
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/printk/linux.git
 F:	Documentation/core-api/printk-formats.rst
@@@ -23050,7 -22953,7 +23076,7 @@@ S: Maintaine
 F:	drivers/rtc/rtc-sd3078.c

 WIIMOTE HID DRIVER
-M:	David Rheinsberg <david.rheinsberg@gmail.com>
+M:	David Rheinsberg <david@readahead.eu>
 L:	linux-input@vger.kernel.org
 S:	Maintained
 F:	drivers/hid/hid-wiimote*
@@@ -23459,14 -23362,12 +23485,14 @@@ F: include/xen/arm/swiotlb-xen.
 F:	include/xen/swiotlb-xen.h

 XFS FILESYSTEM
-M:	Darrick J. Wong <djwong@kernel.org>
+M:	Chandan Babu R <chandan.babu@oracle.com>
+R:	Darrick J. Wong <djwong@kernel.org>
 L:	linux-xfs@vger.kernel.org
 S:	Supported
 W:	http://xfs.org/
 C:	irc://irc.oftc.net/xfs
 T:	git git://git.kernel.org/pub/scm/fs/xfs/xfs-linux.git
+P:	Documentation/filesystems/xfs-maintainer-entry-profile.rst
 F:	Documentation/ABI/testing/sysfs-fs-xfs
 F:	Documentation/admin-guide/xfs.rst
 F:	Documentation/filesystems/xfs-delayed-logging-design.rst
diff --combined arch/powerpc/platforms/8xx/adder875.c
index ae72c574eb7e,f6bd232f8323..d02f8dd66427
--- a/arch/powerpc/platforms/8xx/adder875.c
+++ b/arch/powerpc/platforms/8xx/adder875.c
@@@ -7,13 -7,12 +7,12 @@@
  */
 #include <linux/init.h>
- #include <linux/fs_enet_pd.h>
 #include <linux/of_platform.h>

 #include <asm/time.h>
 #include <asm/machdep.h>
 #include <asm/cpm1.h>
-#include <asm/fs_pd.h>
+#include <asm/8xx_immap.h>
 #include <asm/udbg.h>

 #include "mpc8xx.h"
diff --combined arch/powerpc/platforms/8xx/mpc885ads_setup.c
index eb4e54ba417f,c7c4f082b838..76c7cd78c17e
--- a/arch/powerpc/platforms/8xx/mpc885ads_setup.c
+++ b/arch/powerpc/platforms/8xx/mpc885ads_setup.c
@@@ -21,7 -21,6 +21,6 @@@
 #include <linux/device.h>
 #include <linux/delay.h>

- #include <linux/fs_enet_pd.h>
 #include <linux/fs_uart_pd.h>
 #include <linux/fsl_devices.h>
 #include <linux/mii.h>
@@@ -37,6 -36,7 +36,6 @@@
 #include <asm/time.h>
 #include <asm/8xx_immap.h>
 #include <asm/cpm1.h>
-#include <asm/fs_pd.h>
 #include <asm/udbg.h>

 #include "mpc885ads.h"
diff --combined arch/powerpc/platforms/8xx/tqm8xx_setup.c
index c422262ba27b,6e56be852b2c..1670dfd30809
--- a/arch/powerpc/platforms/8xx/tqm8xx_setup.c
+++ b/arch/powerpc/platforms/8xx/tqm8xx_setup.c
@@@ -24,7 -24,6 +24,6 @@@
 #include <linux/device.h>
 #include <linux/delay.h>

- #include <linux/fs_enet_pd.h>
 #include <linux/fs_uart_pd.h>
 #include <linux/fsl_devices.h>
 #include <linux/mii.h>
@@@ -39,6 -38,7 +38,6 @@@
 #include <asm/time.h>
 #include <asm/8xx_immap.h>
 #include <asm/cpm1.h>
-#include <asm/fs_pd.h>
 #include <asm/udbg.h>

 #include "mpc8xx.h"
diff --combined arch/powerpc/sysdev/fsl_soc.c
index e71b3ede147e,c11771542bec..528506f6e2b8
--- a/arch/powerpc/sysdev/fsl_soc.c
+++ b/arch/powerpc/sysdev/fsl_soc.c
@@@ -19,10 -19,10 +19,9 @@@
 #include <linux/device.h>
 #include <linux/platform_device.h>
 #include <linux/of.h>
-#include <linux/of_platform.h>
 #include <linux/phy.h>
 #include <linux/spi/spi.h>
 #include <linux/fsl_devices.h>
- #include <linux/fs_enet_pd.h>
 #include <linux/fs_uart_pd.h>
 #include <linux/reboot.h>

@@@ -36,8 -36,6 +35,6 @@@
 #include <asm/cpm2.h>
 #include <asm/fsl_hcalls.h>	/* For the Freescale hypervisor */

- extern void init_fcc_ioports(struct fs_platform_info*);
- extern void init_fec_ioports(struct fs_platform_info*);
 extern void init_smc_ioports(struct fs_uart_platform_info*);

 static phys_addr_t immrbase = -1;
diff --combined drivers/infiniband/hw/mlx4/main.c index 2d2cd17e02e6,1f8d0d2c5f17..529db874d67c --- a/drivers/infiniband/hw/mlx4/main.c +++ b/drivers/infiniband/hw/mlx4/main.c @@@ -82,6 -82,8 +82,8 @@@ static const char mlx4_ib_version[] static void do_slave_init(struct mlx4_ib_dev *ibdev, int slave, int do_init); static enum rdma_link_layer mlx4_ib_port_link_layer(struct ib_device *device, u32 port_num); + static int mlx4_ib_event(struct notifier_block *this, unsigned long event, + void *param);
static struct workqueue_struct *wq;
@@@ -125,14 -127,16 +127,16 @@@ static struct net_device *mlx4_ib_get_n u32 port_num) { struct mlx4_ib_dev *ibdev = to_mdev(device); - struct net_device *dev; + struct net_device *dev, *ret = NULL;
rcu_read_lock(); - dev = mlx4_get_protocol_dev(ibdev->dev, MLX4_PROT_ETH, port_num); + for_each_netdev_rcu(&init_net, dev) { + if (dev->dev.parent != ibdev->ib_dev.dev.parent || + dev->dev_port + 1 != port_num) + continue;
- if (dev) { if (mlx4_is_bonded(ibdev->dev)) { - struct net_device *upper = NULL; + struct net_device *upper;
upper = netdev_master_upper_dev_get_rcu(dev); if (upper) { @@@ -143,11 -147,14 +147,14 @@@ dev = active; } } + + dev_hold(dev); + ret = dev; + break; } - dev_hold(dev);
rcu_read_unlock(); - return dev; + return ret; }
static int mlx4_ib_update_gids_v1(struct gid_entry *gids, @@@ -254,7 -261,7 +261,7 @@@ static int mlx4_ib_add_gid(const struc int ret = 0; int hw_update = 0; int i; - struct gid_entry *gids = NULL; + struct gid_entry *gids; u16 vlan_id = 0xffff; u8 mac[ETH_ALEN];
@@@ -293,7 -300,8 +300,7 @@@ ret = -ENOMEM; } else { *context = port_gid_table->gids[free].ctx; - memcpy(&port_gid_table->gids[free].gid, - &attr->gid, sizeof(attr->gid)); + port_gid_table->gids[free].gid = attr->gid; port_gid_table->gids[free].gid_type = attr->gid_type; port_gid_table->gids[free].vlan_id = vlan_id; port_gid_table->gids[free].ctx->real_index = free; @@@ -344,7 -352,7 +351,7 @@@ static int mlx4_ib_del_gid(const struc struct mlx4_port_gid_table *port_gid_table; int ret = 0; int hw_update = 0; - struct gid_entry *gids = NULL; + struct gid_entry *gids;
if (!rdma_cap_roce_gid_table(attr->device, attr->port_num)) return -EINVAL; @@@ -430,8 -438,8 +437,8 @@@ static int mlx4_ib_query_device(struct struct ib_udata *uhw) { struct mlx4_ib_dev *dev = to_mdev(ibdev); - struct ib_smp *in_mad = NULL; - struct ib_smp *out_mad = NULL; + struct ib_smp *in_mad; + struct ib_smp *out_mad; int err; int have_ib_ports; struct mlx4_uverbs_ex_query_device cmd; @@@ -648,8 -656,8 +655,8 @@@ mlx4_ib_port_link_layer(struct ib_devic static int ib_link_query_port(struct ib_device *ibdev, u32 port, struct ib_port_attr *props, int netw_view) { - struct ib_smp *in_mad = NULL; - struct ib_smp *out_mad = NULL; + struct ib_smp *in_mad; + struct ib_smp *out_mad; int ext_active_speed; int mad_ifc_flags = MLX4_MAD_IFC_IGNORE_KEYS; int err = -ENOMEM; @@@ -826,8 -834,8 +833,8 @@@ static int mlx4_ib_query_port(struct ib int __mlx4_ib_query_gid(struct ib_device *ibdev, u32 port, int index, union ib_gid *gid, int netw_view) { - struct ib_smp *in_mad = NULL; - struct ib_smp *out_mad = NULL; + struct ib_smp *in_mad; + struct ib_smp *out_mad; int err = -ENOMEM; struct mlx4_ib_dev *dev = to_mdev(ibdev); int clear = 0; @@@ -891,8 -899,8 +898,8 @@@ static int mlx4_ib_query_sl2vl(struct i u64 *sl2vl_tbl) { union sl2vl_tbl_to_u64 sl2vl64; - struct ib_smp *in_mad = NULL; - struct ib_smp *out_mad = NULL; + struct ib_smp *in_mad; + struct ib_smp *out_mad; int mad_ifc_flags = MLX4_MAD_IFC_IGNORE_KEYS; int err = -ENOMEM; int jj; @@@ -951,8 -959,8 +958,8 @@@ static void mlx4_init_sl2vl_tbl(struct int __mlx4_ib_query_pkey(struct ib_device *ibdev, u32 port, u16 index, u16 *pkey, int netw_view) { - struct ib_smp *in_mad = NULL; - struct ib_smp *out_mad = NULL; + struct ib_smp *in_mad; + struct ib_smp *out_mad; int mad_ifc_flags = MLX4_MAD_IFC_IGNORE_KEYS; int err = -ENOMEM;
@@@ -1967,8 -1975,8 +1974,8 @@@ static int mlx4_ib_mcg_detach(struct ib
static int init_node_data(struct mlx4_ib_dev *dev) { - struct ib_smp *in_mad = NULL; - struct ib_smp *out_mad = NULL; + struct ib_smp *in_mad; + struct ib_smp *out_mad; int mad_ifc_flags = MLX4_MAD_IFC_IGNORE_KEYS; int err = -ENOMEM;
@@@ -2318,61 -2326,53 +2325,53 @@@ unlock mutex_unlock(&ibdev->qp1_proxy_lock[port - 1]); }
- static void mlx4_ib_scan_netdevs(struct mlx4_ib_dev *ibdev, - struct net_device *dev, - unsigned long event) + static void mlx4_ib_scan_netdev(struct mlx4_ib_dev *ibdev, + struct net_device *dev, + unsigned long event)
{ - struct mlx4_ib_iboe *iboe; - int update_qps_port = -1; - int port; + struct mlx4_ib_iboe *iboe = &ibdev->iboe;
ASSERT_RTNL();
- iboe = &ibdev->iboe; + if (dev->dev.parent != ibdev->ib_dev.dev.parent) + return;
spin_lock_bh(&iboe->lock); - mlx4_foreach_ib_transport_port(port, ibdev->dev) { - - iboe->netdevs[port - 1] = - mlx4_get_protocol_dev(ibdev->dev, MLX4_PROT_ETH, port);
- if (dev == iboe->netdevs[port - 1] && - (event == NETDEV_CHANGEADDR || event == NETDEV_REGISTER || - event == NETDEV_UP || event == NETDEV_CHANGE)) - update_qps_port = port; + iboe->netdevs[dev->dev_port] = event != NETDEV_UNREGISTER ? dev : NULL;
- if (dev == iboe->netdevs[port - 1] && - (event == NETDEV_UP || event == NETDEV_DOWN)) { - enum ib_port_state port_state; - struct ib_event ibev = { }; + if (event == NETDEV_UP || event == NETDEV_DOWN) { + enum ib_port_state port_state; + struct ib_event ibev = { };
- if (ib_get_cached_port_state(&ibdev->ib_dev, port, - &port_state)) - continue; + if (ib_get_cached_port_state(&ibdev->ib_dev, dev->dev_port + 1, + &port_state)) + goto iboe_out;
- if (event == NETDEV_UP && - (port_state != IB_PORT_ACTIVE || - iboe->last_port_state[port - 1] != IB_PORT_DOWN)) - continue; - if (event == NETDEV_DOWN && - (port_state != IB_PORT_DOWN || - iboe->last_port_state[port - 1] != IB_PORT_ACTIVE)) - continue; - iboe->last_port_state[port - 1] = port_state; - - ibev.device = &ibdev->ib_dev; - ibev.element.port_num = port; - ibev.event = event == NETDEV_UP ? IB_EVENT_PORT_ACTIVE : - IB_EVENT_PORT_ERR; - ib_dispatch_event(&ibev); - } + if (event == NETDEV_UP && + (port_state != IB_PORT_ACTIVE || + iboe->last_port_state[dev->dev_port] != IB_PORT_DOWN)) + goto iboe_out; + if (event == NETDEV_DOWN && + (port_state != IB_PORT_DOWN || + iboe->last_port_state[dev->dev_port] != IB_PORT_ACTIVE)) + goto iboe_out; + iboe->last_port_state[dev->dev_port] = port_state;
+ ibev.device = &ibdev->ib_dev; + ibev.element.port_num = dev->dev_port + 1; + ibev.event = event == NETDEV_UP ? IB_EVENT_PORT_ACTIVE : + IB_EVENT_PORT_ERR; + ib_dispatch_event(&ibev); } + + iboe_out: spin_unlock_bh(&iboe->lock);
- if (update_qps_port > 0) - mlx4_ib_update_qps(ibdev, dev, update_qps_port); + if (event == NETDEV_CHANGEADDR || event == NETDEV_REGISTER || + event == NETDEV_UP || event == NETDEV_CHANGE) + mlx4_ib_update_qps(ibdev, dev, dev->dev_port + 1); }
static int mlx4_ib_netdev_event(struct notifier_block *this, @@@ -2385,7 -2385,7 +2384,7 @@@ return NOTIFY_DONE;
ibdev = container_of(this, struct mlx4_ib_dev, iboe.nb); - mlx4_ib_scan_netdevs(ibdev, dev, event); + mlx4_ib_scan_netdev(ibdev, dev, event);
return NOTIFY_DONE; } @@@ -2609,8 -2609,11 +2608,11 @@@ static const struct ib_device_ops mlx4_ .destroy_flow = mlx4_ib_destroy_flow, };
- static void *mlx4_ib_add(struct mlx4_dev *dev) + static int mlx4_ib_probe(struct auxiliary_device *adev, + const struct auxiliary_device_id *id) { + struct mlx4_adev *madev = container_of(adev, struct mlx4_adev, adev); + struct mlx4_dev *dev = madev->mdev; struct mlx4_ib_dev *ibdev; int num_ports = 0; int i, j; @@@ -2620,7 -2623,7 +2622,7 @@@ int num_req_counters; int allocated; u32 counter_index; - struct counter_index *new_counter_index = NULL; + struct counter_index *new_counter_index;
pr_info_once("%s", mlx4_ib_version);
@@@ -2630,27 -2633,31 +2632,31 @@@
/* No point in registering a device with no ports... */ if (num_ports == 0) - return NULL; + return -ENODEV;
ibdev = ib_alloc_device(mlx4_ib_dev, ib_dev); if (!ibdev) { dev_err(&dev->persist->pdev->dev, "Device struct alloc failed\n"); - return NULL; + return -ENOMEM; }
iboe = &ibdev->iboe;
- if (mlx4_pd_alloc(dev, &ibdev->priv_pdn)) + err = mlx4_pd_alloc(dev, &ibdev->priv_pdn); + if (err) goto err_dealloc;
- if (mlx4_uar_alloc(dev, &ibdev->priv_uar)) + err = mlx4_uar_alloc(dev, &ibdev->priv_uar); + if (err) goto err_pd;
ibdev->uar_map = ioremap((phys_addr_t) ibdev->priv_uar.pfn << PAGE_SHIFT, PAGE_SIZE); - if (!ibdev->uar_map) + if (!ibdev->uar_map) { + err = -ENOMEM; goto err_uar; + } MLX4_INIT_DOORBELL_LOCK(&ibdev->uar_lock);
ibdev->dev = dev; @@@ -2694,7 -2701,8 +2700,8 @@@
spin_lock_init(&iboe->lock);
- if (init_node_data(ibdev)) + err = init_node_data(ibdev); + if (err) goto err_map; mlx4_init_sl2vl_tbl(ibdev);
@@@ -2726,6 -2734,7 +2733,7 @@@ new_counter_index = kmalloc(sizeof(*new_counter_index), GFP_KERNEL); if (!new_counter_index) { + err = -ENOMEM; if (allocated) mlx4_counter_free(ibdev->dev, counter_index); goto err_counter; @@@ -2743,8 -2752,10 +2751,10 @@@ new_counter_index = kmalloc(sizeof(struct counter_index), GFP_KERNEL); - if (!new_counter_index) + if (!new_counter_index) { + err = -ENOMEM; goto err_counter; + } new_counter_index->index = counter_index; new_counter_index->allocated = 0; list_add_tail(&new_counter_index->list, @@@ -2773,8 -2784,10 +2783,10 @@@
ibdev->ib_uc_qpns_bitmap = bitmap_alloc(ibdev->steer_qpn_count, GFP_KERNEL); - if (!ibdev->ib_uc_qpns_bitmap) + if (!ibdev->ib_uc_qpns_bitmap) { + err = -ENOMEM; goto err_steer_qp_release; + }
if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_DMFS_IPOIB) { bitmap_zero(ibdev->ib_uc_qpns_bitmap, @@@ -2794,17 -2807,21 +2806,21 @@@ for (j = 1; j <= ibdev->dev->caps.num_ports; j++) atomic64_set(&iboe->mac[j - 1], ibdev->dev->caps.def_mac[j]);
- if (mlx4_ib_alloc_diag_counters(ibdev)) + err = mlx4_ib_alloc_diag_counters(ibdev); + if (err) goto err_steer_free_bitmap;
- if (ib_register_device(&ibdev->ib_dev, "mlx4_%d", - &dev->persist->pdev->dev)) + err = ib_register_device(&ibdev->ib_dev, "mlx4_%d", + &dev->persist->pdev->dev); + if (err) goto err_diag_counters;
- if (mlx4_ib_mad_init(ibdev)) + err = mlx4_ib_mad_init(ibdev); + if (err) goto err_reg;
- if (mlx4_ib_init_sriov(ibdev)) + err = mlx4_ib_init_sriov(ibdev); + if (err) goto err_mad;
if (!iboe->nb.notifier_call) { @@@ -2838,7 -2855,14 +2854,14 @@@ do_slave_init(ibdev, j, 1); } } - return ibdev; + + /* register mlx4 core notifier */ + ibdev->mlx_nb.notifier_call = mlx4_ib_event; + err = mlx4_register_event_notifier(dev, &ibdev->mlx_nb); + WARN(err, "failed to register mlx4 event notifier (%d)", err); + + auxiliary_set_drvdata(adev, ibdev); + return 0;
err_notif: if (ibdev->iboe.nb.notifier_call) { @@@ -2882,7 -2906,7 +2905,7 @@@ err_pd err_dealloc: ib_dealloc_device(&ibdev->ib_dev);
- return NULL; + return err; }
int mlx4_ib_steer_qp_alloc(struct mlx4_ib_dev *dev, int count, int *qpn) @@@ -2922,7 -2946,7 +2945,7 @@@ int mlx4_ib_steer_qp_reg(struct mlx4_ib { int err; size_t flow_size; - struct ib_flow_attr *flow = NULL; + struct ib_flow_attr *flow; struct ib_flow_spec_ib *ib_spec;
if (is_attach) { @@@ -2942,19 -2966,23 +2965,23 @@@
err = __mlx4_ib_create_flow(&mqp->ibqp, flow, MLX4_DOMAIN_NIC, MLX4_FS_REGULAR, &mqp->reg_id); - } else { - err = __mlx4_ib_destroy_flow(mdev->dev, mqp->reg_id); + kfree(flow); + return err; } - kfree(flow); - return err; + + return __mlx4_ib_destroy_flow(mdev->dev, mqp->reg_id); }
- static void mlx4_ib_remove(struct mlx4_dev *dev, void *ibdev_ptr) + static void mlx4_ib_remove(struct auxiliary_device *adev) { - struct mlx4_ib_dev *ibdev = ibdev_ptr; + struct mlx4_adev *madev = container_of(adev, struct mlx4_adev, adev); + struct mlx4_dev *dev = madev->mdev; + struct mlx4_ib_dev *ibdev = auxiliary_get_drvdata(adev); int p; int i;
+ mlx4_unregister_event_notifier(dev, &ibdev->mlx_nb); + mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB) devlink_port_type_clear(mlx4_get_devlink_port(dev, i)); ibdev->ib_active = false; @@@ -2991,7 -3019,7 +3018,7 @@@
static void do_slave_init(struct mlx4_ib_dev *ibdev, int slave, int do_init) { - struct mlx4_ib_demux_work **dm = NULL; + struct mlx4_ib_demux_work **dm; struct mlx4_dev *dev = ibdev->dev; int i; unsigned long flags; @@@ -3175,11 -3203,13 +3202,13 @@@ void mlx4_sched_ib_sl2vl_update_work(st } }
- static void mlx4_ib_event(struct mlx4_dev *dev, void *ibdev_ptr, - enum mlx4_dev_event event, unsigned long param) + static int mlx4_ib_event(struct notifier_block *this, unsigned long event, + void *param) { + struct mlx4_ib_dev *ibdev = + container_of(this, struct mlx4_ib_dev, mlx_nb); + struct mlx4_dev *dev = ibdev->dev; struct ib_event ibev; - struct mlx4_ib_dev *ibdev = to_mdev((struct ib_device *) ibdev_ptr); struct mlx4_eqe *eqe = NULL; struct ib_event_work *ew; int p = 0; @@@ -3189,22 -3219,28 +3218,28 @@@ (event == MLX4_DEV_EVENT_PORT_DOWN))) { ew = kmalloc(sizeof(*ew), GFP_ATOMIC); if (!ew) - return; + return NOTIFY_DONE; INIT_WORK(&ew->work, handle_bonded_port_state_event); ew->ib_dev = ibdev; queue_work(wq, &ew->work); - return; + return NOTIFY_DONE; }
- if (event == MLX4_DEV_EVENT_PORT_MGMT_CHANGE) + switch (event) { + case MLX4_DEV_EVENT_CATASTROPHIC_ERROR: + break; + case MLX4_DEV_EVENT_PORT_MGMT_CHANGE: eqe = (struct mlx4_eqe *)param; - else - p = (int) param; + break; + default: + p = *(int *)param; + break; + }
switch (event) { case MLX4_DEV_EVENT_PORT_UP: if (p > ibdev->num_ports) - return; + return NOTIFY_DONE; if (!mlx4_is_slave(dev) && rdma_port_get_link_layer(&ibdev->ib_dev, p) == IB_LINK_LAYER_INFINIBAND) { @@@ -3219,7 -3255,7 +3254,7 @@@
case MLX4_DEV_EVENT_PORT_DOWN: if (p > ibdev->num_ports) - return; + return NOTIFY_DONE; ibev.event = IB_EVENT_PORT_ERR; break;
@@@ -3232,7 -3268,7 +3267,7 @@@ case MLX4_DEV_EVENT_PORT_MGMT_CHANGE: ew = kmalloc(sizeof *ew, GFP_ATOMIC); if (!ew) - return; + return NOTIFY_DONE;
INIT_WORK(&ew->work, handle_port_mgmt_change_event); memcpy(&ew->ib_eqe, eqe, sizeof *eqe); @@@ -3242,7 -3278,7 +3277,7 @@@ queue_work(wq, &ew->work); else handle_port_mgmt_change_event(&ew->work); - return; + return NOTIFY_DONE;
case MLX4_DEV_EVENT_SLAVE_INIT: /* here, p is the slave id */ @@@ -3258,7 -3294,7 +3293,7 @@@ 1); } } - return; + return NOTIFY_DONE;
case MLX4_DEV_EVENT_SLAVE_SHUTDOWN: if (mlx4_is_master(dev)) { @@@ -3274,22 -3310,33 +3309,33 @@@ } /* here, p is the slave id */ do_slave_init(ibdev, p, 0); - return; + return NOTIFY_DONE;
default: - return; + return NOTIFY_DONE; }
- ibev.device = ibdev_ptr; + ibev.device = &ibdev->ib_dev; ibev.element.port_num = mlx4_is_bonded(ibdev->dev) ? 1 : (u8)p;
ib_dispatch_event(&ibev); + return NOTIFY_DONE; }
- static struct mlx4_interface mlx4_ib_interface = { - .add = mlx4_ib_add, - .remove = mlx4_ib_remove, - .event = mlx4_ib_event, + static const struct auxiliary_device_id mlx4_ib_id_table[] = { + { .name = MLX4_ADEV_NAME ".ib" }, + {}, + }; + + MODULE_DEVICE_TABLE(auxiliary, mlx4_ib_id_table); + + static struct mlx4_adrv mlx4_ib_adrv = { + .adrv = { + .name = "ib", + .probe = mlx4_ib_probe, + .remove = mlx4_ib_remove, + .id_table = mlx4_ib_id_table, + }, .protocol = MLX4_PROT_IB_IPV6, .flags = MLX4_INTFF_BONDING }; @@@ -3314,7 -3361,7 +3360,7 @@@ static int __init mlx4_ib_init(void if (err) goto clean_cm;
- err = mlx4_register_interface(&mlx4_ib_interface); + err = mlx4_register_auxiliary_driver(&mlx4_ib_adrv); if (err) goto clean_mcg;
@@@ -3336,7 -3383,7 +3382,7 @@@ clean_qp_event
static void __exit mlx4_ib_cleanup(void) { - mlx4_unregister_interface(&mlx4_ib_interface); + mlx4_unregister_auxiliary_driver(&mlx4_ib_adrv); mlx4_ib_mcg_destroy(); mlx4_ib_cm_destroy(); mlx4_ib_qp_event_cleanup(); diff --combined drivers/leds/trigger/ledtrig-netdev.c index 03c58e50cc44,42f758880ef8..cc3261543a5e --- a/drivers/leds/trigger/ledtrig-netdev.c +++ b/drivers/leds/trigger/ledtrig-netdev.c @@@ -406,15 -406,15 +406,15 @@@ static ssize_t interval_store(struct de
static DEVICE_ATTR_RW(interval);

-static ssize_t hw_control_show(struct device *dev,
-			       struct device_attribute *attr, char *buf)
+static ssize_t offloaded_show(struct device *dev,
+			      struct device_attribute *attr, char *buf)
 {
 	struct led_netdev_data *trigger_data = led_trigger_get_drvdata(dev);

 	return sprintf(buf, "%d\n", trigger_data->hw_control);
 }

-static DEVICE_ATTR_RO(hw_control);
+static DEVICE_ATTR_RO(offloaded);

 static struct attribute *netdev_trig_attrs[] = {
 	&dev_attr_device_name.attr,
@@@ -427,7 -427,7 +427,7 @@@
 	&dev_attr_rx.attr,
 	&dev_attr_tx.attr,
 	&dev_attr_interval.attr,
- 	&dev_attr_hw_control.attr,
+ 	&dev_attr_offloaded.attr,
 	NULL
 };
 ATTRIBUTE_GROUPS(netdev_trig);
@@@ -564,15 -564,17 +564,17 @@@ static int netdev_trig_activate(struct
 	/* Check if hw control is active by default on the LED.
 	 * Init already enabled mode in hw control.
 	 */
- 	if (supports_hw_control(led_cdev) &&
- 	    !led_cdev->hw_control_get(led_cdev, &mode)) {
+ 	if (supports_hw_control(led_cdev)) {
 		dev = led_cdev->hw_control_get_device(led_cdev);
 		if (dev) {
 			const char *name = dev_name(dev);

 			set_device_name(trigger_data, name, strlen(name));
 			trigger_data->hw_control = true;
- 			trigger_data->mode = mode;
+
+ 			rc = led_cdev->hw_control_get(led_cdev, &mode);
+ 			if (!rc)
+ 				trigger_data->mode = mode;
 		}
 	}
@@@ -593,6 -595,8 +595,8 @@@ static void netdev_trig_deactivate(stru
cancel_delayed_work_sync(&trigger_data->work);
+ led_set_brightness(led_cdev, LED_OFF); + dev_put(trigger_data->net_dev);
kfree(trigger_data); diff --combined drivers/net/dsa/mt7530.c index b8bb9f3b3609,8fbda739c1b3..035a34b50f31 --- a/drivers/net/dsa/mt7530.c +++ b/drivers/net/dsa/mt7530.c @@@ -1006,10 -1006,6 +1006,10 @@@ mt753x_trap_frames(struct mt7530_priv * mt7530_rmw(priv, MT753X_BPC, MT753X_BPDU_PORT_FW_MASK, MT753X_BPDU_CPU_ONLY);
+ /* Trap 802.1X PAE frames to the CPU port(s) */ + mt7530_rmw(priv, MT753X_BPC, MT753X_PAE_PORT_FW_MASK, + MT753X_PAE_PORT_FW(MT753X_BPDU_CPU_ONLY)); + /* Trap LLDP frames with :0E MAC DA to the CPU port(s) */ mt7530_rmw(priv, MT753X_RGAC2, MT753X_R0E_PORT_FW_MASK, MT753X_R0E_PORT_FW(MT753X_BPDU_CPU_ONLY)); @@@ -2953,12 -2949,6 +2953,6 @@@ static void mt753x_phylink_get_caps(str config->mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE | MAC_10 | MAC_100 | MAC_1000FD;
- /* This driver does not make use of the speed, duplex, pause or the - * advertisement in its mac_config, so it is safe to mark this driver - * as non-legacy. - */ - config->legacy_pre_march2020 = false; - priv->info->mac_port_get_caps(ds, port, config); }
diff --combined drivers/net/dsa/ocelot/felix_vsc9959.c index f16daa9b1765,4a6e52929d25..3c5509e75a54 --- a/drivers/net/dsa/ocelot/felix_vsc9959.c +++ b/drivers/net/dsa/ocelot/felix_vsc9959.c @@@ -16,6 -16,7 +16,7 @@@ #include <net/pkt_sched.h> #include <linux/iopoll.h> #include <linux/mdio.h> + #include <linux/of.h> #include <linux/pci.h> #include <linux/time.h> #include "felix.h" @@@ -1069,9 -1070,6 +1070,9 @@@ static u64 vsc9959_tas_remaining_gate_l if (gate_len_ns == U64_MAX) return U64_MAX;
+ if (gate_len_ns < VSC9959_TAS_MIN_GATE_LEN_NS) + return 0; + return (gate_len_ns - VSC9959_TAS_MIN_GATE_LEN_NS) * PSEC_PER_NSEC; }
@@@ -1748,10 -1746,10 +1749,10 @@@ static int vsc9959_stream_identify(stru struct flow_dissector *dissector = rule->match.dissector;
if (dissector->used_keys & - ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) | - BIT(FLOW_DISSECTOR_KEY_BASIC) | - BIT(FLOW_DISSECTOR_KEY_VLAN) | - BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS))) + ~(BIT_ULL(FLOW_DISSECTOR_KEY_CONTROL) | + BIT_ULL(FLOW_DISSECTOR_KEY_BASIC) | + BIT_ULL(FLOW_DISSECTOR_KEY_VLAN) | + BIT_ULL(FLOW_DISSECTOR_KEY_ETH_ADDRS))) return -EOPNOTSUPP;
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) { diff --combined drivers/net/ethernet/broadcom/bgmac.c index 52ee3751187a,7c19ba58e9cc..448a1b90de5e --- a/drivers/net/ethernet/broadcom/bgmac.c +++ b/drivers/net/ethernet/broadcom/bgmac.c @@@ -1448,9 -1448,9 +1448,9 @@@ int bgmac_phy_connect_direct(struct bgm int err;
phy_dev = fixed_phy_register(PHY_POLL, &fphy_status, NULL); - if (!phy_dev || IS_ERR(phy_dev)) { + if (IS_ERR(phy_dev)) { dev_err(bgmac->dev, "Failed to register fixed PHY device\n"); - return -ENODEV; + return PTR_ERR(phy_dev); }
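This hunk, and the identical one in bcmgenet_mii_pd_init() further down, track an API tightening: fixed_phy_register() now reports failure only through ERR_PTR(), never NULL, so half of the combined check was dead and the caller can propagate the real error instead of a blanket -ENODEV. The resulting caller shape, distilled:

    phy_dev = fixed_phy_register(PHY_POLL, &fphy_status, NULL);
    if (IS_ERR(phy_dev))                /* NULL is no longer possible */
            return PTR_ERR(phy_dev);    /* keep the specific errno */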
err = phy_connect_direct(bgmac->net_dev, phy_dev, bgmac_adjust_link, diff --combined drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h index a2d3a80236c4,f178ed9899a9..3ae8e8af8ab3 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h @@@ -3721,60 -3721,6 +3721,60 @@@ struct hwrm_func_backing_store_qcaps_v2 u8 valid; };
+/* hwrm_func_dbr_pacing_qcfg_input (size:128b/16B) */ +struct hwrm_func_dbr_pacing_qcfg_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; +}; + +/* hwrm_func_dbr_pacing_qcfg_output (size:512b/64B) */ +struct hwrm_func_dbr_pacing_qcfg_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 flags; +#define FUNC_DBR_PACING_QCFG_RESP_FLAGS_DBR_NQ_EVENT_ENABLED 0x1UL + u8 unused_0[7]; + __le32 dbr_stat_db_fifo_reg; +#define FUNC_DBR_PACING_QCFG_RESP_DBR_STAT_DB_FIFO_REG_ADDR_SPACE_MASK 0x3UL +#define FUNC_DBR_PACING_QCFG_RESP_DBR_STAT_DB_FIFO_REG_ADDR_SPACE_SFT 0 +#define FUNC_DBR_PACING_QCFG_RESP_DBR_STAT_DB_FIFO_REG_ADDR_SPACE_PCIE_CFG 0x0UL +#define FUNC_DBR_PACING_QCFG_RESP_DBR_STAT_DB_FIFO_REG_ADDR_SPACE_GRC 0x1UL +#define FUNC_DBR_PACING_QCFG_RESP_DBR_STAT_DB_FIFO_REG_ADDR_SPACE_BAR0 0x2UL +#define FUNC_DBR_PACING_QCFG_RESP_DBR_STAT_DB_FIFO_REG_ADDR_SPACE_BAR1 0x3UL +#define FUNC_DBR_PACING_QCFG_RESP_DBR_STAT_DB_FIFO_REG_ADDR_SPACE_LAST \ + FUNC_DBR_PACING_QCFG_RESP_DBR_STAT_DB_FIFO_REG_ADDR_SPACE_BAR1 +#define FUNC_DBR_PACING_QCFG_RESP_DBR_STAT_DB_FIFO_REG_ADDR_MASK 0xfffffffcUL +#define FUNC_DBR_PACING_QCFG_RESP_DBR_STAT_DB_FIFO_REG_ADDR_SFT 2 + __le32 dbr_stat_db_fifo_reg_watermark_mask; + u8 dbr_stat_db_fifo_reg_watermark_shift; + u8 unused_1[3]; + __le32 dbr_stat_db_fifo_reg_fifo_room_mask; + u8 dbr_stat_db_fifo_reg_fifo_room_shift; + u8 unused_2[3]; + __le32 dbr_throttling_aeq_arm_reg; +#define FUNC_DBR_PACING_QCFG_RESP_DBR_THROTTLING_AEQ_ARM_REG_ADDR_SPACE_MASK 0x3UL +#define FUNC_DBR_PACING_QCFG_RESP_DBR_THROTTLING_AEQ_ARM_REG_ADDR_SPACE_SFT 0 +#define FUNC_DBR_PACING_QCFG_RESP_DBR_THROTTLING_AEQ_ARM_REG_ADDR_SPACE_PCIE_CFG 0x0UL +#define FUNC_DBR_PACING_QCFG_RESP_DBR_THROTTLING_AEQ_ARM_REG_ADDR_SPACE_GRC 0x1UL +#define FUNC_DBR_PACING_QCFG_RESP_DBR_THROTTLING_AEQ_ARM_REG_ADDR_SPACE_BAR0 0x2UL +#define FUNC_DBR_PACING_QCFG_RESP_DBR_THROTTLING_AEQ_ARM_REG_ADDR_SPACE_BAR1 0x3UL +#define FUNC_DBR_PACING_QCFG_RESP_DBR_THROTTLING_AEQ_ARM_REG_ADDR_SPACE_LAST \ + FUNC_DBR_PACING_QCFG_RESP_DBR_THROTTLING_AEQ_ARM_REG_ADDR_SPACE_BAR1 +#define FUNC_DBR_PACING_QCFG_RESP_DBR_THROTTLING_AEQ_ARM_REG_ADDR_MASK 0xfffffffcUL +#define FUNC_DBR_PACING_QCFG_RESP_DBR_THROTTLING_AEQ_ARM_REG_ADDR_SFT 2 + u8 dbr_throttling_aeq_arm_reg_val; + u8 unused_3[7]; + __le32 primary_nq_id; + __le32 pacing_threshold; + u8 unused_4[7]; + u8 valid; +}; + /* hwrm_func_drv_if_change_input (size:192b/24B) */ struct hwrm_func_drv_if_change_input { __le16 req_type; @@@ -5793,286 -5739,48 +5793,48 @@@ struct hwrm_queue_cos2bw_qcfg_output #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_TSA_ASSIGN_RESERVED_LAST 0xffUL u8 queue_id0_pri_lvl; u8 queue_id0_bw_weight; - u8 queue_id1; - __le32 queue_id1_min_bw; - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_MASK 0xfffffffUL - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_SFT 0 - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_SCALE 0x10000000UL - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_SCALE_BITS (0x0UL << 28) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_SCALE_BYTES (0x1UL << 28) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_SCALE_BYTES - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_SFT 29 - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) - #define 
QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_INVALID - __le32 queue_id1_max_bw; - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_MASK 0xfffffffUL - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_SFT 0 - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_SCALE 0x10000000UL - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_SCALE_BITS (0x0UL << 28) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_SCALE_BYTES (0x1UL << 28) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_SCALE_BYTES - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_SFT 29 - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_INVALID - u8 queue_id1_tsa_assign; - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_TSA_ASSIGN_SP 0x0UL - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_TSA_ASSIGN_ETS 0x1UL - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_TSA_ASSIGN_RESERVED_FIRST 0x2UL - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_TSA_ASSIGN_RESERVED_LAST 0xffUL - u8 queue_id1_pri_lvl; - u8 queue_id1_bw_weight; - u8 queue_id2; - __le32 queue_id2_min_bw; - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_MASK 0xfffffffUL - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_SFT 0 - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_SCALE 0x10000000UL - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_SCALE_BITS (0x0UL << 28) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_SCALE_BYTES (0x1UL << 28) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_SCALE_BYTES - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_SFT 29 - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_INVALID - __le32 queue_id2_max_bw; - #define 
QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_MASK 0xfffffffUL - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_SFT 0 - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_SCALE 0x10000000UL - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_SCALE_BITS (0x0UL << 28) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_SCALE_BYTES (0x1UL << 28) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_SCALE_BYTES - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_SFT 29 - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_INVALID - u8 queue_id2_tsa_assign; - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_TSA_ASSIGN_SP 0x0UL - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_TSA_ASSIGN_ETS 0x1UL - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_TSA_ASSIGN_RESERVED_FIRST 0x2UL - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_TSA_ASSIGN_RESERVED_LAST 0xffUL - u8 queue_id2_pri_lvl; - u8 queue_id2_bw_weight; - u8 queue_id3; - __le32 queue_id3_min_bw; - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_MASK 0xfffffffUL - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_SFT 0 - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_SCALE 0x10000000UL - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_SCALE_BITS (0x0UL << 28) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_SCALE_BYTES (0x1UL << 28) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_SCALE_BYTES - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_SFT 29 - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_INVALID - __le32 queue_id3_max_bw; - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_MASK 0xfffffffUL - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_SFT 0 - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_SCALE 0x10000000UL - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_SCALE_BITS (0x0UL << 28) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_SCALE_BYTES (0x1UL << 28) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_SCALE_BYTES - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL - #define 
QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_SFT 29 - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_INVALID - u8 queue_id3_tsa_assign; - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_TSA_ASSIGN_SP 0x0UL - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_TSA_ASSIGN_ETS 0x1UL - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_TSA_ASSIGN_RESERVED_FIRST 0x2UL - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_TSA_ASSIGN_RESERVED_LAST 0xffUL - u8 queue_id3_pri_lvl; - u8 queue_id3_bw_weight; - u8 queue_id4; - __le32 queue_id4_min_bw; - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_MASK 0xfffffffUL - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_SFT 0 - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_SCALE 0x10000000UL - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_SCALE_BITS (0x0UL << 28) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_SCALE_BYTES (0x1UL << 28) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_SCALE_BYTES - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_SFT 29 - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_INVALID - __le32 queue_id4_max_bw; - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_MASK 0xfffffffUL - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_SFT 0 - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_SCALE 0x10000000UL - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_SCALE_BITS (0x0UL << 28) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_SCALE_BYTES (0x1UL << 28) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_SCALE_BYTES - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_SFT 29 - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) - #define 
QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_INVALID - u8 queue_id4_tsa_assign; - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_TSA_ASSIGN_SP 0x0UL - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_TSA_ASSIGN_ETS 0x1UL - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_TSA_ASSIGN_RESERVED_FIRST 0x2UL - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_TSA_ASSIGN_RESERVED_LAST 0xffUL - u8 queue_id4_pri_lvl; - u8 queue_id4_bw_weight; - u8 queue_id5; - __le32 queue_id5_min_bw; - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_MASK 0xfffffffUL - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_SFT 0 - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_SCALE 0x10000000UL - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_SCALE_BITS (0x0UL << 28) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_SCALE_BYTES (0x1UL << 28) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_SCALE_BYTES - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_SFT 29 - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_INVALID - __le32 queue_id5_max_bw; - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_MASK 0xfffffffUL - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_SFT 0 - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_SCALE 0x10000000UL - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_SCALE_BITS (0x0UL << 28) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_SCALE_BYTES (0x1UL << 28) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_SCALE_BYTES - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_SFT 29 - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_INVALID - u8 queue_id5_tsa_assign; - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_TSA_ASSIGN_SP 0x0UL - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_TSA_ASSIGN_ETS 0x1UL - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_TSA_ASSIGN_RESERVED_FIRST 0x2UL - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_TSA_ASSIGN_RESERVED_LAST 0xffUL - u8 queue_id5_pri_lvl; - u8 queue_id5_bw_weight; - u8 queue_id6; - __le32 queue_id6_min_bw; - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_MASK 0xfffffffUL - 
#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_SFT 0 - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_SCALE 0x10000000UL - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_SCALE_BITS (0x0UL << 28) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_SCALE_BYTES (0x1UL << 28) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_SCALE_BYTES - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_SFT 29 - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_INVALID - __le32 queue_id6_max_bw; - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_MASK 0xfffffffUL - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_SFT 0 - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_SCALE 0x10000000UL - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_SCALE_BITS (0x0UL << 28) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_SCALE_BYTES (0x1UL << 28) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_SCALE_BYTES - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_SFT 29 - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_INVALID - u8 queue_id6_tsa_assign; - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_TSA_ASSIGN_SP 0x0UL - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_TSA_ASSIGN_ETS 0x1UL - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_TSA_ASSIGN_RESERVED_FIRST 0x2UL - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_TSA_ASSIGN_RESERVED_LAST 0xffUL - u8 queue_id6_pri_lvl; - u8 queue_id6_bw_weight; - u8 queue_id7; - __le32 queue_id7_min_bw; - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_MASK 0xfffffffUL - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_SFT 0 - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_SCALE 0x10000000UL - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_SCALE_BITS (0x0UL << 28) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_SCALE_BYTES (0x1UL << 28) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_SCALE_BYTES - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_SFT 29 - #define 
QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_INVALID - __le32 queue_id7_max_bw; - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_MASK 0xfffffffUL - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_SFT 0 - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_SCALE 0x10000000UL - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_SCALE_BITS (0x0UL << 28) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_SCALE_BYTES (0x1UL << 28) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_SCALE_BYTES - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_SFT 29 - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_INVALID - u8 queue_id7_tsa_assign; - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_TSA_ASSIGN_SP 0x0UL - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_TSA_ASSIGN_ETS 0x1UL - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_TSA_ASSIGN_RESERVED_FIRST 0x2UL - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_TSA_ASSIGN_RESERVED_LAST 0xffUL - u8 queue_id7_pri_lvl; - u8 queue_id7_bw_weight; + struct { + u8 queue_id; + __le32 queue_id_min_bw; + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MIN_BW_BW_VALUE_MASK 0xfffffffUL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MIN_BW_BW_VALUE_SFT 0 + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MIN_BW_SCALE 0x10000000UL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MIN_BW_SCALE_BITS (0x0UL << 28) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MIN_BW_SCALE_BYTES (0x1UL << 28) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MIN_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MIN_BW_SCALE_BYTES + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MIN_BW_BW_VALUE_UNIT_SFT 29 + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MIN_BW_BW_VALUE_UNIT_LAST 
QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MIN_BW_BW_VALUE_UNIT_INVALID + __le32 queue_id_max_bw; + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MAX_BW_BW_VALUE_MASK 0xfffffffUL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MAX_BW_BW_VALUE_SFT 0 + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MAX_BW_SCALE 0x10000000UL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MAX_BW_SCALE_BITS (0x0UL << 28) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MAX_BW_SCALE_BYTES (0x1UL << 28) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MAX_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MAX_BW_SCALE_BYTES + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MAX_BW_BW_VALUE_UNIT_SFT 29 + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MAX_BW_BW_VALUE_UNIT_INVALID + u8 queue_id_tsa_assign; + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_TSA_ASSIGN_SP 0x0UL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_TSA_ASSIGN_ETS 0x1UL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_TSA_ASSIGN_RESERVED_FIRST 0x2UL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_TSA_ASSIGN_RESERVED_LAST 0xffUL + u8 queue_id_pri_lvl; + u8 queue_id_bw_weight; + } __packed cfg[7]; u8 unused_2[4]; u8 valid; }; @@@ -6136,286 -5844,48 +5898,48 @@@ struct hwrm_queue_cos2bw_cfg_input #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_TSA_ASSIGN_RESERVED_LAST 0xffUL u8 queue_id0_pri_lvl; u8 queue_id0_bw_weight; - u8 queue_id1; - __le32 queue_id1_min_bw; - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_MASK 0xfffffffUL - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_SFT 0 - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_SCALE 0x10000000UL - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_SCALE_BITS (0x0UL << 28) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_SCALE_BYTES (0x1UL << 28) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_SCALE_BYTES - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_SFT 29 - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_INVALID - __le32 queue_id1_max_bw; - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_MASK 0xfffffffUL - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_SFT 0 - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_SCALE 0x10000000UL - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_SCALE_BITS (0x0UL << 28) - #define 
QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_SCALE_BYTES (0x1UL << 28) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_SCALE_BYTES - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_SFT 29 - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_INVALID - u8 queue_id1_tsa_assign; - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_TSA_ASSIGN_SP 0x0UL - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_TSA_ASSIGN_ETS 0x1UL - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_TSA_ASSIGN_RESERVED_FIRST 0x2UL - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_TSA_ASSIGN_RESERVED_LAST 0xffUL - u8 queue_id1_pri_lvl; - u8 queue_id1_bw_weight; - u8 queue_id2; - __le32 queue_id2_min_bw; - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_MASK 0xfffffffUL - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_SFT 0 - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_SCALE 0x10000000UL - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_SCALE_BITS (0x0UL << 28) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_SCALE_BYTES (0x1UL << 28) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_SCALE_BYTES - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_SFT 29 - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_INVALID - __le32 queue_id2_max_bw; - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_MASK 0xfffffffUL - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_SFT 0 - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_SCALE 0x10000000UL - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_SCALE_BITS (0x0UL << 28) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_SCALE_BYTES (0x1UL << 28) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_SCALE_BYTES - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_SFT 29 - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) - #define 
QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_INVALID - u8 queue_id2_tsa_assign; - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_TSA_ASSIGN_SP 0x0UL - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_TSA_ASSIGN_ETS 0x1UL - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_TSA_ASSIGN_RESERVED_FIRST 0x2UL - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_TSA_ASSIGN_RESERVED_LAST 0xffUL - u8 queue_id2_pri_lvl; - u8 queue_id2_bw_weight; - u8 queue_id3; - __le32 queue_id3_min_bw; - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_MASK 0xfffffffUL - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_SFT 0 - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_SCALE 0x10000000UL - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_SCALE_BITS (0x0UL << 28) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_SCALE_BYTES (0x1UL << 28) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_SCALE_BYTES - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_SFT 29 - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_INVALID - __le32 queue_id3_max_bw; - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_MASK 0xfffffffUL - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_SFT 0 - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_SCALE 0x10000000UL - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_SCALE_BITS (0x0UL << 28) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_SCALE_BYTES (0x1UL << 28) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_SCALE_BYTES - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_SFT 29 - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_INVALID - u8 queue_id3_tsa_assign; - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_TSA_ASSIGN_SP 0x0UL - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_TSA_ASSIGN_ETS 0x1UL - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_TSA_ASSIGN_RESERVED_FIRST 0x2UL - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_TSA_ASSIGN_RESERVED_LAST 0xffUL - u8 queue_id3_pri_lvl; - u8 queue_id3_bw_weight; - u8 queue_id4; - __le32 
queue_id4_min_bw; - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_MASK 0xfffffffUL - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_SFT 0 - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_SCALE 0x10000000UL - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_SCALE_BITS (0x0UL << 28) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_SCALE_BYTES (0x1UL << 28) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_SCALE_BYTES - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_SFT 29 - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_INVALID - __le32 queue_id4_max_bw; - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_MASK 0xfffffffUL - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_SFT 0 - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_SCALE 0x10000000UL - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_SCALE_BITS (0x0UL << 28) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_SCALE_BYTES (0x1UL << 28) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_SCALE_BYTES - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_SFT 29 - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_INVALID - u8 queue_id4_tsa_assign; - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_TSA_ASSIGN_SP 0x0UL - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_TSA_ASSIGN_ETS 0x1UL - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_TSA_ASSIGN_RESERVED_FIRST 0x2UL - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_TSA_ASSIGN_RESERVED_LAST 0xffUL - u8 queue_id4_pri_lvl; - u8 queue_id4_bw_weight; - u8 queue_id5; - __le32 queue_id5_min_bw; - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_MASK 0xfffffffUL - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_SFT 0 - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_SCALE 0x10000000UL - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_SCALE_BITS (0x0UL << 28) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_SCALE_BYTES (0x1UL << 28) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_SCALE_BYTES - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_SFT 29 - #define 
QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_INVALID - __le32 queue_id5_max_bw; - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_MASK 0xfffffffUL - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_SFT 0 - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_SCALE 0x10000000UL - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_SCALE_BITS (0x0UL << 28) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_SCALE_BYTES (0x1UL << 28) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_SCALE_BYTES - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_SFT 29 - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_INVALID - u8 queue_id5_tsa_assign; - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_TSA_ASSIGN_SP 0x0UL - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_TSA_ASSIGN_ETS 0x1UL - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_TSA_ASSIGN_RESERVED_FIRST 0x2UL - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_TSA_ASSIGN_RESERVED_LAST 0xffUL - u8 queue_id5_pri_lvl; - u8 queue_id5_bw_weight; - u8 queue_id6; - __le32 queue_id6_min_bw; - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_MASK 0xfffffffUL - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_SFT 0 - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_SCALE 0x10000000UL - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_SCALE_BITS (0x0UL << 28) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_SCALE_BYTES (0x1UL << 28) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_SCALE_BYTES - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_SFT 29 - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_INVALID - __le32 queue_id6_max_bw; - #define 
QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_MASK 0xfffffffUL - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_SFT 0 - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_SCALE 0x10000000UL - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_SCALE_BITS (0x0UL << 28) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_SCALE_BYTES (0x1UL << 28) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_SCALE_BYTES - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_SFT 29 - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_INVALID - u8 queue_id6_tsa_assign; - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_TSA_ASSIGN_SP 0x0UL - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_TSA_ASSIGN_ETS 0x1UL - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_TSA_ASSIGN_RESERVED_FIRST 0x2UL - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_TSA_ASSIGN_RESERVED_LAST 0xffUL - u8 queue_id6_pri_lvl; - u8 queue_id6_bw_weight; - u8 queue_id7; - __le32 queue_id7_min_bw; - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_MASK 0xfffffffUL - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_SFT 0 - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_SCALE 0x10000000UL - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_SCALE_BITS (0x0UL << 28) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_SCALE_BYTES (0x1UL << 28) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_SCALE_BYTES - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_SFT 29 - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_INVALID - __le32 queue_id7_max_bw; - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_MASK 0xfffffffUL - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_SFT 0 - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_SCALE 0x10000000UL - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_SCALE_BITS (0x0UL << 28) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_SCALE_BYTES (0x1UL << 28) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_SCALE_BYTES - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_SFT 29 - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL 
<< 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_INVALID - u8 queue_id7_tsa_assign; - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_TSA_ASSIGN_SP 0x0UL - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_TSA_ASSIGN_ETS 0x1UL - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_TSA_ASSIGN_RESERVED_FIRST 0x2UL - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_TSA_ASSIGN_RESERVED_LAST 0xffUL - u8 queue_id7_pri_lvl; - u8 queue_id7_bw_weight; + struct { + u8 queue_id; + __le32 queue_id_min_bw; + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MIN_BW_BW_VALUE_MASK 0xfffffffUL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MIN_BW_BW_VALUE_SFT 0 + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MIN_BW_SCALE 0x10000000UL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MIN_BW_SCALE_BITS (0x0UL << 28) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MIN_BW_SCALE_BYTES (0x1UL << 28) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MIN_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MIN_BW_SCALE_BYTES + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MIN_BW_BW_VALUE_UNIT_SFT 29 + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MIN_BW_BW_VALUE_UNIT_INVALID + __le32 queue_id_max_bw; + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MAX_BW_BW_VALUE_MASK 0xfffffffUL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MAX_BW_BW_VALUE_SFT 0 + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MAX_BW_SCALE 0x10000000UL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MAX_BW_SCALE_BITS (0x0UL << 28) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MAX_BW_SCALE_BYTES (0x1UL << 28) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MAX_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MAX_BW_SCALE_BYTES + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MAX_BW_BW_VALUE_UNIT_SFT 29 + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MAX_BW_BW_VALUE_UNIT_INVALID + u8 queue_id_tsa_assign; + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_TSA_ASSIGN_SP 0x0UL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_TSA_ASSIGN_ETS 0x1UL + 
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_TSA_ASSIGN_RESERVED_FIRST 0x2UL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_TSA_ASSIGN_RESERVED_LAST 0xffUL + u8 queue_id_pri_lvl; + u8 queue_id_bw_weight; + } __packed cfg[7]; u8 unused_1[5]; };
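Both the cos2bw query response and the cfg request replace seven open-coded queue_id1..queue_id7 field groups with one __packed cfg[7] array of identical layout; __packed keeps the wire format byte-for-byte what the explicit u8/__le32 fields produced, while letting the driver index queues in a loop. A hypothetical caller fragment (req, ids and bw are illustrative, not bnxt code):

    /* Fill per-queue COS-to-BW parameters by index instead of
     * addressing queue_id1..queue_id7 one field group at a time. */
    for (i = 0; i < 7; i++) {
            req->cfg[i].queue_id = ids[i + 1];
            req->cfg[i].queue_id_min_bw = cpu_to_le32(bw[i + 1].min);
            req->cfg[i].queue_id_max_bw = cpu_to_le32(bw[i + 1].max);
    }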
diff --combined drivers/net/ethernet/broadcom/genet/bcmmii.c index cc3afb605b1e,4012a141a229..97ea76d443ab --- a/drivers/net/ethernet/broadcom/genet/bcmmii.c +++ b/drivers/net/ethernet/broadcom/genet/bcmmii.c @@@ -617,9 -617,9 +617,9 @@@ static int bcmgenet_mii_pd_init(struct };
phydev = fixed_phy_register(PHY_POLL, &fphy_status, NULL); - if (!phydev || IS_ERR(phydev)) { + if (IS_ERR(phydev)) { dev_err(kdev, "failed to register fixed PHY device\n"); - return -ENODEV; + return PTR_ERR(phydev); }
/* Make sure we initialize MoCA PHYs with a link down */ diff --combined drivers/net/ethernet/broadcom/tg3.c index cb2810f175cc,7f956cf36337..e388bffda2dd --- a/drivers/net/ethernet/broadcom/tg3.c +++ b/drivers/net/ethernet/broadcom/tg3.c @@@ -1539,8 -1539,7 +1539,7 @@@ static int tg3_mdio_init(struct tg3 *tp return -ENOMEM;
tp->mdio_bus->name = "tg3 mdio bus"; - snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x", - (tp->pdev->bus->number << 8) | tp->pdev->devfn); + snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x", pci_dev_id(tp->pdev)); tp->mdio_bus->priv = tp; tp->mdio_bus->parent = &tp->pdev->dev; tp->mdio_bus->read = &tg3_mdio_read; @@@ -6881,10 -6880,7 +6880,10 @@@ static int tg3_rx(struct tg3_napi *tnap
ri->data = NULL;
- skb = build_skb(data, frag_size); + if (frag_size) + skb = build_skb(data, frag_size); + else + skb = slab_build_skb(data); if (!skb) { tg3_frag_free(frag_size != 0, data); goto drop_it_no_recycle; diff --combined drivers/net/ethernet/freescale/fs_enet/fs_enet.h index aad96cb2ab4e,759bb7080e22..21c07ac05225 --- a/drivers/net/ethernet/freescale/fs_enet/fs_enet.h +++ b/drivers/net/ethernet/freescale/fs_enet/fs_enet.h @@@ -2,6 -2,7 +2,7 @@@ #ifndef FS_ENET_H #define FS_ENET_H
+ #include <linux/clk.h> #include <linux/mii.h> #include <linux/netdevice.h> #include <linux/types.h> @@@ -9,8 -10,8 +10,6 @@@ #include <linux/phy.h> #include <linux/dma-mapping.h>
- #include <linux/fs_enet_pd.h> -#include <asm/fs_pd.h> -- #ifdef CONFIG_CPM1 #include <asm/cpm1.h> #endif @@@ -117,6 -118,23 +116,23 @@@ struct phy_info #define ENET_RX_ALIGN 16 #define ENET_RX_FRSIZE L1_CACHE_ALIGN(PKT_MAXBUF_SIZE + ENET_RX_ALIGN - 1)
+ struct fs_platform_info { + /* device specific information */ + u32 cp_command; /* CPM page/sblock/mcn */ + + u32 dpram_offset; + + struct device_node *phy_node; + + int rx_ring, tx_ring; /* number of buffers on rx */ + int rx_copybreak; /* limit we copy small frames */ + int napi_weight; /* NAPI weight */ + + int use_rmii; /* use RMII mode */ + + struct clk *clk_per; /* 'per' clock for register access */ + }; + struct fs_enet_private { struct napi_struct napi; struct device *dev; /* pointer back to the device (must be initialized first) */ @@@ -190,11 -208,6 +206,6 @@@ void fs_cleanup_bds(struct net_device * #define DRV_MODULE_NAME "fs_enet" #define PFX DRV_MODULE_NAME ": "
- /***************************************************************************/ - - int fs_enet_platform_init(void); - void fs_enet_platform_cleanup(void); - /***************************************************************************/ /* buffer descriptor access macros */
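With <linux/fs_enet_pd.h> deleted, the slice of platform data the driver still consumes is declared privately above, and the old board-file entry points (fs_enet_platform_init()/fs_enet_platform_cleanup()) go away with it. The fields would then be filled from the device tree at probe time; an illustrative fragment under that assumption (fpi, ofdev and the default values are hypothetical):

    /* Populate the now driver-private fs_platform_info from DT
     * properties instead of a board file. */
    fpi->rx_ring = 32;
    fpi->tx_ring = 64;
    fpi->rx_copybreak = 240;
    fpi->phy_node = of_parse_phandle(ofdev->dev.of_node,
                                     "phy-handle", 0);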
diff --combined drivers/net/ethernet/freescale/fs_enet/mac-fcc.c index c9491b6e8708,d903a9012db0..e2ffac9eb2ad --- a/drivers/net/ethernet/freescale/fs_enet/mac-fcc.c +++ b/drivers/net/ethernet/freescale/fs_enet/mac-fcc.c @@@ -32,12 -32,12 +32,11 @@@ #include <linux/platform_device.h> #include <linux/phy.h> #include <linux/of_address.h> - #include <linux/of_device.h> #include <linux/of_irq.h> #include <linux/gfp.h> #include <linux/pgtable.h>
#include <asm/immap_cpm2.h> -#include <asm/mpc8260.h> #include <asm/cpm2.h>
#include <asm/irq.h> @@@ -105,7 -105,7 +104,7 @@@ static int do_pd_setup(struct fs_enet_p goto out_ep;
fep->fcc.mem = (void __iomem *)cpm2_immr; - fpi->dpram_offset = cpm_dpalloc(128, 32); + fpi->dpram_offset = cpm_muram_alloc(128, 32); if (IS_ERR_VALUE(fpi->dpram_offset)) { ret = fpi->dpram_offset; goto out_fcccp; @@@ -547,7 -547,7 +546,7 @@@ static void tx_restart(struct net_devic } /* Now update the TBPTR and dirty flag to the current buffer */ W32(ep, fen_genfcc.fcc_tbptr, - (uint) (((void *)recheck_bd - fep->ring_base) + + (uint)(((void __iomem *)recheck_bd - fep->ring_base) + fep->ring_mem_addr)); fep->dirty_tx = recheck_bd;
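cpm_dpalloc() appears to have been a thin wrapper over the generic MURAM allocator, so calling cpm_muram_alloc() directly removes one more dependency on the deleted asm headers; the (void __iomem *) cast in tx_restart() only silences a sparse address-space warning and does not change the computed offset. Assuming the allocator keeps the same (size, align) contract with IS_ERR_VALUE()-encoded failures, the call site reduces to:

    fpi->dpram_offset = cpm_muram_alloc(128, 32);   /* size, align */
    if (IS_ERR_VALUE(fpi->dpram_offset))
            return fpi->dpram_offset;               /* encoded errno */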
diff --combined drivers/net/ethernet/intel/i40e/i40e_main.c index a86bfa3bba74,a5ba873c3b24..de7fd43dc11c --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@@ -2609,7 -2609,7 +2609,7 @@@ int i40e_sync_vsi_filters(struct i40e_v retval = i40e_correct_mac_vlan_filters (vsi, &tmp_add_list, &tmp_del_list, vlan_filters); - else + else if (pf->vf) retval = i40e_correct_vf_mac_vlan_filters (vsi, &tmp_add_list, &tmp_del_list, vlan_filters, pf->vf[vsi->vf_id].trusted); @@@ -2782,8 -2782,7 +2782,8 @@@ }
/* if the VF is not trusted do not do promisc */ - if ((vsi->type == I40E_VSI_SRIOV) && !pf->vf[vsi->vf_id].trusted) { + if (vsi->type == I40E_VSI_SRIOV && pf->vf && + !pf->vf[vsi->vf_id].trusted) { clear_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state); goto out; } @@@ -3586,11 -3585,6 +3586,6 @@@ static int i40e_configure_rx_ring(struc if (ring->xsk_pool) { ring->rx_buf_len = xsk_pool_get_rx_frame_size(ring->xsk_pool); - /* For AF_XDP ZC, we disallow packets to span on - * multiple buffers, thus letting us skip that - * handling in the fast-path. - */ - chain_len = 1; ret = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq, MEM_TYPE_XSK_BUFF_POOL, NULL); @@@ -5715,7 -5709,7 +5710,7 @@@ int i40e_update_adq_vsi_queues(struct i int ret;
if (!vsi) - return I40E_ERR_PARAM; + return -EINVAL; pf = vsi->back; hw = &pf->hw;
@@@ -7159,7 -7153,7 +7154,7 @@@ static int i40e_init_pf_dcb(struct i40e */ if (pf->hw_features & I40E_HW_NO_DCB_SUPPORT) { dev_info(&pf->pdev->dev, "DCB is not supported.\n"); - err = I40E_NOT_SUPPORTED; + err = -EOPNOTSUPP; goto out; } if (pf->flags & I40E_FLAG_DISABLE_FW_LLDP) { @@@ -7469,7 -7463,7 +7464,7 @@@ static int i40e_force_link_state(struc if (pf->flags & I40E_FLAG_TOTAL_PORT_SHUTDOWN_ENABLED) non_zero_phy_type = true; else if (is_up && abilities.phy_type != 0 && abilities.link_speed != 0) - return I40E_SUCCESS; + return 0;
/* To force link we need to set bits for all supported PHY types, * but there are now more than 32, so we need to split the bitmap @@@ -7520,7 -7514,7 +7515,7 @@@
i40e_aq_set_link_restart_an(hw, is_up, NULL);
- return I40E_SUCCESS; + return 0; }
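The i40e hunks in this file are one sweep of a wider cleanup: driver-private status codes give way to standard errnos so that i40e and its callers share one error namespace. The substitutions visible here, condensed into a sketch:

    /* Conversion pattern across these hunks (not exhaustive). */
    if (!vsi)
            return -EINVAL;        /* was I40E_ERR_PARAM */
    if (pf->hw_features & I40E_HW_NO_DCB_SUPPORT)
            return -EOPNOTSUPP;    /* was I40E_NOT_SUPPORTED */
    return 0;                      /* was I40E_SUCCESS */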
/** @@@ -8367,7 -8361,7 +8362,7 @@@ int i40e_add_del_cloud_filter(struct i4 };
if (filter->flags >= ARRAY_SIZE(flag_table)) - return I40E_ERR_CONFIG; + return -EIO;
memset(&cld_filter, 0, sizeof(cld_filter));
@@@ -8531,15 -8525,15 +8526,15 @@@ static int i40e_parse_cls_flower(struc u8 field_flags = 0;
if (dissector->used_keys & - ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) | - BIT(FLOW_DISSECTOR_KEY_BASIC) | - BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) | - BIT(FLOW_DISSECTOR_KEY_VLAN) | - BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) | - BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) | - BIT(FLOW_DISSECTOR_KEY_PORTS) | - BIT(FLOW_DISSECTOR_KEY_ENC_KEYID))) { - dev_err(&pf->pdev->dev, "Unsupported key used: 0x%x\n", + ~(BIT_ULL(FLOW_DISSECTOR_KEY_CONTROL) | + BIT_ULL(FLOW_DISSECTOR_KEY_BASIC) | + BIT_ULL(FLOW_DISSECTOR_KEY_ETH_ADDRS) | + BIT_ULL(FLOW_DISSECTOR_KEY_VLAN) | + BIT_ULL(FLOW_DISSECTOR_KEY_IPV4_ADDRS) | + BIT_ULL(FLOW_DISSECTOR_KEY_IPV6_ADDRS) | + BIT_ULL(FLOW_DISSECTOR_KEY_PORTS) | + BIT_ULL(FLOW_DISSECTOR_KEY_ENC_KEYID))) { + dev_err(&pf->pdev->dev, "Unsupported key used: 0x%llx\n", dissector->used_keys); return -EOPNOTSUPP; } @@@ -8581,7 -8575,7 +8576,7 @@@ } else { dev_err(&pf->pdev->dev, "Bad ether dest mask %pM\n", match.mask->dst); - return I40E_ERR_CONFIG; + return -EIO; } }
@@@ -8591,7 -8585,7 +8586,7 @@@ } else { dev_err(&pf->pdev->dev, "Bad ether src mask %pM\n", match.mask->src); - return I40E_ERR_CONFIG; + return -EIO; } } ether_addr_copy(filter->dst_mac, match.key->dst); @@@ -8609,7 -8603,7 +8604,7 @@@ } else { dev_err(&pf->pdev->dev, "Bad vlan mask 0x%04x\n", match.mask->vlan_id); - return I40E_ERR_CONFIG; + return -EIO; } }
@@@ -8633,7 -8627,7 +8628,7 @@@ } else { dev_err(&pf->pdev->dev, "Bad ip dst mask %pI4b\n", &match.mask->dst); - return I40E_ERR_CONFIG; + return -EIO; } }
@@@ -8643,13 -8637,13 +8638,13 @@@ } else { dev_err(&pf->pdev->dev, "Bad ip src mask %pI4b\n", &match.mask->src); - return I40E_ERR_CONFIG; + return -EIO; } }
if (field_flags & I40E_CLOUD_FIELD_TEN_ID) { dev_err(&pf->pdev->dev, "Tenant id not allowed for ip filter\n"); - return I40E_ERR_CONFIG; + return -EIO; } filter->dst_ipv4 = match.key->dst; filter->src_ipv4 = match.key->src; @@@ -8667,7 -8661,7 +8662,7 @@@ ipv6_addr_loopback(&match.key->src)) { dev_err(&pf->pdev->dev, "Bad ipv6, addr is LOOPBACK\n"); - return I40E_ERR_CONFIG; + return -EIO; } if (!ipv6_addr_any(&match.mask->dst) || !ipv6_addr_any(&match.mask->src)) @@@ -8689,7 -8683,7 +8684,7 @@@ } else { dev_err(&pf->pdev->dev, "Bad src port mask 0x%04x\n", be16_to_cpu(match.mask->src)); - return I40E_ERR_CONFIG; + return -EIO; } }
@@@ -8699,7 -8693,7 +8694,7 @@@ } else { dev_err(&pf->pdev->dev, "Bad dst port mask 0x%04x\n", be16_to_cpu(match.mask->dst)); - return I40E_ERR_CONFIG; + return -EIO; } }
@@@ -9907,11 -9901,11 +9902,11 @@@ static void i40e_link_event(struct i40e status = i40e_get_link_status(&pf->hw, &new_link);
/* On success, disable temp link polling */ - if (status == I40E_SUCCESS) { + if (status == 0) { clear_bit(__I40E_TEMP_LINK_POLLING, pf->state); } else { /* Enable link polling temporarily until i40e_get_link_status - * returns I40E_SUCCESS + * returns 0 */ set_bit(__I40E_TEMP_LINK_POLLING, pf->state); dev_dbg(&pf->pdev->dev, "couldn't get link state, status: %d\n", @@@ -10165,7 -10159,7 +10160,7 @@@ static void i40e_clean_adminq_subtask(s
do { ret = i40e_clean_arq_element(hw, &event, &pending); - if (ret == I40E_ERR_ADMIN_QUEUE_NO_WORK) + if (ret == -EALREADY) break; else if (ret) { dev_info(&pf->pdev->dev, "ARQ event error %d\n", ret); @@@ -12575,7 -12569,7 +12570,7 @@@ int i40e_commit_partition_bw_setting(st dev_info(&pf->pdev->dev, "Commit BW only works on partition 1! This is partition %d", pf->hw.partition_id); - ret = I40E_NOT_SUPPORTED; + ret = -EOPNOTSUPP; goto bw_commit_out; }
@@@ -12657,10 -12651,10 +12652,10 @@@ static bool i40e_is_total_port_shutdown #define I40E_LINK_BEHAVIOR_WORD_LENGTH 0x1 #define I40E_LINK_BEHAVIOR_OS_FORCED_ENABLED BIT(0) #define I40E_LINK_BEHAVIOR_PORT_BIT_LENGTH 4 - int read_status = I40E_SUCCESS; u16 sr_emp_sr_settings_ptr = 0; u16 features_enable = 0; u16 link_behavior = 0; + int read_status = 0; bool ret = false;
read_status = i40e_read_nvm_word(&pf->hw, @@@ -13823,6 -13817,7 +13818,7 @@@ static int i40e_config_netdev(struct i4 NETDEV_XDP_ACT_REDIRECT | NETDEV_XDP_ACT_XSK_ZEROCOPY | NETDEV_XDP_ACT_RX_SG; + netdev->xdp_zc_max_segs = I40E_MAX_BUFFER_TXD; } else { /* Relate the VSI_VMDQ name to the VSI_MAIN name. Note that we * are still limited by IFNAMSIZ, but we're adding 'v%d\0' to @@@ -15467,12 -15462,12 +15463,12 @@@ static int i40e_pf_loop_reset(struct i4 int ret;
ret = i40e_pf_reset(hw); - while (ret != I40E_SUCCESS && time_before(jiffies, time_end)) { + while (ret != 0 && time_before(jiffies, time_end)) { usleep_range(10000, 20000); ret = i40e_pf_reset(hw); }
- if (ret == I40E_SUCCESS) + if (ret == 0) pf->pfr_count++; else dev_info(&pf->pdev->dev, "PF reset failed: %d\n", ret); @@@ -15515,10 -15510,10 +15511,10 @@@ static int i40e_handle_resets(struct i4 const int pfr = i40e_pf_loop_reset(pf); const bool is_empr = i40e_check_fw_empr(pf);
- if (is_empr || pfr != I40E_SUCCESS) + if (is_empr || pfr != 0) dev_crit(&pf->pdev->dev, "Entering recovery mode due to repeated FW resets. This may take several minutes. Refer to the Intel(R) Ethernet Adapters and Devices User Guide.\n");
- return is_empr ? I40E_ERR_RESET_FAILED : pfr; + return is_empr ? -EIO : pfr; }
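The i40e hunks above all replace driver-private I40E_* status codes with standard errno values (I40E_SUCCESS becomes 0, I40E_NOT_SUPPORTED becomes -EOPNOTSUPP, I40E_ERR_RESET_FAILED becomes -EIO, and so on). A side benefit is that results now feed directly into generic kernel error helpers; an illustrative sketch, not part of the commit:

	int err = i40e_pf_loop_reset(pf);

	if (err)
		/* %pe pretty-prints an ERR_PTR()-wrapped errno, e.g. "-EIO" */
		dev_info(&pf->pdev->dev, "PF reset failed: %pe\n", ERR_PTR(err));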
/** @@@ -15811,7 -15806,7 +15807,7 @@@ static int i40e_probe(struct pci_dev *p
err = i40e_init_adminq(hw); if (err) { - if (err == I40E_ERR_FIRMWARE_API_VERSION) + if (err == -EIO) dev_info(&pdev->dev, "The driver for the device stopped because the NVM image v%u.%u is newer than expected v%u.%u. You must install the most recent version of the network driver.\n", hw->aq.api_maj_ver, diff --combined drivers/net/ethernet/intel/ice/ice_base.c index 074bf9403cd1,9ab9fb558b5e..7fa43827a3f0 --- a/drivers/net/ethernet/intel/ice/ice_base.c +++ b/drivers/net/ethernet/intel/ice/ice_base.c @@@ -408,7 -408,6 +408,6 @@@ static unsigned int ice_rx_offset(struc */ static int ice_setup_rx_ctx(struct ice_rx_ring *ring) { - int chain_len = ICE_MAX_CHAINED_RX_BUFS; struct ice_vsi *vsi = ring->vsi; u32 rxdid = ICE_RXDID_FLEX_NIC; struct ice_rlan_ctx rlan_ctx; @@@ -435,8 -434,7 +434,8 @@@ /* Receive Packet Data Buffer Size. * The Packet Data Buffer Size is defined in 128 byte units. */ - rlan_ctx.dbuf = ring->rx_buf_len >> ICE_RLAN_CTX_DBUF_S; + rlan_ctx.dbuf = DIV_ROUND_UP(ring->rx_buf_len, + BIT_ULL(ICE_RLAN_CTX_DBUF_S));
/* use 32 byte descriptors */ rlan_ctx.dsize = 1; @@@ -473,17 -471,11 +472,11 @@@ */ rlan_ctx.showiv = 0;
- /* For AF_XDP ZC, we disallow packets to span on - * multiple buffers, thus letting us skip that - * handling in the fast-path. - */ - if (ring->xsk_pool) - chain_len = 1; /* Max packet size for this queue - must not be set to a larger value * than 5 x DBUF */ rlan_ctx.rxmax = min_t(u32, vsi->max_frame, - chain_len * ring->rx_buf_len); + ICE_MAX_CHAINED_RX_BUFS * ring->rx_buf_len);
/* Rx queue threshold in units of 64 */ rlan_ctx.lrxqthresh = 1; diff --combined drivers/net/ethernet/intel/ice/ice_vf_lib.c index ea3310be8354,b95931272b16..24e4f4d897b6 --- a/drivers/net/ethernet/intel/ice/ice_vf_lib.c +++ b/drivers/net/ethernet/intel/ice/ice_vf_lib.c @@@ -185,6 -185,25 +185,6 @@@ int ice_check_vf_ready_for_cfg(struct i return 0; }
-/** - * ice_check_vf_ready_for_reset - check if VF is ready to be reset - * @vf: VF to check if it's ready to be reset - * - * The purpose of this function is to ensure that the VF is not in reset, - * disabled, and is both initialized and active, thus enabling us to safely - * initialize another reset. - */ -int ice_check_vf_ready_for_reset(struct ice_vf *vf) -{ - int ret; - - ret = ice_check_vf_ready_for_cfg(vf); - if (!ret && !test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) - ret = -EAGAIN; - - return ret; -} - /** * ice_trigger_vf_reset - Reset a VF on HW * @vf: pointer to the VF structure @@@ -303,6 -322,237 +303,237 @@@ static int ice_vf_rebuild_vsi(struct ic return 0; }
+ /** + * ice_vf_rebuild_host_vlan_cfg - add VLAN 0 filter or rebuild the Port VLAN + * @vf: VF to add MAC filters for + * @vsi: Pointer to VSI + * + * Called after a VF VSI has been re-added/rebuilt during reset. The PF driver + * always re-adds either a VLAN 0 or port VLAN based filter after reset. + */ + static int ice_vf_rebuild_host_vlan_cfg(struct ice_vf *vf, struct ice_vsi *vsi) + { + struct ice_vsi_vlan_ops *vlan_ops = ice_get_compat_vsi_vlan_ops(vsi); + struct device *dev = ice_pf_to_dev(vf->pf); + int err; + + if (ice_vf_is_port_vlan_ena(vf)) { + err = vlan_ops->set_port_vlan(vsi, &vf->port_vlan_info); + if (err) { + dev_err(dev, "failed to configure port VLAN via VSI parameters for VF %u, error %d\n", + vf->vf_id, err); + return err; + } + + err = vlan_ops->add_vlan(vsi, &vf->port_vlan_info); + } else { + err = ice_vsi_add_vlan_zero(vsi); + } + + if (err) { + dev_err(dev, "failed to add VLAN %u filter for VF %u during VF rebuild, error %d\n", + ice_vf_is_port_vlan_ena(vf) ? + ice_vf_get_port_vlan_id(vf) : 0, vf->vf_id, err); + return err; + } + + err = vlan_ops->ena_rx_filtering(vsi); + if (err) + dev_warn(dev, "failed to enable Rx VLAN filtering for VF %d VSI %d during VF rebuild, error %d\n", + vf->vf_id, vsi->idx, err); + + return 0; + } + + /** + * ice_vf_rebuild_host_tx_rate_cfg - re-apply the Tx rate limiting configuration + * @vf: VF to re-apply the configuration for + * + * Called after a VF VSI has been re-added/rebuild during reset. The PF driver + * needs to re-apply the host configured Tx rate limiting configuration. + */ + static int ice_vf_rebuild_host_tx_rate_cfg(struct ice_vf *vf) + { + struct device *dev = ice_pf_to_dev(vf->pf); + struct ice_vsi *vsi = ice_get_vf_vsi(vf); + int err; + + if (WARN_ON(!vsi)) + return -EINVAL; + + if (vf->min_tx_rate) { + err = ice_set_min_bw_limit(vsi, (u64)vf->min_tx_rate * 1000); + if (err) { + dev_err(dev, "failed to set min Tx rate to %d Mbps for VF %u, error %d\n", + vf->min_tx_rate, vf->vf_id, err); + return err; + } + } + + if (vf->max_tx_rate) { + err = ice_set_max_bw_limit(vsi, (u64)vf->max_tx_rate * 1000); + if (err) { + dev_err(dev, "failed to set max Tx rate to %d Mbps for VF %u, error %d\n", + vf->max_tx_rate, vf->vf_id, err); + return err; + } + } + + return 0; + } + + /** + * ice_vf_set_host_trust_cfg - set trust setting based on pre-reset value + * @vf: VF to configure trust setting for + */ + static void ice_vf_set_host_trust_cfg(struct ice_vf *vf) + { + assign_bit(ICE_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps, vf->trusted); + } + + /** + * ice_vf_rebuild_host_mac_cfg - add broadcast and the VF's perm_addr/LAA + * @vf: VF to add MAC filters for + * + * Called after a VF VSI has been re-added/rebuilt during reset. The PF driver + * always re-adds a broadcast filter and the VF's perm_addr/LAA after reset. 
+ */ + static int ice_vf_rebuild_host_mac_cfg(struct ice_vf *vf) + { + struct device *dev = ice_pf_to_dev(vf->pf); + struct ice_vsi *vsi = ice_get_vf_vsi(vf); + u8 broadcast[ETH_ALEN]; + int status; + + if (WARN_ON(!vsi)) + return -EINVAL; + + if (ice_is_eswitch_mode_switchdev(vf->pf)) + return 0; + + eth_broadcast_addr(broadcast); + status = ice_fltr_add_mac(vsi, broadcast, ICE_FWD_TO_VSI); + if (status) { + dev_err(dev, "failed to add broadcast MAC filter for VF %u, error %d\n", + vf->vf_id, status); + return status; + } + + vf->num_mac++; + + if (is_valid_ether_addr(vf->hw_lan_addr)) { + status = ice_fltr_add_mac(vsi, vf->hw_lan_addr, + ICE_FWD_TO_VSI); + if (status) { + dev_err(dev, "failed to add default unicast MAC filter %pM for VF %u, error %d\n", + &vf->hw_lan_addr[0], vf->vf_id, + status); + return status; + } + vf->num_mac++; + + ether_addr_copy(vf->dev_lan_addr, vf->hw_lan_addr); + } + + return 0; + } + + /** + * ice_vf_rebuild_aggregator_node_cfg - rebuild aggregator node config + * @vsi: Pointer to VSI + * + * This function moves VSI into corresponding scheduler aggregator node + * based on cached value of "aggregator node info" per VSI + */ + static void ice_vf_rebuild_aggregator_node_cfg(struct ice_vsi *vsi) + { + struct ice_pf *pf = vsi->back; + struct device *dev; + int status; + + if (!vsi->agg_node) + return; + + dev = ice_pf_to_dev(pf); + if (vsi->agg_node->num_vsis == ICE_MAX_VSIS_IN_AGG_NODE) { + dev_dbg(dev, + "agg_id %u already has reached max_num_vsis %u\n", + vsi->agg_node->agg_id, vsi->agg_node->num_vsis); + return; + } + + status = ice_move_vsi_to_agg(pf->hw.port_info, vsi->agg_node->agg_id, + vsi->idx, vsi->tc_cfg.ena_tc); + if (status) + dev_dbg(dev, "unable to move VSI idx %u into aggregator %u node", + vsi->idx, vsi->agg_node->agg_id); + else + vsi->agg_node->num_vsis++; + } + + /** + * ice_vf_rebuild_host_cfg - host admin configuration is persistent across reset + * @vf: VF to rebuild host configuration on + */ + static void ice_vf_rebuild_host_cfg(struct ice_vf *vf) + { + struct device *dev = ice_pf_to_dev(vf->pf); + struct ice_vsi *vsi = ice_get_vf_vsi(vf); + + if (WARN_ON(!vsi)) + return; + + ice_vf_set_host_trust_cfg(vf); + + if (ice_vf_rebuild_host_mac_cfg(vf)) + dev_err(dev, "failed to rebuild default MAC configuration for VF %d\n", + vf->vf_id); + + if (ice_vf_rebuild_host_vlan_cfg(vf, vsi)) + dev_err(dev, "failed to rebuild VLAN configuration for VF %u\n", + vf->vf_id); + + if (ice_vf_rebuild_host_tx_rate_cfg(vf)) + dev_err(dev, "failed to rebuild Tx rate limiting configuration for VF %u\n", + vf->vf_id); + + if (ice_vsi_apply_spoofchk(vsi, vf->spoofchk)) + dev_err(dev, "failed to rebuild spoofchk configuration for VF %d\n", + vf->vf_id); + + /* rebuild aggregator node config for main VF VSI */ + ice_vf_rebuild_aggregator_node_cfg(vsi); + } + + /** + * ice_set_vf_state_qs_dis - Set VF queues state to disabled + * @vf: pointer to the VF structure + */ + static void ice_set_vf_state_qs_dis(struct ice_vf *vf) + { + /* Clear Rx/Tx enabled queues flag */ + bitmap_zero(vf->txq_ena, ICE_MAX_RSS_QS_PER_VF); + bitmap_zero(vf->rxq_ena, ICE_MAX_RSS_QS_PER_VF); + clear_bit(ICE_VF_STATE_QS_ENA, vf->vf_states); + } + + /** + * ice_vf_set_initialized - VF is ready for VIRTCHNL communication + * @vf: VF to set in initialized state + * + * After this function the VF will be ready to receive/handle the + * VIRTCHNL_OP_GET_VF_RESOURCES message + */ + static void ice_vf_set_initialized(struct ice_vf *vf) + { + ice_set_vf_state_qs_dis(vf); + 
clear_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states); + clear_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states); + clear_bit(ICE_VF_STATE_DIS, vf->vf_states); + set_bit(ICE_VF_STATE_INIT, vf->vf_states); + memset(&vf->vlan_v2_caps, 0, sizeof(vf->vlan_v2_caps)); + } + /** * ice_vf_post_vsi_rebuild - Reset tasks that occur after VSI rebuild * @vf: the VF being reset @@@ -612,17 -862,11 +843,17 @@@ int ice_reset_vf(struct ice_vf *vf, u3 return 0; }
+ if (flags & ICE_VF_RESET_LOCK) + mutex_lock(&vf->cfg_lock); + else + lockdep_assert_held(&vf->cfg_lock); + if (ice_is_vf_disabled(vf)) { vsi = ice_get_vf_vsi(vf); if (!vsi) { dev_dbg(dev, "VF is already removed\n"); - return -EINVAL; + err = -EINVAL; + goto out_unlock; } ice_vsi_stop_lan_tx_rings(vsi, ICE_NO_RESET, vf->vf_id);
@@@ -631,9 -875,14 +862,9 @@@
dev_dbg(dev, "VF is already disabled, there is no need for resetting it, telling VM, all is fine %d\n", vf->vf_id); - return 0; + goto out_unlock; }
- if (flags & ICE_VF_RESET_LOCK) - mutex_lock(&vf->cfg_lock); - else - lockdep_assert_held(&vf->cfg_lock); - /* Set VF disable bit state here, before triggering reset */ set_bit(ICE_VF_STATE_DIS, vf->vf_states); ice_trigger_vf_reset(vf, flags & ICE_VF_RESET_VFLR, false); @@@ -707,18 -956,6 +938,6 @@@ out_unlock return err; }
- /** - * ice_set_vf_state_qs_dis - Set VF queues state to disabled - * @vf: pointer to the VF structure - */ - static void ice_set_vf_state_qs_dis(struct ice_vf *vf) - { - /* Clear Rx/Tx enabled queues flag */ - bitmap_zero(vf->txq_ena, ICE_MAX_RSS_QS_PER_VF); - bitmap_zero(vf->rxq_ena, ICE_MAX_RSS_QS_PER_VF); - clear_bit(ICE_VF_STATE_QS_ENA, vf->vf_states); - } - /** * ice_set_vf_state_dis - Set VF state to disabled * @vf: pointer to the VF structure @@@ -959,211 -1196,6 +1178,6 @@@ bool ice_is_vf_link_up(struct ice_vf *v ICE_AQ_LINK_UP; }
- /** - * ice_vf_set_host_trust_cfg - set trust setting based on pre-reset value - * @vf: VF to configure trust setting for - */ - static void ice_vf_set_host_trust_cfg(struct ice_vf *vf) - { - if (vf->trusted) - set_bit(ICE_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps); - else - clear_bit(ICE_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps); - } - - /** - * ice_vf_rebuild_host_mac_cfg - add broadcast and the VF's perm_addr/LAA - * @vf: VF to add MAC filters for - * - * Called after a VF VSI has been re-added/rebuilt during reset. The PF driver - * always re-adds a broadcast filter and the VF's perm_addr/LAA after reset. - */ - static int ice_vf_rebuild_host_mac_cfg(struct ice_vf *vf) - { - struct device *dev = ice_pf_to_dev(vf->pf); - struct ice_vsi *vsi = ice_get_vf_vsi(vf); - u8 broadcast[ETH_ALEN]; - int status; - - if (WARN_ON(!vsi)) - return -EINVAL; - - if (ice_is_eswitch_mode_switchdev(vf->pf)) - return 0; - - eth_broadcast_addr(broadcast); - status = ice_fltr_add_mac(vsi, broadcast, ICE_FWD_TO_VSI); - if (status) { - dev_err(dev, "failed to add broadcast MAC filter for VF %u, error %d\n", - vf->vf_id, status); - return status; - } - - vf->num_mac++; - - if (is_valid_ether_addr(vf->hw_lan_addr)) { - status = ice_fltr_add_mac(vsi, vf->hw_lan_addr, - ICE_FWD_TO_VSI); - if (status) { - dev_err(dev, "failed to add default unicast MAC filter %pM for VF %u, error %d\n", - &vf->hw_lan_addr[0], vf->vf_id, - status); - return status; - } - vf->num_mac++; - - ether_addr_copy(vf->dev_lan_addr, vf->hw_lan_addr); - } - - return 0; - } - - /** - * ice_vf_rebuild_host_vlan_cfg - add VLAN 0 filter or rebuild the Port VLAN - * @vf: VF to add MAC filters for - * @vsi: Pointer to VSI - * - * Called after a VF VSI has been re-added/rebuilt during reset. The PF driver - * always re-adds either a VLAN 0 or port VLAN based filter after reset. - */ - static int ice_vf_rebuild_host_vlan_cfg(struct ice_vf *vf, struct ice_vsi *vsi) - { - struct ice_vsi_vlan_ops *vlan_ops = ice_get_compat_vsi_vlan_ops(vsi); - struct device *dev = ice_pf_to_dev(vf->pf); - int err; - - if (ice_vf_is_port_vlan_ena(vf)) { - err = vlan_ops->set_port_vlan(vsi, &vf->port_vlan_info); - if (err) { - dev_err(dev, "failed to configure port VLAN via VSI parameters for VF %u, error %d\n", - vf->vf_id, err); - return err; - } - - err = vlan_ops->add_vlan(vsi, &vf->port_vlan_info); - } else { - err = ice_vsi_add_vlan_zero(vsi); - } - - if (err) { - dev_err(dev, "failed to add VLAN %u filter for VF %u during VF rebuild, error %d\n", - ice_vf_is_port_vlan_ena(vf) ? - ice_vf_get_port_vlan_id(vf) : 0, vf->vf_id, err); - return err; - } - - err = vlan_ops->ena_rx_filtering(vsi); - if (err) - dev_warn(dev, "failed to enable Rx VLAN filtering for VF %d VSI %d during VF rebuild, error %d\n", - vf->vf_id, vsi->idx, err); - - return 0; - } - - /** - * ice_vf_rebuild_host_tx_rate_cfg - re-apply the Tx rate limiting configuration - * @vf: VF to re-apply the configuration for - * - * Called after a VF VSI has been re-added/rebuild during reset. The PF driver - * needs to re-apply the host configured Tx rate limiting configuration. 
- */ - static int ice_vf_rebuild_host_tx_rate_cfg(struct ice_vf *vf) - { - struct device *dev = ice_pf_to_dev(vf->pf); - struct ice_vsi *vsi = ice_get_vf_vsi(vf); - int err; - - if (WARN_ON(!vsi)) - return -EINVAL; - - if (vf->min_tx_rate) { - err = ice_set_min_bw_limit(vsi, (u64)vf->min_tx_rate * 1000); - if (err) { - dev_err(dev, "failed to set min Tx rate to %d Mbps for VF %u, error %d\n", - vf->min_tx_rate, vf->vf_id, err); - return err; - } - } - - if (vf->max_tx_rate) { - err = ice_set_max_bw_limit(vsi, (u64)vf->max_tx_rate * 1000); - if (err) { - dev_err(dev, "failed to set max Tx rate to %d Mbps for VF %u, error %d\n", - vf->max_tx_rate, vf->vf_id, err); - return err; - } - } - - return 0; - } - - /** - * ice_vf_rebuild_aggregator_node_cfg - rebuild aggregator node config - * @vsi: Pointer to VSI - * - * This function moves VSI into corresponding scheduler aggregator node - * based on cached value of "aggregator node info" per VSI - */ - static void ice_vf_rebuild_aggregator_node_cfg(struct ice_vsi *vsi) - { - struct ice_pf *pf = vsi->back; - struct device *dev; - int status; - - if (!vsi->agg_node) - return; - - dev = ice_pf_to_dev(pf); - if (vsi->agg_node->num_vsis == ICE_MAX_VSIS_IN_AGG_NODE) { - dev_dbg(dev, - "agg_id %u already has reached max_num_vsis %u\n", - vsi->agg_node->agg_id, vsi->agg_node->num_vsis); - return; - } - - status = ice_move_vsi_to_agg(pf->hw.port_info, vsi->agg_node->agg_id, - vsi->idx, vsi->tc_cfg.ena_tc); - if (status) - dev_dbg(dev, "unable to move VSI idx %u into aggregator %u node", - vsi->idx, vsi->agg_node->agg_id); - else - vsi->agg_node->num_vsis++; - } - - /** - * ice_vf_rebuild_host_cfg - host admin configuration is persistent across reset - * @vf: VF to rebuild host configuration on - */ - void ice_vf_rebuild_host_cfg(struct ice_vf *vf) - { - struct device *dev = ice_pf_to_dev(vf->pf); - struct ice_vsi *vsi = ice_get_vf_vsi(vf); - - if (WARN_ON(!vsi)) - return; - - ice_vf_set_host_trust_cfg(vf); - - if (ice_vf_rebuild_host_mac_cfg(vf)) - dev_err(dev, "failed to rebuild default MAC configuration for VF %d\n", - vf->vf_id); - - if (ice_vf_rebuild_host_vlan_cfg(vf, vsi)) - dev_err(dev, "failed to rebuild VLAN configuration for VF %u\n", - vf->vf_id); - - if (ice_vf_rebuild_host_tx_rate_cfg(vf)) - dev_err(dev, "failed to rebuild Tx rate limiting configuration for VF %u\n", - vf->vf_id); - - if (ice_vsi_apply_spoofchk(vsi, vf->spoofchk)) - dev_err(dev, "failed to rebuild spoofchk configuration for VF %d\n", - vf->vf_id); - - /* rebuild aggregator node config for main VF VSI */ - ice_vf_rebuild_aggregator_node_cfg(vsi); - } - /** * ice_vf_ctrl_invalidate_vsi - invalidate ctrl_vsi_idx to remove VSI access * @vf: VF that control VSI is being invalidated on @@@ -1292,23 -1324,6 +1306,6 @@@ void ice_vf_vsi_release(struct ice_vf * ice_vf_invalidate_vsi(vf); }
- /** - * ice_vf_set_initialized - VF is ready for VIRTCHNL communication - * @vf: VF to set in initialized state - * - * After this function the VF will be ready to receive/handle the - * VIRTCHNL_OP_GET_VF_RESOURCES message - */ - void ice_vf_set_initialized(struct ice_vf *vf) - { - ice_set_vf_state_qs_dis(vf); - clear_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states); - clear_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states); - clear_bit(ICE_VF_STATE_DIS, vf->vf_states); - set_bit(ICE_VF_STATE_INIT, vf->vf_states); - memset(&vf->vlan_v2_caps, 0, sizeof(vf->vlan_v2_caps)); - } - /** * ice_get_vf_ctrl_vsi - Get first VF control VSI pointer * @pf: the PF private structure diff --combined drivers/net/ethernet/intel/ice/ice_virtchnl.c index dcf628b1fccd,4a02ed91ba73..b03426ac932b --- a/drivers/net/ethernet/intel/ice/ice_virtchnl.c +++ b/drivers/net/ethernet/intel/ice/ice_virtchnl.c @@@ -428,7 -428,7 +428,7 @@@ static int ice_vc_get_vf_res_msg(struc goto err; }
- len = sizeof(struct virtchnl_vf_resource); + len = virtchnl_struct_size(vfres, vsi_res, 0);
vfres = kzalloc(len, GFP_KERNEL); if (!vfres) { @@@ -500,7 -500,7 +500,7 @@@ vfres->num_queue_pairs = vsi->num_txq; vfres->max_vectors = vf->pf->vfs.num_msix_per; vfres->rss_key_size = ICE_VSIQF_HKEY_ARRAY_SIZE; - vfres->rss_lut_size = ICE_VSIQF_HLUT_ARRAY_SIZE; + vfres->rss_lut_size = ICE_LUT_VSI_SIZE; vfres->max_mtu = ice_vc_get_max_frame_size(vf);
vfres->vsi_res[0].vsi_id = vf->lan_vsi_num; @@@ -962,7 -962,7 +962,7 @@@ static int ice_vc_config_rss_lut(struc goto error_param; }
- if (vrl->lut_entries != ICE_VSIQF_HLUT_ARRAY_SIZE) { + if (vrl->lut_entries != ICE_LUT_VSI_SIZE) { v_ret = VIRTCHNL_STATUS_ERR_PARAM; goto error_param; } @@@ -978,7 -978,7 +978,7 @@@ goto error_param; }
- if (ice_set_rss_lut(vsi, vrl->lut, ICE_VSIQF_HLUT_ARRAY_SIZE)) + if (ice_set_rss_lut(vsi, vrl->lut, ICE_LUT_VSI_SIZE)) v_ret = VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR; error_param: return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_RSS_LUT, v_ret, @@@ -1724,6 -1724,8 +1724,8 @@@ error_param vf->vf_id, i); }
+ ice_lag_move_new_vf_nodes(vf); + /* send the response to the VF */ return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_VSI_QUEUES, VIRTCHNL_STATUS_ERR_PARAM, NULL, 0); @@@ -3947,6 -3949,7 +3949,6 @@@ error_handler ice_vc_notify_vf_link_state(vf); break; case VIRTCHNL_OP_RESET_VF: - clear_bit(ICE_VF_STATE_ACTIVE, vf->vf_states); ops->reset_vf(vf); break; case VIRTCHNL_OP_ADD_ETH_ADDR: diff --combined drivers/net/ethernet/mediatek/mtk_wed.c index 3b651efcc25e,00aeee0d5e45..94376aa2b34c --- a/drivers/net/ethernet/mediatek/mtk_wed.c +++ b/drivers/net/ethernet/mediatek/mtk_wed.c @@@ -2,6 -2,7 +2,7 @@@ /* Copyright (C) 2021 Felix Fietkau nbd@nbd.name */
#include <linux/kernel.h> + #include <linux/platform_device.h> #include <linux/slab.h> #include <linux/module.h> #include <linux/bitfield.h> @@@ -221,13 -222,9 +222,13 @@@ void mtk_wed_fe_reset(void
for (i = 0; i < ARRAY_SIZE(hw_list); i++) { struct mtk_wed_hw *hw = hw_list[i]; - struct mtk_wed_device *dev = hw->wed_dev; + struct mtk_wed_device *dev; int err;
+ if (!hw) + break; + + dev = hw->wed_dev; if (!dev || !dev->wlan.reset) continue;
@@@ -248,12 -245,8 +249,12 @@@ void mtk_wed_fe_reset_complete(void
for (i = 0; i < ARRAY_SIZE(hw_list); i++) { struct mtk_wed_hw *hw = hw_list[i]; - struct mtk_wed_device *dev = hw->wed_dev; + struct mtk_wed_device *dev; + + if (!hw) + break;
+ dev = hw->wed_dev; if (!dev || !dev->wlan.reset_complete) continue;
@@@ -1099,7 -1092,7 +1100,7 @@@ mtk_wed_rx_reset(struct mtk_wed_device } else { struct mtk_eth *eth = dev->hw->eth;
- if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) + if (mtk_is_netsys_v2_or_greater(eth)) wed_set(dev, MTK_WED_RESET_IDX, MTK_WED_RESET_IDX_RX_V2); else @@@ -1915,7 -1908,7 +1916,7 @@@ void mtk_wed_add_hw(struct device_node hw->wdma = wdma; hw->index = index; hw->irq = irq; - hw->version = MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2) ? 2 : 1; + hw->version = mtk_is_netsys_v1(eth) ? 1 : 2;
if (hw->version == 1) { hw->mirror = syscon_regmap_lookup_by_phandle(eth_np, diff --combined drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c index 85a2dfbb5c46,e87766f91150..b568988e92e3 --- a/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c @@@ -127,17 -127,23 +127,23 @@@ static int mlx5_fw_reset_get_reset_stat if (mlx5_reg_mfrl_query(dev, NULL, NULL, &reset_state)) goto out;
+ if (!reset_state) + return 0; + switch (reset_state) { case MLX5_MFRL_REG_RESET_STATE_IN_NEGOTIATION: case MLX5_MFRL_REG_RESET_STATE_RESET_IN_PROGRESS: - NL_SET_ERR_MSG_MOD(extack, "Sync reset was already triggered"); + NL_SET_ERR_MSG_MOD(extack, "Sync reset still in progress"); return -EBUSY; - case MLX5_MFRL_REG_RESET_STATE_TIMEOUT: - NL_SET_ERR_MSG_MOD(extack, "Sync reset got timeout"); + case MLX5_MFRL_REG_RESET_STATE_NEG_TIMEOUT: + NL_SET_ERR_MSG_MOD(extack, "Sync reset negotiation timeout"); return -ETIMEDOUT; case MLX5_MFRL_REG_RESET_STATE_NACK: NL_SET_ERR_MSG_MOD(extack, "One of the hosts disabled reset"); return -EPERM; + case MLX5_MFRL_REG_RESET_STATE_UNLOAD_TIMEOUT: + NL_SET_ERR_MSG_MOD(extack, "Sync reset unload timeout"); + return -ETIMEDOUT; }
out: @@@ -151,7 -157,7 +157,7 @@@ int mlx5_fw_reset_set_reset_sync(struc struct mlx5_fw_reset *fw_reset = dev->priv.fw_reset; u32 out[MLX5_ST_SZ_DW(mfrl_reg)] = {}; u32 in[MLX5_ST_SZ_DW(mfrl_reg)] = {}; - int err; + int err, rst_res;
set_bit(MLX5_FW_RESET_FLAGS_PENDING_COMP, &fw_reset->reset_flags);
@@@ -164,13 -170,34 +170,34 @@@ return 0;
clear_bit(MLX5_FW_RESET_FLAGS_PENDING_COMP, &fw_reset->reset_flags); - if (err == -EREMOTEIO && MLX5_CAP_MCAM_FEATURE(dev, reset_state)) - return mlx5_fw_reset_get_reset_state_err(dev, extack); + if (err == -EREMOTEIO && MLX5_CAP_MCAM_FEATURE(dev, reset_state)) { + rst_res = mlx5_fw_reset_get_reset_state_err(dev, extack); + return rst_res ? rst_res : err; + }
NL_SET_ERR_MSG_MOD(extack, "Sync reset command failed"); return mlx5_cmd_check(dev, err, in, out); }
+ int mlx5_fw_reset_verify_fw_complete(struct mlx5_core_dev *dev, + struct netlink_ext_ack *extack) + { + u8 rst_state; + int err; + + err = mlx5_fw_reset_get_reset_state_err(dev, extack); + if (err) + return err; + + rst_state = mlx5_get_fw_rst_state(dev); + if (!rst_state) + return 0; + + mlx5_core_err(dev, "Sync reset did not complete, state=%d\n", rst_state); + NL_SET_ERR_MSG_MOD(extack, "Sync reset did not complete successfully"); + return rst_state; + } + int mlx5_fw_reset_set_live_patch(struct mlx5_core_dev *dev) { return mlx5_reg_mfrl_set(dev, MLX5_MFRL_REG_RESET_LEVEL0, 0, 0, false); @@@ -311,7 -338,7 +338,7 @@@ static int mlx5_check_dev_ids(struct ml list_for_each_entry(sdev, &bridge_bus->devices, bus_list) { err = pci_read_config_word(sdev, PCI_DEVICE_ID, &sdev_id); if (err) - return err; + return pcibios_err_to_errno(err); if (sdev_id != dev_id) { mlx5_core_warn(dev, "unrecognized dev_id (0x%x)\n", sdev_id); return -EPERM; @@@ -371,7 -398,7 +398,7 @@@ static int mlx5_pci_link_toggle(struct
err = pci_read_config_word(dev->pdev, PCI_DEVICE_ID, &dev_id); if (err) - return err; + return pcibios_err_to_errno(err); err = mlx5_check_dev_ids(dev, dev_id); if (err) return err; @@@ -384,13 -411,18 +411,13 @@@ pci_cfg_access_lock(sdev); } /* PCI link toggle */ - err = pci_read_config_word(bridge, cap + PCI_EXP_LNKCTL, ®16); - if (err) - return err; - reg16 |= PCI_EXP_LNKCTL_LD; - err = pci_write_config_word(bridge, cap + PCI_EXP_LNKCTL, reg16); + err = pcie_capability_set_word(bridge, PCI_EXP_LNKCTL, PCI_EXP_LNKCTL_LD); if (err) - return err; + return pcibios_err_to_errno(err); msleep(500); - reg16 &= ~PCI_EXP_LNKCTL_LD; - err = pci_write_config_word(bridge, cap + PCI_EXP_LNKCTL, reg16); + err = pcie_capability_clear_word(bridge, PCI_EXP_LNKCTL, PCI_EXP_LNKCTL_LD); if (err) - return err; + return pcibios_err_to_errno(err);
/* Check link */ if (!bridge->link_active_reporting) { @@@ -403,7 -435,7 +430,7 @@@ do { err = pci_read_config_word(bridge, cap + PCI_EXP_LNKSTA, ®16); if (err) - return err; + return pcibios_err_to_errno(err); if (reg16 & PCI_EXP_LNKSTA_DLLLA) break; msleep(20); @@@ -421,7 -453,7 +448,7 @@@ do { err = pci_read_config_word(dev->pdev, PCI_DEVICE_ID, ®16); if (err) - return err; + return pcibios_err_to_errno(err); if (reg16 == dev_id) break; msleep(20); diff --combined drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.c index 973de2adc943,7870327d921b..70f9b5e85a26 --- a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.c +++ b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.c @@@ -32,8 -32,8 +32,8 @@@ static const struct mlxsw_afk_element_i MLXSW_AFK_ELEMENT_INFO_U32(IP_TTL_, 0x18, 0, 8), MLXSW_AFK_ELEMENT_INFO_U32(IP_ECN, 0x18, 9, 2), MLXSW_AFK_ELEMENT_INFO_U32(IP_DSCP, 0x18, 11, 6), - MLXSW_AFK_ELEMENT_INFO_U32(VIRT_ROUTER_MSB, 0x18, 17, 3), - MLXSW_AFK_ELEMENT_INFO_U32(VIRT_ROUTER_LSB, 0x18, 20, 8), + MLXSW_AFK_ELEMENT_INFO_U32(VIRT_ROUTER_MSB, 0x18, 17, 4), + MLXSW_AFK_ELEMENT_INFO_U32(VIRT_ROUTER_LSB, 0x18, 21, 8), MLXSW_AFK_ELEMENT_INFO_BUF(SRC_IP_96_127, 0x20, 4), MLXSW_AFK_ELEMENT_INFO_BUF(SRC_IP_64_95, 0x24, 4), MLXSW_AFK_ELEMENT_INFO_BUF(SRC_IP_32_63, 0x28, 4), @@@ -43,6 -43,7 +43,7 @@@ MLXSW_AFK_ELEMENT_INFO_BUF(DST_IP_32_63, 0x38, 4), MLXSW_AFK_ELEMENT_INFO_BUF(DST_IP_0_31, 0x3C, 4), MLXSW_AFK_ELEMENT_INFO_U32(FDB_MISS, 0x40, 0, 1), + MLXSW_AFK_ELEMENT_INFO_U32(L4_PORT_RANGE, 0x40, 1, 16), };
struct mlxsw_afk { diff --combined drivers/net/ethernet/mellanox/mlxsw/reg.h index 17160e867bef,4b90ae44b476..ae556ddd7624 --- a/drivers/net/ethernet/mellanox/mlxsw/reg.h +++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h @@@ -97,6 -97,14 +97,6 @@@ MLXSW_ITEM32(reg, sspr, m, 0x00, 31, 1) */ MLXSW_ITEM32_LP(reg, sspr, 0x00, 16, 0x00, 12);
-/* reg_sspr_sub_port - * Virtual port within the physical port. - * Should be set to 0 when virtual ports are not enabled on the port. - * - * Access: RW - */ -MLXSW_ITEM32(reg, sspr, sub_port, 0x00, 8, 8); - /* reg_sspr_system_port * Unique identifier within the stacking domain that represents all the ports * that are available in the system (external ports). @@@ -112,6 -120,7 +112,6 @@@ static inline void mlxsw_reg_sspr_pack( MLXSW_REG_ZERO(sspr, payload); mlxsw_reg_sspr_m_set(payload, 1); mlxsw_reg_sspr_local_port_set(payload, local_port); - mlxsw_reg_sspr_sub_port_set(payload, 0); mlxsw_reg_sspr_system_port_set(payload, local_port); }
@@@ -2790,6 -2799,78 +2790,78 @@@ static inline void mlxsw_reg_ptar_unpac mlxsw_reg_ptar_tcam_region_info_memcpy_from(payload, tcam_region_info); }
+ /* PPRR - Policy-Engine Port Range Register + * ---------------------------------------- + * This register is used for configuring port range identification. + */ + #define MLXSW_REG_PPRR_ID 0x3008 + #define MLXSW_REG_PPRR_LEN 0x14 + + MLXSW_REG_DEFINE(pprr, MLXSW_REG_PPRR_ID, MLXSW_REG_PPRR_LEN); + + /* reg_pprr_ipv4 + * Apply port range register to IPv4 packets. + * Access: RW + */ + MLXSW_ITEM32(reg, pprr, ipv4, 0x00, 31, 1); + + /* reg_pprr_ipv6 + * Apply port range register to IPv6 packets. + * Access: RW + */ + MLXSW_ITEM32(reg, pprr, ipv6, 0x00, 30, 1); + + /* reg_pprr_src + * Apply port range register to source L4 ports. + * Access: RW + */ + MLXSW_ITEM32(reg, pprr, src, 0x00, 29, 1); + + /* reg_pprr_dst + * Apply port range register to destination L4 ports. + * Access: RW + */ + MLXSW_ITEM32(reg, pprr, dst, 0x00, 28, 1); + + /* reg_pprr_tcp + * Apply port range register to TCP packets. + * Access: RW + */ + MLXSW_ITEM32(reg, pprr, tcp, 0x00, 27, 1); + + /* reg_pprr_udp + * Apply port range register to UDP packets. + * Access: RW + */ + MLXSW_ITEM32(reg, pprr, udp, 0x00, 26, 1); + + /* reg_pprr_register_index + * Index of Port Range Register being accessed. + * Range is 0..cap_max_acl_l4_port_range-1. + * Access: Index + */ + MLXSW_ITEM32(reg, pprr, register_index, 0x00, 0, 8); + + /* reg_pprr_port_range_min + * Minimum port range for comparison. + * Match is defined as: + * port_range_min <= packet_port <= port_range_max. + * Access: RW + */ + MLXSW_ITEM32(reg, pprr, port_range_min, 0x04, 16, 16); + + /* reg_pprr_port_range_max + * Maximum port range for comparison. + * Access: RW + */ + MLXSW_ITEM32(reg, pprr, port_range_max, 0x04, 0, 16); + + static inline void mlxsw_reg_pprr_pack(char *payload, u8 register_index) + { + MLXSW_REG_ZERO(pprr, payload); + mlxsw_reg_pprr_register_index_set(payload, register_index); + } + /* PPBS - Policy-Engine Policy Based Switching Register * ---------------------------------------------------- * This register retrieves and sets Policy Based Switching Table entries. @@@ -9559,18 -9640,10 +9631,10 @@@ static inline void mlxsw_reg_mtbr_temp_ */
#define MLXSW_REG_MCIA_ID 0x9014 - #define MLXSW_REG_MCIA_LEN 0x40 + #define MLXSW_REG_MCIA_LEN 0x94
MLXSW_REG_DEFINE(mcia, MLXSW_REG_MCIA_ID, MLXSW_REG_MCIA_LEN);
- /* reg_mcia_l - * Lock bit. Setting this bit will lock the access to the specific - * cable. Used for updating a full page in a cable EPROM. Any access - * other then subsequence writes will fail while the port is locked. - * Access: RW - */ - MLXSW_ITEM32(reg, mcia, l, 0x00, 31, 1); - /* reg_mcia_module * Module number. * Access: Index @@@ -9635,7 -9708,6 +9699,6 @@@ MLXSW_ITEM32(reg, mcia, size, 0x08, 0,
#define MLXSW_REG_MCIA_EEPROM_PAGE_LENGTH 256 #define MLXSW_REG_MCIA_EEPROM_UP_PAGE_LENGTH 128 - #define MLXSW_REG_MCIA_EEPROM_SIZE 48 #define MLXSW_REG_MCIA_I2C_ADDR_LOW 0x50 #define MLXSW_REG_MCIA_I2C_ADDR_HIGH 0x51 #define MLXSW_REG_MCIA_PAGE0_LO_OFF 0xa0 @@@ -9672,7 -9744,7 +9735,7 @@@ enum mlxsw_reg_mcia_eeprom_module_info * Bytes to read/write. * Access: RW */ - MLXSW_ITEM_BUF(reg, mcia, eeprom, 0x10, MLXSW_REG_MCIA_EEPROM_SIZE); + MLXSW_ITEM_BUF(reg, mcia, eeprom, 0x10, 128);
/* This is used to access the optional upper pages (1-3) in the QSFP+ * memory map. Page 1 is available on offset 256 through 383, page 2 - @@@ -9683,14 -9755,12 +9746,12 @@@ MLXSW_REG_MCIA_EEPROM_UP_PAGE_LENGTH + 1)
static inline void mlxsw_reg_mcia_pack(char *payload, u8 slot_index, u8 module, - u8 lock, u8 page_number, - u16 device_addr, u8 size, + u8 page_number, u16 device_addr, u8 size, u8 i2c_device_addr) { MLXSW_REG_ZERO(mcia, payload); mlxsw_reg_mcia_slot_set(payload, slot_index); mlxsw_reg_mcia_module_set(payload, module); - mlxsw_reg_mcia_l_set(payload, lock); mlxsw_reg_mcia_page_number_set(payload, page_number); mlxsw_reg_mcia_device_address_set(payload, device_addr); mlxsw_reg_mcia_size_set(payload, size); @@@ -10500,6 -10570,79 +10561,79 @@@ static inline void mlxsw_reg_mcda_pack( mlxsw_reg_mcda_data_set(payload, i, *(u32 *) &data[i * 4]); }
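With the lock bit dropped and the MCIA EEPROM payload grown from 48 to 128 bytes, a 128-byte read can now be done in one transaction. A sketch of the new calling convention, assuming an mlxsw_core handle and that the caller has already verified 128-byte support via the MCAM register introduced below:

	char mcia_pl[MLXSW_REG_MCIA_LEN];
	char eeprom[128];
	int err;

	mlxsw_reg_mcia_pack(mcia_pl, 0 /* slot */, module, 0 /* page */,
			    0 /* device_addr */, 128, MLXSW_REG_MCIA_I2C_ADDR_LOW);
	err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(mcia), mcia_pl);
	if (err)
		return err;
	mlxsw_reg_mcia_eeprom_memcpy_from(mcia_pl, eeprom);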
+ /* MCAM - Management Capabilities Mask Register + * -------------------------------------------- + * Reports the device supported management features. + */ + #define MLXSW_REG_MCAM_ID 0x907F + #define MLXSW_REG_MCAM_LEN 0x48 + + MLXSW_REG_DEFINE(mcam, MLXSW_REG_MCAM_ID, MLXSW_REG_MCAM_LEN); + + enum mlxsw_reg_mcam_feature_group { + /* Enhanced features. */ + MLXSW_REG_MCAM_FEATURE_GROUP_ENHANCED_FEATURES, + }; + + /* reg_mcam_feature_group + * Feature list mask index. + * Access: Index + */ + MLXSW_ITEM32(reg, mcam, feature_group, 0x00, 16, 8); + + enum mlxsw_reg_mcam_mng_feature_cap_mask_bits { + /* If set, MCIA supports 128 bytes payloads. Otherwise, 48 bytes. */ + MLXSW_REG_MCAM_MCIA_128B = 34, + }; + + #define MLXSW_REG_BYTES_PER_DWORD 0x4 + + /* reg_mcam_mng_feature_cap_mask + * Supported port's enhanced features. + * Based on feature_group index. + * When bit is set, the feature is supported in the device. + * Access: RO + */ + #define MLXSW_REG_MCAM_MNG_FEATURE_CAP_MASK_DWORD(_dw_num, _offset) \ + MLXSW_ITEM_BIT_ARRAY(reg, mcam, mng_feature_cap_mask_dw##_dw_num, \ + _offset, MLXSW_REG_BYTES_PER_DWORD, 1) + + /* The access to the bits in the field 'mng_feature_cap_mask' is not same to + * other mask fields in other registers. In most of the cases bit #0 is the + * first one in the last dword. In MCAM register, the first dword contains bits + * #0-#31 and so on, so the access to the bits is simpler using bit array per + * dword. Declare each dword of 'mng_feature_cap_mask' field separately. + */ + MLXSW_REG_MCAM_MNG_FEATURE_CAP_MASK_DWORD(0, 0x28); + MLXSW_REG_MCAM_MNG_FEATURE_CAP_MASK_DWORD(1, 0x2C); + MLXSW_REG_MCAM_MNG_FEATURE_CAP_MASK_DWORD(2, 0x30); + MLXSW_REG_MCAM_MNG_FEATURE_CAP_MASK_DWORD(3, 0x34); + + static inline void + mlxsw_reg_mcam_pack(char *payload, enum mlxsw_reg_mcam_feature_group feat_group) + { + MLXSW_REG_ZERO(mcam, payload); + mlxsw_reg_mcam_feature_group_set(payload, feat_group); + } + + static inline void + mlxsw_reg_mcam_unpack(char *payload, + enum mlxsw_reg_mcam_mng_feature_cap_mask_bits bit, + bool *p_mng_feature_cap_val) + { + int offset = bit % (MLXSW_REG_BYTES_PER_DWORD * BITS_PER_BYTE); + int dword = bit / (MLXSW_REG_BYTES_PER_DWORD * BITS_PER_BYTE); + u8 (*getters[])(const char *, u16) = { + mlxsw_reg_mcam_mng_feature_cap_mask_dw0_get, + mlxsw_reg_mcam_mng_feature_cap_mask_dw1_get, + mlxsw_reg_mcam_mng_feature_cap_mask_dw2_get, + mlxsw_reg_mcam_mng_feature_cap_mask_dw3_get, + }; + + if (!WARN_ON_ONCE(dword >= ARRAY_SIZE(getters))) + *p_mng_feature_cap_val = getters[dword](payload, offset); + } + /* MPSC - Monitoring Packet Sampling Configuration Register * -------------------------------------------------------- * MPSC Register is used to configure the Packet Sampling mechanism. @@@ -12810,6 -12953,7 +12944,7 @@@ static const struct mlxsw_reg_info *mlx MLXSW_REG(pacl), MLXSW_REG(pagt), MLXSW_REG(ptar), + MLXSW_REG(pprr), MLXSW_REG(ppbs), MLXSW_REG(prcr), MLXSW_REG(pefa), @@@ -12892,10 -13036,11 +13027,11 @@@ MLXSW_REG(mcion), MLXSW_REG(mtpps), MLXSW_REG(mtutc), - MLXSW_REG(mpsc), MLXSW_REG(mcqi), MLXSW_REG(mcc), MLXSW_REG(mcda), + MLXSW_REG(mcam), + MLXSW_REG(mpsc), MLXSW_REG(mgpc), MLXSW_REG(mprs), MLXSW_REG(mogcr), diff --combined drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.c index ae2d6f12b799,b7f58605b6c7..cb746a43b24b --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.c @@@ -31,12 -31,14 +31,14 @@@ static struct mlxsw_afk_element_inst ml
static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv4_sip[] = { MLXSW_AFK_ELEMENT_INST_BUF(SRC_IP_0_31, 0x00, 4), + MLXSW_AFK_ELEMENT_INST_U32(L4_PORT_RANGE, 0x04, 16, 16), MLXSW_AFK_ELEMENT_INST_U32(IP_PROTO, 0x08, 0, 8), MLXSW_AFK_ELEMENT_INST_U32(SRC_SYS_PORT, 0x0C, 0, 16), };
static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv4_dip[] = { MLXSW_AFK_ELEMENT_INST_BUF(DST_IP_0_31, 0x00, 4), + MLXSW_AFK_ELEMENT_INST_U32(L4_PORT_RANGE, 0x04, 16, 16), MLXSW_AFK_ELEMENT_INST_U32(IP_PROTO, 0x08, 0, 8), MLXSW_AFK_ELEMENT_INST_U32(SRC_SYS_PORT, 0x0C, 0, 16), }; @@@ -171,7 -173,7 +173,7 @@@ static struct mlxsw_afk_element_inst ml
static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv4_4[] = { MLXSW_AFK_ELEMENT_INST_U32(VIRT_ROUTER_LSB, 0x04, 24, 8), - MLXSW_AFK_ELEMENT_INST_U32(VIRT_ROUTER_MSB, 0x00, 0, 3), + MLXSW_AFK_ELEMENT_INST_EXT_U32(VIRT_ROUTER_MSB, 0x00, 0, 3, 0, true), };
static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv6_0[] = { @@@ -205,6 -207,7 +207,7 @@@ static struct mlxsw_afk_element_inst ml
static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_l4_2[] = { MLXSW_AFK_ELEMENT_INST_U32(TCP_FLAGS, 0x04, 16, 9), /* TCP_CONTROL + TCP_ECN */ + MLXSW_AFK_ELEMENT_INST_U32(L4_PORT_RANGE, 0x04, 0, 16), };
static const struct mlxsw_afk_block mlxsw_sp2_afk_blocks[] = { @@@ -321,7 -324,7 +324,7 @@@ static struct mlxsw_afk_element_inst ml
static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv4_4b[] = { MLXSW_AFK_ELEMENT_INST_U32(VIRT_ROUTER_LSB, 0x04, 13, 8), - MLXSW_AFK_ELEMENT_INST_EXT_U32(VIRT_ROUTER_MSB, 0x04, 21, 4, 0, true), + MLXSW_AFK_ELEMENT_INST_U32(VIRT_ROUTER_MSB, 0x04, 21, 4), };
static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv6_2b[] = { diff --combined drivers/net/ethernet/sfc/io.h index 07f99ad14bf3,7432c09010d6..4cc7b501135f --- a/drivers/net/ethernet/sfc/io.h +++ b/drivers/net/ethernet/sfc/io.h @@@ -17,46 -17,22 +17,22 @@@ * ************************************************************************** * - * Notes on locking strategy for the Falcon architecture: - * - * Many CSRs are very wide and cannot be read or written atomically. - * Writes from the host are buffered by the Bus Interface Unit (BIU) - * up to 128 bits. Whenever the host writes part of such a register, - * the BIU collects the written value and does not write to the - * underlying register until all 4 dwords have been written. A - * similar buffering scheme applies to host access to the NIC's 64-bit - * SRAM. - * - * Writes to different CSRs and 64-bit SRAM words must be serialised, - * since interleaved access can result in lost writes. We use - * efx_nic::biu_lock for this. - * - * We also serialise reads from 128-bit CSRs and SRAM with the same - * spinlock. This may not be necessary, but it doesn't really matter - * as there are no such reads on the fast path. + * The EF10 architecture exposes very few registers to the host and + * most of them are only 32 bits wide. The only exceptions are the MC + * doorbell register pair, which has its own latching, and + * TX_DESC_UPD. * - * The DMA descriptor pointers (RX_DESC_UPD and TX_DESC_UPD) are - * 128-bit but are special-cased in the BIU to avoid the need for - * locking in the host: + * The TX_DESC_UPD DMA descriptor pointer is 128-bits but is a special + * case in the BIU to avoid the need for locking in the host: * - * - They are write-only. - * - The semantics of writing to these registers are such that + * - It is write-only. + * - The semantics of writing to this register is such that * replacing the low 96 bits with zero does not affect functionality. - * - If the host writes to the last dword address of such a register + * - If the host writes to the last dword address of the register * (i.e. the high 32 bits) the underlying register will always be * written. If the collector and the current write together do not * provide values for all 128 bits of the register, the low 96 bits * will be written as zero. - * - If the host writes to the address of any other part of such a - * register while the collector already holds values for some other - * register, the write is discarded and the collector maintains its - * current state. - * - * The EF10 architecture exposes very few registers to the host and - * most of them are only 32 bits wide. The only exceptions are the MC - * doorbell register pair, which has its own latching, and - * TX_DESC_UPD, which works in a similar way to the Falcon - * architecture. */
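This property is what lets the EF10 TX path ring its doorbell with a single 32-bit write. A sketch of the idiom; the register names (ER_DZ_TX_DESC_UPD_DWORD, ERF_DZ_TX_DESC_WPTR) are from the EF10 register map and are assumptions here, not part of this diff:

	efx_dword_t reg;

	EFX_POPULATE_DWORD_1(reg, ERF_DZ_TX_DESC_WPTR, write_ptr);
	/* Writing only the last dword still commits the full 128-bit
	 * register; the low 96 bits are written as zero, which this
	 * register is defined to tolerate. */
	efx_writed_page(efx, &reg, ER_DZ_TX_DESC_UPD_DWORD, tx_queue->queue);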
#if BITS_PER_LONG == 64 @@@ -70,7 -46,7 +46,7 @@@ */ #ifdef CONFIG_X86_64 /* PIO is a win only if write-combining is possible */ -#ifdef ARCH_HAS_IOREMAP_WC +#ifdef ioremap_wc #define EFX_USE_PIO 1 #endif #endif @@@ -125,27 -101,6 +101,6 @@@ static inline void efx_writeo(struct ef spin_unlock_irqrestore(&efx->biu_lock, flags); }
- /* Write 64-bit SRAM through the supplied mapping, locking as appropriate. */ - static inline void efx_sram_writeq(struct efx_nic *efx, void __iomem *membase, - const efx_qword_t *value, unsigned int index) - { - unsigned int addr = index * sizeof(*value); - unsigned long flags __attribute__ ((unused)); - - netif_vdbg(efx, hw, efx->net_dev, - "writing SRAM address %x with " EFX_QWORD_FMT "\n", - addr, EFX_QWORD_VAL(*value)); - - spin_lock_irqsave(&efx->biu_lock, flags); - #ifdef EFX_USE_QWORD_IO - __raw_writeq((__force u64)value->u64[0], membase + addr); - #else - __raw_writel((__force u32)value->u32[0], membase + addr); - __raw_writel((__force u32)value->u32[1], membase + addr + 4); - #endif - spin_unlock_irqrestore(&efx->biu_lock, flags); - } - /* Write a 32-bit CSR or the last dword of a special 128-bit CSR */ static inline void efx_writed(struct efx_nic *efx, const efx_dword_t *value, unsigned int reg) @@@ -176,27 -131,6 +131,6 @@@ static inline void efx_reado(struct efx EFX_OWORD_VAL(*value)); }
- /* Read 64-bit SRAM through the supplied mapping, locking as appropriate. */ - static inline void efx_sram_readq(struct efx_nic *efx, void __iomem *membase, - efx_qword_t *value, unsigned int index) - { - unsigned int addr = index * sizeof(*value); - unsigned long flags __attribute__ ((unused)); - - spin_lock_irqsave(&efx->biu_lock, flags); - #ifdef EFX_USE_QWORD_IO - value->u64[0] = (__force __le64)__raw_readq(membase + addr); - #else - value->u32[0] = (__force __le32)__raw_readl(membase + addr); - value->u32[1] = (__force __le32)__raw_readl(membase + addr + 4); - #endif - spin_unlock_irqrestore(&efx->biu_lock, flags); - - netif_vdbg(efx, hw, efx->net_dev, - "read from SRAM address %x, got "EFX_QWORD_FMT"\n", - addr, EFX_QWORD_VAL(*value)); - } - /* Read a 32-bit CSR or SRAM */ static inline void efx_readd(struct efx_nic *efx, efx_dword_t *value, unsigned int reg) diff --combined drivers/net/ethernet/sfc/selftest.c index 563c1e317ce9,e6d3bd4af044..894fad0bb5ea --- a/drivers/net/ethernet/sfc/selftest.c +++ b/drivers/net/ethernet/sfc/selftest.c @@@ -38,8 -38,7 +38,7 @@@ /* * Loopback test packet structure * - * The self-test should stress every RSS vector, and unfortunately - * Falcon only performs RSS on TCP/UDP packets. + * The self-test should stress every RSS vector. */ struct efx_loopback_payload { char pad[2]; /* Ensures ip is 4-byte aligned */ @@@ -426,7 -425,7 +425,7 @@@ static int efx_begin_loopback(struct ef for (i = 0; i < state->packet_count; i++) { /* Allocate an skb, holding an extra reference for * transmit completion counting */ - skb = alloc_skb(EFX_LOOPBACK_PAYLOAD_LEN, GFP_KERNEL); + skb = alloc_skb(sizeof(state->payload), GFP_KERNEL); if (!skb) return -ENOMEM; state->skbs[i] = skb; @@@ -584,10 -583,6 +583,6 @@@ efx_test_loopback(struct efx_tx_queue * return 0; }
- /* Wait for link up. On Falcon, we would prefer to rely on efx_monitor, but - * any contention on the mac lock (via e.g. efx_mac_mcast_work) causes it - * to delay and retry. Therefore, it's safer to just poll directly. Wait - * for link up and any faults to dissipate. */ static int efx_wait_for_link(struct efx_nic *efx) { struct efx_link_state *link_state = &efx->link_state; diff --combined drivers/net/phy/phy.c index a9ecfdd19624,8aec8e83038c..df54c137c5f5 --- a/drivers/net/phy/phy.c +++ b/drivers/net/phy/phy.c @@@ -455,6 -455,40 +455,40 @@@ int phy_do_ioctl_running(struct net_dev } EXPORT_SYMBOL(phy_do_ioctl_running);
+ /** + * __phy_hwtstamp_get - Get hardware timestamping configuration from PHY + * + * @phydev: the PHY device structure + * @config: structure holding the timestamping configuration + * + * Query the PHY device for its current hardware timestamping configuration. + */ + int __phy_hwtstamp_get(struct phy_device *phydev, + struct kernel_hwtstamp_config *config) + { + if (!phydev) + return -ENODEV; + + return phy_mii_ioctl(phydev, config->ifr, SIOCGHWTSTAMP); + } + + /** + * __phy_hwtstamp_set - Modify PHY hardware timestamping configuration + * + * @phydev: the PHY device structure + * @config: structure holding the timestamping configuration + * @extack: netlink extended ack structure, for error reporting + */ + int __phy_hwtstamp_set(struct phy_device *phydev, + struct kernel_hwtstamp_config *config, + struct netlink_ext_ack *extack) + { + if (!phydev) + return -ENODEV; + + return phy_mii_ioctl(phydev, config->ifr, SIOCSHWTSTAMP); + } + /** * phy_queue_state_machine - Trigger the state machine to run soon * @@@ -1184,11 -1218,9 +1218,11 @@@ void phy_stop_machine(struct phy_devic
static void phy_process_error(struct phy_device *phydev) { - mutex_lock(&phydev->lock); + /* phydev->lock must be held for the state change to be safe */ + if (!mutex_is_locked(&phydev->lock)) + phydev_err(phydev, "PHY-device data unsafe context\n"); + phydev->state = PHY_ERROR; - mutex_unlock(&phydev->lock);
phy_trigger_machine(phydev); } @@@ -1197,9 -1229,7 +1231,9 @@@ static void phy_error_precise(struct ph const void *func, int err) { WARN(1, "%pS: returned: %d\n", func, err); + mutex_lock(&phydev->lock); phy_process_error(phydev); + mutex_unlock(&phydev->lock); }
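With phy_process_error() now asserting the lock instead of taking it, the locking contract moves to the callers: phy_error() must run under phydev->lock, and phy_error_precise() acquires it explicitly as shown above. A caller sketch (illustrative only):

	int err;

	mutex_lock(&phydev->lock);
	err = phy_read(phydev, MII_BMSR);
	if (err < 0)
		phy_error(phydev);	/* safe: phydev->lock is held */
	mutex_unlock(&phydev->lock);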
/** @@@ -1208,7 -1238,8 +1242,7 @@@ * * Moves the PHY to the ERROR state in response to a read * or write error, and tells the controller the link is down. - * Must not be called from interrupt context, or while the - * phydev->lock is held. + * Must be called with phydev->lock held. */ void phy_error(struct phy_device *phydev) { diff --combined drivers/net/veth.c index ef8eacb596f7,8d5e12a5a845..da44bb1331ff --- a/drivers/net/veth.c +++ b/drivers/net/veth.c @@@ -26,7 -26,7 +26,7 @@@ #include <linux/ptr_ring.h> #include <linux/bpf_trace.h> #include <linux/net_tstamp.h> - #include <net/page_pool.h> + #include <net/page_pool/helpers.h>
#define DRV_NAME "veth" #define DRV_VERSION "1.0" @@@ -1861,7 -1861,10 +1861,7 @@@ static int veth_newlink(struct net *src
nla_peer = data[VETH_INFO_PEER]; ifmp = nla_data(nla_peer); - err = rtnl_nla_parse_ifla(peer_tb, - nla_data(nla_peer) + sizeof(struct ifinfomsg), - nla_len(nla_peer) - sizeof(struct ifinfomsg), - NULL); + err = rtnl_nla_parse_ifinfomsg(peer_tb, nla_peer, extack); if (err < 0) return err;
diff --combined drivers/net/wireless/ath/ath11k/pci.c index ec40adc1cb23,5fd08ffc2a9f..7c84a8a4a81e --- a/drivers/net/wireless/ath/ath11k/pci.c +++ b/drivers/net/wireless/ath/ath11k/pci.c @@@ -15,6 -15,7 +15,7 @@@ #include "mhi.h" #include "debug.h" #include "pcic.h" + #include "qmi.h"
#define ATH11K_PCI_BAR_NUM 0 #define ATH11K_PCI_DMA_MASK 32 @@@ -581,8 -582,8 +582,8 @@@ static void ath11k_pci_aspm_disable(str u16_get_bits(ab_pci->link_ctl, PCI_EXP_LNKCTL_ASPM_L1));
/* disable L0s and L1 */ - pcie_capability_write_word(ab_pci->pdev, PCI_EXP_LNKCTL, - ab_pci->link_ctl & ~PCI_EXP_LNKCTL_ASPMC); + pcie_capability_clear_word(ab_pci->pdev, PCI_EXP_LNKCTL, + PCI_EXP_LNKCTL_ASPMC);
set_bit(ATH11K_PCI_ASPM_RESTORE, &ab_pci->flags); } @@@ -590,10 -591,8 +591,10 @@@ static void ath11k_pci_aspm_restore(struct ath11k_pci *ab_pci) { if (test_and_clear_bit(ATH11K_PCI_ASPM_RESTORE, &ab_pci->flags)) - pcie_capability_write_word(ab_pci->pdev, PCI_EXP_LNKCTL, - ab_pci->link_ctl); + pcie_capability_clear_and_set_word(ab_pci->pdev, PCI_EXP_LNKCTL, + PCI_EXP_LNKCTL_ASPMC, + ab_pci->link_ctl & + PCI_EXP_LNKCTL_ASPMC); }
static int ath11k_pci_power_up(struct ath11k_base *ab) @@@ -899,6 -898,7 +900,7 @@@ unsupported_wcn6855_soc ath11k_err(ab, "failed to init core: %d\n", ret); goto err_irq_affinity_cleanup; } + ath11k_qmi_fwreset_from_cold_boot(ab); return 0;
err_irq_affinity_cleanup: diff --combined include/linux/lsm_hook_defs.h index af796986baee,4f2621e87634..f5b7352afaac --- a/include/linux/lsm_hook_defs.h +++ b/include/linux/lsm_hook_defs.h @@@ -54,7 -54,6 +54,7 @@@ LSM_HOOK(int, 0, bprm_creds_from_file, LSM_HOOK(int, 0, bprm_check_security, struct linux_binprm *bprm) LSM_HOOK(void, LSM_RET_VOID, bprm_committing_creds, struct linux_binprm *bprm) LSM_HOOK(void, LSM_RET_VOID, bprm_committed_creds, struct linux_binprm *bprm) +LSM_HOOK(int, 0, fs_context_submount, struct fs_context *fc, struct super_block *reference) LSM_HOOK(int, 0, fs_context_dup, struct fs_context *fc, struct fs_context *src_sc) LSM_HOOK(int, -ENOPARAM, fs_context_parse_param, struct fs_context *fc, @@@ -317,7 -316,7 +317,7 @@@ LSM_HOOK(int, 0, sk_alloc_security, str LSM_HOOK(void, LSM_RET_VOID, sk_free_security, struct sock *sk) LSM_HOOK(void, LSM_RET_VOID, sk_clone_security, const struct sock *sk, struct sock *newsk) - LSM_HOOK(void, LSM_RET_VOID, sk_getsecid, struct sock *sk, u32 *secid) + LSM_HOOK(void, LSM_RET_VOID, sk_getsecid, const struct sock *sk, u32 *secid) LSM_HOOK(void, LSM_RET_VOID, sock_graft, struct sock *sk, struct socket *parent) LSM_HOOK(int, 0, inet_conn_request, const struct sock *sk, struct sk_buff *skb, struct request_sock *req) diff --combined include/linux/memcontrol.h index 11810a2cfd2d,dbf26bc89dd4..ab94ad4597d0 --- a/include/linux/memcontrol.h +++ b/include/linux/memcontrol.h @@@ -61,6 -61,7 +61,6 @@@ struct mem_cgroup_reclaim_cookie #ifdef CONFIG_MEMCG
#define MEM_CGROUP_ID_SHIFT 16 -#define MEM_CGROUP_ID_MAX USHRT_MAX
struct mem_cgroup_id { int id; @@@ -111,9 -112,6 +111,9 @@@ struct lruvec_stats /* Aggregated (CPU and subtree) state */ long state[NR_VM_NODE_STAT_ITEMS];
+ /* Non-hierarchical (CPU aggregated) state */ + long state_local[NR_VM_NODE_STAT_ITEMS]; + /* Pending child counts during tree propagation */ long state_pending[NR_VM_NODE_STAT_ITEMS]; }; @@@ -286,6 -284,11 +286,11 @@@ struct mem_cgroup atomic_long_t memory_events[MEMCG_NR_MEMORY_EVENTS]; atomic_long_t memory_events_local[MEMCG_NR_MEMORY_EVENTS];
+ /* + * Hint of reclaim pressure for socket memory management. Note + * that this indicator should NOT be used in legacy cgroup mode + * where socket memory is accounted/charged separately. + */ unsigned long socket_pressure;
/* Legacy tcp memory accounting */ @@@ -585,7 -588,7 +590,7 @@@ static inline void mem_cgroup_protectio /* * There is no reclaim protection applied to a targeted reclaim. * We are special casing this specific case here because - * mem_cgroup_protected calculation is not robust enough to keep + * mem_cgroup_calculate_protection is not robust enough to keep * the protection invariant for calculated effective values for * parallel reclaimers with different reclaim target. This is * especially a problem for tail memcgs (as they have pages on LRU) @@@ -863,7 -866,8 +868,7 @@@ static inline struct mem_cgroup *lruvec * parent_mem_cgroup - find the accounting parent of a memcg * @memcg: memcg whose parent to find * - * Returns the parent memcg, or NULL if this is the root or the memory - * controller is in legacy no-hierarchy mode. + * Returns the parent memcg, or NULL if this is the root. */ static inline struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg) { @@@ -1021,12 -1025,14 +1026,12 @@@ static inline unsigned long lruvec_page { struct mem_cgroup_per_node *pn; long x = 0; - int cpu;
if (mem_cgroup_disabled()) return node_page_state(lruvec_pgdat(lruvec), idx);
pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec); - for_each_possible_cpu(cpu) - x += per_cpu(pn->lruvec_stats_percpu->state[idx], cpu); + x = READ_ONCE(pn->lruvec_stats.state_local[idx]); #ifdef CONFIG_SMP if (x < 0) x = 0; @@@ -1157,6 -1163,7 +1162,6 @@@ unsigned long mem_cgroup_soft_limit_rec #else /* CONFIG_MEMCG */
#define MEM_CGROUP_ID_SHIFT 0 -#define MEM_CGROUP_ID_MAX 0
static inline struct mem_cgroup *folio_memcg(struct folio *folio) { @@@ -1725,8 -1732,8 +1730,8 @@@ void mem_cgroup_sk_alloc(struct sock *s void mem_cgroup_sk_free(struct sock *sk); static inline bool mem_cgroup_under_socket_pressure(struct mem_cgroup *memcg) { - if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && memcg->tcpmem_pressure) - return true; + if (!cgroup_subsys_on_dfl(memory_cgrp_subsys)) + return !!memcg->tcpmem_pressure; do { if (time_before(jiffies, READ_ONCE(memcg->socket_pressure))) return true; @@@ -1759,7 -1766,7 +1764,7 @@@ int __memcg_kmem_charge_page(struct pag void __memcg_kmem_uncharge_page(struct page *page, int order);
struct obj_cgroup *get_obj_cgroup_from_current(void); -struct obj_cgroup *get_obj_cgroup_from_page(struct page *page); +struct obj_cgroup *get_obj_cgroup_from_folio(struct folio *folio);
int obj_cgroup_charge(struct obj_cgroup *objcg, gfp_t gfp, size_t size); void obj_cgroup_uncharge(struct obj_cgroup *objcg, size_t size); @@@ -1843,7 -1850,7 +1848,7 @@@ static inline void __memcg_kmem_uncharg { }
-static inline struct obj_cgroup *get_obj_cgroup_from_page(struct page *page) +static inline struct obj_cgroup *get_obj_cgroup_from_folio(struct folio *folio) { return NULL; } diff --combined include/linux/security.h index bac98ea18f78,994cf099d9ac..b2c38bfe5647 --- a/include/linux/security.h +++ b/include/linux/security.h @@@ -293,7 -293,6 +293,7 @@@ int security_bprm_creds_from_file(struc int security_bprm_check(struct linux_binprm *bprm); void security_bprm_committing_creds(struct linux_binprm *bprm); void security_bprm_committed_creds(struct linux_binprm *bprm); +int security_fs_context_submount(struct fs_context *fc, struct super_block *reference); int security_fs_context_dup(struct fs_context *fc, struct fs_context *src_fc); int security_fs_context_parse_param(struct fs_context *fc, struct fs_parameter *param); int security_sb_alloc(struct super_block *sb); @@@ -630,11 -629,6 +630,11 @@@ static inline void security_bprm_commit { }
+static inline int security_fs_context_submount(struct fs_context *fc, + struct super_block *reference) +{ + return 0; +} static inline int security_fs_context_dup(struct fs_context *fc, struct fs_context *src_fc) { @@@ -1445,7 -1439,8 +1445,8 @@@ int security_socket_getpeersec_dgram(st int security_sk_alloc(struct sock *sk, int family, gfp_t priority); void security_sk_free(struct sock *sk); void security_sk_clone(const struct sock *sk, struct sock *newsk); - void security_sk_classify_flow(struct sock *sk, struct flowi_common *flic); + void security_sk_classify_flow(const struct sock *sk, + struct flowi_common *flic); void security_req_classify_flow(const struct request_sock *req, struct flowi_common *flic); void security_sock_graft(struct sock*sk, struct socket *parent); @@@ -1603,7 -1598,7 +1604,7 @@@ static inline void security_sk_clone(co { }
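The new fs_context_submount hook lets an LSM initialise or veto a filesystem context created for an automatic submount of an existing superblock. A hypothetical caller sketch (the real call site is in fs code outside this diff):

    /* Hypothetical caller: give the LSM first look at the new context
     * against the superblock it is a submount of. */
    static int example_prepare_submount_fc(struct fs_context *fc,
                                           struct super_block *reference)
    {
        int err = security_fs_context_submount(fc, reference);

        if (err)
            return err;
        /* ...continue configuring fc... */
        return 0;
    }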
- static inline void security_sk_classify_flow(struct sock *sk, + static inline void security_sk_classify_flow(const struct sock *sk, struct flowi_common *flic) { } diff --combined include/net/inet_sock.h index 491ceb7ebe5d,acbb93d7607a..2de0e4d4a027 --- a/include/net/inet_sock.h +++ b/include/net/inet_sock.h @@@ -194,13 -194,13 +194,13 @@@ struct rtable * @inet_rcv_saddr - Bound local IPv4 addr * @inet_dport - Destination port * @inet_num - Local port + * @inet_flags - various atomic flags * @inet_saddr - Sending source * @uc_ttl - Unicast TTL * @inet_sport - Source port * @inet_id - ID counter for DF pkts * @tos - TOS * @mc_ttl - Multicasting TTL - * @is_icsk - is this an inet_connection_sock? * @uc_index - Unicast outgoing device index * @mc_index - Multicast device index * @mc_list - Group array @@@ -218,57 -218,88 +218,88 @@@ struct inet_sock #define inet_dport sk.__sk_common.skc_dport #define inet_num sk.__sk_common.skc_num
+ unsigned long inet_flags; __be32 inet_saddr; __s16 uc_ttl; - __u16 cmsg_flags; + __be16 inet_sport; struct ip_options_rcu __rcu *inet_opt; - __u16 inet_id; + atomic_t inet_id; - __be16 inet_sport;
__u8 tos; __u8 min_ttl; __u8 mc_ttl; __u8 pmtudisc; - __u8 recverr:1, - is_icsk:1, - freebind:1, - hdrincl:1, - mc_loop:1, - transparent:1, - mc_all:1, - nodefrag:1; - __u8 bind_address_no_port:1, - recverr_rfc4884:1, - defer_connect:1; /* Indicates that fastopen_connect is set - * and cookie exists so we defer connect - * until first data frame is written - */ __u8 rcv_tos; __u8 convert_csum; int uc_index; int mc_index; __be32 mc_addr; - struct ip_mc_socklist __rcu *mc_list; - struct inet_cork_full cork; struct { __u16 lo; __u16 hi; } local_port_range; + + struct ip_mc_socklist __rcu *mc_list; + struct inet_cork_full cork; };
#define IPCORK_OPT 1 /* ip-options has been held in ipcork.opt */ #define IPCORK_ALLFRAG 2 /* always fragment (for ipv6 for now) */
+ enum { + INET_FLAGS_PKTINFO = 0, + INET_FLAGS_TTL = 1, + INET_FLAGS_TOS = 2, + INET_FLAGS_RECVOPTS = 3, + INET_FLAGS_RETOPTS = 4, + INET_FLAGS_PASSSEC = 5, + INET_FLAGS_ORIGDSTADDR = 6, + INET_FLAGS_CHECKSUM = 7, + INET_FLAGS_RECVFRAGSIZE = 8, + + INET_FLAGS_RECVERR = 9, + INET_FLAGS_RECVERR_RFC4884 = 10, + INET_FLAGS_FREEBIND = 11, + INET_FLAGS_HDRINCL = 12, + INET_FLAGS_MC_LOOP = 13, + INET_FLAGS_MC_ALL = 14, + INET_FLAGS_TRANSPARENT = 15, + INET_FLAGS_IS_ICSK = 16, + INET_FLAGS_NODEFRAG = 17, + INET_FLAGS_BIND_ADDRESS_NO_PORT = 18, + INET_FLAGS_DEFER_CONNECT = 19, + }; + /* cmsg flags for inet */ - #define IP_CMSG_PKTINFO BIT(0) - #define IP_CMSG_TTL BIT(1) - #define IP_CMSG_TOS BIT(2) - #define IP_CMSG_RECVOPTS BIT(3) - #define IP_CMSG_RETOPTS BIT(4) - #define IP_CMSG_PASSSEC BIT(5) - #define IP_CMSG_ORIGDSTADDR BIT(6) - #define IP_CMSG_CHECKSUM BIT(7) - #define IP_CMSG_RECVFRAGSIZE BIT(8) + #define IP_CMSG_PKTINFO BIT(INET_FLAGS_PKTINFO) + #define IP_CMSG_TTL BIT(INET_FLAGS_TTL) + #define IP_CMSG_TOS BIT(INET_FLAGS_TOS) + #define IP_CMSG_RECVOPTS BIT(INET_FLAGS_RECVOPTS) + #define IP_CMSG_RETOPTS BIT(INET_FLAGS_RETOPTS) + #define IP_CMSG_PASSSEC BIT(INET_FLAGS_PASSSEC) + #define IP_CMSG_ORIGDSTADDR BIT(INET_FLAGS_ORIGDSTADDR) + #define IP_CMSG_CHECKSUM BIT(INET_FLAGS_CHECKSUM) + #define IP_CMSG_RECVFRAGSIZE BIT(INET_FLAGS_RECVFRAGSIZE) + + #define IP_CMSG_ALL (IP_CMSG_PKTINFO | IP_CMSG_TTL | \ + IP_CMSG_TOS | IP_CMSG_RECVOPTS | \ + IP_CMSG_RETOPTS | IP_CMSG_PASSSEC | \ + IP_CMSG_ORIGDSTADDR | IP_CMSG_CHECKSUM | \ + IP_CMSG_RECVFRAGSIZE) + + static inline unsigned long inet_cmsg_flags(const struct inet_sock *inet) + { + return READ_ONCE(inet->inet_flags) & IP_CMSG_ALL; + } + + #define inet_test_bit(nr, sk) \ + test_bit(INET_FLAGS_##nr, &inet_sk(sk)->inet_flags) + #define inet_set_bit(nr, sk) \ + set_bit(INET_FLAGS_##nr, &inet_sk(sk)->inet_flags) + #define inet_clear_bit(nr, sk) \ + clear_bit(INET_FLAGS_##nr, &inet_sk(sk)->inet_flags) + #define inet_assign_bit(nr, sk, val) \ + assign_bit(INET_FLAGS_##nr, &inet_sk(sk)->inet_flags, val)
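Usage sketch for the new accessors (these call sites are hypothetical; real conversions appear further down, e.g. in inet_sk_flowi_flags()). Since the flags now share one unsigned long, the generic atomic bitops apply and no socket lock is needed to flip a boolean option:

    static void example_set_freebind(struct sock *sk, bool on)
    {
        inet_assign_bit(FREEBIND, sk, on);      /* atomic set/clear */
    }

    static bool example_wants_anysrc(const struct sock *sk)
    {
        /* Same shape as the inet_sk_flowi_flags() conversion below. */
        return inet_test_bit(TRANSPARENT, sk) ||
               inet_test_bit(HDRINCL, sk);
    }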
static inline bool sk_is_inet(struct sock *sk) { @@@ -363,7 -394,7 +394,7 @@@ static inline __u8 inet_sk_flowi_flags( { __u8 flags = 0;
- if (inet_sk(sk)->transparent || inet_sk(sk)->hdrincl) + if (inet_test_bit(TRANSPARENT, sk) || inet_test_bit(HDRINCL, sk)) flags |= FLOWI_FLAG_ANYSRC; return flags; } @@@ -389,7 -420,8 +420,8 @@@ static inline bool inet_can_nonlocal_bi struct inet_sock *inet) { return READ_ONCE(net->ipv4.sysctl_ip_nonlocal_bind) || - inet->freebind || inet->transparent; + test_bit(INET_FLAGS_FREEBIND, &inet->inet_flags) || + test_bit(INET_FLAGS_TRANSPARENT, &inet->inet_flags); }
static inline bool inet_addr_valid_or_nonlocal(struct net *net, diff --combined include/net/sock.h index 690e22139543,2b9c0ec79e40..11d503417591 --- a/include/net/sock.h +++ b/include/net/sock.h @@@ -1323,7 -1323,6 +1323,7 @@@ struct proto /* * Pressure flag: try to collapse. * Technical note: it is used by multiple contexts non atomically. + * Make sure to use READ_ONCE()/WRITE_ONCE() for all reads/writes. * All the __sk_mem_schedule() is of this nature: accounting * is strict, actions are advisory and have some latency. */ @@@ -1340,6 -1339,7 +1340,7 @@@
struct kmem_cache *slab; unsigned int obj_size; + unsigned int ipv6_pinfo_offset; slab_flags_t slab_flags; unsigned int useroffset; /* Usercopy region offset */ unsigned int usersize; /* Usercopy region size */ @@@ -1424,7 -1424,7 +1425,7 @@@ static inline bool sk_has_memory_pressu static inline bool sk_under_global_memory_pressure(const struct sock *sk) { return sk->sk_prot->memory_pressure && - !!*sk->sk_prot->memory_pressure; + !!READ_ONCE(*sk->sk_prot->memory_pressure); }
static inline bool sk_under_memory_pressure(const struct sock *sk) @@@ -1436,7 -1436,7 +1437,7 @@@ mem_cgroup_under_socket_pressure(sk->sk_memcg)) return true;
- return !!*sk->sk_prot->memory_pressure; + return !!READ_ONCE(*sk->sk_prot->memory_pressure); }
static inline long @@@ -1513,7 -1513,7 +1514,7 @@@ proto_memory_pressure(struct proto *pro { if (!prot->memory_pressure) return false; - return !!*prot->memory_pressure; + return !!READ_ONCE(*prot->memory_pressure); }
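All three readers of *memory_pressure now go through READ_ONCE() because, as the comment added earlier notes, the flag is written without the socket lock. A matching WRITE_ONCE() on the writer side would pair with these reads; a sketch under that assumption (the writer shown is illustrative, not part of this diff):

    /* Hypothetical writer pairing for the READ_ONCE() annotations. */
    static void example_enter_memory_pressure(struct sock *sk)
    {
        if (sk->sk_prot->memory_pressure)
            WRITE_ONCE(*sk->sk_prot->memory_pressure, 1);
    }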
@@@ -2821,20 -2821,23 +2822,23 @@@ sk_is_refcounted(struct sock *sk * skb_steal_sock - steal a socket from an sk_buff * @skb: sk_buff to steal the socket from * @refcounted: is set to true if the socket is reference-counted + * @prefetched: is set to true if the socket was assigned from bpf */ static inline struct sock * - skb_steal_sock(struct sk_buff *skb, bool *refcounted) + skb_steal_sock(struct sk_buff *skb, bool *refcounted, bool *prefetched) { if (skb->sk) { struct sock *sk = skb->sk;
*refcounted = true; - if (skb_sk_is_prefetched(skb)) + *prefetched = skb_sk_is_prefetched(skb); + if (*prefetched) *refcounted = sk_is_refcounted(sk); skb->destructor = NULL; skb->sk = NULL; return sk; } + *prefetched = false; *refcounted = false; return NULL; } diff --combined include/net/tcp.h index 3a818fe1a8a5,07b21d9a9620..91688d0dadcd --- a/include/net/tcp.h +++ b/include/net/tcp.h @@@ -45,6 -45,7 +45,6 @@@ #include <linux/memcontrol.h> #include <linux/bpf-cgroup.h> #include <linux/siphash.h> -#include <linux/net_mm.h>
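Back to skb_steal_sock() just above: callers now learn both whether the stolen socket holds a reference and whether BPF prefetched it into the skb. A hypothetical receive-path sketch of the new calling convention:

    bool refcounted, prefetched;
    struct sock *sk = skb_steal_sock(skb, &refcounted, &prefetched);

    if (sk) {
        if (prefetched) {
            /* e.g. re-validate that the BPF-assigned socket really
             * matches this packet before trusting it */
        }
        /* ...deliver skb to sk... */
        if (refcounted)
            sock_put(sk);
    }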
extern struct inet_hashinfo tcp_hashinfo;
@@@ -322,7 -323,6 +322,6 @@@ int tcp_v4_early_demux(struct sk_buff * int tcp_v4_rcv(struct sk_buff *skb);
void tcp_remove_empty_skb(struct sock *sk); - int tcp_v4_tw_remember_stamp(struct inet_timewait_sock *tw); int tcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t size); int tcp_sendmsg_locked(struct sock *sk, struct msghdr *msg, size_t size); int tcp_sendmsg_fastopen(struct sock *sk, struct msghdr *msg, int *copied, @@@ -349,7 -349,6 +348,6 @@@ ssize_t tcp_splice_read(struct socket * struct sk_buff *tcp_stream_alloc_skb(struct sock *sk, gfp_t gfp, bool force_schedule);
- void tcp_enter_quickack_mode(struct sock *sk, unsigned int max_quickacks); static inline void tcp_dec_quickack_mode(struct sock *sk, const unsigned int pkts) { @@@ -605,7 -604,6 +603,6 @@@ int tcp_fragment(struct sock *sk, enum unsigned int mss_now, gfp_t gfp);
void tcp_send_probe0(struct sock *); - void tcp_send_partial(struct sock *); int tcp_write_wakeup(struct sock *, int mib); void tcp_send_fin(struct sock *sk); void tcp_send_active_reset(struct sock *sk, gfp_t priority); @@@ -623,7 -621,6 +620,6 @@@ void tcp_skb_collapse_tstamp(struct sk_ void tcp_rearm_rto(struct sock *sk); void tcp_synack_rtt_meas(struct sock *sk, struct request_sock *req); void tcp_reset(struct sock *sk, struct sk_buff *skb); - void tcp_skb_mark_lost_uncond_verify(struct tcp_sock *tp, struct sk_buff *skb); void tcp_fin(struct sock *sk); void tcp_check_space(struct sock *sk); void tcp_sack_compress_send_ack(struct sock *sk); @@@ -1431,13 -1428,39 +1427,39 @@@ void tcp_select_initial_window(const st __u32 *window_clamp, int wscale_ok, __u8 *rcv_wscale, __u32 init_rcv_wnd);
+ static inline int __tcp_win_from_space(u8 scaling_ratio, int space) + { + s64 scaled_space = (s64)space * scaling_ratio; + + return scaled_space >> TCP_RMEM_TO_WIN_SCALE; + } + static inline int tcp_win_from_space(const struct sock *sk, int space) { - int tcp_adv_win_scale = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_adv_win_scale); + return __tcp_win_from_space(tcp_sk(sk)->scaling_ratio, space); + } + + /* inverse of __tcp_win_from_space() */ + static inline int __tcp_space_from_win(u8 scaling_ratio, int win) + { + u64 val = (u64)win << TCP_RMEM_TO_WIN_SCALE;
- return tcp_adv_win_scale <= 0 ? - (space>>(-tcp_adv_win_scale)) : - space - (space>>tcp_adv_win_scale); + do_div(val, scaling_ratio); + return val; + } + + static inline int tcp_space_from_win(const struct sock *sk, int win) + { + return __tcp_space_from_win(tcp_sk(sk)->scaling_ratio, win); + } + + static inline void tcp_scaling_ratio_init(struct sock *sk) + { + /* Assume a conservative default of 1200 bytes of payload per 4K page. + * This may be adjusted later in tcp_measure_rcv_mss(). + */ + tcp_sk(sk)->scaling_ratio = (1200 << TCP_RMEM_TO_WIN_SCALE) / + SKB_TRUESIZE(4096); }
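The fixed tcp_adv_win_scale heuristic gives way to a measured payload/truesize ratio stored per socket. A standalone arithmetic check of the round trip; TCP_RMEM_TO_WIN_SCALE and the truesize of a 4K-page skb are assumed values here, both being config dependent in a real kernel:

    #include <stdio.h>
    #include <stdint.h>

    #define RMEM_TO_WIN_SCALE 8     /* assumed TCP_RMEM_TO_WIN_SCALE */
    #define TRUESIZE_4K 4608        /* assumed SKB_TRUESIZE(4096) */

    static int win_from_space(uint8_t ratio, int space)
    {
        return (int)(((int64_t)space * ratio) >> RMEM_TO_WIN_SCALE);
    }

    static int space_from_win(uint8_t ratio, int win)
    {
        return (int)(((uint64_t)win << RMEM_TO_WIN_SCALE) / ratio);
    }

    int main(void)
    {
        /* Default from tcp_scaling_ratio_init(): ~1200 payload bytes per
         * 4K page, i.e. 1200 * 256 / 4608 = 66, roughly 26% of truesize. */
        uint8_t ratio = (1200 << RMEM_TO_WIN_SCALE) / TRUESIZE_4K;

        printf("ratio          = %u/256\n", ratio);
        printf("win(128K buf)  = %d\n", win_from_space(ratio, 128 * 1024));
        printf("space(64K win) = %d\n", space_from_win(ratio, 64 * 1024));
        return 0;
    }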
/* Note: caller must be prepared to deal with negative returns */ @@@ -2008,7 -2031,7 +2030,7 @@@ static inline bool inet_sk_transparent( case TCP_NEW_SYN_RECV: return inet_rsk(inet_reqsk(sk))->no_srccheck; } - return inet_sk(sk)->transparent; + return inet_test_bit(TRANSPARENT, sk); }
/* Determines whether this is a thin stream (which may suffer from @@@ -2335,7 -2358,6 +2357,6 @@@ struct sk_msg struct sk_psock;
#ifdef CONFIG_BPF_SYSCALL - struct proto *tcp_bpf_get_proto(struct sock *sk, struct sk_psock *psock); int tcp_bpf_update_proto(struct sock *sk, struct sk_psock *psock, bool restore); void tcp_bpf_clone(const struct sock *sk, struct sock *newsk); #endif /* CONFIG_BPF_SYSCALL */ diff --combined net/batman-adv/hard-interface.c index 24c9c0c3f316,5a4ff9a81e74..96a412beab2d --- a/net/batman-adv/hard-interface.c +++ b/net/batman-adv/hard-interface.c @@@ -9,6 -9,7 +9,7 @@@
#include <linux/atomic.h> #include <linux/byteorder/generic.h> + #include <linux/compiler.h> #include <linux/container_of.h> #include <linux/errno.h> #include <linux/gfp.h> @@@ -630,19 -631,7 +631,19 @@@ out */ void batadv_update_min_mtu(struct net_device *soft_iface) { - soft_iface->mtu = batadv_hardif_min_mtu(soft_iface); + struct batadv_priv *bat_priv = netdev_priv(soft_iface); + int limit_mtu; + int mtu; + + mtu = batadv_hardif_min_mtu(soft_iface); + + if (bat_priv->mtu_set_by_user) + limit_mtu = bat_priv->mtu_set_by_user; + else + limit_mtu = ETH_DATA_LEN; + + mtu = min(mtu, limit_mtu); + dev_set_mtu(soft_iface, mtu);
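Worked numbers for the clamp introduced in batadv_update_min_mtu() above: the soft interface MTU becomes the minimum of what the hard interfaces can carry and what the user last configured, falling back to ETH_DATA_LEN. A standalone toy check with invented values:

    #include <stdio.h>

    #define ETH_DATA_LEN 1500

    /* Toy model of the clamp above; the sample values are made up. */
    static int example_min_mtu(int hardif_min, int mtu_set_by_user)
    {
        int limit = mtu_set_by_user ? mtu_set_by_user : ETH_DATA_LEN;

        return hardif_min < limit ? hardif_min : limit;
    }

    int main(void)
    {
        printf("%d\n", example_min_mtu(1560, 0));    /* no user MTU -> 1500 */
        printf("%d\n", example_min_mtu(1560, 1400)); /* user MTU sticks -> 1400 */
        printf("%d\n", example_min_mtu(1280, 1400)); /* hardif limit wins -> 1280 */
        return 0;
    }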
/* Check if the local translate table should be cleaned up to match a * new (and smaller) MTU. @@@ -711,9 -700,14 +712,14 @@@ int batadv_hardif_enable_interface(stru struct batadv_priv *bat_priv; __be16 ethertype = htons(ETH_P_BATMAN); int max_header_len = batadv_max_header_len(); + unsigned int required_mtu; + unsigned int hardif_mtu; int ret;
- if (hard_iface->net_dev->mtu < ETH_MIN_MTU + max_header_len) + hardif_mtu = READ_ONCE(hard_iface->net_dev->mtu); + required_mtu = READ_ONCE(soft_iface->mtu) + max_header_len; + + if (hardif_mtu < ETH_MIN_MTU + max_header_len) return -EINVAL;
if (hard_iface->if_status != BATADV_IF_NOT_IN_USE) @@@ -746,18 -740,18 +752,18 @@@ hard_iface->net_dev->name);
if (atomic_read(&bat_priv->fragmentation) && - hard_iface->net_dev->mtu < ETH_DATA_LEN + max_header_len) + hardif_mtu < required_mtu) batadv_info(hard_iface->soft_iface, "The MTU of interface %s is too small (%i) to handle the transport of batman-adv packets. Packets going over this interface will be fragmented on layer2 which could impact the performance. Setting the MTU to %i would solve the problem.\n", - hard_iface->net_dev->name, hard_iface->net_dev->mtu, - ETH_DATA_LEN + max_header_len); + hard_iface->net_dev->name, hardif_mtu, + required_mtu);
if (!atomic_read(&bat_priv->fragmentation) && - hard_iface->net_dev->mtu < ETH_DATA_LEN + max_header_len) + hardif_mtu < required_mtu) batadv_info(hard_iface->soft_iface, "The MTU of interface %s is too small (%i) to handle the transport of batman-adv packets. If you experience problems getting traffic through try increasing the MTU to %i.\n", - hard_iface->net_dev->name, hard_iface->net_dev->mtu, - ETH_DATA_LEN + max_header_len); + hard_iface->net_dev->name, hardif_mtu, + required_mtu);
if (batadv_hardif_is_iface_up(hard_iface)) batadv_hardif_activate_interface(hard_iface); diff --combined net/batman-adv/netlink.c index 6efbc9275aec,d37872b34281..0c64d81a7761 --- a/net/batman-adv/netlink.c +++ b/net/batman-adv/netlink.c @@@ -377,7 -377,7 +377,7 @@@ nla_put_failure * * Return: 0 on success, < 0 on error */ - int batadv_netlink_notify_mesh(struct batadv_priv *bat_priv) + static int batadv_netlink_notify_mesh(struct batadv_priv *bat_priv) { struct sk_buff *msg; int ret; @@@ -495,10 -495,7 +495,10 @@@ static int batadv_netlink_set_mesh(stru attr = info->attrs[BATADV_ATTR_FRAGMENTATION_ENABLED];
atomic_set(&bat_priv->fragmentation, !!nla_get_u8(attr)); + + rtnl_lock(); batadv_update_min_mtu(bat_priv->soft_iface); + rtnl_unlock(); }
if (info->attrs[BATADV_ATTR_GW_BANDWIDTH_DOWN]) { @@@ -551,15 -548,12 +551,12 @@@ * algorithm in use implements the GW API */
- u32 sel_class_max = 0xffffffffu; + u32 sel_class_max = bat_priv->algo_ops->gw.sel_class_max; u32 sel_class;
attr = info->attrs[BATADV_ATTR_GW_SEL_CLASS]; sel_class = nla_get_u32(attr);
- if (!bat_priv->algo_ops->gw.store_sel_class) - sel_class_max = BATADV_TQ_MAX_VALUE; - if (sel_class >= 1 && sel_class <= sel_class_max) { atomic_set(&bat_priv->gw.sel_class, sel_class); batadv_gw_reselect(bat_priv); @@@ -861,8 -855,8 +858,8 @@@ nla_put_failure * * Return: 0 on success, < 0 on error */ - int batadv_netlink_notify_hardif(struct batadv_priv *bat_priv, - struct batadv_hard_iface *hard_iface) + static int batadv_netlink_notify_hardif(struct batadv_priv *bat_priv, + struct batadv_hard_iface *hard_iface) { struct sk_buff *msg; int ret; @@@ -1076,8 -1070,8 +1073,8 @@@ nla_put_failure * * Return: 0 on success, < 0 on error */ - int batadv_netlink_notify_vlan(struct batadv_priv *bat_priv, - struct batadv_softif_vlan *vlan) + static int batadv_netlink_notify_vlan(struct batadv_priv *bat_priv, + struct batadv_softif_vlan *vlan) { struct sk_buff *msg; int ret; diff --combined net/batman-adv/soft-interface.c index 85d00dc9ce32,f7947fad06f2..1bf1232a4f75 --- a/net/batman-adv/soft-interface.c +++ b/net/batman-adv/soft-interface.c @@@ -153,14 -153,11 +153,14 @@@ static int batadv_interface_set_mac_add
static int batadv_interface_change_mtu(struct net_device *dev, int new_mtu) { + struct batadv_priv *bat_priv = netdev_priv(dev); + /* check ranges */ - if (new_mtu < 68 || new_mtu > batadv_hardif_min_mtu(dev)) + if (new_mtu < ETH_MIN_MTU || new_mtu > batadv_hardif_min_mtu(dev)) return -EINVAL;
dev->mtu = new_mtu; + bat_priv->mtu_set_by_user = new_mtu;
return 0; } diff --combined net/batman-adv/types.h index cf1a0eafe3ab,54c2b8fa48cc..17d5ea1d8e84 --- a/net/batman-adv/types.h +++ b/net/batman-adv/types.h @@@ -1546,12 -1546,6 +1546,12 @@@ struct batadv_priv /** @soft_iface: net device which holds this struct as private data */ struct net_device *soft_iface;
+ /** + * @mtu_set_by_user: MTU was set once by user + * protected by rtnl_lock + */ + int mtu_set_by_user; + /** * @bat_counters: mesh internal traffic statistic counters (see * batadv_counters) @@@ -2197,11 -2191,10 +2197,10 @@@ struct batadv_algo_gw_ops void (*init_sel_class)(struct batadv_priv *bat_priv);
/** - * @store_sel_class: parse and stores a new GW selection class - * (optional) + * @sel_class_max: maximum allowed GW selection class */ - ssize_t (*store_sel_class)(struct batadv_priv *bat_priv, char *buff, - size_t count); + u32 sel_class_max; + /** * @get_best_gw_node: select the best GW from the list of available * nodes (optional) diff --combined net/core/rtnetlink.c index bcebdeb59163,6d2180b8edb3..7aba4d63b069 --- a/net/core/rtnetlink.c +++ b/net/core/rtnetlink.c @@@ -61,7 -61,7 +61,7 @@@ #include "dev.h"
#define RTNL_MAX_TYPE 50 - #define RTNL_SLAVE_MAX_TYPE 43 + #define RTNL_SLAVE_MAX_TYPE 44
struct rtnl_link { rtnl_doit_func doit; @@@ -1273,7 -1273,6 +1273,6 @@@ static noinline_for_stack int rtnl_fill static noinline_for_stack int rtnl_fill_vfinfo(struct sk_buff *skb, struct net_device *dev, int vfs_num, - struct nlattr *vfinfo, u32 ext_filter_mask) { struct ifla_vf_rss_query_en vf_rss_query_en; @@@ -1343,7 -1342,7 +1342,7 @@@ vf_trust.setting = ivi.trusted; vf = nla_nest_start_noflag(skb, IFLA_VF_INFO); if (!vf) - goto nla_put_vfinfo_failure; + return -EMSGSIZE; if (nla_put(skb, IFLA_VF_MAC, sizeof(vf_mac), &vf_mac) || nla_put(skb, IFLA_VF_BROADCAST, sizeof(vf_broadcast), &vf_broadcast) || nla_put(skb, IFLA_VF_VLAN, sizeof(vf_vlan), &vf_vlan) || @@@ -1414,8 -1413,6 +1413,6 @@@
nla_put_vf_failure: nla_nest_cancel(skb, vf); - nla_put_vfinfo_failure: - nla_nest_cancel(skb, vfinfo); return -EMSGSIZE; }
@@@ -1441,8 -1438,10 +1438,10 @@@ static noinline_for_stack int rtnl_fill return -EMSGSIZE;
for (i = 0; i < num_vfs; i++) { - if (rtnl_fill_vfinfo(skb, dev, i, vfinfo, ext_filter_mask)) + if (rtnl_fill_vfinfo(skb, dev, i, ext_filter_mask)) { + nla_nest_cancel(skb, vfinfo); return -EMSGSIZE; + } }
nla_nest_end(skb, vfinfo); @@@ -2268,27 -2267,13 +2267,27 @@@ out_err return err; }
-int rtnl_nla_parse_ifla(struct nlattr **tb, const struct nlattr *head, int len, - struct netlink_ext_ack *exterr) +int rtnl_nla_parse_ifinfomsg(struct nlattr **tb, const struct nlattr *nla_peer, + struct netlink_ext_ack *exterr) { - return nla_parse_deprecated(tb, IFLA_MAX, head, len, ifla_policy, + const struct ifinfomsg *ifmp; + const struct nlattr *attrs; + size_t len; + + ifmp = nla_data(nla_peer); + attrs = nla_data(nla_peer) + sizeof(struct ifinfomsg); + len = nla_len(nla_peer) - sizeof(struct ifinfomsg); + + if (ifmp->ifi_index < 0) { + NL_SET_ERR_MSG_ATTR(exterr, nla_peer, + "ifindex can't be negative"); + return -EINVAL; + } + + return nla_parse_deprecated(tb, IFLA_MAX, attrs, len, ifla_policy, exterr); } -EXPORT_SYMBOL(rtnl_nla_parse_ifla); +EXPORT_SYMBOL(rtnl_nla_parse_ifinfomsg);
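The replacement helper takes the whole peer attribute, performs the ifinfomsg pointer arithmetic itself, and rejects a negative ifi_index before parsing the nested attributes. A hypothetical driver-side caller (virtual-link drivers carrying an IFLA_*_PEER attribute would be the expected users; the wrapper below is illustrative):

    /* Hypothetical newlink-path caller: parse the peer's ifinfomsg plus
     * nested IFLA attributes in one validated step. */
    static int example_parse_peer(struct nlattr **peer_tb,
                                  const struct nlattr *peer_attr,
                                  struct netlink_ext_ack *extack)
    {
        return rtnl_nla_parse_ifinfomsg(peer_tb, peer_attr, extack);
    }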
struct net *rtnl_link_get_net(struct net *src_net, struct nlattr *tb[]) { diff --combined net/dccp/ipv4.c index a545ad71201c,8dd6837c476a..1591b061105a --- a/net/dccp/ipv4.c +++ b/net/dccp/ipv4.c @@@ -130,7 -130,7 +130,7 @@@ int dccp_v4_connect(struct sock *sk, st inet->inet_daddr, inet->inet_sport, inet->inet_dport); - inet->inet_id = get_random_u16(); + atomic_set(&inet->inet_id, get_random_u16());
err = dccp_connect(sk); rt = NULL; @@@ -247,7 -247,6 +247,6 @@@ static int dccp_v4_err(struct sk_buff * const u8 offset = iph->ihl << 2; const struct dccp_hdr *dh; struct dccp_sock *dp; - struct inet_sock *inet; const int type = icmp_hdr(skb)->type; const int code = icmp_hdr(skb)->code; struct sock *sk; @@@ -361,8 -360,7 +360,7 @@@ * --ANK (980905) */
- inet = inet_sk(sk); - if (!sock_owned_by_user(sk) && inet->recverr) { + if (!sock_owned_by_user(sk) && inet_test_bit(RECVERR, sk)) { sk->sk_err = err; sk_error_report(sk); } else { /* Only an error on timeout */ @@@ -432,7 -430,7 +430,7 @@@ struct sock *dccp_v4_request_recv_sock( RCU_INIT_POINTER(newinet->inet_opt, rcu_dereference(ireq->ireq_opt)); newinet->mc_index = inet_iif(skb); newinet->mc_ttl = ip_hdr(skb)->ttl; - newinet->inet_id = get_random_u16(); + atomic_set(&newinet->inet_id, get_random_u16());
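inet_id is converted to atomic_t here (and again in af_inet.c later in this diff) because the IPv4 ID counter can be advanced without the socket lock held. A sketch of the consumer pattern under that assumption; the real ID selection lives in the IP output path outside this diff, and the helper name is illustrative:

    /* Hypothetical consumer: hand out `segs` consecutive IPv4 IDs
     * atomically so concurrent senders never reuse one. */
    static u16 example_next_ip_id(struct inet_sock *inet, int segs)
    {
        return (u16)atomic_fetch_add(segs, &inet->inet_id);
    }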
if (dst == NULL && (dst = inet_csk_route_child_sock(sk, newsk, req)) == NULL) goto put_and_exit; @@@ -474,7 -472,8 +472,8 @@@ static struct dst_entry* dccp_v4_route_ .flowi4_oif = inet_iif(skb), .daddr = iph->saddr, .saddr = iph->daddr, - .flowi4_tos = RT_CONN_FLAGS(sk), + .flowi4_tos = ip_sock_rt_tos(sk), + .flowi4_scope = ip_sock_rt_scope(sk), .flowi4_proto = sk->sk_protocol, .fl4_sport = dccp_hdr(skb)->dccph_dport, .fl4_dport = dccp_hdr(skb)->dccph_sport, diff --combined net/devlink/leftover.c index bfed7929a904,72ba8a716525..e2cd13958cc2 --- a/net/devlink/leftover.c +++ b/net/devlink/leftover.c @@@ -232,13 -232,13 +232,13 @@@ devlink_rate_node_get_from_attrs(struc return devlink_rate_node_get_by_name(devlink, rate_node_name); }
- struct devlink_rate * + static struct devlink_rate * devlink_rate_node_get_from_info(struct devlink *devlink, struct genl_info *info) { return devlink_rate_node_get_from_attrs(devlink, info->attrs); }
- struct devlink_rate * + static struct devlink_rate * devlink_rate_get_from_info(struct devlink *devlink, struct genl_info *info) { struct nlattr **attrs = info->attrs; @@@ -285,7 -285,7 +285,7 @@@ devlink_linecard_get_from_attrs(struct return ERR_PTR(-EINVAL); }
- struct devlink_linecard * + static struct devlink_linecard * devlink_linecard_get_from_info(struct devlink *devlink, struct genl_info *info) { return devlink_linecard_get_from_attrs(devlink, info->attrs); @@@ -1005,8 -1005,8 +1005,8 @@@ static void devlink_rate_notify(struct }
static int - devlink_nl_cmd_rate_get_dump_one(struct sk_buff *msg, struct devlink *devlink, - struct netlink_callback *cb) + devlink_nl_rate_get_dump_one(struct sk_buff *msg, struct devlink *devlink, + struct netlink_callback *cb, int flags) { struct devlink_nl_dump_state *state = devlink_dump_state(cb); struct devlink_rate *devlink_rate; @@@ -1022,8 -1022,7 +1022,7 @@@ continue; } err = devlink_nl_rate_fill(msg, devlink_rate, cmd, id, - cb->nlh->nlmsg_seq, - NLM_F_MULTI, NULL); + cb->nlh->nlmsg_seq, flags, NULL); if (err) { state->idx = idx; break; @@@ -1034,17 -1033,22 +1033,22 @@@ return err; }
- const struct devlink_cmd devl_cmd_rate_get = { - .dump_one = devlink_nl_cmd_rate_get_dump_one, - }; + int devlink_nl_rate_get_dumpit(struct sk_buff *skb, struct netlink_callback *cb) + { + return devlink_nl_dumpit(skb, cb, devlink_nl_rate_get_dump_one); + }
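This wrapper shape repeats for every GET command below: the old devlink_cmd dispatch structs are dropped in favour of plain dumpit entry points delegating to a shared iterator. devlink_nl_dumpit() itself is defined outside this diff; judging from these hunks, its contract is to walk the registered devlink instances, resume from the dump state saved in cb, and pass the multipart flags (previously the hard-coded NLM_F_MULTI) down to dump_one(). A signature-level sketch under that assumption:

    /* Assumed contract of the shared iterator (defined outside this diff):
     * iterate devlink instances, resume from the state in cb, and invoke
     * dump_one() with NLM_F_MULTI included in flags. */
    typedef int (*devlink_nl_dump_one_t)(struct sk_buff *msg,
                                         struct devlink *devlink,
                                         struct netlink_callback *cb,
                                         int flags);

    int devlink_nl_dumpit(struct sk_buff *skb, struct netlink_callback *cb,
                          devlink_nl_dump_one_t dump_one);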
- static int devlink_nl_cmd_rate_get_doit(struct sk_buff *skb, - struct genl_info *info) + int devlink_nl_rate_get_doit(struct sk_buff *skb, struct genl_info *info) { - struct devlink_rate *devlink_rate = info->user_ptr[1]; + struct devlink *devlink = info->user_ptr[0]; + struct devlink_rate *devlink_rate; struct sk_buff *msg; int err;
+ devlink_rate = devlink_rate_get_from_info(devlink, info); + if (IS_ERR(devlink_rate)) + return PTR_ERR(devlink_rate); + msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); if (!msg) return -ENOMEM; @@@ -1072,8 -1076,7 +1076,7 @@@ devlink_rate_is_parent_node(struct devl return false; }
- static int devlink_nl_cmd_port_get_doit(struct sk_buff *skb, - struct genl_info *info) + int devlink_nl_port_get_doit(struct sk_buff *skb, struct genl_info *info) { struct devlink_port *devlink_port = info->user_ptr[1]; struct sk_buff *msg; @@@ -1095,8 -1098,8 +1098,8 @@@ }
static int - devlink_nl_cmd_port_get_dump_one(struct sk_buff *msg, struct devlink *devlink, - struct netlink_callback *cb) + devlink_nl_port_get_dump_one(struct sk_buff *msg, struct devlink *devlink, + struct netlink_callback *cb, int flags) { struct devlink_nl_dump_state *state = devlink_dump_state(cb); struct devlink_port *devlink_port; @@@ -1107,8 -1110,8 +1110,8 @@@ err = devlink_nl_port_fill(msg, devlink_port, DEVLINK_CMD_NEW, NETLINK_CB(cb->skb).portid, - cb->nlh->nlmsg_seq, - NLM_F_MULTI, cb->extack); + cb->nlh->nlmsg_seq, flags, + cb->extack); if (err) { state->idx = port_index; break; @@@ -1118,9 -1121,10 +1121,10 @@@ return err; }
- const struct devlink_cmd devl_cmd_port_get = { - .dump_one = devlink_nl_cmd_port_get_dump_one, - }; + int devlink_nl_port_get_dumpit(struct sk_buff *skb, struct netlink_callback *cb) + { + return devlink_nl_dumpit(skb, cb, devlink_nl_port_get_dump_one); + }
static int devlink_port_type_set(struct devlink_port *devlink_port, enum devlink_port_type port_type) @@@ -1629,11 -1633,16 +1633,16 @@@ static bool devlink_rate_set_ops_suppor static int devlink_nl_cmd_rate_set_doit(struct sk_buff *skb, struct genl_info *info) { - struct devlink_rate *devlink_rate = info->user_ptr[1]; - struct devlink *devlink = devlink_rate->devlink; - const struct devlink_ops *ops = devlink->ops; + struct devlink *devlink = info->user_ptr[0]; + struct devlink_rate *devlink_rate; + const struct devlink_ops *ops; int err;
+ devlink_rate = devlink_rate_get_from_info(devlink, info); + if (IS_ERR(devlink_rate)) + return PTR_ERR(devlink_rate); + + ops = devlink->ops; if (!ops || !devlink_rate_set_ops_supported(ops, info, devlink_rate->type)) return -EOPNOTSUPP;
@@@ -1704,18 -1713,22 +1713,22 @@@ err_strdup static int devlink_nl_cmd_rate_del_doit(struct sk_buff *skb, struct genl_info *info) { - struct devlink_rate *rate_node = info->user_ptr[1]; - struct devlink *devlink = rate_node->devlink; - const struct devlink_ops *ops = devlink->ops; + struct devlink *devlink = info->user_ptr[0]; + struct devlink_rate *rate_node; int err;
+ rate_node = devlink_rate_node_get_from_info(devlink, info); + if (IS_ERR(rate_node)) + return PTR_ERR(rate_node); + if (refcount_read(&rate_node->refcnt) > 1) { NL_SET_ERR_MSG(info->extack, "Node has children. Cannot delete node."); return -EBUSY; }
devlink_rate_notify(rate_node, DEVLINK_CMD_RATE_DEL); - err = ops->rate_node_del(rate_node, rate_node->priv, info->extack); + err = devlink->ops->rate_node_del(rate_node, rate_node->priv, + info->extack); if (rate_node->parent) refcount_dec(&rate_node->parent->refcnt); list_del(&rate_node->list); @@@ -1811,14 -1824,17 +1824,17 @@@ static void devlink_linecard_notify(str msg, 0, DEVLINK_MCGRP_CONFIG, GFP_KERNEL); }
- static int devlink_nl_cmd_linecard_get_doit(struct sk_buff *skb, - struct genl_info *info) + int devlink_nl_linecard_get_doit(struct sk_buff *skb, struct genl_info *info) { - struct devlink_linecard *linecard = info->user_ptr[1]; - struct devlink *devlink = linecard->devlink; + struct devlink *devlink = info->user_ptr[0]; + struct devlink_linecard *linecard; struct sk_buff *msg; int err;
+ linecard = devlink_linecard_get_from_info(devlink, info); + if (IS_ERR(linecard)) + return PTR_ERR(linecard); + msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); if (!msg) return -ENOMEM; @@@ -1837,9 -1853,10 +1853,10 @@@ return genlmsg_reply(msg, info); }
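The doit handlers follow the matching half of the pattern: generic pre-dispatch no longer resolves rate/linecard/rate-node objects through internal_flags (the DEVLINK_NL_FLAG_NEED_* entries disappear from the ops table at the end of this file), so each handler looks its object up from the request attributes itself. A condensed sketch of the shape, using names from the hunks above, with reply construction elided:

    static int sketch_object_get_doit(struct sk_buff *skb,
                                      struct genl_info *info)
    {
        struct devlink *devlink = info->user_ptr[0]; /* still pre-resolved */
        struct devlink_linecard *linecard;

        linecard = devlink_linecard_get_from_info(devlink, info);
        if (IS_ERR(linecard))
            return PTR_ERR(linecard);   /* replaces the old user_ptr[1] */

        /* ...fill and send the reply for this object... */
        return 0;
    }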
- static int devlink_nl_cmd_linecard_get_dump_one(struct sk_buff *msg, - struct devlink *devlink, - struct netlink_callback *cb) + static int devlink_nl_linecard_get_dump_one(struct sk_buff *msg, + struct devlink *devlink, + struct netlink_callback *cb, + int flags) { struct devlink_nl_dump_state *state = devlink_dump_state(cb); struct devlink_linecard *linecard; @@@ -1855,8 -1872,7 +1872,7 @@@ err = devlink_nl_linecard_fill(msg, devlink, linecard, DEVLINK_CMD_LINECARD_NEW, NETLINK_CB(cb->skb).portid, - cb->nlh->nlmsg_seq, - NLM_F_MULTI, + cb->nlh->nlmsg_seq, flags, cb->extack); mutex_unlock(&linecard->state_lock); if (err) { @@@ -1869,9 -1885,11 +1885,11 @@@ return err; }
- const struct devlink_cmd devl_cmd_linecard_get = { - .dump_one = devlink_nl_cmd_linecard_get_dump_one, - }; + int devlink_nl_linecard_get_dumpit(struct sk_buff *skb, + struct netlink_callback *cb) + { + return devlink_nl_dumpit(skb, cb, devlink_nl_linecard_get_dump_one); + }
static struct devlink_linecard_type * devlink_linecard_type_lookup(struct devlink_linecard *linecard, @@@ -2008,10 -2026,15 +2026,15 @@@ out static int devlink_nl_cmd_linecard_set_doit(struct sk_buff *skb, struct genl_info *info) { - struct devlink_linecard *linecard = info->user_ptr[1]; struct netlink_ext_ack *extack = info->extack; + struct devlink *devlink = info->user_ptr[0]; + struct devlink_linecard *linecard; int err;
+ linecard = devlink_linecard_get_from_info(devlink, info); + if (IS_ERR(linecard)) + return PTR_ERR(linecard); + if (info->attrs[DEVLINK_ATTR_LINECARD_TYPE]) { const char *type;
@@@ -2068,8 -2091,7 +2091,7 @@@ nla_put_failure return -EMSGSIZE; }
- static int devlink_nl_cmd_sb_get_doit(struct sk_buff *skb, - struct genl_info *info) + int devlink_nl_sb_get_doit(struct sk_buff *skb, struct genl_info *info) { struct devlink *devlink = info->user_ptr[0]; struct devlink_sb *devlink_sb; @@@ -2096,8 -2118,8 +2118,8 @@@ }
static int - devlink_nl_cmd_sb_get_dump_one(struct sk_buff *msg, struct devlink *devlink, - struct netlink_callback *cb) + devlink_nl_sb_get_dump_one(struct sk_buff *msg, struct devlink *devlink, + struct netlink_callback *cb, int flags) { struct devlink_nl_dump_state *state = devlink_dump_state(cb); struct devlink_sb *devlink_sb; @@@ -2112,8 -2134,7 +2134,7 @@@ err = devlink_nl_sb_fill(msg, devlink, devlink_sb, DEVLINK_CMD_SB_NEW, NETLINK_CB(cb->skb).portid, - cb->nlh->nlmsg_seq, - NLM_F_MULTI); + cb->nlh->nlmsg_seq, flags); if (err) { state->idx = idx; break; @@@ -2124,9 -2145,10 +2145,10 @@@ return err; }
- const struct devlink_cmd devl_cmd_sb_get = { - .dump_one = devlink_nl_cmd_sb_get_dump_one, - }; + int devlink_nl_sb_get_dumpit(struct sk_buff *skb, struct netlink_callback *cb) + { + return devlink_nl_dumpit(skb, cb, devlink_nl_sb_get_dump_one); + }
static int devlink_nl_sb_pool_fill(struct sk_buff *msg, struct devlink *devlink, struct devlink_sb *devlink_sb, @@@ -2171,8 -2193,7 +2193,7 @@@ nla_put_failure return -EMSGSIZE; }
- static int devlink_nl_cmd_sb_pool_get_doit(struct sk_buff *skb, - struct genl_info *info) + int devlink_nl_sb_pool_get_doit(struct sk_buff *skb, struct genl_info *info) { struct devlink *devlink = info->user_ptr[0]; struct devlink_sb *devlink_sb; @@@ -2210,7 -2231,7 +2231,7 @@@ static int __sb_pool_get_dumpit(struct sk_buff *msg, int start, int *p_idx, struct devlink *devlink, struct devlink_sb *devlink_sb, - u32 portid, u32 seq) + u32 portid, u32 seq, int flags) { u16 pool_count = devlink_sb_pool_count(devlink_sb); u16 pool_index; @@@ -2225,7 -2246,7 +2246,7 @@@ devlink_sb, pool_index, DEVLINK_CMD_SB_POOL_NEW, - portid, seq, NLM_F_MULTI); + portid, seq, flags); if (err) return err; (*p_idx)++; @@@ -2234,9 -2255,8 +2255,8 @@@ }
static int - devlink_nl_cmd_sb_pool_get_dump_one(struct sk_buff *msg, - struct devlink *devlink, - struct netlink_callback *cb) + devlink_nl_sb_pool_get_dump_one(struct sk_buff *msg, struct devlink *devlink, + struct netlink_callback *cb, int flags) { struct devlink_nl_dump_state *state = devlink_dump_state(cb); struct devlink_sb *devlink_sb; @@@ -2250,7 -2270,7 +2270,7 @@@ err = __sb_pool_get_dumpit(msg, state->idx, &idx, devlink, devlink_sb, NETLINK_CB(cb->skb).portid, - cb->nlh->nlmsg_seq); + cb->nlh->nlmsg_seq, flags); if (err == -EOPNOTSUPP) { err = 0; } else if (err) { @@@ -2262,9 -2282,11 +2282,11 @@@ return err; }
- const struct devlink_cmd devl_cmd_sb_pool_get = { - .dump_one = devlink_nl_cmd_sb_pool_get_dump_one, - }; + int devlink_nl_sb_pool_get_dumpit(struct sk_buff *skb, + struct netlink_callback *cb) + { + return devlink_nl_dumpit(skb, cb, devlink_nl_sb_pool_get_dump_one); + }
static int devlink_sb_pool_set(struct devlink *devlink, unsigned int sb_index, u16 pool_index, u32 size, @@@ -2371,8 -2393,8 +2393,8 @@@ sb_occ_get_failure return err; }
- static int devlink_nl_cmd_sb_port_pool_get_doit(struct sk_buff *skb, - struct genl_info *info) + int devlink_nl_sb_port_pool_get_doit(struct sk_buff *skb, + struct genl_info *info) { struct devlink_port *devlink_port = info->user_ptr[1]; struct devlink *devlink = devlink_port->devlink; @@@ -2412,7 -2434,7 +2434,7 @@@ static int __sb_port_pool_get_dumpit(struct sk_buff *msg, int start, int *p_idx, struct devlink *devlink, struct devlink_sb *devlink_sb, - u32 portid, u32 seq) + u32 portid, u32 seq, int flags) { struct devlink_port *devlink_port; u16 pool_count = devlink_sb_pool_count(devlink_sb); @@@ -2431,8 -2453,7 +2453,7 @@@ devlink_sb, pool_index, DEVLINK_CMD_SB_PORT_POOL_NEW, - portid, seq, - NLM_F_MULTI); + portid, seq, flags); if (err) return err; (*p_idx)++; @@@ -2442,9 -2463,9 +2463,9 @@@ }
static int - devlink_nl_cmd_sb_port_pool_get_dump_one(struct sk_buff *msg, - struct devlink *devlink, - struct netlink_callback *cb) + devlink_nl_sb_port_pool_get_dump_one(struct sk_buff *msg, + struct devlink *devlink, + struct netlink_callback *cb, int flags) { struct devlink_nl_dump_state *state = devlink_dump_state(cb); struct devlink_sb *devlink_sb; @@@ -2458,7 -2479,7 +2479,7 @@@ err = __sb_port_pool_get_dumpit(msg, state->idx, &idx, devlink, devlink_sb, NETLINK_CB(cb->skb).portid, - cb->nlh->nlmsg_seq); + cb->nlh->nlmsg_seq, flags); if (err == -EOPNOTSUPP) { err = 0; } else if (err) { @@@ -2470,9 -2491,11 +2491,11 @@@ return err; }
- const struct devlink_cmd devl_cmd_sb_port_pool_get = { - .dump_one = devlink_nl_cmd_sb_port_pool_get_dump_one, - }; + int devlink_nl_sb_port_pool_get_dumpit(struct sk_buff *skb, + struct netlink_callback *cb) + { + return devlink_nl_dumpit(skb, cb, devlink_nl_sb_port_pool_get_dump_one); + }
static int devlink_sb_port_pool_set(struct devlink_port *devlink_port, unsigned int sb_index, u16 pool_index, @@@ -2580,8 -2603,8 +2603,8 @@@ nla_put_failure return -EMSGSIZE; }
- static int devlink_nl_cmd_sb_tc_pool_bind_get_doit(struct sk_buff *skb, - struct genl_info *info) + int devlink_nl_sb_tc_pool_bind_get_doit(struct sk_buff *skb, + struct genl_info *info) { struct devlink_port *devlink_port = info->user_ptr[1]; struct devlink *devlink = devlink_port->devlink; @@@ -2628,7 -2651,7 +2651,7 @@@ static int __sb_tc_pool_bind_get_dumpit int start, int *p_idx, struct devlink *devlink, struct devlink_sb *devlink_sb, - u32 portid, u32 seq) + u32 portid, u32 seq, int flags) { struct devlink_port *devlink_port; unsigned long port_index; @@@ -2649,7 -2672,7 +2672,7 @@@ DEVLINK_SB_POOL_TYPE_INGRESS, DEVLINK_CMD_SB_TC_POOL_BIND_NEW, portid, seq, - NLM_F_MULTI); + flags); if (err) return err; (*p_idx)++; @@@ -2667,7 -2690,7 +2690,7 @@@ DEVLINK_SB_POOL_TYPE_EGRESS, DEVLINK_CMD_SB_TC_POOL_BIND_NEW, portid, seq, - NLM_F_MULTI); + flags); if (err) return err; (*p_idx)++; @@@ -2676,10 -2699,10 +2699,10 @@@ return 0; }
- static int - devlink_nl_cmd_sb_tc_pool_bind_get_dump_one(struct sk_buff *msg, - struct devlink *devlink, - struct netlink_callback *cb) + static int devlink_nl_sb_tc_pool_bind_get_dump_one(struct sk_buff *msg, + struct devlink *devlink, + struct netlink_callback *cb, + int flags) { struct devlink_nl_dump_state *state = devlink_dump_state(cb); struct devlink_sb *devlink_sb; @@@ -2693,7 -2716,7 +2716,7 @@@ err = __sb_tc_pool_bind_get_dumpit(msg, state->idx, &idx, devlink, devlink_sb, NETLINK_CB(cb->skb).portid, - cb->nlh->nlmsg_seq); + cb->nlh->nlmsg_seq, flags); if (err == -EOPNOTSUPP) { err = 0; } else if (err) { @@@ -2705,9 -2728,12 +2728,12 @@@ return err; }
- const struct devlink_cmd devl_cmd_sb_tc_pool_bind_get = { - .dump_one = devlink_nl_cmd_sb_tc_pool_bind_get_dump_one, - }; + int devlink_nl_sb_tc_pool_bind_get_dumpit(struct sk_buff *skb, + struct netlink_callback *cb) + { + return devlink_nl_dumpit(skb, cb, + devlink_nl_sb_tc_pool_bind_get_dump_one); + }
static int devlink_sb_tc_pool_bind_set(struct devlink_port *devlink_port, unsigned int sb_index, u16 tc_index, @@@ -3946,7 -3972,7 +3972,7 @@@ static int devlink_param_get(struct dev const struct devlink_param *param, struct devlink_param_gset_ctx *ctx) { - if (!param->get || devlink->reload_failed) + if (!param->get) return -EOPNOTSUPP; return param->get(devlink, param->id, ctx); } @@@ -3955,7 -3981,7 +3981,7 @@@ static int devlink_param_set(struct dev const struct devlink_param *param, struct devlink_param_gset_ctx *ctx) { - if (!param->set || devlink->reload_failed) + if (!param->set) return -EOPNOTSUPP; return param->set(devlink, param->id, ctx); } @@@ -4155,9 -4181,10 +4181,10 @@@ static void devlink_param_notify(struc msg, 0, DEVLINK_MCGRP_CONFIG, GFP_KERNEL); }
- static int - devlink_nl_cmd_param_get_dump_one(struct sk_buff *msg, struct devlink *devlink, - struct netlink_callback *cb) + static int devlink_nl_param_get_dump_one(struct sk_buff *msg, + struct devlink *devlink, + struct netlink_callback *cb, + int flags) { struct devlink_nl_dump_state *state = devlink_dump_state(cb); struct devlink_param_item *param_item; @@@ -4168,8 -4195,7 +4195,7 @@@ err = devlink_nl_param_fill(msg, devlink, 0, param_item, DEVLINK_CMD_PARAM_GET, NETLINK_CB(cb->skb).portid, - cb->nlh->nlmsg_seq, - NLM_F_MULTI); + cb->nlh->nlmsg_seq, flags); if (err == -EOPNOTSUPP) { err = 0; } else if (err) { @@@ -4181,9 -4207,11 +4207,11 @@@ return err; }
- const struct devlink_cmd devl_cmd_param_get = { - .dump_one = devlink_nl_cmd_param_get_dump_one, - }; + int devlink_nl_param_get_dumpit(struct sk_buff *skb, + struct netlink_callback *cb) + { + return devlink_nl_dumpit(skb, cb, devlink_nl_param_get_dump_one); + }
static int devlink_param_type_get_from_info(struct genl_info *info, @@@ -4272,8 -4300,8 +4300,8 @@@ devlink_param_get_from_info(struct xarr return devlink_param_find_by_name(params, param_name); }
- static int devlink_nl_cmd_param_get_doit(struct sk_buff *skb, - struct genl_info *info) + int devlink_nl_param_get_doit(struct sk_buff *skb, + struct genl_info *info) { struct devlink *devlink = info->user_ptr[0]; struct devlink_param_item *param_item; @@@ -4770,8 -4798,7 +4798,7 @@@ static void devlink_region_snapshot_del kfree(snapshot); }
- static int devlink_nl_cmd_region_get_doit(struct sk_buff *skb, - struct genl_info *info) + int devlink_nl_region_get_doit(struct sk_buff *skb, struct genl_info *info) { struct devlink *devlink = info->user_ptr[0]; struct devlink_port *port = NULL; @@@ -4819,8 -4846,7 +4846,7 @@@ static int devlink_nl_cmd_region_get_port_dumpit(struct sk_buff *msg, struct netlink_callback *cb, struct devlink_port *port, - int *idx, - int start) + int *idx, int start, int flags) { struct devlink_region *region; int err = 0; @@@ -4834,7 -4860,7 +4860,7 @@@ DEVLINK_CMD_REGION_GET, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq, - NLM_F_MULTI, region); + flags, region); if (err) goto out; (*idx)++; @@@ -4844,9 -4870,10 +4870,10 @@@ out return err; }
- static int - devlink_nl_cmd_region_get_dump_one(struct sk_buff *msg, struct devlink *devlink, - struct netlink_callback *cb) + static int devlink_nl_region_get_dump_one(struct sk_buff *msg, + struct devlink *devlink, + struct netlink_callback *cb, + int flags) { struct devlink_nl_dump_state *state = devlink_dump_state(cb); struct devlink_region *region; @@@ -4863,8 -4890,8 +4890,8 @@@ err = devlink_nl_region_fill(msg, devlink, DEVLINK_CMD_REGION_GET, NETLINK_CB(cb->skb).portid, - cb->nlh->nlmsg_seq, - NLM_F_MULTI, region); + cb->nlh->nlmsg_seq, flags, + region); if (err) { state->idx = idx; return err; @@@ -4874,7 -4901,7 +4901,7 @@@
xa_for_each(&devlink->ports, port_index, port) { err = devlink_nl_cmd_region_get_port_dumpit(msg, cb, port, &idx, - state->idx); + state->idx, flags); if (err) { state->idx = idx; return err; @@@ -4884,9 -4911,11 +4911,11 @@@ return 0; }
- const struct devlink_cmd devl_cmd_region_get = { - .dump_one = devlink_nl_cmd_region_get_dump_one, - }; + int devlink_nl_region_get_dumpit(struct sk_buff *skb, + struct netlink_callback *cb) + { + return devlink_nl_dumpit(skb, cb, devlink_nl_region_get_dump_one); + }
static int devlink_nl_cmd_region_del(struct sk_buff *skb, struct genl_info *info) @@@ -5172,7 -5201,7 +5201,7 @@@ static int devlink_nl_cmd_region_read_d struct devlink_nl_dump_state *state = devlink_dump_state(cb); struct nlattr *chunks_attr, *region_attr, *snapshot_attr; u64 ret_offset, start_offset, end_offset = U64_MAX; - struct nlattr **attrs = info->attrs; + struct nlattr **attrs = info->info.attrs; struct devlink_port *port = NULL; devlink_chunk_fill_t *region_cb; struct devlink_region *region; @@@ -5195,8 -5224,8 +5224,8 @@@ goto out_unlock; }
- if (info->attrs[DEVLINK_ATTR_PORT_INDEX]) { - index = nla_get_u32(info->attrs[DEVLINK_ATTR_PORT_INDEX]); + if (attrs[DEVLINK_ATTR_PORT_INDEX]) { + index = nla_get_u32(attrs[DEVLINK_ATTR_PORT_INDEX]);
port = devlink_port_get_by_index(devlink, index); if (!port) { @@@ -5632,8 -5661,7 +5661,7 @@@ nla_put_failure return -EMSGSIZE; }
- static int devlink_nl_cmd_trap_get_doit(struct sk_buff *skb, - struct genl_info *info) + int devlink_nl_trap_get_doit(struct sk_buff *skb, struct genl_info *info) { struct netlink_ext_ack *extack = info->extack; struct devlink *devlink = info->user_ptr[0]; @@@ -5667,9 -5695,9 +5695,9 @@@ err_trap_fill return err; }
- static int - devlink_nl_cmd_trap_get_dump_one(struct sk_buff *msg, struct devlink *devlink, - struct netlink_callback *cb) + static int devlink_nl_trap_get_dump_one(struct sk_buff *msg, + struct devlink *devlink, + struct netlink_callback *cb, int flags) { struct devlink_nl_dump_state *state = devlink_dump_state(cb); struct devlink_trap_item *trap_item; @@@ -5684,8 -5712,7 +5712,7 @@@ err = devlink_nl_trap_fill(msg, devlink, trap_item, DEVLINK_CMD_TRAP_NEW, NETLINK_CB(cb->skb).portid, - cb->nlh->nlmsg_seq, - NLM_F_MULTI); + cb->nlh->nlmsg_seq, flags); if (err) { state->idx = idx; break; @@@ -5696,9 -5723,10 +5723,10 @@@ return err; }
- const struct devlink_cmd devl_cmd_trap_get = { - .dump_one = devlink_nl_cmd_trap_get_dump_one, - }; + int devlink_nl_trap_get_dumpit(struct sk_buff *skb, struct netlink_callback *cb) + { + return devlink_nl_dumpit(skb, cb, devlink_nl_trap_get_dump_one); + }
static int __devlink_trap_action_set(struct devlink *devlink, struct devlink_trap_item *trap_item, @@@ -5843,8 -5871,7 +5871,7 @@@ nla_put_failure return -EMSGSIZE; }
- static int devlink_nl_cmd_trap_group_get_doit(struct sk_buff *skb, - struct genl_info *info) + int devlink_nl_trap_group_get_doit(struct sk_buff *skb, struct genl_info *info) { struct netlink_ext_ack *extack = info->extack; struct devlink *devlink = info->user_ptr[0]; @@@ -5878,10 -5905,10 +5905,10 @@@ err_trap_group_fill return err; }
- static int - devlink_nl_cmd_trap_group_get_dump_one(struct sk_buff *msg, - struct devlink *devlink, - struct netlink_callback *cb) + static int devlink_nl_trap_group_get_dump_one(struct sk_buff *msg, + struct devlink *devlink, + struct netlink_callback *cb, + int flags) { struct devlink_nl_dump_state *state = devlink_dump_state(cb); struct devlink_trap_group_item *group_item; @@@ -5897,8 -5924,7 +5924,7 @@@ err = devlink_nl_trap_group_fill(msg, devlink, group_item, DEVLINK_CMD_TRAP_GROUP_NEW, NETLINK_CB(cb->skb).portid, - cb->nlh->nlmsg_seq, - NLM_F_MULTI); + cb->nlh->nlmsg_seq, flags); if (err) { state->idx = idx; break; @@@ -5909,9 -5935,11 +5935,11 @@@ return err; }
- const struct devlink_cmd devl_cmd_trap_group_get = { - .dump_one = devlink_nl_cmd_trap_group_get_dump_one, - }; + int devlink_nl_trap_group_get_dumpit(struct sk_buff *skb, + struct netlink_callback *cb) + { + return devlink_nl_dumpit(skb, cb, devlink_nl_trap_group_get_dump_one); + }
static int __devlink_trap_group_action_set(struct devlink *devlink, @@@ -6137,8 -6165,8 +6165,8 @@@ nla_put_failure return -EMSGSIZE; }
- static int devlink_nl_cmd_trap_policer_get_doit(struct sk_buff *skb, - struct genl_info *info) + int devlink_nl_trap_policer_get_doit(struct sk_buff *skb, + struct genl_info *info) { struct devlink_trap_policer_item *policer_item; struct netlink_ext_ack *extack = info->extack; @@@ -6172,10 -6200,10 +6200,10 @@@ err_trap_policer_fill return err; }
- static int - devlink_nl_cmd_trap_policer_get_dump_one(struct sk_buff *msg, - struct devlink *devlink, - struct netlink_callback *cb) + static int devlink_nl_trap_policer_get_dump_one(struct sk_buff *msg, + struct devlink *devlink, + struct netlink_callback *cb, + int flags) { struct devlink_nl_dump_state *state = devlink_dump_state(cb); struct devlink_trap_policer_item *policer_item; @@@ -6190,8 -6218,7 +6218,7 @@@ err = devlink_nl_trap_policer_fill(msg, devlink, policer_item, DEVLINK_CMD_TRAP_POLICER_NEW, NETLINK_CB(cb->skb).portid, - cb->nlh->nlmsg_seq, - NLM_F_MULTI); + cb->nlh->nlmsg_seq, flags); if (err) { state->idx = idx; break; @@@ -6202,9 -6229,11 +6229,11 @@@ return err; }
- const struct devlink_cmd devl_cmd_trap_policer_get = { - .dump_one = devlink_nl_cmd_trap_policer_get_dump_one, - }; + int devlink_nl_trap_policer_get_dumpit(struct sk_buff *skb, + struct netlink_callback *cb) + { + return devlink_nl_dumpit(skb, cb, devlink_nl_trap_policer_get_dump_one); + }
static int devlink_trap_policer_set(struct devlink *devlink, @@@ -6278,22 -6307,7 +6307,7 @@@ static int devlink_nl_cmd_trap_policer_ return devlink_trap_policer_set(devlink, policer_item, info); }
- const struct genl_small_ops devlink_nl_ops[56] = { - { - .cmd = DEVLINK_CMD_GET, - .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, - .doit = devlink_nl_cmd_get_doit, - .dumpit = devlink_nl_instance_iter_dumpit, - /* can be retrieved by unprivileged users */ - }, - { - .cmd = DEVLINK_CMD_PORT_GET, - .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, - .doit = devlink_nl_cmd_port_get_doit, - .dumpit = devlink_nl_instance_iter_dumpit, - .internal_flags = DEVLINK_NL_FLAG_NEED_PORT, - /* can be retrieved by unprivileged users */ - }, + const struct genl_small_ops devlink_nl_small_ops[40] = { { .cmd = DEVLINK_CMD_PORT_SET, .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, @@@ -6301,18 -6315,10 +6315,10 @@@ .flags = GENL_ADMIN_PERM, .internal_flags = DEVLINK_NL_FLAG_NEED_PORT, }, - { - .cmd = DEVLINK_CMD_RATE_GET, - .doit = devlink_nl_cmd_rate_get_doit, - .dumpit = devlink_nl_instance_iter_dumpit, - .internal_flags = DEVLINK_NL_FLAG_NEED_RATE, - /* can be retrieved by unprivileged users */ - }, { .cmd = DEVLINK_CMD_RATE_SET, .doit = devlink_nl_cmd_rate_set_doit, .flags = GENL_ADMIN_PERM, - .internal_flags = DEVLINK_NL_FLAG_NEED_RATE, }, { .cmd = DEVLINK_CMD_RATE_NEW, @@@ -6323,7 -6329,6 +6329,6 @@@ .cmd = DEVLINK_CMD_RATE_DEL, .doit = devlink_nl_cmd_rate_del_doit, .flags = GENL_ADMIN_PERM, - .internal_flags = DEVLINK_NL_FLAG_NEED_RATE_NODE, }, { .cmd = DEVLINK_CMD_PORT_SPLIT, @@@ -6350,32 -6355,11 +6355,11 @@@ .flags = GENL_ADMIN_PERM, .internal_flags = DEVLINK_NL_FLAG_NEED_PORT, }, - { - .cmd = DEVLINK_CMD_LINECARD_GET, - .doit = devlink_nl_cmd_linecard_get_doit, - .dumpit = devlink_nl_instance_iter_dumpit, - .internal_flags = DEVLINK_NL_FLAG_NEED_LINECARD, - /* can be retrieved by unprivileged users */ - }, + { .cmd = DEVLINK_CMD_LINECARD_SET, .doit = devlink_nl_cmd_linecard_set_doit, .flags = GENL_ADMIN_PERM, - .internal_flags = DEVLINK_NL_FLAG_NEED_LINECARD, - }, - { - .cmd = DEVLINK_CMD_SB_GET, - .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, - .doit = devlink_nl_cmd_sb_get_doit, - .dumpit = devlink_nl_instance_iter_dumpit, - /* can be retrieved by unprivileged users */ - }, - { - .cmd = DEVLINK_CMD_SB_POOL_GET, - .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, - .doit = devlink_nl_cmd_sb_pool_get_doit, - .dumpit = devlink_nl_instance_iter_dumpit, - /* can be retrieved by unprivileged users */ }, { .cmd = DEVLINK_CMD_SB_POOL_SET, @@@ -6383,14 -6367,6 +6367,6 @@@ .doit = devlink_nl_cmd_sb_pool_set_doit, .flags = GENL_ADMIN_PERM, }, - { - .cmd = DEVLINK_CMD_SB_PORT_POOL_GET, - .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, - .doit = devlink_nl_cmd_sb_port_pool_get_doit, - .dumpit = devlink_nl_instance_iter_dumpit, - .internal_flags = DEVLINK_NL_FLAG_NEED_PORT, - /* can be retrieved by unprivileged users */ - }, { .cmd = DEVLINK_CMD_SB_PORT_POOL_SET, .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, @@@ -6398,14 -6374,6 +6374,6 @@@ .flags = GENL_ADMIN_PERM, .internal_flags = DEVLINK_NL_FLAG_NEED_PORT, }, - { - .cmd = DEVLINK_CMD_SB_TC_POOL_BIND_GET, - .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, - .doit = devlink_nl_cmd_sb_tc_pool_bind_get_doit, - .dumpit = devlink_nl_instance_iter_dumpit, - .internal_flags = DEVLINK_NL_FLAG_NEED_PORT, - /* can be retrieved by unprivileged users */ - }, { .cmd = DEVLINK_CMD_SB_TC_POOL_BIND_SET, .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, @@@ -6479,13 -6447,6 +6447,6 @@@ .doit = devlink_nl_cmd_reload, .flags 
= GENL_ADMIN_PERM, }, - { - .cmd = DEVLINK_CMD_PARAM_GET, - .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, - .doit = devlink_nl_cmd_param_get_doit, - .dumpit = devlink_nl_instance_iter_dumpit, - /* can be retrieved by unprivileged users */ - }, { .cmd = DEVLINK_CMD_PARAM_SET, .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, @@@ -6507,13 -6468,6 +6468,6 @@@ .flags = GENL_ADMIN_PERM, .internal_flags = DEVLINK_NL_FLAG_NEED_PORT, }, - { - .cmd = DEVLINK_CMD_REGION_GET, - .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, - .doit = devlink_nl_cmd_region_get_doit, - .dumpit = devlink_nl_instance_iter_dumpit, - .flags = GENL_ADMIN_PERM, - }, { .cmd = DEVLINK_CMD_REGION_NEW, .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, @@@ -6533,21 -6487,6 +6487,6 @@@ .dumpit = devlink_nl_cmd_region_read_dumpit, .flags = GENL_ADMIN_PERM, }, - { - .cmd = DEVLINK_CMD_INFO_GET, - .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, - .doit = devlink_nl_cmd_info_get_doit, - .dumpit = devlink_nl_instance_iter_dumpit, - /* can be retrieved by unprivileged users */ - }, - { - .cmd = DEVLINK_CMD_HEALTH_REPORTER_GET, - .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, - .doit = devlink_nl_cmd_health_reporter_get_doit, - .dumpit = devlink_nl_instance_iter_dumpit, - .internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK_OR_PORT, - /* can be retrieved by unprivileged users */ - }, { .cmd = DEVLINK_CMD_HEALTH_REPORTER_SET, .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, @@@ -6596,45 -6535,21 +6535,21 @@@ .doit = devlink_nl_cmd_flash_update, .flags = GENL_ADMIN_PERM, }, - { - .cmd = DEVLINK_CMD_TRAP_GET, - .doit = devlink_nl_cmd_trap_get_doit, - .dumpit = devlink_nl_instance_iter_dumpit, - /* can be retrieved by unprivileged users */ - }, { .cmd = DEVLINK_CMD_TRAP_SET, .doit = devlink_nl_cmd_trap_set_doit, .flags = GENL_ADMIN_PERM, }, - { - .cmd = DEVLINK_CMD_TRAP_GROUP_GET, - .doit = devlink_nl_cmd_trap_group_get_doit, - .dumpit = devlink_nl_instance_iter_dumpit, - /* can be retrieved by unprivileged users */ - }, { .cmd = DEVLINK_CMD_TRAP_GROUP_SET, .doit = devlink_nl_cmd_trap_group_set_doit, .flags = GENL_ADMIN_PERM, }, - { - .cmd = DEVLINK_CMD_TRAP_POLICER_GET, - .doit = devlink_nl_cmd_trap_policer_get_doit, - .dumpit = devlink_nl_instance_iter_dumpit, - /* can be retrieved by unprivileged users */ - }, { .cmd = DEVLINK_CMD_TRAP_POLICER_SET, .doit = devlink_nl_cmd_trap_policer_set_doit, .flags = GENL_ADMIN_PERM, }, - { - .cmd = DEVLINK_CMD_SELFTESTS_GET, - .doit = devlink_nl_cmd_selftests_get_doit, - .dumpit = devlink_nl_instance_iter_dumpit, - /* can be retrieved by unprivileged users */ - }, { .cmd = DEVLINK_CMD_SELFTESTS_RUN, .doit = devlink_nl_cmd_selftests_run, @@@ -6704,7 -6619,6 +6619,7 @@@ void devlink_notify_unregister(struct d struct devlink_param_item *param_item; struct devlink_trap_item *trap_item; struct devlink_port *devlink_port; + struct devlink_linecard *linecard; struct devlink_rate *rate_node; struct devlink_region *region; unsigned long port_index; @@@ -6733,8 -6647,6 +6648,8 @@@
xa_for_each(&devlink->ports, port_index, devlink_port) devlink_port_notify(devlink_port, DEVLINK_CMD_PORT_DEL); + list_for_each_entry_reverse(linecard, &devlink->linecard_list, list) + devlink_linecard_notify(linecard, DEVLINK_CMD_LINECARD_DEL); devlink_notify(devlink, DEVLINK_CMD_DEL); }
@@@ -6846,8 -6758,10 +6761,10 @@@ int devl_port_register_with_ops(struct spin_lock_init(&devlink_port->type_lock); INIT_LIST_HEAD(&devlink_port->reporter_list); err = xa_insert(&devlink->ports, port_index, devlink_port, GFP_KERNEL); - if (err) + if (err) { + devlink_port->registered = false; return err; + }
INIT_DELAYED_WORK(&devlink_port->type_warn_dw, &devlink_port_type_warn); devlink_port_type_warn_schedule(devlink_port); diff --combined net/ipv4/af_inet.c index 02736b83c303,e07ee60625d9..3d2e30e20473 --- a/net/ipv4/af_inet.c +++ b/net/ipv4/af_inet.c @@@ -187,24 -187,13 +187,13 @@@ static int inet_autobind(struct sock *s return 0; }
- /* - * Move a socket into listening state. - */ - int inet_listen(struct socket *sock, int backlog) + int __inet_listen_sk(struct sock *sk, int backlog) { - struct sock *sk = sock->sk; - unsigned char old_state; + unsigned char old_state = sk->sk_state; int err, tcp_fastopen;
- lock_sock(sk); - - err = -EINVAL; - if (sock->state != SS_UNCONNECTED || sock->type != SOCK_STREAM) - goto out; - - old_state = sk->sk_state; if (!((1 << old_state) & (TCPF_CLOSE | TCPF_LISTEN))) - goto out; + return -EINVAL;
WRITE_ONCE(sk->sk_max_ack_backlog, backlog); /* Really, if the socket is already in listen state @@@ -227,10 -216,27 +216,27 @@@
err = inet_csk_listen_start(sk); if (err) - goto out; + return err; + tcp_call_bpf(sk, BPF_SOCK_OPS_TCP_LISTEN_CB, 0, NULL); } - err = 0; + return 0; + } + + /* + * Move a socket into listening state. + */ + int inet_listen(struct socket *sock, int backlog) + { + struct sock *sk = sock->sk; + int err = -EINVAL; + + lock_sock(sk); + + if (sock->state != SS_UNCONNECTED || sock->type != SOCK_STREAM) + goto out; + + err = __inet_listen_sk(sk, backlog);
out: release_sock(sk); @@@ -325,14 -331,14 +331,14 @@@ lookup_protocol sk->sk_reuse = SK_CAN_REUSE;
inet = inet_sk(sk); - inet->is_icsk = (INET_PROTOSW_ICSK & answer_flags) != 0; + inet_assign_bit(IS_ICSK, sk, INET_PROTOSW_ICSK & answer_flags);
- inet->nodefrag = 0; + inet_clear_bit(NODEFRAG, sk);
if (SOCK_RAW == sock->type) { inet->inet_num = protocol; if (IPPROTO_RAW == protocol) - inet->hdrincl = 1; + inet_set_bit(HDRINCL, sk); }
if (READ_ONCE(net->ipv4.sysctl_ip_no_pmtu_disc)) @@@ -340,7 -346,7 +346,7 @@@ else inet->pmtudisc = IP_PMTUDISC_WANT;
- inet->inet_id = 0; + atomic_set(&inet->inet_id, 0);
sock_init_data(sock, sk);
@@@ -350,9 -356,9 +356,9 @@@ sk->sk_txrehash = READ_ONCE(net->core.sysctl_txrehash);
inet->uc_ttl = -1; - inet->mc_loop = 1; + inet_set_bit(MC_LOOP, sk); inet->mc_ttl = 1; - inet->mc_all = 1; + inet_set_bit(MC_ALL, sk); inet->mc_index = 0; inet->mc_list = NULL; inet->rcv_tos = 0; @@@ -431,9 -437,8 +437,8 @@@ int inet_release(struct socket *sock } EXPORT_SYMBOL(inet_release);
-int inet_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
+int inet_bind_sk(struct sock *sk, struct sockaddr *uaddr, int addr_len)
 {
-	struct sock *sk = sock->sk;
 	u32 flags = BIND_WITH_LOCK;
 	int err;
 
@@@ -454,6 -459,11 +459,11 @@@
 
 	return __inet_bind(sk, uaddr, addr_len, flags);
 }
+
+int inet_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
+{
+	return inet_bind_sk(sock->sk, uaddr, addr_len);
+}
 EXPORT_SYMBOL(inet_bind);
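inet_bind_sk(), like __inet_listen_sk() above, gives in-kernel callers that hold a struct sock but no struct socket a direct entry point. An illustrative (purely hypothetical) caller might look like this:

/* Hypothetical in-kernel user: bind a struct sock to a local IPv4
 * address without going through the struct socket wrapper.
 */
static int example_bind_local(struct sock *sk, __be32 addr, __be16 port)
{
	struct sockaddr_in sin = {
		.sin_family	= AF_INET,
		.sin_addr	= { .s_addr = addr },
		.sin_port	= port,
	};

	return inet_bind_sk(sk, (struct sockaddr *)&sin, sizeof(sin));
}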
 int __inet_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len,
@@@ -519,7 -529,7 +529,7 @@@
 			inet->inet_saddr = 0;  /* Use device */
 
 	/* Make sure we are allowed to bind here. */
-	if (snum || !(inet->bind_address_no_port ||
+	if (snum || !(inet_test_bit(BIND_ADDRESS_NO_PORT, sk) ||
 		      (flags & BIND_FORCE_ADDRESS_NO_PORT))) {
 		err = sk->sk_prot->get_port(sk, snum);
 		if (err) {
@@@ -646,7 -656,7 +656,7 @@@ int __inet_stream_connect(struct socke
 		err = -EISCONN;
 		goto out;
 	case SS_CONNECTING:
-		if (inet_sk(sk)->defer_connect)
+		if (inet_test_bit(DEFER_CONNECT, sk))
 			err = is_sendmsg ? -EINPROGRESS : -EISCONN;
 		else
 			err = -EALREADY;
@@@ -669,7 -679,7 +679,7 @@@
 
 		sock->state = SS_CONNECTING;
 
-		if (!err && inet_sk(sk)->defer_connect)
+		if (!err && inet_test_bit(DEFER_CONNECT, sk))
 			goto out;
 
 		/* Just entered SS_CONNECTING state; the only
diff --combined net/ipv4/tcp.c
index b9d49803e77f,cee1e548660c..b1559481898d
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@@ -457,6 -457,7 +457,7 @@@ void tcp_init_sock(struct sock *sk
 	WRITE_ONCE(sk->sk_sndbuf, READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_wmem[1]));
 	WRITE_ONCE(sk->sk_rcvbuf, READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_rmem[1]));
+	tcp_scaling_ratio_init(sk);
 
 	set_bit(SOCK_SUPPORT_ZC, &sk->sk_socket->flags);
 	sk_sockets_allocated_inc(sk);
@@@ -582,7 -583,8 +583,8 @@@ __poll_t tcp_poll(struct file *file, st
 		if (urg_data & TCP_URG_VALID)
 			mask |= EPOLLPRI;
-	} else if (state == TCP_SYN_SENT && inet_sk(sk)->defer_connect) {
+	} else if (state == TCP_SYN_SENT &&
+		   inet_test_bit(DEFER_CONNECT, sk)) {
 		/* Active TCP fastopen socket with defer_connect
 		 * Return EPOLLOUT so application can call write()
 		 * in order for kernel to generate SYN+data
@@@ -1006,7 -1008,7 +1008,7 @@@ int tcp_sendmsg_fastopen(struct sock *s
 	tp->fastopen_req->size = size;
 	tp->fastopen_req->uarg = uarg;
 
-	if (inet->defer_connect) {
+	if (inet_test_bit(DEFER_CONNECT, sk)) {
 		err = tcp_connect(sk);
 		/* Same failure procedure as in tcp_v4/6_connect */
 		if (err) {
@@@ -1024,7 -1026,7 +1026,7 @@@
 	if (tp->fastopen_req) {
 		*copied = tp->fastopen_req->copied;
 		tcp_free_fastopen_req(tp);
-		inet->defer_connect = 0;
+		inet_clear_bit(DEFER_CONNECT, sk);
 	}
 	return err;
 }
@@@ -1065,7 -1067,8 +1067,8 @@@ int tcp_sendmsg_locked(struct sock *sk
 		zc = MSG_SPLICE_PAGES;
 	}
 
-	if (unlikely(flags & MSG_FASTOPEN || inet_sk(sk)->defer_connect) &&
+	if (unlikely(flags & MSG_FASTOPEN ||
+		     inet_test_bit(DEFER_CONNECT, sk)) &&
 	    !tp->repair) {
 		err = tcp_sendmsg_fastopen(sk, msg, &copied_syn, size, uarg);
 		if (err == -EINPROGRESS && copied_syn > 0)
@@@ -1700,7 -1703,7 +1703,7 @@@ EXPORT_SYMBOL(tcp_peek_len)
 /* Make sure sk_rcvbuf is big enough to satisfy SO_RCVLOWAT hint */
 int tcp_set_rcvlowat(struct sock *sk, int val)
 {
-	int cap;
+	int space, cap;
 
 	if (sk->sk_userlocks & SOCK_RCVBUF_LOCK)
 		cap = sk->sk_rcvbuf >> 1;
@@@ -1715,10 -1718,10 +1718,10 @@@
 	if (sk->sk_userlocks & SOCK_RCVBUF_LOCK)
 		return 0;
 
-	val <<= 1;
-	if (val > sk->sk_rcvbuf) {
-		WRITE_ONCE(sk->sk_rcvbuf, val);
-		tcp_sk(sk)->window_clamp = tcp_win_from_space(sk, val);
+	space = tcp_space_from_win(sk, val);
+	if (space > sk->sk_rcvbuf) {
+		WRITE_ONCE(sk->sk_rcvbuf, space);
+		tcp_sk(sk)->window_clamp = val;
 	}
 	return 0;
 }
@@@ -1739,7 -1742,7 +1742,7 @@@ void tcp_update_recv_tstamps(struct sk_
 }
 #ifdef CONFIG_MMU
-const struct vm_operations_struct tcp_vm_ops = {
+static const struct vm_operations_struct tcp_vm_ops = {
 };
 
 int tcp_mmap(struct file *file, struct socket *sock,
@@@ -2042,10 -2045,13 +2045,10 @@@ static struct vm_area_struct *find_tcp_
 					   unsigned long address,
 					   bool *mmap_locked)
 {
-	struct vm_area_struct *vma = NULL;
+	struct vm_area_struct *vma = lock_vma_under_rcu(mm, address);
 
-#ifdef CONFIG_PER_VMA_LOCK
-	vma = lock_vma_under_rcu(mm, address);
-#endif
 	if (vma) {
-		if (!vma_is_tcp(vma)) {
+		if (vma->vm_ops != &tcp_vm_ops) {
 			vma_end_read(vma);
 			return NULL;
 		}
@@@ -2055,7 -2061,7 +2058,7 @@@
 
 	mmap_read_lock(mm);
 	vma = vma_lookup(mm, address);
-	if (!vma || !vma_is_tcp(vma)) {
+	if (!vma || vma->vm_ops != &tcp_vm_ops) {
 		mmap_read_unlock(mm);
 		return NULL;
 	}
@@@ -2861,7 -2867,7 +2864,7 @@@ adjudge_to_death
 	if (sk->sk_state == TCP_FIN_WAIT2) {
 		struct tcp_sock *tp = tcp_sk(sk);
-		if (tp->linger2 < 0) {
+		if (READ_ONCE(tp->linger2) < 0) {
 			tcp_set_state(sk, TCP_CLOSE);
 			tcp_send_active_reset(sk, GFP_ATOMIC);
 			__NET_INC_STATS(sock_net(sk),
@@@ -3084,7 -3090,7 +3087,7 @@@ int tcp_disconnect(struct sock *sk, in
 	/* Clean up fastopen related fields */
 	tcp_free_fastopen_req(tp);
-	inet->defer_connect = 0;
+	inet_clear_bit(DEFER_CONNECT, sk);
 	tp->fastopen_client_fail = 0;
 
 	WARN_ON(inet->inet_num && !icsk->icsk_bind_hash);
@@@ -3287,18 -3293,21 +3290,21 @@@ int tcp_sock_set_syncnt(struct sock *sk
 	if (val < 1 || val > MAX_TCP_SYNCNT)
 		return -EINVAL;
 
-	lock_sock(sk);
 	WRITE_ONCE(inet_csk(sk)->icsk_syn_retries, val);
-	release_sock(sk);
 	return 0;
 }
 EXPORT_SYMBOL(tcp_sock_set_syncnt);
 
-void tcp_sock_set_user_timeout(struct sock *sk, u32 val)
+int tcp_sock_set_user_timeout(struct sock *sk, int val)
 {
-	lock_sock(sk);
+	/* Cap the max time in ms TCP will retry or probe the window
+	 * before giving up and aborting (ETIMEDOUT) a connection.
+	 */
+	if (val < 0)
+		return -EINVAL;
+
 	WRITE_ONCE(inet_csk(sk)->icsk_user_timeout, val);
-	release_sock(sk);
+	return 0;
 }
 EXPORT_SYMBOL(tcp_sock_set_user_timeout);
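These setters can drop lock_sock() because each one performs a single WRITE_ONCE() to a naturally aligned field; the price is that every reader that can race with them must pair with READ_ONCE(). A minimal sketch of the reader side of that contract:

/* Sketch of the lockless pattern used above: the writer publishes
 * with WRITE_ONCE(), so a racing reader must load with READ_ONCE().
 */
static u32 example_read_user_timeout(const struct sock *sk)
{
	return READ_ONCE(inet_csk(sk)->icsk_user_timeout);
}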
@@@ -3341,9 -3350,7 +3347,7 @@@ int tcp_sock_set_keepintvl(struct sock 
 	if (val < 1 || val > MAX_TCP_KEEPINTVL)
 		return -EINVAL;
 
-	lock_sock(sk);
 	WRITE_ONCE(tcp_sk(sk)->keepalive_intvl, val * HZ);
-	release_sock(sk);
 	return 0;
 }
 EXPORT_SYMBOL(tcp_sock_set_keepintvl);
@@@ -3353,10 -3360,8 +3357,8 @@@ int tcp_sock_set_keepcnt(struct sock *s
 	if (val < 1 || val > MAX_TCP_KEEPCNT)
 		return -EINVAL;
 
-	lock_sock(sk);
 	/* Paired with READ_ONCE() in keepalive_probes() */
 	WRITE_ONCE(tcp_sk(sk)->keepalive_probes, val);
-	release_sock(sk);
 	return 0;
 }
 EXPORT_SYMBOL(tcp_sock_set_keepcnt);
@@@ -3458,6 -3463,32 +3460,32 @@@ int do_tcp_setsockopt(struct sock *sk, 
 	if (copy_from_sockptr(&val, optval, sizeof(val)))
 		return -EFAULT;
 
+	/* Handle options that can be set without locking the socket. */
+	switch (optname) {
+	case TCP_SYNCNT:
+		return tcp_sock_set_syncnt(sk, val);
+	case TCP_USER_TIMEOUT:
+		return tcp_sock_set_user_timeout(sk, val);
+	case TCP_KEEPINTVL:
+		return tcp_sock_set_keepintvl(sk, val);
+	case TCP_KEEPCNT:
+		return tcp_sock_set_keepcnt(sk, val);
+	case TCP_LINGER2:
+		if (val < 0)
+			WRITE_ONCE(tp->linger2, -1);
+		else if (val > TCP_FIN_TIMEOUT_MAX / HZ)
+			WRITE_ONCE(tp->linger2, TCP_FIN_TIMEOUT_MAX);
+		else
+			WRITE_ONCE(tp->linger2, val * HZ);
+		return 0;
+	case TCP_DEFER_ACCEPT:
+		/* Translate value in seconds to number of retransmits */
+		WRITE_ONCE(icsk->icsk_accept_queue.rskq_defer_accept,
+			   secs_to_retrans(val, TCP_TIMEOUT_INIT / HZ,
+					   TCP_RTO_MAX / HZ));
+		return 0;
+	}
+
 	sockopt_lock_sock(sk);
 
 	switch (optname) {
@@@ -3553,25 -3584,6 +3581,6 @@@
 	case TCP_KEEPIDLE:
 		err = tcp_sock_set_keepidle_locked(sk, val);
 		break;
-	case TCP_KEEPINTVL:
-		if (val < 1 || val > MAX_TCP_KEEPINTVL)
-			err = -EINVAL;
-		else
-			WRITE_ONCE(tp->keepalive_intvl, val * HZ);
-		break;
-	case TCP_KEEPCNT:
-		if (val < 1 || val > MAX_TCP_KEEPCNT)
-			err = -EINVAL;
-		else
-			WRITE_ONCE(tp->keepalive_probes, val);
-		break;
-	case TCP_SYNCNT:
-		if (val < 1 || val > MAX_TCP_SYNCNT)
-			err = -EINVAL;
-		else
-			WRITE_ONCE(icsk->icsk_syn_retries, val);
-		break;
-
 	case TCP_SAVE_SYN:
 		/* 0: disable, 1: enable, 2: start from ether_header */
 		if (val < 0 || val > 2)
@@@ -3580,22 -3592,6 +3589,6 @@@
 			tp->save_syn = val;
 		break;
 
-	case TCP_LINGER2:
-		if (val < 0)
-			WRITE_ONCE(tp->linger2, -1);
-		else if (val > TCP_FIN_TIMEOUT_MAX / HZ)
-			WRITE_ONCE(tp->linger2, TCP_FIN_TIMEOUT_MAX);
-		else
-			WRITE_ONCE(tp->linger2, val * HZ);
-		break;
-
-	case TCP_DEFER_ACCEPT:
-		/* Translate value in seconds to number of retransmits */
-		WRITE_ONCE(icsk->icsk_accept_queue.rskq_defer_accept,
-			   secs_to_retrans(val, TCP_TIMEOUT_INIT / HZ,
-					   TCP_RTO_MAX / HZ));
-		break;
-
 	case TCP_WINDOW_CLAMP:
 		err = tcp_set_window_clamp(sk, val);
 		break;
@@@ -3610,16 -3606,6 +3603,6 @@@
 		err = tp->af_specific->md5_parse(sk, optname, optval, optlen);
 		break;
 #endif
-	case TCP_USER_TIMEOUT:
-		/* Cap the max time in ms TCP will retry or probe the window
-		 * before giving up and aborting (ETIMEDOUT) a connection.
-		 */
-		if (val < 0)
-			err = -EINVAL;
-		else
-			WRITE_ONCE(icsk->icsk_user_timeout, val);
-		break;
-
 	case TCP_FASTOPEN:
 		if (val >= 0 && ((1 << sk->sk_state) & (TCPF_CLOSE |
 		    TCPF_LISTEN))) {
diff --combined net/ipv4/tcp_ipv4.c
index 2dbdc26da86e,2a662d5f3072..27140e5cdc06
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@@ -57,6 -57,7 +57,7 @@@
 #include <linux/init.h>
 #include <linux/times.h>
 #include <linux/slab.h>
+#include <linux/sched.h>
 #include <net/net_namespace.h>
 #include <net/icmp.h>
@@@ -312,7 -313,7 +313,7 @@@ int tcp_v4_connect(struct sock *sk, str
 						  inet->inet_daddr));
 	}
 
-	inet->inet_id = get_random_u16();
+	atomic_set(&inet->inet_id, get_random_u16());
 
 	if (tcp_fastopen_defer_connect(sk, &err))
 		return err;
@@@ -476,7 -477,6 +477,6 @@@ int tcp_v4_err(struct sk_buff *skb, u3
 	const struct iphdr *iph = (const struct iphdr *)skb->data;
 	struct tcphdr *th = (struct tcphdr *)(skb->data + (iph->ihl << 2));
 	struct tcp_sock *tp;
-	struct inet_sock *inet;
 	const int type = icmp_hdr(skb)->type;
 	const int code = icmp_hdr(skb)->code;
 	struct sock *sk;
@@@ -624,8 -624,8 +624,8 @@@
 	 *							--ANK (980905)
 	 */
 
-	inet = inet_sk(sk);
-	if (!sock_owned_by_user(sk) && inet->recverr) {
+	if (!sock_owned_by_user(sk) &&
+	    inet_test_bit(RECVERR, sk)) {
 		WRITE_ONCE(sk->sk_err, err);
 		sk_error_report(sk);
 	} else	{ /* Only an error on timeout */
@@@ -1596,7 -1596,7 +1596,7 @@@ struct sock *tcp_v4_syn_recv_sock(cons
 	inet_csk(newsk)->icsk_ext_hdr_len = 0;
 	if (inet_opt)
 		inet_csk(newsk)->icsk_ext_hdr_len = inet_opt->opt.optlen;
-	newinet->inet_id = get_random_u16();
+	atomic_set(&newinet->inet_id, get_random_u16());
 
 	/* Set ToS of the new socket based upon the value of incoming SYN.
 	 * ECT bits are set later in tcp_init_transfer().
@@@ -2448,6 -2448,8 +2448,8 @@@ static void *established_get_first(stru
 		struct hlist_nulls_node *node;
 		spinlock_t *lock = inet_ehash_lockp(hinfo, st->bucket);
 
+		cond_resched();
+
 		/* Lockless fast path for the common case of empty buckets */
 		if (empty_bucket(hinfo, st))
 			continue;
diff --combined net/sctp/socket.c
index 76f1bce49a8e,04b390892827..fd0631e70d46
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@@ -99,7 -99,7 +99,7 @@@ struct percpu_counter sctp_sockets_allo
 static void sctp_enter_memory_pressure(struct sock *sk)
 {
-	sctp_memory_pressure = 1;
+	WRITE_ONCE(sctp_memory_pressure, 1);
 }
 
@@@ -9479,10 -9479,10 +9479,10 @@@ void sctp_copy_sock(struct sock *newsk
 	newinet->inet_rcv_saddr = inet->inet_rcv_saddr;
 	newinet->inet_dport = htons(asoc->peer.port);
 	newinet->pmtudisc = inet->pmtudisc;
-	newinet->inet_id = get_random_u16();
+	atomic_set(&newinet->inet_id, get_random_u16());
 
 	newinet->uc_ttl = inet->uc_ttl;
-	newinet->mc_loop = 1;
+	inet_set_bit(MC_LOOP, newsk);
 	newinet->mc_ttl = 1;
 	newinet->mc_index = 0;
 	newinet->mc_list = NULL;
@@@ -9732,6 -9732,7 +9732,7 @@@ struct proto sctpv6_prot = 
 	.unhash		= sctp_unhash,
 	.no_autobind	= true,
 	.obj_size	= sizeof(struct sctp6_sock),
+	.ipv6_pinfo_offset = offsetof(struct sctp6_sock, inet6),
 	.useroffset	= offsetof(struct sctp6_sock, sctp.subscribe),
 	.usersize	= offsetof(struct sctp6_sock, sctp.initmsg) -
 			  offsetof(struct sctp6_sock, sctp.subscribe) +
diff --combined net/sunrpc/svcsock.c
index 2864af3abdca,8c9a8ee76aa0..998687421fa6
--- a/net/sunrpc/svcsock.c
+++ b/net/sunrpc/svcsock.c
@@@ -36,8 -36,6 +36,8 @@@
 #include <linux/skbuff.h>
 #include <linux/file.h>
 #include <linux/freezer.h>
+#include <linux/bvec.h>
+
 #include <net/sock.h>
 #include <net/checksum.h>
 #include <net/ip.h>
@@@ -45,7 -43,7 +45,7 @@@
 #include <net/udp.h>
 #include <net/tcp.h>
 #include <net/tcp_states.h>
-#include <net/tls.h>
+#include <net/tls_prot.h>
 #include <net/handshake.h>
 #include <linux/uaccess.h>
 #include <linux/highmem.h>
@@@ -228,27 -226,30 +228,30 @@@ static int svc_one_sock_name(struct svc
 }
 static int
-svc_tcp_sock_process_cmsg(struct svc_sock *svsk, struct msghdr *msg,
+svc_tcp_sock_process_cmsg(struct socket *sock, struct msghdr *msg,
 			  struct cmsghdr *cmsg, int ret)
 {
-	if (cmsg->cmsg_level == SOL_TLS &&
-	    cmsg->cmsg_type == TLS_GET_RECORD_TYPE) {
-		u8 content_type = *((u8 *)CMSG_DATA(cmsg));
-
-		switch (content_type) {
-		case TLS_RECORD_TYPE_DATA:
-			/* TLS sets EOR at the end of each application data
-			 * record, even though there might be more frames
-			 * waiting to be decrypted.
-			 */
-			msg->msg_flags &= ~MSG_EOR;
-			break;
-		case TLS_RECORD_TYPE_ALERT:
-			ret = -ENOTCONN;
-			break;
-		default:
-			ret = -EAGAIN;
-		}
+	u8 content_type = tls_get_record_type(sock->sk, cmsg);
+	u8 level, description;
+
+	switch (content_type) {
+	case 0:
+		break;
+	case TLS_RECORD_TYPE_DATA:
+		/* TLS sets EOR at the end of each application data
+		 * record, even though there might be more frames
+		 * waiting to be decrypted.
+		 */
+		msg->msg_flags &= ~MSG_EOR;
+		break;
+	case TLS_RECORD_TYPE_ALERT:
+		tls_alert_recv(sock->sk, msg, &level, &description);
+		ret = (level == TLS_ALERT_LEVEL_FATAL) ?
+			-ENOTCONN : -EAGAIN;
+		break;
+	default:
+		/* discard this record type */
+		ret = -EAGAIN;
 	}
 	return ret;
 }
@@@ -260,13 -261,14 +263,14 @@@ svc_tcp_sock_recv_cmsg(struct svc_sock 
 		struct cmsghdr	cmsg;
 		u8		buf[CMSG_SPACE(sizeof(u8))];
 	} u;
+	struct socket *sock = svsk->sk_sock;
 	int ret;
 
 	msg->msg_control = &u;
 	msg->msg_controllen = sizeof(u);
-	ret = sock_recvmsg(svsk->sk_sock, msg, MSG_DONTWAIT);
+	ret = sock_recvmsg(sock, msg, MSG_DONTWAIT);
 	if (unlikely(msg->msg_controllen != sizeof(u)))
-		ret = svc_tcp_sock_process_cmsg(sock, msg, &u.cmsg, ret);
+		ret = svc_tcp_sock_process_cmsg(sock, msg, &u.cmsg, ret);
 	return ret;
 }
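Both helpers build on kTLS delivering the decrypted record's type as a control message on receive. As a hedged sketch of that general pattern (not this driver's exact code): supply cmsg space to recvmsg() and look for a SOL_TLS/TLS_GET_RECORD_TYPE header:

/* Sketch, assuming a kTLS-enabled socket: a changed msg_controllen
 * after the receive indicates a control message was delivered.
 */
union {
	struct cmsghdr	cmsg;
	u8		buf[CMSG_SPACE(sizeof(u8))];
} u;
struct msghdr msg = {
	.msg_control	= &u,
	.msg_controllen	= sizeof(u),
};
int ret = sock_recvmsg(sock, &msg, MSG_DONTWAIT);

if (msg.msg_controllen != sizeof(u) &&
    u.cmsg.cmsg_level == SOL_TLS &&
    u.cmsg.cmsg_type == TLS_GET_RECORD_TYPE)
	pr_debug("TLS record type %u\n", *(u8 *)CMSG_DATA(&u.cmsg));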
@@@ -693,10 -695,9 +697,10 @@@ static int svc_udp_sendto(struct svc_rq
 		.msg_name	= &rqstp->rq_addr,
 		.msg_namelen	= rqstp->rq_addrlen,
 		.msg_control	= cmh,
+		.msg_flags	= MSG_SPLICE_PAGES,
 		.msg_controllen	= sizeof(buffer),
 	};
-	unsigned int sent;
+	unsigned int count;
 	int err;
 
 	svc_udp_release_ctxt(xprt, rqstp->rq_xprt_ctxt);
@@@ -709,23 -710,22 +713,23 @@@
 	if (svc_xprt_is_dead(xprt))
 		goto out_notconn;
 
-	err = xdr_alloc_bvec(xdr, GFP_KERNEL);
-	if (err < 0)
-		goto out_unlock;
+	count = xdr_buf_to_bvec(rqstp->rq_bvec,
+				ARRAY_SIZE(rqstp->rq_bvec), xdr);
 
-	err = xprt_sock_sendmsg(svsk->sk_sock, &msg, xdr, 0, 0, &sent);
+	iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, rqstp->rq_bvec,
+		      count, 0);
+	err = sock_sendmsg(svsk->sk_sock, &msg);
 	if (err == -ECONNREFUSED) {
 		/* ICMP error on earlier request. */
-		err = xprt_sock_sendmsg(svsk->sk_sock, &msg, xdr, 0, 0, &sent);
+		iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, rqstp->rq_bvec,
+			      count, 0);
+		err = sock_sendmsg(svsk->sk_sock, &msg);
 	}
-	xdr_free_bvec(xdr);
+
 	trace_svcsock_udp_send(xprt, err);
-out_unlock:
+
 	mutex_unlock(&xprt->xpt_mutex);
-	if (err < 0)
-		return err;
-	return sent;
+	return err;
 
 out_notconn:
 	mutex_unlock(&xprt->xpt_mutex);
@@@ -1089,9 -1089,6 +1093,9 @@@ static void svc_tcp_fragment_received(s
 	/* If we have more data, signal svc_xprt_enqueue() to try again */
 	svsk->sk_tcplen = 0;
 	svsk->sk_marker = xdr_zero;
+
+	smp_wmb();
+	tcp_set_rcvlowat(svsk->sk_sk, 1);
 }
 
 /**
@@@ -1181,17 -1178,10 +1185,17 @@@ err_incomplete
 		goto err_delete;
 	if (len == want)
 		svc_tcp_fragment_received(svsk);
-	else
+	else {
+		/* Avoid more ->sk_data_ready() calls until the rest
+		 * of the message has arrived. This reduces service
+		 * thread wake-ups on large incoming messages. */
+		tcp_set_rcvlowat(svsk->sk_sk,
+				 svc_sock_reclen(svsk) - svsk->sk_tcplen);
+
 		trace_svcsock_tcp_recv_short(&svsk->sk_xprt,
 				svc_sock_reclen(svsk),
 				svsk->sk_tcplen - sizeof(rpc_fraghdr));
+	}
 	goto err_noclose;
 error:
 	if (len != -EAGAIN)
@@@ -1208,51 -1198,75 +1212,51 @@@ err_noclose
 	return 0;	/* record not complete */
 }
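The tcp_set_rcvlowat() calls above raise the receive low-water mark while a record is partially received, suppressing ->sk_data_ready() wake-ups until the remainder of the RPC record is queued, then drop it back to 1 byte once the fragment completes. Userspace can apply the same trick with SO_RCVLOWAT; a sketch, assuming the caller tracks record_len and received itself:

/* Userspace analogue of the wake-up suppression above: don't wake
 * the reader until the rest of a known-length record has arrived.
 */
int lowat = record_len - received;

if (setsockopt(fd, SOL_SOCKET, SO_RCVLOWAT, &lowat, sizeof(lowat)) < 0)
	perror("setsockopt(SO_RCVLOWAT)");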
-static int svc_tcp_send_kvec(struct socket *sock, const struct kvec *vec,
-			      int flags)
-{
-	struct msghdr msg = { .msg_flags = MSG_SPLICE_PAGES | flags, };
-
-	iov_iter_kvec(&msg.msg_iter, ITER_SOURCE, vec, 1, vec->iov_len);
-	return sock_sendmsg(sock, &msg);
-}
-
 /*
  * MSG_SPLICE_PAGES is used exclusively to reduce the number of
  * copy operations in this path. Therefore the caller must ensure
  * that the pages backing @xdr are unchanging.
  *
- * In addition, the logic assumes that * .bv_len is never larger
- * than PAGE_SIZE.
+ * Note that the send is non-blocking. The caller has incremented
+ * the reference count on each page backing the RPC message, and
+ * the network layer will "put" these pages when transmission is
+ * complete.
+ *
+ * This is safe for our RPC services because the memory backing
+ * the head and tail components is never kmalloc'd. These always
+ * come from pages in the svc_rqst::rq_pages array.
  */
-static int svc_tcp_sendmsg(struct socket *sock, struct xdr_buf *xdr,
+static int svc_tcp_sendmsg(struct svc_sock *svsk, struct svc_rqst *rqstp,
 			   rpc_fraghdr marker, unsigned int *sentp)
 {
-	const struct kvec *head = xdr->head;
-	const struct kvec *tail = xdr->tail;
-	struct kvec rm = {
-		.iov_base	= &marker,
-		.iov_len	= sizeof(marker),
-	};
 	struct msghdr msg = {
-		.msg_flags	= 0,
+		.msg_flags	= MSG_SPLICE_PAGES,
 	};
+	unsigned int count;
+	void *buf;
 	int ret;
 
 	*sentp = 0;
-	ret = xdr_alloc_bvec(xdr, GFP_KERNEL);
-	if (ret < 0)
-		return ret;
-
-	ret = kernel_sendmsg(sock, &msg, &rm, 1, rm.iov_len);
-	if (ret < 0)
-		return ret;
-	*sentp += ret;
-	if (ret != rm.iov_len)
-		return -EAGAIN;
-
-	ret = svc_tcp_send_kvec(sock, head, 0);
-	if (ret < 0)
-		return ret;
-	*sentp += ret;
-	if (ret != head->iov_len)
-		goto out;
 
-	if (xdr_buf_pagecount(xdr))
-		xdr->bvec[0].bv_offset = offset_in_page(xdr->page_base);
-
-	msg.msg_flags = MSG_SPLICE_PAGES;
-	iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, xdr->bvec,
-		      xdr_buf_pagecount(xdr), xdr->page_len);
-	ret = sock_sendmsg(sock, &msg);
+	/* The stream record marker is copied into a temporary page
+	 * fragment buffer so that it can be included in rq_bvec.
+	 */
+	buf = page_frag_alloc(&svsk->sk_frag_cache, sizeof(marker),
+			      GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
+	memcpy(buf, &marker, sizeof(marker));
+	bvec_set_virt(rqstp->rq_bvec, buf, sizeof(marker));
+
+	count = xdr_buf_to_bvec(rqstp->rq_bvec + 1,
+				ARRAY_SIZE(rqstp->rq_bvec) - 1, &rqstp->rq_res);
+
+	iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, rqstp->rq_bvec,
+		      1 + count, sizeof(marker) + rqstp->rq_res.len);
+	ret = sock_sendmsg(svsk->sk_sock, &msg);
 	if (ret < 0)
 		return ret;
 	*sentp += ret;
-
-	if (tail->iov_len) {
-		ret = svc_tcp_send_kvec(sock, tail, 0);
-		if (ret < 0)
-			return ret;
-		*sentp += ret;
-	}
-
-out:
 	return 0;
 }
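Because MSG_SPLICE_PAGES makes the network layer take page references rather than copying, the 4-byte record marker cannot live on the stack; it is parked in a page fragment so it too can be spliced. The whole reply then goes out in one sock_sendmsg() over a single bvec array. A minimal sketch of that send pattern, under the assumption that bvec[] is already filled and every page it references stays stable until transmission completes:

/* Sketch of the single-sendmsg splice pattern used above. */
struct msghdr msg = { .msg_flags = MSG_SPLICE_PAGES };

iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, bvec, nr_bvecs, total_len);
ret = sock_sendmsg(sock, &msg);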
@@@ -1278,17 -1292,23 +1282,17 @@@ static int svc_tcp_sendto(struct svc_rq
 	svc_tcp_release_ctxt(xprt, rqstp->rq_xprt_ctxt);
 	rqstp->rq_xprt_ctxt = NULL;
 
-	atomic_inc(&svsk->sk_sendqlen);
 	mutex_lock(&xprt->xpt_mutex);
 	if (svc_xprt_is_dead(xprt))
 		goto out_notconn;
-	tcp_sock_set_cork(svsk->sk_sk, true);
-	err = svc_tcp_sendmsg(svsk->sk_sock, xdr, marker, &sent);
-	xdr_free_bvec(xdr);
+	err = svc_tcp_sendmsg(svsk, rqstp, marker, &sent);
 	trace_svcsock_tcp_send(xprt, err < 0 ? (long)err : sent);
 	if (err < 0 || sent != (xdr->len + sizeof(marker)))
 		goto out_close;
-	if (atomic_dec_and_test(&svsk->sk_sendqlen))
-		tcp_sock_set_cork(svsk->sk_sk, false);
 	mutex_unlock(&xprt->xpt_mutex);
 	return sent;
 
 out_notconn:
-	atomic_dec(&svsk->sk_sendqlen);
 	mutex_unlock(&xprt->xpt_mutex);
 	return -ENOTCONN;
 out_close:
@@@ -1297,6 -1317,7 +1301,6 @@@
 		  (err < 0) ? "got error" : "sent",
 		  (err < 0) ? err : sent, xdr->len);
 	svc_xprt_deferred_close(xprt);
-	atomic_dec(&svsk->sk_sendqlen);
 	mutex_unlock(&xprt->xpt_mutex);
 	return -EAGAIN;
 }
@@@ -1607,6 -1628,8 +1611,8 @@@ static void svc_tcp_sock_detach(struct 
 {
 	struct svc_sock *svsk = container_of(xprt, struct svc_sock, sk_xprt);
 
+	tls_handshake_close(svsk->sk_sock);
+
 	svc_sock_detach(xprt);
 
 	if (!test_bit(XPT_LISTENER, &xprt->xpt_flags)) {
@@@ -1621,7 -1644,6 +1627,7 @@@
 static void svc_sock_free(struct svc_xprt *xprt)
 {
 	struct svc_sock *svsk = container_of(xprt, struct svc_sock, sk_xprt);
+	struct page_frag_cache *pfc = &svsk->sk_frag_cache;
 	struct socket *sock = svsk->sk_sock;
 
 	trace_svcsock_free(svsk, sock);
@@@ -1631,8 -1653,5 +1637,8 @@@
 		sockfd_put(sock);
 	else
 		sock_release(sock);
+	if (pfc->va)
+		__page_frag_cache_drain(virt_to_head_page(pfc->va),
+					pfc->pagecnt_bias);
 	kfree(svsk);
 }
diff --combined security/security.c
index 549104a447e3,2dfc7b9f6ed9..3b454e9442b1
--- a/security/security.c
+++ b/security/security.c
@@@ -1138,20 -1138,6 +1138,20 @@@ void security_bprm_committed_creds(stru
 	call_void_hook(bprm_committed_creds, bprm);
 }
+/**
+ * security_fs_context_submount() - Initialise fc->security
+ * @fc: new filesystem context
+ * @reference: dentry reference for submount/remount
+ *
+ * Fill out the ->security field for a new fs_context.
+ *
+ * Return: Returns 0 on success or negative error code on failure.
+ */
+int security_fs_context_submount(struct fs_context *fc, struct super_block *reference)
+{
+	return call_int_hook(fs_context_submount, 0, fc, reference);
+}
+
 /**
  * security_fs_context_dup() - Duplicate a fs_context LSM blob
  * @fc: destination filesystem context
@@@ -4410,7 -4396,7 +4410,7 @@@ void security_sk_clone(const struct soc
 }
 EXPORT_SYMBOL(security_sk_clone);
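An LSM opts in to the new hook with an LSM_HOOK_INIT() entry, as SELinux does further below. For some other LSM with no submount state to carry over, a minimal implementation might look like this illustrative stub:

/* Illustrative stub only: an LSM with nothing to inherit from the
 * reference superblock can simply succeed.
 */
static int example_fs_context_submount(struct fs_context *fc,
				       struct super_block *reference)
{
	return 0;
}

/* registered alongside the LSM's other hooks: */
LSM_HOOK_INIT(fs_context_submount, example_fs_context_submount),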
-void security_sk_classify_flow(struct sock *sk, struct flowi_common *flic)
+void security_sk_classify_flow(const struct sock *sk, struct flowi_common *flic)
 {
 	call_void_hook(sk_getsecid, sk, &flic->flowic_secid);
 }
diff --combined security/selinux/hooks.c
index f88d659c2e59,2bdc48dd8670..3363716ee80a
--- a/security/selinux/hooks.c
+++ b/security/selinux/hooks.c
@@@ -2745,27 -2745,6 +2745,27 @@@ static int selinux_umount(struct vfsmou
 				   FILESYSTEM__UNMOUNT, NULL);
 }
 
+static int selinux_fs_context_submount(struct fs_context *fc,
+				       struct super_block *reference)
+{
+	const struct superblock_security_struct *sbsec;
+	struct selinux_mnt_opts *opts;
+
+	opts = kzalloc(sizeof(*opts), GFP_KERNEL);
+	if (!opts)
+		return -ENOMEM;
+
+	sbsec = selinux_superblock(reference);
+	if (sbsec->flags & FSCONTEXT_MNT)
+		opts->fscontext_sid = sbsec->sid;
+	if (sbsec->flags & CONTEXT_MNT)
+		opts->context_sid = sbsec->mntpoint_sid;
+	if (sbsec->flags & DEFCONTEXT_MNT)
+		opts->defcontext_sid = sbsec->def_sid;
+	fc->security = opts;
+	return 0;
+}
+
 static int selinux_fs_context_dup(struct fs_context *fc,
 				  struct fs_context *src_fc)
 {
@@@ -3783,10 -3762,13 +3783,10 @@@ static int selinux_file_mprotect(struc
 	if (default_noexec &&
 	    (prot & PROT_EXEC) && !(vma->vm_flags & VM_EXEC)) {
 		int rc = 0;
-		if (vma->vm_start >= vma->vm_mm->start_brk &&
-		    vma->vm_end <= vma->vm_mm->brk) {
+		if (vma_is_initial_heap(vma)) {
 			rc = avc_has_perm(sid, sid, SECCLASS_PROCESS,
 					  PROCESS__EXECHEAP, NULL);
-		} else if (!vma->vm_file &&
-			   ((vma->vm_start <= vma->vm_mm->start_stack &&
-			     vma->vm_end >= vma->vm_mm->start_stack) ||
+		} else if (!vma->vm_file && (vma_is_initial_stack(vma) ||
 			    vma_is_stack_for_current(vma))) {
 			rc = avc_has_perm(sid, sid, SECCLASS_PROCESS,
 					  PROCESS__EXECSTACK, NULL);
@@@ -5185,12 -5167,12 +5185,12 @@@ static void selinux_sk_clone_security(c
 	selinux_netlbl_sk_security_reset(newsksec);
 }
 
-static void selinux_sk_getsecid(struct sock *sk, u32 *secid)
+static void selinux_sk_getsecid(const struct sock *sk, u32 *secid)
 {
 	if (!sk)
 		*secid = SECINITSID_ANY_SOCKET;
 	else {
-		struct sk_security_struct *sksec = sk->sk_security;
+		const struct sk_security_struct *sksec = sk->sk_security;
 
 		*secid = sksec->sid;
 	}
@@@ -7200,7 -7182,6 +7200,7 @@@ static struct security_hook_list selinu
 	/*
 	 * PUT "CLONING" (ACCESSING + ALLOCATING) HOOKS HERE
 	 */
+	LSM_HOOK_INIT(fs_context_submount, selinux_fs_context_submount),
 	LSM_HOOK_INIT(fs_context_dup, selinux_fs_context_dup),
 	LSM_HOOK_INIT(fs_context_parse_param, selinux_fs_context_parse_param),
 	LSM_HOOK_INIT(sb_eat_lsm_opts, selinux_sb_eat_lsm_opts),
diff --combined tools/build/feature/Makefile
index 3184f387990a,f0c5de018a95..dad79ede4e0a
--- a/tools/build/feature/Makefile
+++ b/tools/build/feature/Makefile
@@@ -340,7 -340,7 +340,7 @@@ $(OUTPUT)test-jvmti-cmlr.bin
 	$(BUILD)
 $(OUTPUT)test-llvm.bin:
-	$(BUILDXX) -std=gnu++14 \
+	$(BUILDXX) -std=gnu++17 \
 		-I$(shell $(LLVM_CONFIG) --includedir) \
 		-L$(shell $(LLVM_CONFIG) --libdir) \
 		$(shell $(LLVM_CONFIG) --libs Core BPF) \
@@@ -348,15 -348,17 +348,15 @@@
 		> $(@:.bin=.make.output) 2>&1
 
 $(OUTPUT)test-llvm-version.bin:
-	$(BUILDXX) -std=gnu++14 \
+	$(BUILDXX) -std=gnu++17 \
 		-I$(shell $(LLVM_CONFIG) --includedir) \
 		> $(@:.bin=.make.output) 2>&1
 
 $(OUTPUT)test-clang.bin:
-	$(BUILDXX) -std=gnu++14 \
+	$(BUILDXX) -std=gnu++17 \
 		-I$(shell $(LLVM_CONFIG) --includedir) \
 		-L$(shell $(LLVM_CONFIG) --libdir) \
-		-Wl,--start-group -lclangBasic -lclangDriver \
-		-lclangFrontend -lclangEdit -lclangLex \
-		-lclangAST -Wl,--end-group \
+		-Wl,--start-group -lclang-cpp -Wl,--end-group \
 		$(shell $(LLVM_CONFIG) --libs Core option) \
 		$(shell $(LLVM_CONFIG) --system-libs) \
 		> $(@:.bin=.make.output) 2>&1
@@@ -370,7 -372,7 +370,7 @@@ $(OUTPUT)test-libzstd.bin
 	$(BUILD) -lzstd
 
 $(OUTPUT)test-clang-bpf-co-re.bin:
-	$(CLANG) -S -g -target bpf -o - $(patsubst %.bin,%.c,$(@F)) | \
+	$(CLANG) -S -g --target=bpf -o - $(patsubst %.bin,%.c,$(@F)) | \
 		grep BTF_KIND_VAR
 
 $(OUTPUT)test-file-handle.bin:
diff --combined tools/testing/selftests/Makefile
index 8dca8acdb671,5c60a7cea732..2b96d4543afc
--- a/tools/testing/selftests/Makefile
+++ b/tools/testing/selftests/Makefile
@@@ -8,6 -8,7 +8,7 @@@ TARGETS += cachesta
 TARGETS += capabilities
 TARGETS += cgroup
 TARGETS += clone3
+TARGETS += connector
 TARGETS += core
 TARGETS += cpufreq
 TARGETS += cpu-hotplug
@@@ -18,7 -19,6 +19,7 @@@ TARGETS += drivers/net/bondin
 TARGETS += drivers/net/team
 TARGETS += efivarfs
 TARGETS += exec
+TARGETS += fchmodat2
 TARGETS += filesystems
 TARGETS += filesystems/binderfs
 TARGETS += filesystems/epoll