The following commit has been merged in the master branch:

commit 6b2ccc19110159b8507689afa7385355b087446e
Merge: 382a8d8b63c0a985f1873dd2eec75a29013412e4 dc1a9bf2c8169d9f607502162af1858a73a18cb8
Author: Stephen Rothwell <sfr@canb.auug.org.au>
Date:   Wed Sep 2 11:37:24 2020 +1000
Merge remote-tracking branch 'net-next/master' into master
# Conflicts:
#	drivers/net/ethernet/ibm/ibmvnic.c
#	net/ipv4/raw.c
diff --combined Documentation/admin-guide/kernel-parameters.txt index e464cf0b5025,8af893ef0d46..e03e4f8f0fb8 --- a/Documentation/admin-guide/kernel-parameters.txt +++ b/Documentation/admin-guide/kernel-parameters.txt @@@ -599,17 -599,6 +599,17 @@@ altogether. For more information, see include/linux/dma-contiguous.h
+ cma_pernuma=nn[MG] + [ARM64,KNL] + Sets the size of the kernel per-NUMA memory area for + contiguous memory allocations. A value of 0 disables + per-NUMA CMA altogether. If this option is not + specified, the default value is 0. + With per-NUMA CMA enabled, DMA users on node nid will + first try to allocate buffers from the per-NUMA area + located on node nid; if the allocation fails, they + fall back to the global default memory area. + cmo_free_hint= [PPC] Format: { yes | no } Specify whether pages are marked as being inactive when they are freed. This is used in CMO environments @@@ -1349,6 -1338,11 +1349,11 @@@ Format: <interval>,<probability>,<space>,<times> See also Documentation/fault-injection/.
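As a usage illustration (the size here is made up, not part of the patch), an arm64 kernel booted with

    cma_pernuma=16M

would reserve a 16 MiB CMA area on every NUMA node; a DMA buffer request from a device on node 1 is then served from node 1's area first and falls back to the global CMA area only if that allocation fails.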
+ fb_tunnels= [NET] + Format: { initns | none } + See Documentation/admin-guide/sysctl/net.rst for + fb_tunnels_only_for_init_ns + floppy= [HW] See Documentation/admin-guide/blockdev/floppy.rst.
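For example, per the sysctl document referenced above, booting with

    fb_tunnels=initns

restricts the automatic fallback tunnel devices (tunl0, gre0, and friends) to the initial network namespace, while fb_tunnels=none suppresses them everywhere.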
diff --combined MAINTAINERS index f3f018217cce,a1c15b6714a0..b52ae3746844 --- a/MAINTAINERS +++ b/MAINTAINERS @@@ -1286,7 -1286,7 +1286,7 @@@ S: Supporte F: Documentation/devicetree/bindings/net/apm-xgene-enet.txt F: Documentation/devicetree/bindings/net/apm-xgene-mdio.txt F: drivers/net/ethernet/apm/xgene/ - F: drivers/net/phy/mdio-xgene.c + F: drivers/net/mdio/mdio-xgene.c
APPLIED MICRO (APM) X-GENE SOC PMU M: Khuong Dinh khuong@os.amperecomputing.com @@@ -2220,8 -2220,8 +2220,8 @@@ ARM/OPENMOKO NEO FREERUNNER (GTA02) MAC L: openmoko-kernel@lists.openmoko.org (subscribers-only) S: Orphan W: http://wiki.openmoko.org/wiki/Neo_FreeRunner -F: arch/arm/mach-s3c24xx/gta02.h -F: arch/arm/mach-s3c24xx/mach-gta02.c +F: arch/arm/mach-s3c/gta02.h +F: arch/arm/mach-s3c/mach-gta02.c
ARM/Orion SoC/Technologic Systems TS-78xx platform support M: Alexander Clouter alex@digriz.org.uk @@@ -2410,8 -2410,10 +2410,8 @@@ F: arch/arm/boot/dts/exynos F: arch/arm/boot/dts/s3c* F: arch/arm/boot/dts/s5p* F: arch/arm/mach-exynos*/ -F: arch/arm/mach-s3c24*/ -F: arch/arm/mach-s3c64xx/ +F: arch/arm/mach-s3c/ F: arch/arm/mach-s5p*/ -F: arch/arm/plat-samsung/ F: arch/arm64/boot/dts/exynos/ F: drivers/*/*/*s3c24* F: drivers/*/*s3c24* @@@ -2422,9 -2424,6 +2422,9 @@@ F: drivers/soc/samsung F: drivers/tty/serial/samsung* F: include/linux/soc/samsung/ N: exynos +N: s3c2410 +N: s3c64xx +N: s5pv210
ARM/SAMSUNG MOBILE MACHINE SUPPORT M: Kyungmin Park kyungmin.park@samsung.com @@@ -3206,7 -3205,6 +3206,7 @@@ S: Maintaine T: git git://git.kernel.org/pub/scm/linux/kernel/git/axboe/linux-block.git F: block/ F: drivers/block/ +F: include/linux/blk* F: kernel/trace/blktrace.c F: lib/sbitmap.c
@@@ -3390,7 -3388,6 +3390,7 @@@ M: Florian Fainelli <f.fainelli@gmail.c L: netdev@vger.kernel.org L: openwrt-devel@lists.openwrt.org (subscribers-only) S: Supported +F: Documentation/devicetree/bindings/net/dsa/b53.txt F: drivers/net/dsa/b53/* F: include/linux/platform_data/b53.h
@@@ -3576,28 -3573,13 +3576,28 @@@ L: bcm-kernel-feedback-list@broadcom.co S: Maintained F: drivers/phy/broadcom/phy-brcm-usb*
+BROADCOM ETHERNET PHY DRIVERS +M: Florian Fainelli f.fainelli@gmail.com +L: bcm-kernel-feedback-list@broadcom.com +L: netdev@vger.kernel.org +S: Supported +F: Documentation/devicetree/bindings/net/broadcom-bcm87xx.txt +F: drivers/net/phy/bcm*.[ch] +F: drivers/net/phy/broadcom.c +F: include/linux/brcmphy.h + BROADCOM GENET ETHERNET DRIVER M: Doug Berger opendmb@gmail.com M: Florian Fainelli f.fainelli@gmail.com L: bcm-kernel-feedback-list@broadcom.com L: netdev@vger.kernel.org S: Supported +F: Documentation/devicetree/bindings/net/brcm,bcmgenet.txt +F: Documentation/devicetree/bindings/net/brcm,unimac-mdio.txt F: drivers/net/ethernet/broadcom/genet/ +F: drivers/net/mdio/mdio-bcm-unimac.c +F: include/linux/platform_data/bcmgenet.h +F: include/linux/platform_data/mdio-bcm-unimac.h
BROADCOM IPROC ARM ARCHITECTURE M: Ray Jui rjui@broadcom.com @@@ -4265,7 -4247,6 +4265,7 @@@ W: https://clangbuiltlinux.github.io B: https://github.com/ClangBuiltLinux/linux/issues C: irc://chat.freenode.net/clangbuiltlinux F: Documentation/kbuild/llvm.rst +F: scripts/clang-tools/ K: \b(?i:clang|llvm)\b
CLEANCACHE API @@@ -4711,6 -4692,15 +4711,15 @@@ S: Supporte W: http://www.chelsio.com F: drivers/crypto/chelsio
+ CXGB4 INLINE CRYPTO DRIVER + M: Ayush Sawal ayush.sawal@chelsio.com + M: Vinay Kumar Yadav vinay.yadav@chelsio.com + M: Rohit Maheshwari rohitm@chelsio.com + L: netdev@vger.kernel.org + S: Supported + W: http://www.chelsio.com + F: drivers/net/ethernet/chelsio/inline_crypto/ + CXGB4 ETHERNET DRIVER (CXGB4) M: Vishal Kulkarni vishal@chelsio.com L: netdev@vger.kernel.org @@@ -5258,7 -5248,6 +5267,7 @@@ DOCUMENTATIO M: Jonathan Corbet corbet@lwn.net L: linux-doc@vger.kernel.org S: Maintained +P: Documentation/doc-guide/maintainer-profile.rst T: git git://git.lwn.net/linux.git docs-next F: Documentation/ F: scripts/documentation-file-ref-check @@@ -6514,6 -6503,7 +6523,6 @@@ F: net/bridge
ETHERNET PHY LIBRARY M: Andrew Lunn andrew@lunn.ch -M: Florian Fainelli f.fainelli@gmail.com M: Heiner Kallweit hkallweit1@gmail.com R: Russell King linux@armlinux.org.uk L: netdev@vger.kernel.org @@@ -6523,11 -6513,14 +6532,14 @@@ F: Documentation/devicetree/bindings/ne F: Documentation/devicetree/bindings/net/mdio* F: Documentation/devicetree/bindings/net/qca,ar803x.yaml F: Documentation/networking/phy.rst + F: drivers/net/mdio/ + F: drivers/net/pcs/ F: drivers/net/phy/ F: drivers/of/of_mdio.c F: drivers/of/of_net.c F: include/dt-bindings/net/qca-ar803x.h F: include/linux/*mdio*.h + F: include/linux/mdio/*.h F: include/linux/of_net.h F: include/linux/phy.h F: include/linux/phy_fixed.h @@@ -6903,14 -6896,6 +6915,14 @@@ L: linuxppc-dev@lists.ozlabs.or S: Maintained F: drivers/dma/fsldma.*
+FREESCALE DSPI DRIVER +M: Vladimir Oltean olteanv@gmail.com +L: linux-spi@vger.kernel.org +S: Maintained +F: Documentation/devicetree/bindings/spi/spi-fsl-dspi.txt +F: drivers/spi/spi-fsl-dspi.c +F: include/linux/spi/spi-fsl-dspi.h + FREESCALE ENETC ETHERNET DRIVERS M: Claudiu Manoil claudiu.manoil@nxp.com L: netdev@vger.kernel.org @@@ -7199,7 -7184,7 +7211,7 @@@ FUSE: FILESYSTEM IN USERSPAC M: Miklos Szeredi miklos@szeredi.hu L: linux-fsdevel@vger.kernel.org S: Maintained -W: http://fuse.sourceforge.net/ +W: https://github.com/libfuse/ T: git git://git.kernel.org/pub/scm/linux/kernel/git/mszeredi/fuse.git F: Documentation/filesystems/fuse.rst F: fs/fuse/ @@@ -8858,7 -8843,7 +8870,7 @@@ INTEL IPU3 CSI-2 CIO2 DRIVE M: Yong Zhi yong.zhi@intel.com M: Sakari Ailus sakari.ailus@linux.intel.com M: Bingbu Cao bingbu.cao@intel.com -R: Tian Shu Qiu tian.shu.qiu@intel.com +R: Tianshu Qiu tian.shu.qiu@intel.com L: linux-media@vger.kernel.org S: Maintained F: Documentation/userspace-api/media/v4l/pixfmt-srggb10-ipu3.rst @@@ -8867,7 -8852,7 +8879,7 @@@ F: drivers/media/pci/intel/ipu3 INTEL IPU3 CSI-2 IMGU DRIVER M: Sakari Ailus sakari.ailus@linux.intel.com R: Bingbu Cao bingbu.cao@intel.com -R: Tian Shu Qiu tian.shu.qiu@intel.com +R: Tianshu Qiu tian.shu.qiu@intel.com L: linux-media@vger.kernel.org S: Maintained F: Documentation/admin-guide/media/ipu3.rst @@@ -10308,6 -10293,13 +10320,13 @@@ S: Maintaine W: http://linux-test-project.github.io/ T: git git://github.com/linux-test-project/ltp.git
+ LYNX PCS MODULE + M: Ioana Ciornei ioana.ciornei@nxp.com + L: netdev@vger.kernel.org + S: Supported + F: drivers/net/phy/pcs-lynx.c + F: include/linux/pcs-lynx.h + M68K ARCHITECTURE M: Geert Uytterhoeven geert@linux-m68k.org L: linux-m68k@lists.linux-m68k.org @@@ -10515,7 -10507,7 +10534,7 @@@ M: Tobias Waldekranz <tobias@waldekranz L: netdev@vger.kernel.org S: Maintained F: Documentation/devicetree/bindings/net/marvell,mvusb.yaml - F: drivers/net/phy/mdio-mvusb.c + F: drivers/net/mdio/mdio-mvusb.c
MARVELL XENON MMC/SD/SDIO HOST CONTROLLER DRIVER M: Hu Ziji huziji@marvell.com @@@ -12768,7 -12760,7 +12787,7 @@@ T: git git://linuxtv.org/media_tree.gi F: drivers/media/i2c/ov2685.c
OMNIVISION OV2740 SENSOR DRIVER -M: Tianshu Qiu tian.shu.qiua@intel.com +M: Tianshu Qiu tian.shu.qiu@intel.com R: Shawn Tu shawnx.tu@intel.com R: Bingbu Cao bingbu.cao@intel.com L: linux-media@vger.kernel.org @@@ -12784,12 -12776,10 +12803,12 @@@ T: git git://linuxtv.org/media_tree.gi F: drivers/media/i2c/ov5640.c
OMNIVISION OV5647 SENSOR DRIVER -M: Luis Oliveira lolivei@synopsys.com +M: Dave Stevenson dave.stevenson@raspberrypi.com +M: Jacopo Mondi jacopo@jmondi.org L: linux-media@vger.kernel.org S: Maintained T: git git://linuxtv.org/media_tree.git +F: Documentation/devicetree/bindings/media/i2c/ov5647.yaml F: drivers/media/i2c/ov5647.c
OMNIVISION OV5670 SENSOR DRIVER @@@ -13953,7 -13943,6 +13972,7 @@@ PRINT M: Petr Mladek pmladek@suse.com M: Sergey Senozhatsky sergey.senozhatsky@gmail.com R: Steven Rostedt rostedt@goodmis.org +R: John Ogness john.ogness@linutronix.de S: Maintained F: include/linux/printk.h F: kernel/printk/ @@@ -14592,9 -14581,9 +14611,9 @@@ M: Niklas Söderlund <niklas.soderlund+ L: linux-media@vger.kernel.org S: Maintained F: Documentation/devicetree/bindings/media/i2c/imi,rdacm2x-gmsl.yaml -F: drivers/media/i2c/rdacm20.c F: drivers/media/i2c/max9271.c F: drivers/media/i2c/max9271.h +F: drivers/media/i2c/rdacm20.c
RDC R-321X SoC M: Florian Fainelli florian@openwrt.org @@@ -14888,7 -14877,6 +14907,7 @@@ F: include/linux/hid-roccat
ROCKCHIP ISP V1 DRIVER M: Helen Koike helen.koike@collabora.com +M: Dafna Hirschfeld dafna.hirschfeld@collabora.com L: linux-media@vger.kernel.org S: Maintained F: drivers/staging/media/rkisp1/ @@@ -15332,8 -15320,6 +15351,8 @@@ F: Documentation/devicetree/bindings/cl F: Documentation/devicetree/bindings/clock/samsung,s5p* F: drivers/clk/samsung/ F: include/dt-bindings/clock/exynos*.h +F: include/linux/clk/samsung.h +F: include/linux/platform_data/clk-s3c2410.h
SAMSUNG SPI DRIVERS M: Kukjin Kim kgene@kernel.org @@@ -15345,7 -15331,6 +15364,7 @@@ S: Maintaine F: Documentation/devicetree/bindings/spi/spi-samsung.txt F: drivers/spi/spi-s3c* F: include/linux/platform_data/spi-s3c64xx.h +F: include/linux/spi/s3c24xx-fiq.h
SAMSUNG SXGBE DRIVERS M: Byungho An bh74.an@samsung.com @@@ -15671,6 -15656,7 +15690,7 @@@ L: netdev@vger.kernel.or S: Maintained F: drivers/net/phy/phylink.c F: drivers/net/phy/sfp* + F: include/linux/mdio/mdio-i2c.h F: include/linux/phylink.h F: include/linux/sfp.h K: phylink.h|struct\s+phylink|.phylink|>phylink_|phylink_(autoneg|clear|connect|create|destroy|disconnect|ethtool|helper|mac|mii|of|set|start|stop|test|validate) @@@ -15859,17 -15845,19 +15879,17 @@@ F: drivers/video/fbdev/simplefb. F: include/linux/platform_data/simplefb.h
SIMTEC EB110ATX (Chalice CATS) -M: Vincent Sanders vince@simtec.co.uk M: Simtec Linux Team linux@simtec.co.uk S: Supported W: http://www.simtec.co.uk/products/EB110ATX/
SIMTEC EB2410ITX (BAST) -M: Vincent Sanders vince@simtec.co.uk M: Simtec Linux Team linux@simtec.co.uk S: Supported W: http://www.simtec.co.uk/products/EB2410ITX/ -F: arch/arm/mach-s3c24xx/bast-ide.c -F: arch/arm/mach-s3c24xx/bast-irq.c -F: arch/arm/mach-s3c24xx/mach-bast.c +F: arch/arm/mach-s3c/bast-ide.c +F: arch/arm/mach-s3c/bast-irq.c +F: arch/arm/mach-s3c/mach-bast.c
SIOX M: Thorsten Scherer t.scherer@eckelmann.de @@@ -16075,6 -16063,7 +16095,6 @@@ F: include/uapi/rdma/rdma_user_rxe. SOFTLOGIC 6x10 MPEG CODEC M: Bluecherry Maintainers maintainers@bluecherrydvr.com M: Anton Sviridenko anton@corp.bluecherry.net -M: Andrey Utkin andrey.utkin@corp.bluecherry.net M: Andrey Utkin andrey_utkin@fastmail.com M: Ismael Luceno ismael@iodev.co.uk L: linux-media@vger.kernel.org @@@ -16752,8 -16741,8 +16772,8 @@@ SYNOPSYS DESIGNWARE ETHERNET XPCS DRIVE M: Jose Abreu Jose.Abreu@synopsys.com L: netdev@vger.kernel.org S: Supported - F: drivers/net/phy/mdio-xpcs.c - F: include/linux/mdio-xpcs.h + F: drivers/net/pcs/pcs-xpcs.c + F: include/linux/pcs/pcs-xpcs.h
SYNOPSYS DESIGNWARE I2C DRIVER M: Jarkko Nikula jarkko.nikula@linux.intel.com @@@ -17147,8 -17136,8 +17167,8 @@@ S: Maintaine F: Documentation/devicetree/bindings/arm/keystone/ti,k3-sci-common.yaml F: Documentation/devicetree/bindings/arm/keystone/ti,sci.txt F: Documentation/devicetree/bindings/clock/ti,sci-clk.txt -F: Documentation/devicetree/bindings/interrupt-controller/ti,sci-inta.txt -F: Documentation/devicetree/bindings/interrupt-controller/ti,sci-intr.txt +F: Documentation/devicetree/bindings/interrupt-controller/ti,sci-inta.yaml +F: Documentation/devicetree/bindings/interrupt-controller/ti,sci-intr.yaml F: Documentation/devicetree/bindings/reset/ti,sci-reset.txt F: Documentation/devicetree/bindings/soc/ti/sci-pm-domain.txt F: drivers/clk/keystone/sci-clk.c @@@ -18328,8 -18317,10 +18348,8 @@@ S: Maintaine F: drivers/media/platform/video-mux.c
VIDEOBUF2 FRAMEWORK -M: Pawel Osciak pawel@osciak.com +M: Tomasz Figa tfiga@chromium.org M: Marek Szyprowski m.szyprowski@samsung.com -M: Kyungmin Park kyungmin.park@samsung.com -R: Tomasz Figa tfiga@chromium.org L: linux-media@vger.kernel.org S: Maintained F: drivers/media/common/videobuf2/* @@@ -18785,7 -18776,7 +18805,7 @@@ F: Documentation/devicetree/bindings/mf F: Documentation/devicetree/bindings/regulator/wlf,arizona.yaml F: Documentation/devicetree/bindings/sound/wlf,arizona.yaml F: Documentation/hwmon/wm83??.rst -F: arch/arm/mach-s3c64xx/mach-crag6410* +F: arch/arm/mach-s3c/mach-crag6410* F: drivers/clk/clk-wm83*.c F: drivers/extcon/extcon-arizona.c F: drivers/gpio/gpio-*wm*.c @@@ -18903,15 -18894,6 +18923,15 @@@ S: Maintaine T: git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git x86/core F: arch/x86/platform
+X86 PLATFORM UV HPE SUPERDOME FLEX +M: Steve Wahl steve.wahl@hpe.com +R: Dimitri Sivanich dimitri.sivanich@hpe.com +R: Russ Anderson russ.anderson@hpe.com +S: Supported +F: arch/x86/include/asm/uv/ +F: arch/x86/kernel/apic/x2apic_uv_x.c +F: arch/x86/platform/uv/ + X86 VDSO M: Andy Lutomirski luto@kernel.org L: linux-kernel@vger.kernel.org diff --combined drivers/net/dsa/mv88e6xxx/chip.c index f0dbc05e30a4,895d7b6dba2d..15b97a4f8d93 --- a/drivers/net/dsa/mv88e6xxx/chip.c +++ b/drivers/net/dsa/mv88e6xxx/chip.c @@@ -875,7 -875,7 +875,7 @@@ static uint64_t _mv88e6xxx_get_ethtool_ break; case STATS_TYPE_BANK1: reg = bank1_select; - /* fall through */ + fallthrough; case STATS_TYPE_BANK0: reg |= s->reg | histogram; mv88e6xxx_g1_stats_read(chip, reg, &low); @@@ -3329,12 -3329,6 +3329,6 @@@ static int mv88e6xxx_mdio_register(stru return 0; }
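The `/* fall through */` → `fallthrough;` hunk here, and the matching ones in several drivers below, are part of a tree-wide conversion: fallthrough is a pseudo-keyword (a macro from include/linux/compiler_attributes.h) that expands to __attribute__((__fallthrough__)) where the compiler supports it, so -Wimplicit-fallthrough can tell deliberate fall-through from a forgotten break. In miniature, using the stats switch just shown:

    switch (s->type) {
    case STATS_TYPE_BANK1:
            reg = bank1_select;
            fallthrough;            /* BANK1 also needs the BANK0 read */
    case STATS_TYPE_BANK0:
            reg |= s->reg | histogram;
            mv88e6xxx_g1_stats_read(chip, reg, &low);
            break;
    }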
- static const struct of_device_id mv88e6xxx_mdio_external_match[] = { - { .compatible = "marvell,mv88e6xxx-mdio-external", - .data = (void *)true }, - { }, - }; - static void mv88e6xxx_mdios_unregister(struct mv88e6xxx_chip *chip)
{ @@@ -3354,7 -3348,6 +3348,6 @@@ static int mv88e6xxx_mdios_register(struct mv88e6xxx_chip *chip, struct device_node *np) { - const struct of_device_id *match; struct device_node *child; int err;
@@@ -3372,8 -3365,8 +3365,8 @@@ * bus. */ for_each_available_child_of_node(np, child) { - match = of_match_node(mv88e6xxx_mdio_external_match, child); - if (match) { + if (of_device_is_compatible( + child, "marvell,mv88e6xxx-mdio-external")) { err = mv88e6xxx_mdio_register(chip, child, true); if (err) { mv88e6xxx_mdios_unregister(chip); diff --combined drivers/net/dsa/ocelot/felix.c index 04bfa6e465ff,ccc0427faf02..a1e1d3824110 --- a/drivers/net/dsa/ocelot/felix.c +++ b/drivers/net/dsa/ocelot/felix.c @@@ -19,6 -19,7 +19,7 @@@ #include <linux/of_net.h> #include <linux/pci.h> #include <linux/of.h> + #include <linux/pcs-lynx.h> #include <net/pkt_sched.h> #include <net/dsa.h> #include "felix.h" @@@ -196,27 -197,16 +197,16 @@@ static void felix_phylink_validate(stru felix->info->phylink_validate(ocelot, port, supported, state); }
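A side note on the mv88e6xxx hunk above: the one-entry of_device_id table plus of_match_node() existed only to test a single compatible string, so it collapses into a direct of_device_is_compatible() call. Roughly:

    /* before: table + lookup */
    static const struct of_device_id ext_match[] = {
            { .compatible = "marvell,mv88e6xxx-mdio-external" },
            { }
    };

    if (of_match_node(ext_match, child))
            err = mv88e6xxx_mdio_register(chip, child, true);

    /* after: ask the question directly */
    if (of_device_is_compatible(child, "marvell,mv88e6xxx-mdio-external"))
            err = mv88e6xxx_mdio_register(chip, child, true);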
- static int felix_phylink_mac_pcs_get_state(struct dsa_switch *ds, int port, - struct phylink_link_state *state) - { - struct ocelot *ocelot = ds->priv; - struct felix *felix = ocelot_to_felix(ocelot); - - if (felix->info->pcs_link_state) - felix->info->pcs_link_state(ocelot, port, state); - - return 0; - } - static void felix_phylink_mac_config(struct dsa_switch *ds, int port, unsigned int link_an_mode, const struct phylink_link_state *state) { struct ocelot *ocelot = ds->priv; struct felix *felix = ocelot_to_felix(ocelot); + struct dsa_port *dp = dsa_to_port(ds, port);
- if (felix->info->pcs_config) - felix->info->pcs_config(ocelot, port, link_an_mode, state); + if (felix->pcs[port]) + phylink_set_pcs(dp->pl, &felix->pcs[port]->pcs); }
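The felix hunks above and below replace the driver's private PCS callbacks (pcs_config, pcs_link_state, pcs_link_up) with a struct phylink_pcs handed to phylink via phylink_set_pcs(); phylink then drives the PCS itself, which is also why ds->pcs_poll is dropped further down. For orientation, a PCS provider such as the new pcs-lynx module exposes itself through phylink_pcs_ops, roughly shaped like this (assumed from the phylink API of this era, not quoted from the patch):

    static const struct phylink_pcs_ops lynx_pcs_phylink_ops = {
            .pcs_get_state  = lynx_pcs_get_state,   /* report link/AN state */
            .pcs_config     = lynx_pcs_config,      /* program interface mode/AN */
            .pcs_an_restart = lynx_pcs_an_restart,
            .pcs_link_up    = lynx_pcs_link_up,     /* fixed-mode speed/duplex */
    };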
static void felix_phylink_mac_link_down(struct dsa_switch *ds, int port, @@@ -306,10 -296,6 +296,6 @@@ static void felix_phylink_mac_link_up(s ocelot_fields_write(ocelot, port, QSYS_SWITCH_PORT_MODE_PORT_ENA, 1);
- if (felix->info->pcs_link_up) - felix->info->pcs_link_up(ocelot, port, link_an_mode, interface, - speed, duplex); - if (felix->info->port_sched_speed_set) felix->info->port_sched_speed_set(ocelot, port, speed); } @@@ -400,7 -386,6 +386,7 @@@ static int felix_parse_ports_node(struc if (err < 0) { dev_err(dev, "Unsupported PHY mode %s on port %d\n", phy_modes(phy_mode), port); + of_node_put(child); return err; }
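The one-line `+ of_node_put(child);` above fixes a refcount leak: for_each_available_child_of_node() takes a reference on each child node it yields and drops it when advancing to the next one, so any early exit from the loop body must drop the current child's reference by hand. The general shape:

    for_each_available_child_of_node(np, child) {
            err = use_child(child);         /* hypothetical callee */
            if (err) {
                    of_node_put(child);     /* loop won't iterate again to drop it */
                    return err;
            }
    }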
@@@ -627,11 -612,6 +613,6 @@@ static int felix_setup(struct dsa_switc
ds->mtu_enforcement_ingress = true; ds->configure_vlan_while_not_filtering = true; - /* It looks like the MAC/PCS interrupt register - PM0_IEVENT (0x8040) - * isn't instantiated for the Felix PF. - * In-band AN may take a few ms to complete, so we need to poll. - */ - ds->pcs_poll = true;
return 0; } @@@ -787,7 -767,6 +768,6 @@@ const struct dsa_switch_ops felix_switc .get_sset_count = felix_get_sset_count, .get_ts_info = felix_get_ts_info, .phylink_validate = felix_phylink_validate, - .phylink_mac_link_state = felix_phylink_mac_pcs_get_state, .phylink_mac_config = felix_phylink_mac_config, .phylink_mac_link_down = felix_phylink_mac_link_down, .phylink_mac_link_up = felix_phylink_mac_link_up, diff --combined drivers/net/ethernet/8390/axnet_cs.c index a00b36f91d9f,a001bc902359..2488bfdb9133 --- a/drivers/net/ethernet/8390/axnet_cs.c +++ b/drivers/net/ethernet/8390/axnet_cs.c @@@ -610,7 -610,7 +610,7 @@@ static int axnet_ioctl(struct net_devic switch (cmd) { case SIOCGMIIPHY: data->phy_id = info->phy_id; - /* Fall through */ + fallthrough; case SIOCGMIIREG: /* Read MII PHY register. */ data->val_out = mdio_read(mii_addr, data->phy_id, data->reg_num & 0x1f); return 0; @@@ -657,8 -657,10 +657,10 @@@ static void block_input(struct net_devi outb_p(E8390_RREAD+E8390_START, nic_base + AXNET_CMD);
insw(nic_base + AXNET_DATAPORT,buf,count>>1); - if (count & 0x01) - buf[count-1] = inb(nic_base + AXNET_DATAPORT), xfer_count++; + if (count & 0x01) { + buf[count-1] = inb(nic_base + AXNET_DATAPORT); + xfer_count++; + }
}
@@@ -1270,10 -1272,12 +1272,12 @@@ static void ei_tx_intr(struct net_devic ei_local->txing = 1; NS8390_trigger_send(dev, ei_local->tx2, ei_local->tx_start_page + 6); netif_trans_update(dev); - ei_local->tx2 = -1, + ei_local->tx2 = -1; ei_local->lasttx = 2; + } else { + ei_local->lasttx = 20; + ei_local->txing = 0; } - else ei_local->lasttx = 20, ei_local->txing = 0; } else if (ei_local->tx2 < 0) { @@@ -1289,9 -1293,10 +1293,10 @@@ netif_trans_update(dev); ei_local->tx1 = -1; ei_local->lasttx = 1; + } else { + ei_local->lasttx = 10; + ei_local->txing = 0; } - else - ei_local->lasttx = 10, ei_local->txing = 0; } // else // netdev_warn(dev, "unexpected TX-done interrupt, lasttx=%d\n", diff --combined drivers/net/ethernet/8390/pcnet_cs.c index 164c3ed550bf,c383f16889f4..9d3b1e0e425c --- a/drivers/net/ethernet/8390/pcnet_cs.c +++ b/drivers/net/ethernet/8390/pcnet_cs.c @@@ -1108,7 -1108,7 +1108,7 @@@ static int ei_ioctl(struct net_device * switch (cmd) { case SIOCGMIIPHY: data->phy_id = info->phy_id; - /* fall through */ + fallthrough; case SIOCGMIIREG: /* Read MII PHY register. */ data->val_out = mdio_read(mii_addr, data->phy_id, data->reg_num & 0x1f); return 0; @@@ -1178,8 -1178,10 +1178,10 @@@ static void dma_block_input(struct net_ outb_p(E8390_RREAD+E8390_START, nic_base + PCNET_CMD);
insw(nic_base + PCNET_DATAPORT,buf,count>>1); - if (count & 0x01) - buf[count-1] = inb(nic_base + PCNET_DATAPORT), xfer_count++; + if (count & 0x01) { + buf[count-1] = inb(nic_base + PCNET_DATAPORT); + xfer_count++; + }
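The axnet and pcnet hunks apply the same cleanup: the trailing odd byte was read with two side effects chained by the comma operator on one line. Splitting it into a braced block changes no behavior but gives each statement its own line:

    /* before */
    if (count & 0x01)
            buf[count - 1] = inb(nic_base + PCNET_DATAPORT), xfer_count++;

    /* after */
    if (count & 0x01) {
            buf[count - 1] = inb(nic_base + PCNET_DATAPORT);
            xfer_count++;
    }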
/* This was for the ALPHA version only, but enough people have been encountering problems that it is still here. */ diff --combined drivers/net/ethernet/chelsio/cxgb4/t4_hw.c index fa3367966f4b,e49370f9d59b..98d01a7497ec --- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c @@@ -4745,9 -4745,11 +4745,11 @@@ static void le_intr_handler(struct adap static struct intr_info t6_le_intr_info[] = { { T6_LIPMISS_F, "LE LIP miss", -1, 0 }, { T6_LIP0_F, "LE 0 LIP error", -1, 0 }, + { CMDTIDERR_F, "LE cmd tid error", -1, 1 }, { TCAMINTPERR_F, "LE parity error", -1, 1 }, { T6_UNKNOWNCMD_F, "LE unknown command", -1, 1 }, { SSRAMINTPERR_F, "LE request queue parity error", -1, 1 }, + { HASHTBLMEMCRCERR_F, "LE hash table mem crc error", -1, 0 }, { 0 } };
@@@ -7656,13 -7658,13 +7658,13 @@@ int t4_alloc_vi(struct adapter *adap, u switch (nmac) { case 5: memcpy(mac + 24, c.nmac3, sizeof(c.nmac3)); - /* Fall through */ + fallthrough; case 4: memcpy(mac + 18, c.nmac2, sizeof(c.nmac2)); - /* Fall through */ + fallthrough; case 3: memcpy(mac + 12, c.nmac1, sizeof(c.nmac1)); - /* Fall through */ + fallthrough; case 2: memcpy(mac + 6, c.nmac0, sizeof(c.nmac0)); } diff --combined drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c index cf5383bb8331,cb3083d2b4ab..ceaf76158e23 --- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c +++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c @@@ -40,9 -40,9 +40,9 @@@ static void *dpaa2_iova_to_virt(struct return phys_to_virt(phys_addr); }
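From here on, nearly every dpaa2-eth hunk is one mechanical rename: the driver's static helpers gain a dpaa2_eth_ prefix (free_rx_fd → dpaa2_eth_free_rx_fd, add_bufs → dpaa2_eth_add_bufs, and so on). Generic static names are fine for the compiler but anonymous in stack traces and perf output; the prefix makes the owning driver obvious at a glance:

    /* before: 'free_bufs+0x24/0x80' in a backtrace could be anyone's */
    static void free_bufs(struct dpaa2_eth_priv *priv, u64 *buf_array, int count);

    /* after: the symbol itself names the driver */
    static void dpaa2_eth_free_bufs(struct dpaa2_eth_priv *priv, u64 *buf_array,
                                    int count);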
- static void validate_rx_csum(struct dpaa2_eth_priv *priv, - u32 fd_status, - struct sk_buff *skb) + static void dpaa2_eth_validate_rx_csum(struct dpaa2_eth_priv *priv, + u32 fd_status, + struct sk_buff *skb) { skb_checksum_none_assert(skb);
@@@ -62,9 -62,9 +62,9 @@@ /* Free a received FD. * Not to be used for Tx conf FDs or on any other paths. */ - static void free_rx_fd(struct dpaa2_eth_priv *priv, - const struct dpaa2_fd *fd, - void *vaddr) + static void dpaa2_eth_free_rx_fd(struct dpaa2_eth_priv *priv, + const struct dpaa2_fd *fd, + void *vaddr) { struct device *dev = priv->net_dev->dev.parent; dma_addr_t addr = dpaa2_fd_get_addr(fd); @@@ -100,9 -100,9 +100,9 @@@ free_buf }
/* Build a linear skb based on a single-buffer frame descriptor */ - static struct sk_buff *build_linear_skb(struct dpaa2_eth_channel *ch, - const struct dpaa2_fd *fd, - void *fd_vaddr) + static struct sk_buff *dpaa2_eth_build_linear_skb(struct dpaa2_eth_channel *ch, + const struct dpaa2_fd *fd, + void *fd_vaddr) { struct sk_buff *skb = NULL; u16 fd_offset = dpaa2_fd_get_offset(fd); @@@ -121,9 -121,9 +121,9 @@@ }
/* Build a non-linear (fragmented) skb based on an S/G table */ - static struct sk_buff *build_frag_skb(struct dpaa2_eth_priv *priv, - struct dpaa2_eth_channel *ch, - struct dpaa2_sg_entry *sgt) + static struct sk_buff *dpaa2_eth_build_frag_skb(struct dpaa2_eth_priv *priv, + struct dpaa2_eth_channel *ch, + struct dpaa2_sg_entry *sgt) { struct sk_buff *skb = NULL; struct device *dev = priv->net_dev->dev.parent; @@@ -204,7 -204,8 +204,8 @@@ /* Free buffers acquired from the buffer pool or which were meant to * be released in the pool */ - static void free_bufs(struct dpaa2_eth_priv *priv, u64 *buf_array, int count) + static void dpaa2_eth_free_bufs(struct dpaa2_eth_priv *priv, u64 *buf_array, + int count) { struct device *dev = priv->net_dev->dev.parent; void *vaddr; @@@ -218,9 -219,9 +219,9 @@@ } }
- static void xdp_release_buf(struct dpaa2_eth_priv *priv, - struct dpaa2_eth_channel *ch, - dma_addr_t addr) + static void dpaa2_eth_xdp_release_buf(struct dpaa2_eth_priv *priv, + struct dpaa2_eth_channel *ch, + dma_addr_t addr) { int retries = 0; int err; @@@ -238,7 -239,7 +239,7 @@@ }
if (err) { - free_bufs(priv, ch->xdp.drop_bufs, ch->xdp.drop_cnt); + dpaa2_eth_free_bufs(priv, ch->xdp.drop_bufs, ch->xdp.drop_cnt); ch->buf_count -= ch->xdp.drop_cnt; }
@@@ -274,9 -275,9 +275,9 @@@ static int dpaa2_eth_xdp_flush(struct d return total_enqueued; }
- static void xdp_tx_flush(struct dpaa2_eth_priv *priv, - struct dpaa2_eth_channel *ch, - struct dpaa2_eth_fq *fq) + static void dpaa2_eth_xdp_tx_flush(struct dpaa2_eth_priv *priv, + struct dpaa2_eth_channel *ch, + struct dpaa2_eth_fq *fq) { struct rtnl_link_stats64 *percpu_stats; struct dpaa2_fd *fds; @@@ -295,17 -296,17 +296,17 @@@ ch->stats.xdp_tx++; } for (i = enqueued; i < fq->xdp_tx_fds.num; i++) { - xdp_release_buf(priv, ch, dpaa2_fd_get_addr(&fds[i])); + dpaa2_eth_xdp_release_buf(priv, ch, dpaa2_fd_get_addr(&fds[i])); percpu_stats->tx_errors++; ch->stats.xdp_tx_err++; } fq->xdp_tx_fds.num = 0; }
- static void xdp_enqueue(struct dpaa2_eth_priv *priv, - struct dpaa2_eth_channel *ch, - struct dpaa2_fd *fd, - void *buf_start, u16 queue_id) + static void dpaa2_eth_xdp_enqueue(struct dpaa2_eth_priv *priv, + struct dpaa2_eth_channel *ch, + struct dpaa2_fd *fd, + void *buf_start, u16 queue_id) { struct dpaa2_faead *faead; struct dpaa2_fd *dest_fd; @@@ -333,13 -334,13 +334,13 @@@ if (fq->xdp_tx_fds.num < DEV_MAP_BULK_SIZE) return;
- xdp_tx_flush(priv, ch, fq); + dpaa2_eth_xdp_tx_flush(priv, ch, fq); }
- static u32 run_xdp(struct dpaa2_eth_priv *priv, - struct dpaa2_eth_channel *ch, - struct dpaa2_eth_fq *rx_fq, - struct dpaa2_fd *fd, void *vaddr) + static u32 dpaa2_eth_run_xdp(struct dpaa2_eth_priv *priv, + struct dpaa2_eth_channel *ch, + struct dpaa2_eth_fq *rx_fq, + struct dpaa2_fd *fd, void *vaddr) { dma_addr_t addr = dpaa2_fd_get_addr(fd); struct bpf_prog *xdp_prog; @@@ -372,16 -373,16 +373,16 @@@ case XDP_PASS: break; case XDP_TX: - xdp_enqueue(priv, ch, fd, vaddr, rx_fq->flowid); + dpaa2_eth_xdp_enqueue(priv, ch, fd, vaddr, rx_fq->flowid); break; default: bpf_warn_invalid_xdp_action(xdp_act); - /* fall through */ + fallthrough; case XDP_ABORTED: trace_xdp_exception(priv->net_dev, xdp_prog, xdp_act); - /* fall through */ + fallthrough; case XDP_DROP: - xdp_release_buf(priv, ch, addr); + dpaa2_eth_xdp_release_buf(priv, ch, addr); ch->stats.xdp_drop++; break; case XDP_REDIRECT: @@@ -441,7 -442,7 +442,7 @@@ static void dpaa2_eth_rx(struct dpaa2_e percpu_extras = this_cpu_ptr(priv->percpu_extras);
if (fd_format == dpaa2_fd_single) { - xdp_act = run_xdp(priv, ch, fq, (struct dpaa2_fd *)fd, vaddr); + xdp_act = dpaa2_eth_run_xdp(priv, ch, fq, (struct dpaa2_fd *)fd, vaddr); if (xdp_act != XDP_PASS) { percpu_stats->rx_packets++; percpu_stats->rx_bytes += dpaa2_fd_get_len(fd); @@@ -450,13 -451,13 +451,13 @@@
dma_unmap_page(dev, addr, priv->rx_buf_size, DMA_BIDIRECTIONAL); - skb = build_linear_skb(ch, fd, vaddr); + skb = dpaa2_eth_build_linear_skb(ch, fd, vaddr); } else if (fd_format == dpaa2_fd_sg) { WARN_ON(priv->xdp_prog);
dma_unmap_page(dev, addr, priv->rx_buf_size, DMA_BIDIRECTIONAL); - skb = build_frag_skb(priv, ch, buf_data); + skb = dpaa2_eth_build_frag_skb(priv, ch, buf_data); free_pages((unsigned long)vaddr, 0); percpu_extras->rx_sg_frames++; percpu_extras->rx_sg_bytes += dpaa2_fd_get_len(fd); @@@ -485,7 -486,7 +486,7 @@@ /* Check if we need to validate the L4 csum */ if (likely(dpaa2_fd_get_frc(fd) & DPAA2_FD_FRC_FASV)) { status = le32_to_cpu(fas->status); - validate_rx_csum(priv, status, skb); + dpaa2_eth_validate_rx_csum(priv, status, skb); }
skb->protocol = eth_type_trans(skb, priv->net_dev); @@@ -499,7 -500,7 +500,7 @@@ return;
err_build_skb: - free_rx_fd(priv, fd, vaddr); + dpaa2_eth_free_rx_fd(priv, fd, vaddr); err_frame_format: percpu_stats->rx_dropped++; } @@@ -510,8 -511,8 +511,8 @@@ * * Observance of NAPI budget is not our concern, leaving that to the caller. */ - static int consume_frames(struct dpaa2_eth_channel *ch, - struct dpaa2_eth_fq **src) + static int dpaa2_eth_consume_frames(struct dpaa2_eth_channel *ch, + struct dpaa2_eth_fq **src) { struct dpaa2_eth_priv *priv = ch->priv; struct dpaa2_eth_fq *fq = NULL; @@@ -560,7 -561,7 +561,7 @@@ }
/* Configure the egress frame annotation for timestamp update */ - static void enable_tx_tstamp(struct dpaa2_fd *fd, void *buf_start) + static void dpaa2_eth_enable_tx_tstamp(struct dpaa2_fd *fd, void *buf_start) { struct dpaa2_faead *faead; u32 ctrl, frc; @@@ -582,9 -583,9 +583,9 @@@ }
/* Create a frame descriptor based on a fragmented skb */ - static int build_sg_fd(struct dpaa2_eth_priv *priv, - struct sk_buff *skb, - struct dpaa2_fd *fd) + static int dpaa2_eth_build_sg_fd(struct dpaa2_eth_priv *priv, + struct sk_buff *skb, + struct dpaa2_fd *fd) { struct device *dev = priv->net_dev->dev.parent; void *sgt_buf = NULL; @@@ -673,7 -674,7 +674,7 @@@ dpaa2_fd_set_ctrl(fd, FD_CTRL_PTA);
if (priv->tx_tstamp && skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) - enable_tx_tstamp(fd, sgt_buf); + dpaa2_eth_enable_tx_tstamp(fd, sgt_buf);
return 0;
@@@ -692,9 -693,9 +693,9 @@@ dma_map_sg_failed * enough for the HW requirements, thus instead of realloc-ing the skb we * create a SG frame descriptor with only one entry. */ - static int build_sg_fd_single_buf(struct dpaa2_eth_priv *priv, - struct sk_buff *skb, - struct dpaa2_fd *fd) + static int dpaa2_eth_build_sg_fd_single_buf(struct dpaa2_eth_priv *priv, + struct sk_buff *skb, + struct dpaa2_fd *fd) { struct device *dev = priv->net_dev->dev.parent; struct dpaa2_eth_sgt_cache *sgt_cache; @@@ -751,7 -752,7 +752,7 @@@ dpaa2_fd_set_ctrl(fd, FD_CTRL_PTA);
if (priv->tx_tstamp && skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) - enable_tx_tstamp(fd, sgt_buf); + dpaa2_eth_enable_tx_tstamp(fd, sgt_buf);
return 0;
@@@ -767,9 -768,9 +768,9 @@@ data_map_failed }
/* Create a frame descriptor based on a linear skb */ - static int build_single_fd(struct dpaa2_eth_priv *priv, - struct sk_buff *skb, - struct dpaa2_fd *fd) + static int dpaa2_eth_build_single_fd(struct dpaa2_eth_priv *priv, + struct sk_buff *skb, + struct dpaa2_fd *fd) { struct device *dev = priv->net_dev->dev.parent; u8 *buffer_start, *aligned_start; @@@ -807,7 -808,7 +808,7 @@@ dpaa2_fd_set_ctrl(fd, FD_CTRL_PTA);
if (priv->tx_tstamp && skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) - enable_tx_tstamp(fd, buffer_start); + dpaa2_eth_enable_tx_tstamp(fd, buffer_start);
return 0; } @@@ -819,9 -820,9 +820,9 @@@ * This can be called either from dpaa2_eth_tx_conf() or on the error path of * dpaa2_eth_tx(). */ - static void free_tx_fd(const struct dpaa2_eth_priv *priv, - struct dpaa2_eth_fq *fq, - const struct dpaa2_fd *fd, bool in_napi) + static void dpaa2_eth_free_tx_fd(const struct dpaa2_eth_priv *priv, + struct dpaa2_eth_fq *fq, + const struct dpaa2_fd *fd, bool in_napi) { struct device *dev = priv->net_dev->dev.parent; dma_addr_t fd_addr, sg_addr; @@@ -954,17 -955,17 +955,17 @@@ static netdev_tx_t dpaa2_eth_tx(struct memset(&fd, 0, sizeof(fd));
if (skb_is_nonlinear(skb)) { - err = build_sg_fd(priv, skb, &fd); + err = dpaa2_eth_build_sg_fd(priv, skb, &fd); percpu_extras->tx_sg_frames++; percpu_extras->tx_sg_bytes += skb->len; } else if (skb_headroom(skb) < needed_headroom) { - err = build_sg_fd_single_buf(priv, skb, &fd); + err = dpaa2_eth_build_sg_fd_single_buf(priv, skb, &fd); percpu_extras->tx_sg_frames++; percpu_extras->tx_sg_bytes += skb->len; percpu_extras->tx_converted_sg_frames++; percpu_extras->tx_converted_sg_bytes += skb->len; } else { - err = build_single_fd(priv, skb, &fd); + err = dpaa2_eth_build_single_fd(priv, skb, &fd); }
if (unlikely(err)) { @@@ -1010,7 -1011,7 +1011,7 @@@ if (unlikely(err < 0)) { percpu_stats->tx_errors++; /* Clean up everything, including freeing the skb */ - free_tx_fd(priv, fq, &fd, false); + dpaa2_eth_free_tx_fd(priv, fq, &fd, false); netdev_tx_completed_queue(nq, 1, fd_len); } else { percpu_stats->tx_packets++; @@@ -1045,7 -1046,7 +1046,7 @@@ static void dpaa2_eth_tx_conf(struct dp
/* Check frame errors in the FD field */ fd_errors = dpaa2_fd_get_ctrl(fd) & DPAA2_FD_TX_ERR_MASK; - free_tx_fd(priv, fq, fd, true); + dpaa2_eth_free_tx_fd(priv, fq, fd, true);
if (likely(!fd_errors)) return; @@@ -1059,7 -1060,7 +1060,7 @@@ percpu_stats->tx_errors++; }
- static int set_rx_csum(struct dpaa2_eth_priv *priv, bool enable) + static int dpaa2_eth_set_rx_csum(struct dpaa2_eth_priv *priv, bool enable) { int err;
@@@ -1082,7 -1083,7 +1083,7 @@@ return 0; }
- static int set_tx_csum(struct dpaa2_eth_priv *priv, bool enable) + static int dpaa2_eth_set_tx_csum(struct dpaa2_eth_priv *priv, bool enable) { int err;
@@@ -1106,8 -1107,8 +1107,8 @@@ /* Perform a single release command to add buffers * to the specified buffer pool */ - static int add_bufs(struct dpaa2_eth_priv *priv, - struct dpaa2_eth_channel *ch, u16 bpid) + static int dpaa2_eth_add_bufs(struct dpaa2_eth_priv *priv, + struct dpaa2_eth_channel *ch, u16 bpid) { struct device *dev = priv->net_dev->dev.parent; u64 buf_array[DPAA2_ETH_BUFS_PER_CMD]; @@@ -1155,7 -1156,7 +1156,7 @@@ release_bufs * not much else we can do about it */ if (err) { - free_bufs(priv, buf_array, i); + dpaa2_eth_free_bufs(priv, buf_array, i); return 0; }
@@@ -1173,7 -1174,7 +1174,7 @@@ err_alloc return 0; }
- static int seed_pool(struct dpaa2_eth_priv *priv, u16 bpid) + static int dpaa2_eth_seed_pool(struct dpaa2_eth_priv *priv, u16 bpid) { int i, j; int new_count; @@@ -1181,7 -1182,7 +1182,7 @@@ for (j = 0; j < priv->num_channels; j++) { for (i = 0; i < DPAA2_ETH_NUM_BUFS; i += DPAA2_ETH_BUFS_PER_CMD) { - new_count = add_bufs(priv, priv->channel[j], bpid); + new_count = dpaa2_eth_add_bufs(priv, priv->channel[j], bpid); priv->channel[j]->buf_count += new_count;
if (new_count < DPAA2_ETH_BUFS_PER_CMD) { @@@ -1197,7 -1198,7 +1198,7 @@@ * Drain the specified number of buffers from the DPNI's private buffer pool. * @count must not exceed DPAA2_ETH_BUFS_PER_CMD */ - static void drain_bufs(struct dpaa2_eth_priv *priv, int count) + static void dpaa2_eth_drain_bufs(struct dpaa2_eth_priv *priv, int count) { u64 buf_array[DPAA2_ETH_BUFS_PER_CMD]; int retries = 0; @@@ -1213,17 -1214,17 +1214,17 @@@ netdev_err(priv->net_dev, "dpaa2_io_service_acquire() failed\n"); return; } - free_bufs(priv, buf_array, ret); + dpaa2_eth_free_bufs(priv, buf_array, ret); retries = 0; } while (ret); }
- static void drain_pool(struct dpaa2_eth_priv *priv) + static void dpaa2_eth_drain_pool(struct dpaa2_eth_priv *priv) { int i;
- drain_bufs(priv, DPAA2_ETH_BUFS_PER_CMD); - drain_bufs(priv, 1); + dpaa2_eth_drain_bufs(priv, DPAA2_ETH_BUFS_PER_CMD); + dpaa2_eth_drain_bufs(priv, 1);
for (i = 0; i < priv->num_channels; i++) priv->channel[i]->buf_count = 0; @@@ -1232,9 -1233,9 +1233,9 @@@ /* Function is called from softirq context only, so we don't need to guard * the access to percpu count */ - static int refill_pool(struct dpaa2_eth_priv *priv, - struct dpaa2_eth_channel *ch, - u16 bpid) + static int dpaa2_eth_refill_pool(struct dpaa2_eth_priv *priv, + struct dpaa2_eth_channel *ch, + u16 bpid) { int new_count;
@@@ -1242,7 -1243,7 +1243,7 @@@ return 0;
do { - new_count = add_bufs(priv, ch, bpid); + new_count = dpaa2_eth_add_bufs(priv, ch, bpid); if (unlikely(!new_count)) { /* Out of memory; abort for now, we'll try later on */ break; @@@ -1272,7 -1273,7 +1273,7 @@@ static void dpaa2_eth_sgt_cache_drain(s } }
- static int pull_channel(struct dpaa2_eth_channel *ch) + static int dpaa2_eth_pull_channel(struct dpaa2_eth_channel *ch) { int err; int dequeues = -1; @@@ -1319,14 -1320,14 +1320,14 @@@ static int dpaa2_eth_poll(struct napi_s ch->rx_list = &rx_list;
do { - err = pull_channel(ch); + err = dpaa2_eth_pull_channel(ch); if (unlikely(err)) break;
/* Refill pool if appropriate */ - refill_pool(priv, ch, priv->bpid); + dpaa2_eth_refill_pool(priv, ch, priv->bpid);
- store_cleaned = consume_frames(ch, &fq); + store_cleaned = dpaa2_eth_consume_frames(ch, &fq); if (store_cleaned <= 0) break; if (fq->type == DPAA2_RX_FQ) { @@@ -1375,12 -1376,12 +1376,12 @@@ out if (ch->xdp.res & XDP_REDIRECT) xdp_do_flush_map(); else if (rx_cleaned && ch->xdp.res & XDP_TX) - xdp_tx_flush(priv, ch, &priv->fq[flowid]); + dpaa2_eth_xdp_tx_flush(priv, ch, &priv->fq[flowid]);
return work_done; }
- static void enable_ch_napi(struct dpaa2_eth_priv *priv) + static void dpaa2_eth_enable_ch_napi(struct dpaa2_eth_priv *priv) { struct dpaa2_eth_channel *ch; int i; @@@ -1391,7 -1392,7 +1392,7 @@@ } }
- static void disable_ch_napi(struct dpaa2_eth_priv *priv) + static void dpaa2_eth_disable_ch_napi(struct dpaa2_eth_priv *priv) { struct dpaa2_eth_channel *ch; int i; @@@ -1465,7 -1466,7 +1466,7 @@@ set_cgtd priv->rx_cgtd_enabled = td.enable; }
- static int link_state_update(struct dpaa2_eth_priv *priv) + static int dpaa2_eth_link_state_update(struct dpaa2_eth_priv *priv) { struct dpni_link_state state = {0}; bool tx_pause; @@@ -1517,7 -1518,7 +1518,7 @@@ static int dpaa2_eth_open(struct net_de struct dpaa2_eth_priv *priv = netdev_priv(net_dev); int err;
- err = seed_pool(priv, priv->bpid); + err = dpaa2_eth_seed_pool(priv, priv->bpid); if (err) { /* Not much to do; the buffer pool, though not filled up, * may still contain some buffers which would enable us @@@ -1541,7 -1542,7 +1542,7 @@@ */ netif_carrier_off(net_dev); } - enable_ch_napi(priv); + dpaa2_eth_enable_ch_napi(priv);
err = dpni_enable(priv->mc_io, 0, priv->mc_token); if (err < 0) { @@@ -1553,7 -1554,7 +1554,7 @@@ /* If the DPMAC object has already processed the link up * interrupt, we have to learn the link state ourselves. */ - err = link_state_update(priv); + err = dpaa2_eth_link_state_update(priv); if (err < 0) { netdev_err(net_dev, "Can't update link state\n"); goto link_state_err; @@@ -1566,13 -1567,13 +1567,13 @@@
link_state_err: enable_err: - disable_ch_napi(priv); - drain_pool(priv); + dpaa2_eth_disable_ch_napi(priv); + dpaa2_eth_drain_pool(priv); return err; }
/* Total number of in-flight frames on ingress queues */ - static u32 ingress_fq_count(struct dpaa2_eth_priv *priv) + static u32 dpaa2_eth_ingress_fq_count(struct dpaa2_eth_priv *priv) { struct dpaa2_eth_fq *fq; u32 fcnt = 0, bcnt = 0, total = 0; @@@ -1591,13 -1592,13 +1592,13 @@@ return total; }
- static void wait_for_ingress_fq_empty(struct dpaa2_eth_priv *priv) + static void dpaa2_eth_wait_for_ingress_fq_empty(struct dpaa2_eth_priv *priv) { int retries = 10; u32 pending;
do { - pending = ingress_fq_count(priv); + pending = dpaa2_eth_ingress_fq_count(priv); if (pending) msleep(100); } while (pending && --retries); @@@ -1605,7 -1606,7 +1606,7 @@@
#define DPNI_TX_PENDING_VER_MAJOR 7 #define DPNI_TX_PENDING_VER_MINOR 13 - static void wait_for_egress_fq_empty(struct dpaa2_eth_priv *priv) + static void dpaa2_eth_wait_for_egress_fq_empty(struct dpaa2_eth_priv *priv) { union dpni_statistics stats; int retries = 10; @@@ -1651,7 -1652,7 +1652,7 @@@ static int dpaa2_eth_stop(struct net_de * on WRIOP. After it finishes, wait until all remaining frames on Rx * and Tx conf queues are consumed on NAPI poll. */ - wait_for_egress_fq_empty(priv); + dpaa2_eth_wait_for_egress_fq_empty(priv);
do { dpni_disable(priv->mc_io, 0, priv->mc_token); @@@ -1667,11 -1668,11 +1668,11 @@@ */ }
- wait_for_ingress_fq_empty(priv); - disable_ch_napi(priv); + dpaa2_eth_wait_for_ingress_fq_empty(priv); + dpaa2_eth_disable_ch_napi(priv);
/* Empty the buffer pool */ - drain_pool(priv); + dpaa2_eth_drain_pool(priv);
/* Empty the Scatter-Gather Buffer cache */ dpaa2_eth_sgt_cache_drain(priv); @@@ -1725,8 -1726,8 +1726,8 @@@ static void dpaa2_eth_get_stats(struct /* Copy mac unicast addresses from @net_dev to @priv. * Its sole purpose is to make dpaa2_eth_set_rx_mode() more readable. */ - static void add_uc_hw_addr(const struct net_device *net_dev, - struct dpaa2_eth_priv *priv) + static void dpaa2_eth_add_uc_hw_addr(const struct net_device *net_dev, + struct dpaa2_eth_priv *priv) { struct netdev_hw_addr *ha; int err; @@@ -1744,8 -1745,8 +1745,8 @@@ /* Copy mac multicast addresses from @net_dev to @priv * Its sole purpose is to make dpaa2_eth_set_rx_mode() more readable. */ - static void add_mc_hw_addr(const struct net_device *net_dev, - struct dpaa2_eth_priv *priv) + static void dpaa2_eth_add_mc_hw_addr(const struct net_device *net_dev, + struct dpaa2_eth_priv *priv) { struct netdev_hw_addr *ha; int err; @@@ -1810,7 -1811,7 +1811,7 @@@ static void dpaa2_eth_set_rx_mode(struc err = dpni_clear_mac_filters(mc_io, 0, mc_token, 1, 0); if (err) netdev_warn(net_dev, "Can't clear uc filters\n"); - add_uc_hw_addr(net_dev, priv); + dpaa2_eth_add_uc_hw_addr(net_dev, priv);
/* Finally, clear uc promisc and set mc promisc as requested. */ err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 0); @@@ -1833,8 -1834,8 +1834,8 @@@ err = dpni_clear_mac_filters(mc_io, 0, mc_token, 1, 1); if (err) netdev_warn(net_dev, "Can't clear mac filters\n"); - add_mc_hw_addr(net_dev, priv); - add_uc_hw_addr(net_dev, priv); + dpaa2_eth_add_mc_hw_addr(net_dev, priv); + dpaa2_eth_add_uc_hw_addr(net_dev, priv);
/* Now we can clear both ucast and mcast promisc, without risking * to drop legitimate frames anymore. @@@ -1868,14 -1869,14 +1869,14 @@@ static int dpaa2_eth_set_features(struc
if (changed & NETIF_F_RXCSUM) { enable = !!(features & NETIF_F_RXCSUM); - err = set_rx_csum(priv, enable); + err = dpaa2_eth_set_rx_csum(priv, enable); if (err) return err; }
if (changed & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)) { enable = !!(features & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)); - err = set_tx_csum(priv, enable); + err = dpaa2_eth_set_tx_csum(priv, enable); if (err) return err; } @@@ -1944,7 -1945,7 +1945,7 @@@ static bool xdp_mtu_valid(struct dpaa2_ return true; }
- static int set_rx_mfl(struct dpaa2_eth_priv *priv, int mtu, bool has_xdp) + static int dpaa2_eth_set_rx_mfl(struct dpaa2_eth_priv *priv, int mtu, bool has_xdp) { int mfl, err;
@@@ -1978,7 -1979,7 +1979,7 @@@ static int dpaa2_eth_change_mtu(struct if (!xdp_mtu_valid(priv, new_mtu)) return -EINVAL;
- err = set_rx_mfl(priv, new_mtu, true); + err = dpaa2_eth_set_rx_mfl(priv, new_mtu, true); if (err) return err;
@@@ -1987,7 -1988,7 +1988,7 @@@ out return 0; }
- static int update_rx_buffer_headroom(struct dpaa2_eth_priv *priv, bool has_xdp) + static int dpaa2_eth_update_rx_buffer_headroom(struct dpaa2_eth_priv *priv, bool has_xdp) { struct dpni_buffer_layout buf_layout = {0}; int err; @@@ -2013,7 -2014,7 +2014,7 @@@ return 0; }
- static int setup_xdp(struct net_device *dev, struct bpf_prog *prog) + static int dpaa2_eth_setup_xdp(struct net_device *dev, struct bpf_prog *prog) { struct dpaa2_eth_priv *priv = netdev_priv(dev); struct dpaa2_eth_channel *ch; @@@ -2039,10 -2040,10 +2040,10 @@@ * so we are sure no old format buffers will be used from now on. */ if (need_update) { - err = set_rx_mfl(priv, dev->mtu, !!prog); + err = dpaa2_eth_set_rx_mfl(priv, dev->mtu, !!prog); if (err) goto out_err; - err = update_rx_buffer_headroom(priv, !!prog); + err = dpaa2_eth_update_rx_buffer_headroom(priv, !!prog); if (err) goto out_err; } @@@ -2079,7 -2080,7 +2080,7 @@@ static int dpaa2_eth_xdp(struct net_dev { switch (xdp->command) { case XDP_SETUP_PROG: - return setup_xdp(dev, xdp->prog); + return dpaa2_eth_setup_xdp(dev, xdp->prog); default: return -EINVAL; } @@@ -2316,7 -2317,7 +2317,7 @@@ static const struct net_device_ops dpaa .ndo_setup_tc = dpaa2_eth_setup_tc, };
- static void cdan_cb(struct dpaa2_io_notification_ctx *ctx) + static void dpaa2_eth_cdan_cb(struct dpaa2_io_notification_ctx *ctx) { struct dpaa2_eth_channel *ch;
@@@ -2329,7 -2330,7 +2330,7 @@@ }
/* Allocate and configure a DPCON object */ - static struct fsl_mc_device *setup_dpcon(struct dpaa2_eth_priv *priv) + static struct fsl_mc_device *dpaa2_eth_setup_dpcon(struct dpaa2_eth_priv *priv) { struct fsl_mc_device *dpcon; struct device *dev = priv->net_dev->dev.parent; @@@ -2373,16 -2374,15 +2374,15 @@@ free return ERR_PTR(err); }
- static void free_dpcon(struct dpaa2_eth_priv *priv, - struct fsl_mc_device *dpcon) + static void dpaa2_eth_free_dpcon(struct dpaa2_eth_priv *priv, + struct fsl_mc_device *dpcon) { dpcon_disable(priv->mc_io, 0, dpcon->mc_handle); dpcon_close(priv->mc_io, 0, dpcon->mc_handle); fsl_mc_object_free(dpcon); }
- static struct dpaa2_eth_channel * - alloc_channel(struct dpaa2_eth_priv *priv) + static struct dpaa2_eth_channel *dpaa2_eth_alloc_channel(struct dpaa2_eth_priv *priv) { struct dpaa2_eth_channel *channel; struct dpcon_attr attr; @@@ -2393,7 -2393,7 +2393,7 @@@ if (!channel) return NULL;
- channel->dpcon = setup_dpcon(priv); + channel->dpcon = dpaa2_eth_setup_dpcon(priv); if (IS_ERR(channel->dpcon)) { err = PTR_ERR(channel->dpcon); goto err_setup; @@@ -2413,23 -2413,23 +2413,23 @@@ return channel;
err_get_attr: - free_dpcon(priv, channel->dpcon); + dpaa2_eth_free_dpcon(priv, channel->dpcon); err_setup: kfree(channel); return ERR_PTR(err); }
- static void free_channel(struct dpaa2_eth_priv *priv, - struct dpaa2_eth_channel *channel) + static void dpaa2_eth_free_channel(struct dpaa2_eth_priv *priv, + struct dpaa2_eth_channel *channel) { - free_dpcon(priv, channel->dpcon); + dpaa2_eth_free_dpcon(priv, channel->dpcon); kfree(channel); }
/* DPIO setup: allocate and configure QBMan channels, setup core affinity * and register data availability notifications */ - static int setup_dpio(struct dpaa2_eth_priv *priv) + static int dpaa2_eth_setup_dpio(struct dpaa2_eth_priv *priv) { struct dpaa2_io_notification_ctx *nctx; struct dpaa2_eth_channel *channel; @@@ -2449,7 -2449,7 +2449,7 @@@ cpumask_clear(&priv->dpio_cpumask); for_each_online_cpu(i) { /* Try to allocate a channel */ - channel = alloc_channel(priv); + channel = dpaa2_eth_alloc_channel(priv); if (IS_ERR_OR_NULL(channel)) { err = PTR_ERR_OR_ZERO(channel); if (err != -EPROBE_DEFER) @@@ -2462,7 -2462,7 +2462,7 @@@
nctx = &channel->nctx; nctx->is_cdan = 1; - nctx->cb = cdan_cb; + nctx->cb = dpaa2_eth_cdan_cb; nctx->id = channel->ch_id; nctx->desired_cpu = i;
@@@ -2510,14 -2510,14 +2510,14 @@@ err_set_cdan: dpaa2_io_service_deregister(channel->dpio, nctx, dev); err_service_reg: - free_channel(priv, channel); + dpaa2_eth_free_channel(priv, channel); err_alloc_ch: if (err == -EPROBE_DEFER) { for (i = 0; i < priv->num_channels; i++) { channel = priv->channel[i]; nctx = &channel->nctx; dpaa2_io_service_deregister(channel->dpio, nctx, dev); - free_channel(priv, channel); + dpaa2_eth_free_channel(priv, channel); } priv->num_channels = 0; return err; @@@ -2534,7 -2534,7 +2534,7 @@@ return 0; }
- static void free_dpio(struct dpaa2_eth_priv *priv) + static void dpaa2_eth_free_dpio(struct dpaa2_eth_priv *priv) { struct device *dev = priv->net_dev->dev.parent; struct dpaa2_eth_channel *ch; @@@ -2544,12 -2544,12 +2544,12 @@@ for (i = 0; i < priv->num_channels; i++) { ch = priv->channel[i]; dpaa2_io_service_deregister(ch->dpio, &ch->nctx, dev); - free_channel(priv, ch); + dpaa2_eth_free_channel(priv, ch); } }
- static struct dpaa2_eth_channel *get_affine_channel(struct dpaa2_eth_priv *priv, - int cpu) + static struct dpaa2_eth_channel *dpaa2_eth_get_affine_channel(struct dpaa2_eth_priv *priv, + int cpu) { struct device *dev = priv->net_dev->dev.parent; int i; @@@ -2566,7 -2566,7 +2566,7 @@@ return priv->channel[0]; }
- static void set_fq_affinity(struct dpaa2_eth_priv *priv) + static void dpaa2_eth_set_fq_affinity(struct dpaa2_eth_priv *priv) { struct device *dev = priv->net_dev->dev.parent; struct dpaa2_eth_fq *fq; @@@ -2597,13 -2597,13 +2597,13 @@@ default: dev_err(dev, "Unknown FQ type: %d\n", fq->type); } - fq->channel = get_affine_channel(priv, fq->target_cpu); + fq->channel = dpaa2_eth_get_affine_channel(priv, fq->target_cpu); }
update_xps(priv); }
- static void setup_fqs(struct dpaa2_eth_priv *priv) + static void dpaa2_eth_setup_fqs(struct dpaa2_eth_priv *priv) { int i, j;
@@@ -2627,11 -2627,11 +2627,11 @@@ }
/* For each FQ, decide on which core to process incoming frames */ - set_fq_affinity(priv); + dpaa2_eth_set_fq_affinity(priv); }
/* Allocate and configure one buffer pool for each interface */ - static int setup_dpbp(struct dpaa2_eth_priv *priv) + static int dpaa2_eth_setup_dpbp(struct dpaa2_eth_priv *priv) { int err; struct fsl_mc_device *dpbp_dev; @@@ -2690,15 -2690,15 +2690,15 @@@ err_open return err; }
- static void free_dpbp(struct dpaa2_eth_priv *priv) + static void dpaa2_eth_free_dpbp(struct dpaa2_eth_priv *priv) { - drain_pool(priv); + dpaa2_eth_drain_pool(priv); dpbp_disable(priv->mc_io, 0, priv->dpbp_dev->mc_handle); dpbp_close(priv->mc_io, 0, priv->dpbp_dev->mc_handle); fsl_mc_object_free(priv->dpbp_dev); }
- static int set_buffer_layout(struct dpaa2_eth_priv *priv) + static int dpaa2_eth_set_buffer_layout(struct dpaa2_eth_priv *priv) { struct device *dev = priv->net_dev->dev.parent; struct dpni_buffer_layout buf_layout = {0}; @@@ -2815,7 -2815,7 +2815,7 @@@ static inline int dpaa2_eth_enqueue_fq_ return 0; }
- static void set_enqueue_mode(struct dpaa2_eth_priv *priv) + static void dpaa2_eth_set_enqueue_mode(struct dpaa2_eth_priv *priv) { if (dpaa2_eth_cmp_dpni_ver(priv, DPNI_ENQUEUE_FQID_VER_MAJOR, DPNI_ENQUEUE_FQID_VER_MINOR) < 0) @@@ -2824,7 -2824,7 +2824,7 @@@ priv->enqueue = dpaa2_eth_enqueue_fq_multiple; }
- static int set_pause(struct dpaa2_eth_priv *priv) + static int dpaa2_eth_set_pause(struct dpaa2_eth_priv *priv) { struct device *dev = priv->net_dev->dev.parent; struct dpni_link_cfg link_cfg = {0}; @@@ -2851,7 -2851,7 +2851,7 @@@ return 0; }
- static void update_tx_fqids(struct dpaa2_eth_priv *priv) + static void dpaa2_eth_update_tx_fqids(struct dpaa2_eth_priv *priv) { struct dpni_queue_id qid = {0}; struct dpaa2_eth_fq *fq; @@@ -2893,7 -2893,7 +2893,7 @@@ out_err }
/* Configure ingress classification based on VLAN PCP */ - static int set_vlan_qos(struct dpaa2_eth_priv *priv) + static int dpaa2_eth_set_vlan_qos(struct dpaa2_eth_priv *priv) { struct device *dev = priv->net_dev->dev.parent; struct dpkg_profile_cfg kg_cfg = {0}; @@@ -3005,7 -3005,7 +3005,7 @@@ out_free_tbl }
/* Configure the DPNI object this interface is associated with */ - static int setup_dpni(struct fsl_mc_device *ls_dev) + static int dpaa2_eth_setup_dpni(struct fsl_mc_device *ls_dev) { struct device *dev = &ls_dev->dev; struct dpaa2_eth_priv *priv; @@@ -3053,20 -3053,20 +3053,20 @@@ goto close; }
- err = set_buffer_layout(priv); + err = dpaa2_eth_set_buffer_layout(priv); if (err) goto close;
- set_enqueue_mode(priv); + dpaa2_eth_set_enqueue_mode(priv);
/* Enable pause frame support */ if (dpaa2_eth_has_pause_support(priv)) { - err = set_pause(priv); + err = dpaa2_eth_set_pause(priv); if (err) goto close; }
- err = set_vlan_qos(priv); + err = dpaa2_eth_set_vlan_qos(priv); if (err && err != -EOPNOTSUPP) goto close;
@@@ -3086,7 -3086,7 +3086,7 @@@ close return err; }
- static void free_dpni(struct dpaa2_eth_priv *priv) + static void dpaa2_eth_free_dpni(struct dpaa2_eth_priv *priv) { int err;
@@@ -3098,8 -3098,8 +3098,8 @@@ dpni_close(priv->mc_io, 0, priv->mc_token); }
- static int setup_rx_flow(struct dpaa2_eth_priv *priv, - struct dpaa2_eth_fq *fq) + static int dpaa2_eth_setup_rx_flow(struct dpaa2_eth_priv *priv, + struct dpaa2_eth_fq *fq) { struct device *dev = priv->net_dev->dev.parent; struct dpni_queue queue; @@@ -3150,8 -3150,8 +3150,8 @@@ return 0; }
- static int setup_tx_flow(struct dpaa2_eth_priv *priv, - struct dpaa2_eth_fq *fq) + static int dpaa2_eth_setup_tx_flow(struct dpaa2_eth_priv *priv, + struct dpaa2_eth_fq *fq) { struct device *dev = priv->net_dev->dev.parent; struct dpni_queue queue; @@@ -3266,7 -3266,7 +3266,7 @@@ static const struct dpaa2_eth_dist_fiel };
/* Configure the Rx hash key using the legacy API */ - static int config_legacy_hash_key(struct dpaa2_eth_priv *priv, dma_addr_t key) + static int dpaa2_eth_config_legacy_hash_key(struct dpaa2_eth_priv *priv, dma_addr_t key) { struct device *dev = priv->net_dev->dev.parent; struct dpni_rx_tc_dist_cfg dist_cfg; @@@ -3291,7 -3291,7 +3291,7 @@@ }
/* Configure the Rx hash key using the new API */ - static int config_hash_key(struct dpaa2_eth_priv *priv, dma_addr_t key) + static int dpaa2_eth_config_hash_key(struct dpaa2_eth_priv *priv, dma_addr_t key) { struct device *dev = priv->net_dev->dev.parent; struct dpni_rx_dist_cfg dist_cfg; @@@ -3317,7 -3317,7 +3317,7 @@@ }
/* Configure the Rx flow classification key */ - static int config_cls_key(struct dpaa2_eth_priv *priv, dma_addr_t key) + static int dpaa2_eth_config_cls_key(struct dpaa2_eth_priv *priv, dma_addr_t key) { struct device *dev = priv->net_dev->dev.parent; struct dpni_rx_dist_cfg dist_cfg; @@@ -3452,11 -3452,11 +3452,11 @@@ static int dpaa2_eth_set_dist_key(struc
if (type == DPAA2_ETH_RX_DIST_HASH) { if (dpaa2_eth_has_legacy_dist(priv)) - err = config_legacy_hash_key(priv, key_iova); + err = dpaa2_eth_config_legacy_hash_key(priv, key_iova); else - err = config_hash_key(priv, key_iova); + err = dpaa2_eth_config_hash_key(priv, key_iova); } else { - err = config_cls_key(priv, key_iova); + err = dpaa2_eth_config_cls_key(priv, key_iova); }
dma_unmap_single(dev, key_iova, DPAA2_CLASSIFIER_DMA_SIZE, @@@ -3531,7 -3531,7 +3531,7 @@@ out /* Bind the DPNI to its needed objects and resources: buffer pool, DPIOs, * frame queues and channels */ - static int bind_dpni(struct dpaa2_eth_priv *priv) + static int dpaa2_eth_bind_dpni(struct dpaa2_eth_priv *priv) { struct net_device *net_dev = priv->net_dev; struct device *dev = net_dev->dev.parent; @@@ -3579,10 -3579,10 +3579,10 @@@ for (i = 0; i < priv->num_fqs; i++) { switch (priv->fq[i].type) { case DPAA2_RX_FQ: - err = setup_rx_flow(priv, &priv->fq[i]); + err = dpaa2_eth_setup_rx_flow(priv, &priv->fq[i]); break; case DPAA2_TX_CONF_FQ: - err = setup_tx_flow(priv, &priv->fq[i]); + err = dpaa2_eth_setup_tx_flow(priv, &priv->fq[i]); break; default: dev_err(dev, "Invalid FQ type %d\n", priv->fq[i].type); @@@ -3603,7 -3603,7 +3603,7 @@@ }
/* Allocate rings for storing incoming frame descriptors */ - static int alloc_rings(struct dpaa2_eth_priv *priv) + static int dpaa2_eth_alloc_rings(struct dpaa2_eth_priv *priv) { struct net_device *net_dev = priv->net_dev; struct device *dev = net_dev->dev.parent; @@@ -3630,7 -3630,7 +3630,7 @@@ err_ring return -ENOMEM; }
- static void free_rings(struct dpaa2_eth_priv *priv) + static void dpaa2_eth_free_rings(struct dpaa2_eth_priv *priv) { int i;
@@@ -3638,7 -3638,7 +3638,7 @@@ dpaa2_io_store_destroy(priv->channel[i]->store); }
- static int set_mac_addr(struct dpaa2_eth_priv *priv) + static int dpaa2_eth_set_mac_addr(struct dpaa2_eth_priv *priv) { struct net_device *net_dev = priv->net_dev; struct device *dev = net_dev->dev.parent; @@@ -3703,7 -3703,7 +3703,7 @@@ return 0; }
- static int netdev_init(struct net_device *net_dev) + static int dpaa2_eth_netdev_init(struct net_device *net_dev) { struct device *dev = net_dev->dev.parent; struct dpaa2_eth_priv *priv = netdev_priv(net_dev); @@@ -3716,7 -3716,7 +3716,7 @@@ net_dev->netdev_ops = &dpaa2_eth_ops; net_dev->ethtool_ops = &dpaa2_ethtool_ops;
- err = set_mac_addr(priv); + err = dpaa2_eth_set_mac_addr(priv); if (err) return err;
@@@ -3771,13 -3771,13 +3771,13 @@@ return 0; }
- static int poll_link_state(void *arg) + static int dpaa2_eth_poll_link_state(void *arg) { struct dpaa2_eth_priv *priv = (struct dpaa2_eth_priv *)arg; int err;
while (!kthread_should_stop()) { - err = link_state_update(priv); + err = dpaa2_eth_link_state_update(priv); if (unlikely(err)) return err;
@@@ -3847,11 -3847,11 +3847,11 @@@ static irqreturn_t dpni_irq0_handler_th }
if (status & DPNI_IRQ_EVENT_LINK_CHANGED) - link_state_update(netdev_priv(net_dev)); + dpaa2_eth_link_state_update(netdev_priv(net_dev));
if (status & DPNI_IRQ_EVENT_ENDPOINT_CHANGED) { - set_mac_addr(netdev_priv(net_dev)); - update_tx_fqids(priv); + dpaa2_eth_set_mac_addr(netdev_priv(net_dev)); + dpaa2_eth_update_tx_fqids(priv);
rtnl_lock(); if (priv->mac) @@@ -3864,7 -3864,7 +3864,7 @@@ return IRQ_HANDLED; }
- static int setup_irqs(struct fsl_mc_device *ls_dev) + static int dpaa2_eth_setup_irqs(struct fsl_mc_device *ls_dev) { int err = 0; struct fsl_mc_device_irq *irq; @@@ -3910,7 -3910,7 +3910,7 @@@ free_mc_irq return err; }
- static void add_ch_napi(struct dpaa2_eth_priv *priv) + static void dpaa2_eth_add_ch_napi(struct dpaa2_eth_priv *priv) { int i; struct dpaa2_eth_channel *ch; @@@ -3923,7 -3923,7 +3923,7 @@@ } }
- static void del_ch_napi(struct dpaa2_eth_priv *priv) + static void dpaa2_eth_del_ch_napi(struct dpaa2_eth_priv *priv) { int i; struct dpaa2_eth_channel *ch; @@@ -3970,26 -3970,26 +3970,26 @@@ static int dpaa2_eth_probe(struct fsl_m }
/* MC objects initialization and configuration */ - err = setup_dpni(dpni_dev); + err = dpaa2_eth_setup_dpni(dpni_dev); if (err) goto err_dpni_setup;
- err = setup_dpio(priv); + err = dpaa2_eth_setup_dpio(priv); if (err) goto err_dpio_setup;
- setup_fqs(priv); + dpaa2_eth_setup_fqs(priv);
- err = setup_dpbp(priv); + err = dpaa2_eth_setup_dpbp(priv); if (err) goto err_dpbp_setup;
- err = bind_dpni(priv); + err = dpaa2_eth_bind_dpni(priv); if (err) goto err_bind;
/* Add a NAPI context for each channel */ - add_ch_napi(priv); + dpaa2_eth_add_ch_napi(priv);
/* Percpu statistics */ priv->percpu_stats = alloc_percpu(*priv->percpu_stats); @@@ -4012,21 -4012,21 +4012,21 @@@ goto err_alloc_sgt_cache; }
- err = netdev_init(net_dev); + err = dpaa2_eth_netdev_init(net_dev); if (err) goto err_netdev_init;
/* Configure checksum offload based on current interface flags */ - err = set_rx_csum(priv, !!(net_dev->features & NETIF_F_RXCSUM)); + err = dpaa2_eth_set_rx_csum(priv, !!(net_dev->features & NETIF_F_RXCSUM)); if (err) goto err_csum;
- err = set_tx_csum(priv, !!(net_dev->features & - (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM))); + err = dpaa2_eth_set_tx_csum(priv, + !!(net_dev->features & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM))); if (err) goto err_csum;
- err = alloc_rings(priv); + err = dpaa2_eth_alloc_rings(priv); if (err) goto err_alloc_rings;
@@@ -4039,10 -4039,10 +4039,10 @@@ } #endif
- err = setup_irqs(dpni_dev); + err = dpaa2_eth_setup_irqs(dpni_dev); if (err) { netdev_warn(net_dev, "Failed to set link interrupt, fall back to polling\n"); - priv->poll_thread = kthread_run(poll_link_state, priv, + priv->poll_thread = kthread_run(dpaa2_eth_poll_link_state, priv, "%s_poll_link", net_dev->name); if (IS_ERR(priv->poll_thread)) { dev_err(dev, "Error starting polling thread\n"); @@@ -4076,7 -4076,7 +4076,7 @@@ err_connect_mac else fsl_mc_free_irqs(dpni_dev); err_poll_thread: - free_rings(priv); + dpaa2_eth_free_rings(priv); err_alloc_rings: err_csum: err_netdev_init: @@@ -4086,13 -4086,13 +4086,13 @@@ err_alloc_sgt_cache err_alloc_percpu_extras: free_percpu(priv->percpu_stats); err_alloc_percpu_stats: - del_ch_napi(priv); + dpaa2_eth_del_ch_napi(priv); err_bind: - free_dpbp(priv); + dpaa2_eth_free_dpbp(priv); err_dpbp_setup: - free_dpio(priv); + dpaa2_eth_free_dpio(priv); err_dpio_setup: - free_dpni(priv); + dpaa2_eth_free_dpni(priv); err_dpni_setup: fsl_mc_portal_free(priv->mc_io); err_portal_alloc: @@@ -4126,15 -4126,15 +4126,15 @@@ static int dpaa2_eth_remove(struct fsl_ else fsl_mc_free_irqs(ls_dev);
- free_rings(priv); + dpaa2_eth_free_rings(priv); free_percpu(priv->sgt_cache); free_percpu(priv->percpu_stats); free_percpu(priv->percpu_extras);
- del_ch_napi(priv); - free_dpbp(priv); - free_dpio(priv); - free_dpni(priv); + dpaa2_eth_del_ch_napi(priv); + dpaa2_eth_free_dpbp(priv); + dpaa2_eth_free_dpio(priv); + dpaa2_eth_free_dpni(priv);
fsl_mc_portal_free(priv->mc_io);
diff --combined drivers/net/ethernet/hisilicon/hns/hns_enet.c index 22522f8a5299,3af33ade7b60..b13f3a5cdf59 --- a/drivers/net/ethernet/hisilicon/hns/hns_enet.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.c @@@ -557,10 -557,7 +557,7 @@@ static int hns_nic_poll_rx_skb(struct h va = (unsigned char *)desc_cb->buf + desc_cb->page_offset;
/* prefetch first cache line of first page */ - prefetch(va); - #if L1_CACHE_BYTES < 128 - prefetch(va + L1_CACHE_BYTES); - #endif + net_prefetch(va);
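/*
 * The open-coded prefetch block deleted above is the pattern this merge
 * keeps replacing across drivers; it now lives in include/linux/netdevice.h
 * as net_prefetch(), roughly as follows (paraphrased here for reference,
 * not part of this diff):
 */
#include <linux/cache.h>
#include <linux/prefetch.h>

static inline void net_prefetch(void *p)
{
        prefetch(p);
#if L1_CACHE_BYTES < 128
        prefetch((u8 *)p + L1_CACHE_BYTES);
#endif
}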
skb = *out_skb = napi_alloc_skb(&ring_data->napi, HNS_RX_HEAD_SIZE); @@@ -2282,10 -2279,8 +2279,10 @@@ static int hns_nic_dev_probe(struct pla priv->enet_ver = AE_VERSION_1; else if (acpi_dev_found(hns_enet_acpi_match[1].id)) priv->enet_ver = AE_VERSION_2; - else - return -ENXIO; + else { + ret = -ENXIO; + goto out_read_prop_fail; + }
/* try to find port-idx-in-ae first */ ret = acpi_node_get_property_reference(dev->fwnode, @@@ -2301,8 -2296,7 +2298,8 @@@ priv->fwnode = args.fwnode; } else { dev_err(dev, "cannot read cfg data from OF or acpi\n"); - return -ENXIO; + ret = -ENXIO; + goto out_read_prop_fail; }
ret = device_property_read_u32(dev, "port-idx-in-ae", &port_id); diff --combined drivers/net/ethernet/hisilicon/hns3/hns3_enet.c index a4f1d515e5e0,1a1ba6a41bfe..47ab2a5c7391 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c @@@ -21,7 -21,6 +21,7 @@@ #include <net/pkt_cls.h> #include <net/tcp.h> #include <net/vxlan.h> +#include <net/geneve.h>
#include "hnae3.h" #include "hns3_enet.h" @@@ -781,7 -780,7 +781,7 @@@ static int hns3_get_l4_protocol(struct * and it is udp packet, which has a dest port as the IANA assigned. * the hardware is expected to do the checksum offload, but the * hardware will not do the checksum offload when udp dest port is - * 4789. + * 4789 or 6081. */ static bool hns3_tunnel_csum_bug(struct sk_buff *skb) { @@@ -790,8 -789,7 +790,8 @@@ l4.hdr = skb_transport_header(skb);
if (!(!skb->encapsulation && - l4.udp->dest == htons(IANA_VXLAN_UDP_PORT))) + (l4.udp->dest == htons(IANA_VXLAN_UDP_PORT) || + l4.udp->dest == htons(GENEVE_UDP_PORT)))) return false;
skb_checksum_help(skb); @@@ -2748,7 -2746,7 +2748,7 @@@ static void hns3_rx_checksum(struct hns case HNS3_OL4_TYPE_MAC_IN_UDP: case HNS3_OL4_TYPE_NVGRE: skb->csum_level = 1; - /* fall through */ + fallthrough; case HNS3_OL4_TYPE_NO_TUN: l3_type = hnae3_get_field(l234info, HNS3_RXD_L3ID_M, HNS3_RXD_L3ID_S); @@@ -3093,10 -3091,7 +3093,7 @@@ static int hns3_handle_rx_bd(struct hns * lines. In such a case, single fetch would suffice to cache in the * relevant part of the header. */ - prefetch(ring->va); - #if L1_CACHE_BYTES < 128 - prefetch(ring->va + L1_CACHE_BYTES); - #endif + net_prefetch(ring->va);
if (!skb) { ret = hns3_alloc_skb(ring, length, ring->va); diff --combined drivers/net/ethernet/ibm/ibmvnic.c index d3a774331afc,994358689de9..6b619c190239 --- a/drivers/net/ethernet/ibm/ibmvnic.c +++ b/drivers/net/ethernet/ibm/ibmvnic.c @@@ -104,8 -104,7 +104,7 @@@ static int send_login(struct ibmvnic_ad static void send_cap_queries(struct ibmvnic_adapter *adapter); static int init_sub_crqs(struct ibmvnic_adapter *); static int init_sub_crq_irqs(struct ibmvnic_adapter *adapter); - static int ibmvnic_init(struct ibmvnic_adapter *); - static int ibmvnic_reset_init(struct ibmvnic_adapter *); + static int ibmvnic_reset_init(struct ibmvnic_adapter *, bool reset); static void release_crq_queue(struct ibmvnic_adapter *); static int __ibmvnic_set_mac(struct net_device *, u8 *); static int init_crq_queue(struct ibmvnic_adapter *adapter); @@@ -297,8 -296,7 +296,7 @@@ static void deactivate_rx_pools(struct { int i;
- for (i = 0; i < be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs); - i++) + for (i = 0; i < adapter->num_active_rx_pools; i++) adapter->rx_pool[i].active = 0; }
@@@ -306,6 -304,7 +304,7 @@@ static void replenish_rx_pool(struct ib struct ibmvnic_rx_pool *pool) { int count = pool->size - atomic_read(&pool->available); + u64 handle = adapter->rx_scrq[pool->index]->handle; struct device *dev = &adapter->vdev->dev; int buffers_added = 0; unsigned long lpar_rc; @@@ -314,7 -313,6 +313,6 @@@ unsigned int offset; dma_addr_t dma_addr; unsigned char *dst; - u64 *handle_array; int shift = 0; int index; int i; @@@ -322,10 -320,6 +320,6 @@@ if (!pool->active) return;
- handle_array = (u64 *)((u8 *)(adapter->login_rsp_buf) + - be32_to_cpu(adapter->login_rsp_buf-> - off_rxadd_subcrqs)); - for (i = 0; i < count; ++i) { skb = alloc_skb(pool->buff_size, GFP_ATOMIC); if (!skb) { @@@ -369,8 -363,7 +363,7 @@@ #endif sub_crq.rx_add.len = cpu_to_be32(pool->buff_size << shift);
- lpar_rc = send_subcrq(adapter, handle_array[pool->index], - &sub_crq); + lpar_rc = send_subcrq(adapter, handle, &sub_crq); if (lpar_rc != H_SUCCESS) goto failure;
@@@ -407,8 -400,7 +400,7 @@@ static void replenish_pools(struct ibmv int i;
adapter->replenish_task_cycles++; - for (i = 0; i < be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs); - i++) { + for (i = 0; i < adapter->num_active_rx_pools; i++) { if (adapter->rx_pool[i].active) replenish_rx_pool(adapter, &adapter->rx_pool[i]); } @@@ -475,25 -467,20 +467,23 @@@ static int init_stats_token(struct ibmv static int reset_rx_pools(struct ibmvnic_adapter *adapter) { struct ibmvnic_rx_pool *rx_pool; + u64 buff_size; int rx_scrqs; int i, j, rc; - u64 *size_array;
+ if (!adapter->rx_pool) + return -1; + - size_array = (u64 *)((u8 *)(adapter->login_rsp_buf) + - be32_to_cpu(adapter->login_rsp_buf->off_rxadd_buff_size)); - - rx_scrqs = be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs); + buff_size = adapter->cur_rx_buf_sz; + rx_scrqs = adapter->num_active_rx_pools; for (i = 0; i < rx_scrqs; i++) { rx_pool = &adapter->rx_pool[i];
netdev_dbg(adapter->netdev, "Re-setting rx_pool[%d]\n", i);
- if (rx_pool->buff_size != be64_to_cpu(size_array[i])) { + if (rx_pool->buff_size != buff_size) { free_long_term_buff(adapter, &rx_pool->long_term_buff); - rx_pool->buff_size = be64_to_cpu(size_array[i]); + rx_pool->buff_size = buff_size; rc = alloc_long_term_buff(adapter, &rx_pool->long_term_buff, rx_pool->size * @@@ -561,13 -548,11 +551,11 @@@ static int init_rx_pools(struct net_dev struct device *dev = &adapter->vdev->dev; struct ibmvnic_rx_pool *rx_pool; int rxadd_subcrqs; - u64 *size_array; + u64 buff_size; int i, j;
- rxadd_subcrqs = - be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs); - size_array = (u64 *)((u8 *)(adapter->login_rsp_buf) + - be32_to_cpu(adapter->login_rsp_buf->off_rxadd_buff_size)); + rxadd_subcrqs = adapter->num_active_rx_scrqs; + buff_size = adapter->cur_rx_buf_sz;
adapter->rx_pool = kcalloc(rxadd_subcrqs, sizeof(struct ibmvnic_rx_pool), @@@ -585,11 -570,11 +573,11 @@@ netdev_dbg(adapter->netdev, "Initializing rx_pool[%d], %lld buffs, %lld bytes each\n", i, adapter->req_rx_add_entries_per_subcrq, - be64_to_cpu(size_array[i])); + buff_size);
rx_pool->size = adapter->req_rx_add_entries_per_subcrq; rx_pool->index = i; - rx_pool->buff_size = be64_to_cpu(size_array[i]); + rx_pool->buff_size = buff_size; rx_pool->active = 1;
rx_pool->free_map = kcalloc(rx_pool->size, sizeof(int), @@@ -652,10 -637,7 +640,10 @@@ static int reset_tx_pools(struct ibmvni int tx_scrqs; int i, rc;
+ if (!adapter->tx_pool) + return -1; + - tx_scrqs = be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs); + tx_scrqs = adapter->num_active_tx_pools; for (i = 0; i < tx_scrqs; i++) { rc = reset_one_tx_pool(adapter, &adapter->tso_pool[i]); if (rc) @@@ -744,7 -726,7 +732,7 @@@ static int init_tx_pools(struct net_dev int tx_subcrqs; int i, rc;
- tx_subcrqs = be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs); + tx_subcrqs = adapter->num_active_tx_scrqs; adapter->tx_pool = kcalloc(tx_subcrqs, sizeof(struct ibmvnic_tx_pool), GFP_KERNEL); if (!adapter->tx_pool) @@@ -980,7 -962,7 +968,7 @@@ static int set_link_state(struct ibmvni return -1; }
- if (adapter->init_done_rc == 1) { + if (adapter->init_done_rc == PARTIALSUCCESS) { /* Partial success, delay and re-send */ mdelay(1000); resend = true; @@@ -1530,9 -1512,9 +1518,9 @@@ static netdev_tx_t ibmvnic_xmit(struct unsigned int offset; int num_entries = 1; unsigned char *dst; - u64 *handle_array; int index = 0; u8 proto = 0; + u64 handle; netdev_tx_t ret = NETDEV_TX_OK;
if (test_bit(0, &adapter->resetting)) { @@@ -1559,8 -1541,7 +1547,7 @@@
tx_scrq = adapter->tx_scrq[queue_num]; txq = netdev_get_tx_queue(netdev, skb_get_queue_mapping(skb)); - handle_array = (u64 *)((u8 *)(adapter->login_rsp_buf) + - be32_to_cpu(adapter->login_rsp_buf->off_txsubm_subcrqs)); + handle = tx_scrq->handle;
index = tx_pool->free_map[tx_pool->consumer_index];
@@@ -1672,14 -1653,14 +1659,14 @@@ ret = NETDEV_TX_OK; goto tx_err_out; } - lpar_rc = send_subcrq_indirect(adapter, handle_array[queue_num], + lpar_rc = send_subcrq_indirect(adapter, handle, (u64)tx_buff->indir_dma, (u64)num_entries); dma_unmap_single(dev, tx_buff->indir_dma, sizeof(tx_buff->indir_arr), DMA_TO_DEVICE); } else { tx_buff->num_entries = num_entries; - lpar_rc = send_subcrq(adapter, handle_array[queue_num], + lpar_rc = send_subcrq(adapter, handle, &tx_crq); } if (lpar_rc != H_SUCCESS) { @@@ -1874,7 -1855,7 +1861,7 @@@ static int do_change_param_reset(struc return rc; }
- rc = ibmvnic_reset_init(adapter); + rc = ibmvnic_reset_init(adapter, true); if (rc) return IBMVNIC_INIT_FAILED;
@@@ -1992,7 -1973,7 +1979,7 @@@ static int do_reset(struct ibmvnic_adap goto out; }
- rc = ibmvnic_reset_init(adapter); + rc = ibmvnic_reset_init(adapter, true); if (rc) { rc = IBMVNIC_INIT_FAILED; goto out; @@@ -2017,10 -1998,7 +2004,10 @@@ adapter->req_rx_add_entries_per_subcrq != old_num_rx_slots || adapter->req_tx_entries_per_subcrq != - old_num_tx_slots) { + old_num_tx_slots || + !adapter->rx_pool || + !adapter->tso_pool || + !adapter->tx_pool) { release_rx_pools(adapter); release_tx_pools(adapter); release_napi(adapter); @@@ -2033,14 -2011,10 +2020,14 @@@ } else { rc = reset_tx_pools(adapter); - if (rc) + if (rc) { + netdev_dbg(adapter->netdev, "reset tx pools failed (%d)\n", + rc); goto out; + }
rc = reset_rx_pools(adapter); - if (rc) + if (rc) { + netdev_dbg(adapter->netdev, "reset rx pools failed (%d)\n", + rc); goto out; + } } ibmvnic_disable_irqs(adapter); @@@ -2106,7 -2080,7 +2093,7 @@@ static int do_hard_reset(struct ibmvnic return rc; }
- rc = ibmvnic_init(adapter); + rc = ibmvnic_reset_init(adapter, false); if (rc) return rc;
@@@ -3581,8 -3555,7 +3568,7 @@@ static int ibmvnic_send_crq(struct ibmv if (rc) { if (rc == H_CLOSED) { dev_warn(dev, "CRQ Queue closed\n"); - if (test_bit(0, &adapter->resetting)) - ibmvnic_reset(adapter, VNIC_RESET_FATAL); + /* do not reset, report the failure, wait for passive init from server */ }
dev_warn(dev, "Send error (rc=%d)\n", rc); @@@ -3593,14 -3566,31 +3579,31 @@@
static int ibmvnic_send_crq_init(struct ibmvnic_adapter *adapter) { + struct device *dev = &adapter->vdev->dev; union ibmvnic_crq crq; + int retries = 100; + int rc;
memset(&crq, 0, sizeof(crq)); crq.generic.first = IBMVNIC_CRQ_INIT_CMD; crq.generic.cmd = IBMVNIC_CRQ_INIT; netdev_dbg(adapter->netdev, "Sending CRQ init\n");
- return ibmvnic_send_crq(adapter, &crq); + do { + rc = ibmvnic_send_crq(adapter, &crq); + if (rc != H_CLOSED) + break; + retries--; + msleep(50); + + } while (retries > 0); + + if (rc) { + dev_err(dev, "Failed to send init request, rc = %d\n", rc); + return rc; + } + + return 0; }
static int send_version_xchg(struct ibmvnic_adapter *adapter) @@@ -4305,6 -4295,11 +4308,11 @@@ static int handle_login_rsp(union ibmvn struct net_device *netdev = adapter->netdev; struct ibmvnic_login_rsp_buffer *login_rsp = adapter->login_rsp_buf; struct ibmvnic_login_buffer *login = adapter->login_buf; + u64 *tx_handle_array; + u64 *rx_handle_array; + int num_tx_pools; + int num_rx_pools; + u64 *size_array; int i;
dma_unmap_single(dev, adapter->login_buf_token, adapter->login_buf_sz, @@@ -4339,6 -4334,30 +4347,30 @@@ ibmvnic_remove(adapter->vdev); return -EIO; } + size_array = (u64 *)((u8 *)(adapter->login_rsp_buf) + + be32_to_cpu(adapter->login_rsp_buf->off_rxadd_buff_size)); + /* variable buffer sizes are not supported, so just read the + * first entry. + */ + adapter->cur_rx_buf_sz = be64_to_cpu(size_array[0]); + + num_tx_pools = be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs); + num_rx_pools = be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs); + + tx_handle_array = (u64 *)((u8 *)(adapter->login_rsp_buf) + + be32_to_cpu(adapter->login_rsp_buf->off_txsubm_subcrqs)); + rx_handle_array = (u64 *)((u8 *)(adapter->login_rsp_buf) + + be32_to_cpu(adapter->login_rsp_buf->off_rxadd_subcrqs)); + + for (i = 0; i < num_tx_pools; i++) + adapter->tx_scrq[i]->handle = tx_handle_array[i]; + + for (i = 0; i < num_rx_pools; i++) + adapter->rx_scrq[i]->handle = rx_handle_array[i]; + + adapter->num_active_tx_scrqs = num_tx_pools; + adapter->num_active_rx_scrqs = num_rx_pools; + release_login_rsp_buffer(adapter); release_login_buffer(adapter); complete(&adapter->init_done);
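/*
 * The handle_login_rsp() hunk above caches the buffer size and the tx/rx
 * queue handles out of the login response in one pass, so later paths no
 * longer re-derive them from login_rsp_buf. A minimal sketch of the
 * underlying pattern, locating a u64 array inside a firmware response
 * through a big-endian byte offset; struct fw_rsp and its field names are
 * hypothetical, not ibmvnic's real layout:
 */
#include <linux/types.h>
#include <asm/byteorder.h>

struct fw_rsp {
        __be32 num_handles;     /* number of entries in the array */
        __be32 off_handles;     /* byte offset of the u64 handle array */
};

static void cache_handles(const struct fw_rsp *rsp, u64 *dst)
{
        const u64 *src = (const u64 *)((const u8 *)rsp +
                                       be32_to_cpu(rsp->off_handles));
        u32 i, n = be32_to_cpu(rsp->num_handles);

        for (i = 0; i < n; i++)
                dst[i] = src[i];        /* handles are opaque, copied as-is */
}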
@@@ -4984,7 -5003,7 +5016,7 @@@ map_failed return retrc; }
- static int ibmvnic_reset_init(struct ibmvnic_adapter *adapter) + static int ibmvnic_reset_init(struct ibmvnic_adapter *adapter, bool reset) { struct device *dev = &adapter->vdev->dev; unsigned long timeout = msecs_to_jiffies(30000); @@@ -4993,12 -5012,19 +5025,19 @@@
adapter->from_passive_init = false;
- old_num_rx_queues = adapter->req_rx_queues; - old_num_tx_queues = adapter->req_tx_queues; + if (reset) { + old_num_rx_queues = adapter->req_rx_queues; + old_num_tx_queues = adapter->req_tx_queues; + reinit_completion(&adapter->init_done); + }
- reinit_completion(&adapter->init_done); adapter->init_done_rc = 0; - ibmvnic_send_crq_init(adapter); + rc = ibmvnic_send_crq_init(adapter); + if (rc) { + dev_err(dev, "Send crq init failed with error %d\n", rc); + return rc; + } + if (!wait_for_completion_timeout(&adapter->init_done, timeout)) { dev_err(dev, "Initialization sequence timed out\n"); return -1; @@@ -5009,13 -5035,8 +5048,8 @@@ return adapter->init_done_rc; }
- if (adapter->from_passive_init) { - adapter->state = VNIC_OPEN; - adapter->from_passive_init = false; - return -1; - } - - if (test_bit(0, &adapter->resetting) && !adapter->wait_for_reset && + if (reset && + test_bit(0, &adapter->resetting) && !adapter->wait_for_reset && adapter->reset_reason != VNIC_RESET_MOBILITY) { if (adapter->req_rx_queues != old_num_rx_queues || adapter->req_tx_queues != old_num_tx_queues) { @@@ -5043,48 -5064,6 +5077,6 @@@ return rc; }
- static int ibmvnic_init(struct ibmvnic_adapter *adapter) - { - struct device *dev = &adapter->vdev->dev; - unsigned long timeout = msecs_to_jiffies(30000); - int rc; - - adapter->from_passive_init = false; - - adapter->init_done_rc = 0; - ibmvnic_send_crq_init(adapter); - if (!wait_for_completion_timeout(&adapter->init_done, timeout)) { - dev_err(dev, "Initialization sequence timed out\n"); - return -1; - } - - if (adapter->init_done_rc) { - release_crq_queue(adapter); - return adapter->init_done_rc; - } - - if (adapter->from_passive_init) { - adapter->state = VNIC_OPEN; - adapter->from_passive_init = false; - return -1; - } - - rc = init_sub_crqs(adapter); - if (rc) { - dev_err(dev, "Initialization of sub crqs failed\n"); - release_crq_queue(adapter); - return rc; - } - - rc = init_sub_crq_irqs(adapter); - if (rc) { - dev_err(dev, "Failed to initialize sub crq irqs\n"); - release_crq_queue(adapter); - } - - return rc; - } - static struct device_attribute dev_attr_failover;
static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id) @@@ -5147,7 -5126,7 +5139,7 @@@ goto ibmvnic_init_fail; }
- rc = ibmvnic_init(adapter); + rc = ibmvnic_reset_init(adapter, false); if (rc && rc != EAGAIN) goto ibmvnic_init_fail; } while (rc == EAGAIN); @@@ -5297,8 -5276,7 +5289,7 @@@ static unsigned long ibmvnic_get_desire for (i = 0; i < adapter->req_tx_queues + adapter->req_rx_queues; i++) ret += 4 * PAGE_SIZE; /* the scrq message queue */
- for (i = 0; i < be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs); - i++) + for (i = 0; i < adapter->num_active_rx_pools; i++) ret += adapter->rx_pool[i].size * IOMMU_PAGE_ALIGN(adapter->rx_pool[i].buff_size, tbl);
diff --combined drivers/net/ethernet/intel/igb/igb_main.c index d9c3a6b169f9,698bb6a4b088..e1e37d0b7703 --- a/drivers/net/ethernet/intel/igb/igb_main.c +++ b/drivers/net/ethernet/intel/igb/igb_main.c @@@ -718,6 -718,7 +718,6 @@@ static void igb_cache_ring_register(str case e1000_i354: case e1000_i210: case e1000_i211: - fallthrough; default: for (; i < adapter->num_rx_queues; i++) adapter->rx_ring[i]->reg_idx = rbase_offset + i; @@@ -8046,10 -8047,7 +8046,7 @@@ static struct sk_buff *igb_construct_sk struct sk_buff *skb;
/* prefetch first cache line of first page */ - prefetch(va); - #if L1_CACHE_BYTES < 128 - prefetch(va + L1_CACHE_BYTES); - #endif + net_prefetch(va);
/* allocate a skb to store the frags */ skb = napi_alloc_skb(&rx_ring->q_vector->napi, IGB_RX_HDR_LEN); @@@ -8103,10 -8101,7 +8100,7 @@@ static struct sk_buff *igb_build_skb(st struct sk_buff *skb;
/* prefetch first cache line of first page */ - prefetch(va); - #if L1_CACHE_BYTES < 128 - prefetch(va + L1_CACHE_BYTES); - #endif + net_prefetch(va);
/* build an skb around the page buffer */ skb = build_skb(va - IGB_SKB_PAD, truesize); diff --combined drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c index 01a793105599,bb8c607cdcba..08181fc5f5d4 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c @@@ -737,7 -737,7 +737,7 @@@ static int rvu_nix_aq_enq_inst(struct r else if (req->ctype == NIX_AQ_CTYPE_MCE) memcpy(mask, &req->mce_mask, sizeof(struct nix_rx_mce_s)); - /* Fall through */ + fallthrough; case NIX_AQ_INSTOP_INIT: if (req->ctype == NIX_AQ_CTYPE_RQ) memcpy(ctx, &req->rq, sizeof(struct nix_rq_ctx_s)); @@@ -3319,6 -3319,49 +3319,49 @@@ void rvu_nix_lf_teardown(struct rvu *rv nix_ctx_free(rvu, pfvf); }
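/*
 * The fall-through comments converted in the hunk above, and throughout
 * this merge, become the fallthrough pseudo-keyword from
 * include/linux/compiler_attributes.h, which is roughly:
 */
#if __has_attribute(__fallthrough__)
# define fallthrough    __attribute__((__fallthrough__))
#else
# define fallthrough    do {} while (0) /* fallthrough */
#endif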
+ #define NIX_AF_LFX_TX_CFG_PTP_EN BIT_ULL(32) + + static int rvu_nix_lf_ptp_tx_cfg(struct rvu *rvu, u16 pcifunc, bool enable) + { + struct rvu_hwinfo *hw = rvu->hw; + struct rvu_block *block; + int blkaddr; + int nixlf; + u64 cfg; + + blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); + if (blkaddr < 0) + return NIX_AF_ERR_AF_LF_INVALID; + + block = &hw->block[blkaddr]; + nixlf = rvu_get_lf(rvu, block, pcifunc, 0); + if (nixlf < 0) + return NIX_AF_ERR_AF_LF_INVALID; + + cfg = rvu_read64(rvu, blkaddr, NIX_AF_LFX_TX_CFG(nixlf)); + + if (enable) + cfg |= NIX_AF_LFX_TX_CFG_PTP_EN; + else + cfg &= ~NIX_AF_LFX_TX_CFG_PTP_EN; + + rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_CFG(nixlf), cfg); + + return 0; + } + + int rvu_mbox_handler_nix_lf_ptp_tx_enable(struct rvu *rvu, struct msg_req *req, + struct msg_rsp *rsp) + { + return rvu_nix_lf_ptp_tx_cfg(rvu, req->hdr.pcifunc, true); + } + + int rvu_mbox_handler_nix_lf_ptp_tx_disable(struct rvu *rvu, struct msg_req *req, + struct msg_rsp *rsp) + { + return rvu_nix_lf_ptp_tx_cfg(rvu, req->hdr.pcifunc, false); + } + int rvu_mbox_handler_nix_lso_format_cfg(struct rvu *rvu, struct nix_lso_format_cfg *req, struct nix_lso_format_cfg_rsp *rsp) diff --combined drivers/net/ethernet/netronome/nfp/flower/offload.c index 36356f96661d,44cf738636ef..1c59aff2163c --- a/drivers/net/ethernet/netronome/nfp/flower/offload.c +++ b/drivers/net/ethernet/netronome/nfp/flower/offload.c @@@ -31,6 -31,7 +31,7 @@@ BIT(FLOW_DISSECTOR_KEY_PORTS) | \ BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) | \ BIT(FLOW_DISSECTOR_KEY_VLAN) | \ + BIT(FLOW_DISSECTOR_KEY_CVLAN) | \ BIT(FLOW_DISSECTOR_KEY_ENC_KEYID) | \ BIT(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) | \ BIT(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS) | \ @@@ -66,7 -67,8 +67,8 @@@ NFP_FLOWER_LAYER_IPV6)
#define NFP_FLOWER_PRE_TUN_RULE_FIELDS \ - (NFP_FLOWER_LAYER_PORT | \ + (NFP_FLOWER_LAYER_EXT_META | \ + NFP_FLOWER_LAYER_PORT | \ NFP_FLOWER_LAYER_MAC | \ NFP_FLOWER_LAYER_IPV4 | \ NFP_FLOWER_LAYER_IPV6) @@@ -285,6 -287,30 +287,30 @@@ nfp_flower_calculate_key_layers(struct NL_SET_ERR_MSG_MOD(extack, "unsupported offload: loaded firmware does not support VLAN PCP offload"); return -EOPNOTSUPP; } + if (priv->flower_ext_feats & NFP_FL_FEATS_VLAN_QINQ && + !(key_layer_two & NFP_FLOWER_LAYER2_QINQ)) { + key_layer |= NFP_FLOWER_LAYER_EXT_META; + key_size += sizeof(struct nfp_flower_ext_meta); + key_size += sizeof(struct nfp_flower_vlan); + key_layer_two |= NFP_FLOWER_LAYER2_QINQ; + } + } + + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CVLAN)) { + struct flow_match_vlan cvlan; + + if (!(priv->flower_ext_feats & NFP_FL_FEATS_VLAN_QINQ)) { + NL_SET_ERR_MSG_MOD(extack, "unsupported offload: loaded firmware does not support VLAN QinQ offload"); + return -EOPNOTSUPP; + } + + flow_rule_match_vlan(rule, &cvlan); + if (!(key_layer_two & NFP_FLOWER_LAYER2_QINQ)) { + key_layer |= NFP_FLOWER_LAYER_EXT_META; + key_size += sizeof(struct nfp_flower_ext_meta); + key_size += sizeof(struct nfp_flower_vlan); + key_layer_two |= NFP_FLOWER_LAYER2_QINQ; + } }
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_CONTROL)) { @@@ -784,7 -810,7 +810,7 @@@ nfp_flower_copy_pre_actions(char *act_d case NFP_FL_ACTION_OPCODE_PRE_TUNNEL: if (tunnel_act) *tunnel_act = true; - /* fall through */ + fallthrough; case NFP_FL_ACTION_OPCODE_PRE_LAG: memcpy(act_dst + act_off, act_src + act_off, act_len); break; @@@ -1066,6 -1092,7 +1092,7 @@@ err_destroy_merge_flow * nfp_flower_validate_pre_tun_rule() * @app: Pointer to the APP handle * @flow: Pointer to NFP flow representation of rule + * @key_ls: Pointer to NFP key layers structure * @extack: Netlink extended ACK report * * Verifies the flow as a pre-tunnel rule. @@@ -1075,10 -1102,13 +1102,13 @@@ static int nfp_flower_validate_pre_tun_rule(struct nfp_app *app, struct nfp_fl_payload *flow, + struct nfp_fl_key_ls *key_ls, struct netlink_ext_ack *extack) { + struct nfp_flower_priv *priv = app->priv; struct nfp_flower_meta_tci *meta_tci; struct nfp_flower_mac_mpls *mac; + u8 *ext = flow->unmasked_data; struct nfp_fl_act_head *act; u8 *mask = flow->mask_data; bool vlan = false; @@@ -1086,20 -1116,25 +1116,25 @@@ u8 key_layer;
meta_tci = (struct nfp_flower_meta_tci *)flow->unmasked_data; - if (meta_tci->tci & cpu_to_be16(NFP_FLOWER_MASK_VLAN_PRESENT)) { - u16 vlan_tci = be16_to_cpu(meta_tci->tci); - - vlan_tci &= ~NFP_FLOWER_MASK_VLAN_PRESENT; - flow->pre_tun_rule.vlan_tci = cpu_to_be16(vlan_tci); - vlan = true; - } else { - flow->pre_tun_rule.vlan_tci = cpu_to_be16(0xffff); + key_layer = key_ls->key_layer; + if (!(priv->flower_ext_feats & NFP_FL_FEATS_VLAN_QINQ)) { + if (meta_tci->tci & cpu_to_be16(NFP_FLOWER_MASK_VLAN_PRESENT)) { + u16 vlan_tci = be16_to_cpu(meta_tci->tci); + + vlan_tci &= ~NFP_FLOWER_MASK_VLAN_PRESENT; + flow->pre_tun_rule.vlan_tci = cpu_to_be16(vlan_tci); + vlan = true; + } else { + flow->pre_tun_rule.vlan_tci = cpu_to_be16(0xffff); + } }
- key_layer = meta_tci->nfp_flow_key_layer; if (key_layer & ~NFP_FLOWER_PRE_TUN_RULE_FIELDS) { NL_SET_ERR_MSG_MOD(extack, "unsupported pre-tunnel rule: too many match fields"); return -EOPNOTSUPP; + } else if (key_ls->key_layer_two & ~NFP_FLOWER_LAYER2_QINQ) { + NL_SET_ERR_MSG_MOD(extack, "unsupported pre-tunnel rule: non-vlan in extended match fields"); + return -EOPNOTSUPP; }
if (!(key_layer & NFP_FLOWER_LAYER_MAC)) { @@@ -1109,7 -1144,13 +1144,13 @@@
/* Skip fields known to exist. */ mask += sizeof(struct nfp_flower_meta_tci); + ext += sizeof(struct nfp_flower_meta_tci); + if (key_ls->key_layer_two) { + mask += sizeof(struct nfp_flower_ext_meta); + ext += sizeof(struct nfp_flower_ext_meta); + } mask += sizeof(struct nfp_flower_in_port); + ext += sizeof(struct nfp_flower_in_port);
/* Ensure destination MAC address is fully matched. */ mac = (struct nfp_flower_mac_mpls *)mask; @@@ -1118,6 -1159,8 +1159,8 @@@ return -EOPNOTSUPP; }
+ mask += sizeof(struct nfp_flower_mac_mpls); + ext += sizeof(struct nfp_flower_mac_mpls); if (key_layer & NFP_FLOWER_LAYER_IPV4 || key_layer & NFP_FLOWER_LAYER_IPV6) { /* Flags and proto fields have same offset in IPv4 and IPv6. */ @@@ -1130,7 -1173,6 +1173,6 @@@ sizeof(struct nfp_flower_ipv4) : sizeof(struct nfp_flower_ipv6);
- mask += sizeof(struct nfp_flower_mac_mpls);
/* Ensure proto and flags are the only IP layer fields. */ for (i = 0; i < size; i++) @@@ -1138,6 -1180,25 +1180,25 @@@ NL_SET_ERR_MSG_MOD(extack, "unsupported pre-tunnel rule: only flags and proto can be matched in ip header"); return -EOPNOTSUPP; } + ext += size; + mask += size; + } + + if ((priv->flower_ext_feats & NFP_FL_FEATS_VLAN_QINQ)) { + if (key_ls->key_layer_two & NFP_FLOWER_LAYER2_QINQ) { + struct nfp_flower_vlan *vlan_tags; + u16 vlan_tci; + + vlan_tags = (struct nfp_flower_vlan *)ext; + + vlan_tci = be16_to_cpu(vlan_tags->outer_tci); + + vlan_tci &= ~NFP_FLOWER_MASK_VLAN_PRESENT; + flow->pre_tun_rule.vlan_tci = cpu_to_be16(vlan_tci); + vlan = true; + } else { + flow->pre_tun_rule.vlan_tci = cpu_to_be16(0xffff); + } }
/* Action must be a single egress or pop_vlan and egress. */ @@@ -1220,7 -1281,7 +1281,7 @@@ nfp_flower_add_offload(struct nfp_app * goto err_destroy_flow;
if (flow_pay->pre_tun_rule.dev) { - err = nfp_flower_validate_pre_tun_rule(app, flow_pay, extack); + err = nfp_flower_validate_pre_tun_rule(app, flow_pay, key_layer, extack); if (err) goto err_destroy_flow; } diff --combined drivers/net/ethernet/pensando/ionic/ionic_txrx.c index def65fee27b5,c3291decd4c3..fa38c41f38d8 --- a/drivers/net/ethernet/pensando/ionic/ionic_txrx.c +++ b/drivers/net/ethernet/pensando/ionic/ionic_txrx.c @@@ -22,7 -22,7 +22,7 @@@ static bool ionic_tx_service(struct ion static inline void ionic_txq_post(struct ionic_queue *q, bool ring_dbell, ionic_desc_cb cb_func, void *cb_arg) { - DEBUG_STATS_TXQ_POST(q_to_qcq(q), q->head->desc, ring_dbell); + DEBUG_STATS_TXQ_POST(q, ring_dbell);
ionic_q_post(q, ring_dbell, cb_func, cb_arg); } @@@ -32,7 -32,7 +32,7 @@@ static inline void ionic_rxq_post(struc { ionic_q_post(q, ring_dbell, cb_func, cb_arg);
- DEBUG_STATS_RX_BUFF_CNT(q_to_qcq(q)); + DEBUG_STATS_RX_BUFF_CNT(q); }
static inline struct netdev_queue *q_to_ndq(struct ionic_queue *q) @@@ -49,7 -49,7 +49,7 @@@ static struct sk_buff *ionic_rx_skb_all struct sk_buff *skb;
netdev = lif->netdev; - stats = q_to_rx_stats(q); + stats = &q->lif->rxqstats[q->index];
if (frags) skb = napi_get_frags(&q_to_qcq(q)->napi); @@@ -235,14 -235,14 +235,14 @@@ static bool ionic_rx_service(struct ion return false;
/* check for empty queue */ - if (q->tail->index == q->head->index) + if (q->tail_idx == q->head_idx) return false;
- desc_info = q->tail; + desc_info = &q->info[q->tail_idx]; if (desc_info->index != le16_to_cpu(comp->comp_index)) return false;
- q->tail = desc_info->next; + q->tail_idx = (q->tail_idx + 1) & (q->num_descs - 1);
/* clean the related q entry, only one per qc completion */ ionic_rx_clean(q, desc_info, cq_info, desc_info->cb_arg); @@@ -338,7 -338,7 +338,7 @@@ void ionic_rx_fill(struct ionic_queue *
for (i = ionic_q_space_avail(q); i; i--) { remain_len = len; - desc_info = q->head; + desc_info = &q->info[q->head_idx]; desc = desc_info->desc; sg_desc = desc_info->sg_desc; page_info = &desc_info->pages[0]; @@@ -387,7 -387,7 +387,7 @@@ }
ionic_dbell_ring(q->lif->kern_dbpage, q->hw_type, - q->dbval | q->head->index); + q->dbval | q->head_idx); }
static void ionic_rx_fill_cb(void *arg) @@@ -397,25 -397,29 +397,29 @@@
void ionic_rx_empty(struct ionic_queue *q) { - struct ionic_desc_info *cur; + struct ionic_desc_info *desc_info; struct ionic_rxq_desc *desc; unsigned int i; + u16 idx;
- for (cur = q->tail; cur != q->head; cur = cur->next) { - desc = cur->desc; + idx = q->tail_idx; + while (idx != q->head_idx) { + desc_info = &q->info[idx]; + desc = desc_info->desc; desc->addr = 0; desc->len = 0;
- for (i = 0; i < cur->npages; i++) { - if (likely(cur->pages[i].page)) { - ionic_rx_page_free(q, cur->pages[i].page, - cur->pages[i].dma_addr); - cur->pages[i].page = NULL; - cur->pages[i].dma_addr = 0; + for (i = 0; i < desc_info->npages; i++) { + if (likely(desc_info->pages[i].page)) { + ionic_rx_page_free(q, desc_info->pages[i].page, + desc_info->pages[i].dma_addr); + desc_info->pages[i].page = NULL; + desc_info->pages[i].dma_addr = 0; } }
- cur->cb_arg = NULL; + desc_info->cb_arg = NULL; + idx = (idx + 1) & (q->num_descs - 1); } }
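/*
 * The ionic hunks above drop the linked desc_info->next chaining in favour
 * of plain head/tail indices into the flat q->info[] array. A self-contained
 * sketch of that indexing scheme, assuming num_descs is a power of two;
 * struct ring and the helper names are illustrative, not the driver's own:
 */
#include <stdbool.h>
#include <stdint.h>

struct ring {
        uint16_t head_idx;      /* producer: next slot to fill */
        uint16_t tail_idx;      /* consumer: next slot to complete */
        uint16_t num_descs;     /* must be a power of two */
};

static inline bool ring_empty(const struct ring *q)
{
        return q->head_idx == q->tail_idx;
}

static inline uint16_t ring_next(const struct ring *q, uint16_t idx)
{
        /* mask-based wrap replaces both a modulo and the old ->next walk */
        return (idx + 1) & (q->num_descs - 1);
}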
@@@ -496,11 -500,13 +500,11 @@@ int ionic_txrx_napi(struct napi_struct struct ionic_cq *txcq; u32 rx_work_done = 0; u32 tx_work_done = 0; - u32 work_done = 0; u32 flags = 0; - bool unmask;
lif = rxcq->bound_q->lif; idev = &lif->ionic->idev; - txcq = &lif->txqcqs[qi].qcq->cq; + txcq = &lif->txqcqs[qi]->cq;
tx_work_done = ionic_cq_service(txcq, lif->tx_budget, ionic_tx_service, NULL, NULL); @@@ -510,12 -516,17 +514,12 @@@ if (rx_work_done) ionic_rx_fill_cb(rxcq->bound_q);
- unmask = (rx_work_done < budget) && (tx_work_done < lif->tx_budget); - - if (unmask && napi_complete_done(napi, rx_work_done)) { + if (rx_work_done < budget && napi_complete_done(napi, rx_work_done)) { flags |= IONIC_INTR_CRED_UNMASK; DEBUG_STATS_INTR_REARM(rxcq->bound_intr); - work_done = rx_work_done; - } else { - work_done = budget; }
- if (work_done || flags) { + if (rx_work_done || flags) { flags |= IONIC_INTR_CRED_RESET_COALESCE; ionic_intr_credits(idev->intr_ctrl, rxcq->bound_intr->index, tx_work_done + rx_work_done, flags); @@@ -524,7 -535,7 +528,7 @@@ DEBUG_STATS_NAPI_POLL(qcq, rx_work_done); DEBUG_STATS_NAPI_POLL(qcq, tx_work_done);
- return work_done; + return rx_work_done; }
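/*
 * The rewritten ionic_txrx_napi() above converges on the canonical NAPI
 * completion idiom, as does the r8169 poll hunk later in this merge. A
 * sketch; do_rx() and reenable_irqs() stand in for driver-specific helpers
 * and are hypothetical:
 */
#include <linux/netdevice.h>

static int do_rx(struct napi_struct *napi, int budget);         /* hypothetical */
static void reenable_irqs(struct napi_struct *napi);            /* hypothetical */

static int example_poll(struct napi_struct *napi, int budget)
{
        int work_done = do_rx(napi, budget);

        /* Unmask the device interrupt only when the budget was not
         * exhausted and NAPI agrees we are done; napi_complete_done()
         * returns false when polling must continue.
         */
        if (work_done < budget && napi_complete_done(napi, work_done))
                reenable_irqs(napi);

        return work_done;
}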
static dma_addr_t ionic_tx_map_single(struct ionic_queue *q, @@@ -623,9 -634,9 +627,9 @@@ static bool ionic_tx_service(struct ion * several q entries completed for each cq completion */ do { - desc_info = q->tail; - q->tail = desc_info->next; - ionic_tx_clean(q, desc_info, cq->tail, desc_info->cb_arg); + desc_info = &q->info[q->tail_idx]; + q->tail_idx = (q->tail_idx + 1) & (q->num_descs - 1); + ionic_tx_clean(q, desc_info, cq_info, desc_info->cb_arg); desc_info->cb = NULL; desc_info->cb_arg = NULL; } while (desc_info->index != le16_to_cpu(comp->comp_index)); @@@ -651,9 -662,9 +655,9 @@@ void ionic_tx_empty(struct ionic_queue int done = 0;
/* walk the not completed tx entries, if any */ - while (q->head != q->tail) { - desc_info = q->tail; - q->tail = desc_info->next; + while (q->head_idx != q->tail_idx) { + desc_info = &q->info[q->tail_idx]; + q->tail_idx = (q->tail_idx + 1) & (q->num_descs - 1); ionic_tx_clean(q, desc_info, NULL, desc_info->cb_arg); desc_info->cb = NULL; desc_info->cb_arg = NULL; @@@ -741,8 -752,8 +745,8 @@@ static void ionic_tx_tso_post(struct io static struct ionic_txq_desc *ionic_tx_tso_next(struct ionic_queue *q, struct ionic_txq_sg_elem **elem) { - struct ionic_txq_sg_desc *sg_desc = q->head->sg_desc; - struct ionic_txq_desc *desc = q->head->desc; + struct ionic_txq_sg_desc *sg_desc = q->info[q->head_idx].txq_sg_desc; + struct ionic_txq_desc *desc = q->info[q->head_idx].txq_desc;
*elem = sg_desc->elems; return desc; @@@ -751,13 -762,13 +755,13 @@@ static int ionic_tx_tso(struct ionic_queue *q, struct sk_buff *skb) { struct ionic_tx_stats *stats = q_to_tx_stats(q); - struct ionic_desc_info *abort = q->head; + struct ionic_desc_info *rewind_desc_info; struct device *dev = q->lif->ionic->dev; - struct ionic_desc_info *rewind = abort; struct ionic_txq_sg_elem *elem; struct ionic_txq_desc *desc; unsigned int frag_left = 0; unsigned int offset = 0; + u16 abort = q->head_idx; unsigned int len_left; dma_addr_t desc_addr; unsigned int hdrlen; @@@ -765,6 -776,7 +769,7 @@@ unsigned int seglen; u64 total_bytes = 0; u64 total_pkts = 0; + u16 rewind = abort; unsigned int left; unsigned int len; unsigned int mss; @@@ -909,19 -921,20 +914,20 @@@ return 0;
err_out_abort: - while (rewind->desc != q->head->desc) { - ionic_tx_clean(q, rewind, NULL, NULL); - rewind = rewind->next; + while (rewind != q->head_idx) { + rewind_desc_info = &q->info[rewind]; + ionic_tx_clean(q, rewind_desc_info, NULL, NULL); + rewind = (rewind + 1) & (q->num_descs - 1); } - q->head = abort; + q->head_idx = abort;
return -ENOMEM; }
static int ionic_tx_calc_csum(struct ionic_queue *q, struct sk_buff *skb) { + struct ionic_txq_desc *desc = q->info[q->head_idx].txq_desc; struct ionic_tx_stats *stats = q_to_tx_stats(q); - struct ionic_txq_desc *desc = q->head->desc; struct device *dev = q->lif->ionic->dev; dma_addr_t dma_addr; bool has_vlan; @@@ -960,8 -973,8 +966,8 @@@
static int ionic_tx_calc_no_csum(struct ionic_queue *q, struct sk_buff *skb) { + struct ionic_txq_desc *desc = q->info[q->head_idx].txq_desc; struct ionic_tx_stats *stats = q_to_tx_stats(q); - struct ionic_txq_desc *desc = q->head->desc; struct device *dev = q->lif->ionic->dev; dma_addr_t dma_addr; bool has_vlan; @@@ -995,7 -1008,7 +1001,7 @@@
static int ionic_tx_skb_frags(struct ionic_queue *q, struct sk_buff *skb) { - struct ionic_txq_sg_desc *sg_desc = q->head->sg_desc; + struct ionic_txq_sg_desc *sg_desc = q->info[q->head_idx].txq_sg_desc; unsigned int len_left = skb->len - skb_headlen(skb); struct ionic_txq_sg_elem *elem = sg_desc->elems; struct ionic_tx_stats *stats = q_to_tx_stats(q); @@@ -1104,9 -1117,9 +1110,9 @@@ netdev_tx_t ionic_start_xmit(struct sk_ return NETDEV_TX_OK; }
- if (unlikely(!lif_to_txqcq(lif, queue_index))) + if (unlikely(queue_index >= lif->nxqs)) queue_index = 0; - q = lif_to_txq(lif, queue_index); + q = &lif->txqcqs[queue_index]->q;
ndescs = ionic_tx_descs_needed(q, skb); if (ndescs < 0) diff --combined drivers/net/ethernet/qlogic/qed/qed_dev.c index b8f076e4e6b8,00f2d7f13de6..f7f08e6a3acf --- a/drivers/net/ethernet/qlogic/qed/qed_dev.c +++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c @@@ -3109,14 -3109,14 +3109,14 @@@ int qed_hw_init(struct qed_dev *cdev, s p_hwfn->hw_info.hw_mode); if (rc) break; - /* Fall through */ + fallthrough; case FW_MSG_CODE_DRV_LOAD_PORT: rc = qed_hw_init_port(p_hwfn, p_hwfn->p_main_ptt, p_hwfn->hw_info.hw_mode); if (rc) break;
- /* Fall through */ + fallthrough; case FW_MSG_CODE_DRV_LOAD_FUNCTION: rc = qed_hw_init_pf(p_hwfn, p_hwfn->p_main_ptt, p_params->p_tunn, @@@ -3973,6 -3973,7 +3973,7 @@@ static int qed_hw_get_nvm_info(struct q struct qed_mcp_link_speed_params *ext_speed; struct qed_mcp_link_capabilities *p_caps; struct qed_mcp_link_params *link; + int i;
/* Read global nvm_cfg address */ nvm_cfg_addr = qed_rd(p_hwfn, p_ptt, MISC_REG_GEN_PURP_CR0); @@@ -4290,6 -4291,14 +4291,14 @@@ __set_bit(QED_DEV_CAP_ROCE, &p_hwfn->hw_info.device_capabilities);
+ /* Read device serial number information from shmem */ + addr = MCP_REG_SCRATCH + nvm_cfg1_offset + + offsetof(struct nvm_cfg1, glob) + + offsetof(struct nvm_cfg1_glob, serial_number); + + for (i = 0; i < 4; i++) + p_hwfn->hw_info.part_num[i] = qed_rd(p_hwfn, p_ptt, addr + i * 4); + return qed_mcp_fill_shmem_func_info(p_hwfn, p_ptt); }
diff --combined drivers/net/ethernet/qlogic/qed/qed_main.c index f39f629242a1,db5d003770ba..5b149ceff6b6 --- a/drivers/net/ethernet/qlogic/qed/qed_main.c +++ b/drivers/net/ethernet/qlogic/qed/qed_main.c @@@ -39,6 -39,7 +39,7 @@@ #include "qed_hw.h" #include "qed_selftest.h" #include "qed_debug.h" + #include "qed_devlink.h"
#define QED_ROCE_QPS (8192) #define QED_ROCE_DPIS (8) @@@ -478,6 -479,7 +479,7 @@@ int qed_fill_dev_info(struct qed_dev *c }
dev_info->mtu = hw_info->mtu; + cdev->common_dev_info = *dev_info;
return 0; } @@@ -510,107 -512,6 +512,6 @@@ static int qed_set_power_state(struct q return 0; }
- struct qed_devlink { - struct qed_dev *cdev; - }; - - enum qed_devlink_param_id { - QED_DEVLINK_PARAM_ID_BASE = DEVLINK_PARAM_GENERIC_ID_MAX, - QED_DEVLINK_PARAM_ID_IWARP_CMT, - }; - - static int qed_dl_param_get(struct devlink *dl, u32 id, - struct devlink_param_gset_ctx *ctx) - { - struct qed_devlink *qed_dl; - struct qed_dev *cdev; - - qed_dl = devlink_priv(dl); - cdev = qed_dl->cdev; - ctx->val.vbool = cdev->iwarp_cmt; - - return 0; - } - - static int qed_dl_param_set(struct devlink *dl, u32 id, - struct devlink_param_gset_ctx *ctx) - { - struct qed_devlink *qed_dl; - struct qed_dev *cdev; - - qed_dl = devlink_priv(dl); - cdev = qed_dl->cdev; - cdev->iwarp_cmt = ctx->val.vbool; - - return 0; - } - - static const struct devlink_param qed_devlink_params[] = { - DEVLINK_PARAM_DRIVER(QED_DEVLINK_PARAM_ID_IWARP_CMT, - "iwarp_cmt", DEVLINK_PARAM_TYPE_BOOL, - BIT(DEVLINK_PARAM_CMODE_RUNTIME), - qed_dl_param_get, qed_dl_param_set, NULL), - }; - - static const struct devlink_ops qed_dl_ops; - - static int qed_devlink_register(struct qed_dev *cdev) - { - union devlink_param_value value; - struct qed_devlink *qed_dl; - struct devlink *dl; - int rc; - - dl = devlink_alloc(&qed_dl_ops, sizeof(*qed_dl)); - if (!dl) - return -ENOMEM; - - qed_dl = devlink_priv(dl); - - cdev->dl = dl; - qed_dl->cdev = cdev; - - rc = devlink_register(dl, &cdev->pdev->dev); - if (rc) - goto err_free; - - rc = devlink_params_register(dl, qed_devlink_params, - ARRAY_SIZE(qed_devlink_params)); - if (rc) - goto err_unregister; - - value.vbool = false; - devlink_param_driverinit_value_set(dl, - QED_DEVLINK_PARAM_ID_IWARP_CMT, - value); - - devlink_params_publish(dl); - cdev->iwarp_cmt = false; - - return 0; - - err_unregister: - devlink_unregister(dl); - - err_free: - cdev->dl = NULL; - devlink_free(dl); - - return rc; - } - - static void qed_devlink_unregister(struct qed_dev *cdev) - { - if (!cdev->dl) - return; - - devlink_params_unregister(cdev->dl, qed_devlink_params, - ARRAY_SIZE(qed_devlink_params)); - - devlink_unregister(cdev->dl); - devlink_free(cdev->dl); - } - /* probing */ static struct qed_dev *qed_probe(struct pci_dev *pdev, struct qed_probe_params *params) @@@ -639,12 -540,6 +540,6 @@@ } DP_INFO(cdev, "PCI init completed successfully\n");
- rc = qed_devlink_register(cdev); - if (rc) { - DP_INFO(cdev, "Failed to register devlink.\n"); - goto err2; - } - rc = qed_hw_prepare(cdev, QED_PCI_DEFAULT); if (rc) { DP_ERR(cdev, "hw prepare failed\n"); @@@ -674,8 -569,6 +569,6 @@@ static void qed_remove(struct qed_dev *
qed_set_power_state(cdev, PCI_D3hot);
- qed_devlink_unregister(cdev); - qed_free_cdev(cdev); }
@@@ -761,7 -654,7 +654,7 @@@ static int qed_set_int_mode(struct qed_ kfree(int_params->msix_table); if (force_mode) goto out; - /* Fallthrough */ + fallthrough;
case QED_INT_MODE_MSI: if (cdev->num_hwfns == 1) { @@@ -775,7 -668,7 +668,7 @@@ if (force_mode) goto out; } - /* Fallthrough */ + fallthrough;
case QED_INT_MODE_INTA: int_params->out.int_mode = QED_INT_MODE_INTA; @@@ -2924,7 -2817,7 +2817,7 @@@ static int qed_set_led(struct qed_dev * return status; }
- static int qed_recovery_process(struct qed_dev *cdev) + int qed_recovery_process(struct qed_dev *cdev) { struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev); struct qed_ptt *p_ptt; @@@ -3112,6 -3005,9 +3005,9 @@@ const struct qed_common_ops qed_common_ .get_link = &qed_get_current_link, .drain = &qed_drain, .update_msglvl = &qed_init_dp, + .devlink_register = qed_devlink_register, + .devlink_unregister = qed_devlink_unregister, + .report_fatal_error = qed_report_fatal_error, .dbg_all_data = &qed_dbg_all_data, .dbg_all_data_size = &qed_dbg_all_data_size, .chain_alloc = &qed_chain_alloc, diff --combined drivers/net/ethernet/realtek/r8169_main.c index fc9e6626db55,c427865d51a4..9e4e6a883877 --- a/drivers/net/ethernet/realtek/r8169_main.c +++ b/drivers/net/ethernet/realtek/r8169_main.c @@@ -617,7 -617,6 +617,6 @@@ struct rtl8169_private struct work_struct work; } wk;
- unsigned irq_enabled:1; unsigned supports_gmii:1; unsigned aspm_manageable:1; dma_addr_t counters_phys_addr; @@@ -1280,12 -1279,10 +1279,10 @@@ static void rtl_irq_disable(struct rtl8 RTL_W32(tp, IntrMask_8125, 0); else RTL_W16(tp, IntrMask, 0); - tp->irq_enabled = 0; }
static void rtl_irq_enable(struct rtl8169_private *tp) { - tp->irq_enabled = 1; if (rtl_is_8125(tp)) RTL_W32(tp, IntrMask_8125, tp->irq_mask); else @@@ -4541,8 -4538,7 +4538,7 @@@ static irqreturn_t rtl8169_interrupt(in struct rtl8169_private *tp = dev_instance; u32 status = rtl_get_events(tp);
- if (!tp->irq_enabled || (status & 0xffff) == 0xffff || - !(status & tp->irq_mask)) + if ((status & 0xffff) == 0xffff || !(status & tp->irq_mask)) return IRQ_NONE;
if (unlikely(status & SYSErr)) { @@@ -4596,10 -4592,8 +4592,8 @@@ static int rtl8169_poll(struct napi_str
rtl_tx(dev, tp, budget);
- if (work_done < budget) { - napi_complete_done(napi, work_done); + if (work_done < budget && napi_complete_done(napi, work_done)) rtl_irq_enable(tp); - }
return work_done; } @@@ -4994,7 -4988,7 +4988,7 @@@ static int rtl_alloc_irq(struct rtl8169 rtl_unlock_config_regs(tp); RTL_W8(tp, Config2, RTL_R8(tp, Config2) & ~MSIEnable); rtl_lock_config_regs(tp); - /* fall through */ + fallthrough; case RTL_GIGA_MAC_VER_07 ... RTL_GIGA_MAC_VER_17: flags = PCI_IRQ_LEGACY; break; @@@ -5137,7 -5131,7 +5131,7 @@@ static void rtl_hw_initialize(struct rt switch (tp->mac_version) { case RTL_GIGA_MAC_VER_49 ... RTL_GIGA_MAC_VER_52: rtl8168ep_stop_cmac(tp); - /* fall through */ + fallthrough; case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_48: rtl_hw_init_8168g(tp); break; diff --combined drivers/net/ethernet/renesas/ravb_main.c index df89d09b253e,adc8c8f3b5fc..f684296df871 --- a/drivers/net/ethernet/renesas/ravb_main.c +++ b/drivers/net/ethernet/renesas/ravb_main.c @@@ -162,7 -162,7 +162,7 @@@ static int ravb_get_mdio_data(struct md }
/* MDIO bus control struct */ - static struct mdiobb_ops bb_ops = { + static const struct mdiobb_ops bb_ops = { .owner = THIS_MODULE, .set_mdc = ravb_set_mdc, .set_mdio_dir = ravb_set_mdio_dir, @@@ -1342,51 -1342,6 +1342,51 @@@ static inline int ravb_hook_irq(unsigne return error; }
+/* MDIO bus init function */ +static int ravb_mdio_init(struct ravb_private *priv) +{ + struct platform_device *pdev = priv->pdev; + struct device *dev = &pdev->dev; + int error; + + /* Bitbang init */ + priv->mdiobb.ops = &bb_ops; + + /* MII controller setting */ + priv->mii_bus = alloc_mdio_bitbang(&priv->mdiobb); + if (!priv->mii_bus) + return -ENOMEM; + + /* Hook up MII support for ethtool */ + priv->mii_bus->name = "ravb_mii"; + priv->mii_bus->parent = dev; + snprintf(priv->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x", + pdev->name, pdev->id); + + /* Register MDIO bus */ + error = of_mdiobus_register(priv->mii_bus, dev->of_node); + if (error) + goto out_free_bus; + + return 0; + +out_free_bus: + free_mdio_bitbang(priv->mii_bus); + return error; +} + +/* MDIO bus release function */ +static int ravb_mdio_release(struct ravb_private *priv) +{ + /* Unregister mdio bus */ + mdiobus_unregister(priv->mii_bus); + + /* Free bitbang info */ + free_mdio_bitbang(priv->mii_bus); + + return 0; +} + /* Network device open function for Ethernet AVB */ static int ravb_open(struct net_device *ndev) { @@@ -1395,13 -1350,6 +1395,13 @@@ struct device *dev = &pdev->dev; int error;
+ /* MDIO bus init */ + error = ravb_mdio_init(priv); + if (error) { + netdev_err(ndev, "failed to initialize MDIO\n"); + return error; + } + napi_enable(&priv->napi[RAVB_BE]); napi_enable(&priv->napi[RAVB_NC]);
@@@ -1479,7 -1427,6 +1479,7 @@@ out_free_irq out_napi_off: napi_disable(&priv->napi[RAVB_NC]); napi_disable(&priv->napi[RAVB_BE]); + ravb_mdio_release(priv); return error; }
@@@ -1789,8 -1736,6 +1789,8 @@@ static int ravb_close(struct net_devic ravb_ring_free(ndev, RAVB_BE); ravb_ring_free(ndev, RAVB_NC);
+ ravb_mdio_release(priv); + return 0; }
@@@ -1942,6 -1887,51 +1942,6 @@@ static const struct net_device_ops ravb .ndo_set_features = ravb_set_features, };
-/* MDIO bus init function */ -static int ravb_mdio_init(struct ravb_private *priv) -{ - struct platform_device *pdev = priv->pdev; - struct device *dev = &pdev->dev; - int error; - - /* Bitbang init */ - priv->mdiobb.ops = &bb_ops; - - /* MII controller setting */ - priv->mii_bus = alloc_mdio_bitbang(&priv->mdiobb); - if (!priv->mii_bus) - return -ENOMEM; - - /* Hook up MII support for ethtool */ - priv->mii_bus->name = "ravb_mii"; - priv->mii_bus->parent = dev; - snprintf(priv->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x", - pdev->name, pdev->id); - - /* Register MDIO bus */ - error = of_mdiobus_register(priv->mii_bus, dev->of_node); - if (error) - goto out_free_bus; - - return 0; - -out_free_bus: - free_mdio_bitbang(priv->mii_bus); - return error; -} - -/* MDIO bus release function */ -static int ravb_mdio_release(struct ravb_private *priv) -{ - /* Unregister mdio bus */ - mdiobus_unregister(priv->mii_bus); - - /* Free bitbang info */ - free_mdio_bitbang(priv->mii_bus); - - return 0; -} - static const struct of_device_id ravb_match_table[] = { { .compatible = "renesas,etheravb-r8a7790", .data = (void *)RCAR_GEN2 }, { .compatible = "renesas,etheravb-r8a7794", .data = (void *)RCAR_GEN2 }, @@@ -2184,6 -2174,13 +2184,6 @@@ static int ravb_probe(struct platform_d eth_hw_addr_random(ndev); }
- /* MDIO bus init */ - error = ravb_mdio_init(priv); - if (error) { - dev_err(&pdev->dev, "failed to initialize MDIO\n"); - goto out_dma_free; - } - netif_napi_add(ndev, &priv->napi[RAVB_BE], ravb_poll, 64); netif_napi_add(ndev, &priv->napi[RAVB_NC], ravb_poll, 64);
@@@ -2205,6 -2202,8 +2205,6 @@@ out_napi_del: netif_napi_del(&priv->napi[RAVB_NC]); netif_napi_del(&priv->napi[RAVB_BE]); - ravb_mdio_release(priv); -out_dma_free: dma_free_coherent(ndev->dev.parent, priv->desc_bat_size, priv->desc_bat, priv->desc_bat_dma);
@@@ -2236,6 -2235,7 +2236,6 @@@ static int ravb_remove(struct platform_ unregister_netdev(ndev); netif_napi_del(&priv->napi[RAVB_NC]); netif_napi_del(&priv->napi[RAVB_BE]); - ravb_mdio_release(priv); pm_runtime_disable(&pdev->dev); free_netdev(ndev); platform_set_drvdata(pdev, NULL); diff --combined drivers/net/ethernet/sfc/farch.c index 4002f9a3ae90,0d9795fb9356..a48a931ad0e8 --- a/drivers/net/ethernet/sfc/farch.c +++ b/drivers/net/ethernet/sfc/farch.c @@@ -863,13 -863,8 +863,8 @@@ static u16 efx_farch_handle_rx_not_ok(s bool rx_ev_tcp_udp_chksum_err, rx_ev_eth_crc_err; bool rx_ev_frm_trunc, rx_ev_tobe_disc; bool rx_ev_other_err, rx_ev_pause_frm; - bool rx_ev_hdr_type, rx_ev_mcast_pkt; - unsigned rx_ev_pkt_type;
- rx_ev_hdr_type = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_HDR_TYPE); - rx_ev_mcast_pkt = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_MCAST_PKT); rx_ev_tobe_disc = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_TOBE_DISC); - rx_ev_pkt_type = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_PKT_TYPE); rx_ev_buf_owner_id_err = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_BUF_OWNER_ID_ERR); rx_ev_ip_hdr_chksum_err = EFX_QWORD_FIELD(*event, @@@ -918,6 -913,8 +913,8 @@@ rx_ev_tobe_disc ? " [TOBE_DISC]" : "", rx_ev_pause_frm ? " [PAUSE]" : ""); } + #else + (void) rx_ev_other_err; #endif
if (efx->net_dev->features & NETIF_F_RXALL) @@@ -1038,10 -1035,10 +1035,10 @@@ efx_farch_handle_rx_event(struct efx_ch switch (rx_ev_hdr_type) { case FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_TCP: flags |= EFX_RX_PKT_TCP; - /* fall through */ + fallthrough; case FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_UDP: flags |= EFX_RX_PKT_CSUMMED; - /* fall through */ + fallthrough; case FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_OTHER: case FSE_AZ_RX_EV_HDR_TYPE_OTHER: break; @@@ -1316,7 -1313,7 +1313,7 @@@ int efx_farch_ev_process(struct efx_cha if (efx->type->handle_global_event && efx->type->handle_global_event(channel, &event)) break; - /* else fall through */ + fallthrough; default: netif_err(channel->efx, hw, channel->efx->net_dev, "channel %d unknown event type %d (data " @@@ -2043,7 -2040,7 +2040,7 @@@ efx_farch_filter_from_gen_spec(struct e EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_LOC_PORT | EFX_FILTER_MATCH_REM_HOST | EFX_FILTER_MATCH_REM_PORT): is_full = true; - /* fall through */ + fallthrough; case (EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_IP_PROTO | EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_LOC_PORT): { __be32 rhost, host1, host2; @@@ -2094,7 -2091,7 +2091,7 @@@
case EFX_FILTER_MATCH_LOC_MAC | EFX_FILTER_MATCH_OUTER_VID: is_full = true; - /* fall through */ + fallthrough; case EFX_FILTER_MATCH_LOC_MAC: spec->type = (is_full ? EFX_FARCH_FILTER_MAC_FULL : EFX_FARCH_FILTER_MAC_WILD); @@@ -2141,7 -2138,7 +2138,7 @@@ efx_farch_filter_to_gen_spec(struct efx case EFX_FARCH_FILTER_TCP_FULL: case EFX_FARCH_FILTER_UDP_FULL: is_full = true; - /* fall through */ + fallthrough; case EFX_FARCH_FILTER_TCP_WILD: case EFX_FARCH_FILTER_UDP_WILD: { __be32 host1, host2; @@@ -2185,7 -2182,7 +2182,7 @@@
case EFX_FARCH_FILTER_MAC_FULL: is_full = true; - /* fall through */ + fallthrough; case EFX_FARCH_FILTER_MAC_WILD: gen_spec->match_flags = EFX_FILTER_MATCH_LOC_MAC; if (is_full) @@@ -2592,7 -2589,6 +2589,6 @@@ int efx_farch_filter_remove_safe(struc enum efx_farch_filter_table_id table_id; struct efx_farch_filter_table *table; unsigned int filter_idx; - struct efx_farch_filter_spec *spec; int rc;
table_id = efx_farch_filter_id_table_id(filter_id); @@@ -2604,7 -2600,6 +2600,6 @@@ if (filter_idx >= table->size) return -ENOENT; down_write(&state->lock); - spec = &table->spec[filter_idx];
rc = efx_farch_filter_remove(efx, table, filter_idx, priority); up_write(&state->lock); diff --combined drivers/net/ethernet/sun/sungem.c index 8deb943ca5de,b7093975b14c..58f142ee78a3 --- a/drivers/net/ethernet/sun/sungem.c +++ b/drivers/net/ethernet/sun/sungem.c @@@ -2712,7 -2712,7 +2712,7 @@@ static int gem_ioctl(struct net_device switch (cmd) { case SIOCGMIIPHY: /* Get address of MII PHY in use. */ data->phy_id = gp->mii_phy_addr; - /* Fallthrough... */ + fallthrough;
case SIOCGMIIREG: /* Read MII PHY register. */ data->val_out = __sungem_phy_read(gp, data->phy_id & 0x1f, @@@ -2965,9 -2965,8 +2965,8 @@@ static int gem_init_one(struct pci_dev /* It is guaranteed that the returned buffer will be at least * PAGE_SIZE aligned. */ - gp->init_block = (struct gem_init_block *) - dma_alloc_coherent(&pdev->dev, sizeof(struct gem_init_block), - &gp->gblock_dvma, GFP_KERNEL); + gp->init_block = dma_alloc_coherent(&pdev->dev, sizeof(struct gem_init_block), + &gp->gblock_dvma, GFP_KERNEL); if (!gp->init_block) { pr_err("Cannot allocate init block, aborting\n"); err = -ENOMEM; diff --combined drivers/net/gtp.c index 8e47d0112e5d,2ed1e82a8ad8..611722eafed8 --- a/drivers/net/gtp.c +++ b/drivers/net/gtp.c @@@ -928,8 -928,8 +928,8 @@@ static void ipv4_pdp_fill(struct pdp_ct } }
- static int gtp_pdp_add(struct gtp_dev *gtp, struct sock *sk, - struct genl_info *info) + static struct pdp_ctx *gtp_pdp_add(struct gtp_dev *gtp, struct sock *sk, + struct genl_info *info) { struct pdp_ctx *pctx, *pctx_tid = NULL; struct net_device *dev = gtp->dev; @@@ -956,12 -956,12 +956,12 @@@
if (found) { if (info->nlhdr->nlmsg_flags & NLM_F_EXCL) - return -EEXIST; + return ERR_PTR(-EEXIST); if (info->nlhdr->nlmsg_flags & NLM_F_REPLACE) - return -EOPNOTSUPP; + return ERR_PTR(-EOPNOTSUPP);
if (pctx && pctx_tid) - return -EEXIST; + return ERR_PTR(-EEXIST); if (!pctx) pctx = pctx_tid;
@@@ -974,13 -974,13 +974,13 @@@ netdev_dbg(dev, "GTPv1-U: update tunnel id = %x/%x (pdp %p)\n", pctx->u.v1.i_tei, pctx->u.v1.o_tei, pctx);
- return 0; + return pctx;
}
pctx = kmalloc(sizeof(*pctx), GFP_ATOMIC); if (pctx == NULL) - return -ENOMEM; + return ERR_PTR(-ENOMEM);
sock_hold(sk); pctx->sk = sk; @@@ -1018,7 -1018,7 +1018,7 @@@ break; }
- return 0; + return pctx; }
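/*
 * gtp_pdp_add() now returns the context itself rather than an int, using
 * the kernel's ERR_PTR convention so a single pointer return carries either
 * an object or a negative errno; gtp_genl_new_pdp() below unpacks it with
 * IS_ERR()/PTR_ERR(). A minimal sketch of the convention with a
 * hypothetical object type:
 */
#include <linux/err.h>
#include <linux/slab.h>

struct thing { int id; };

static struct thing *thing_create(int id, gfp_t gfp)
{
        struct thing *t;

        if (id < 0)
                return ERR_PTR(-EINVAL);        /* errno encoded in pointer */

        t = kzalloc(sizeof(*t), gfp);
        if (!t)
                return ERR_PTR(-ENOMEM);

        t->id = id;
        return t;
}

static int thing_user(int id)
{
        struct thing *t = thing_create(id, GFP_KERNEL);

        if (IS_ERR(t))
                return PTR_ERR(t);              /* recover the negative errno */
        kfree(t);
        return 0;
}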
static void pdp_context_free(struct rcu_head *head) @@@ -1036,9 -1036,12 +1036,12 @@@ static void pdp_context_delete(struct p call_rcu(&pctx->rcu_head, pdp_context_free); }
+ static int gtp_tunnel_notify(struct pdp_ctx *pctx, u8 cmd, gfp_t allocation); + static int gtp_genl_new_pdp(struct sk_buff *skb, struct genl_info *info) { unsigned int version; + struct pdp_ctx *pctx; struct gtp_dev *gtp; struct sock *sk; int err; @@@ -1068,7 -1071,6 +1071,6 @@@ }
rtnl_lock(); - rcu_read_lock();
gtp = gtp_find_dev(sock_net(skb->sk), info->attrs); if (!gtp) { @@@ -1088,10 -1090,15 +1090,15 @@@ goto out_unlock; }
- err = gtp_pdp_add(gtp, sk, info); + pctx = gtp_pdp_add(gtp, sk, info); + if (IS_ERR(pctx)) { + err = PTR_ERR(pctx); + } else { + gtp_tunnel_notify(pctx, GTP_CMD_NEWPDP, GFP_KERNEL); + err = 0; + }
out_unlock: - rcu_read_unlock(); rtnl_unlock(); return err; } @@@ -1159,6 -1166,7 +1166,7 @@@ static int gtp_genl_del_pdp(struct sk_b netdev_dbg(pctx->dev, "GTPv1-U: deleting tunnel id = %x/%x (pdp %p)\n", pctx->u.v1.i_tei, pctx->u.v1.o_tei, pctx);
+ gtp_tunnel_notify(pctx, GTP_CMD_DELPDP, GFP_ATOMIC); pdp_context_delete(pctx);
out_unlock: @@@ -1168,6 -1176,14 +1176,14 @@@
static struct genl_family gtp_genl_family;
+ enum gtp_multicast_groups { + GTP_GENL_MCGRP, + }; + + static const struct genl_multicast_group gtp_genl_mcgrps[] = { + [GTP_GENL_MCGRP] = { .name = GTP_GENL_MCGRP_NAME }, + }; + static int gtp_genl_fill_info(struct sk_buff *skb, u32 snd_portid, u32 snd_seq, int flags, u32 type, struct pdp_ctx *pctx) { @@@ -1179,7 -1195,6 +1195,7 @@@ goto nlmsg_failure;
if (nla_put_u32(skb, GTPA_VERSION, pctx->gtp_version) || + nla_put_u32(skb, GTPA_LINK, pctx->dev->ifindex) || nla_put_be32(skb, GTPA_PEER_ADDRESS, pctx->peer_addr_ip4.s_addr) || nla_put_be32(skb, GTPA_MS_ADDRESS, pctx->ms_addr_ip4.s_addr)) goto nla_put_failure; @@@ -1205,6 -1220,26 +1221,26 @@@ nla_put_failure return -EMSGSIZE; }
+ static int gtp_tunnel_notify(struct pdp_ctx *pctx, u8 cmd, gfp_t allocation) + { + struct sk_buff *msg; + int ret; + + msg = nlmsg_new(NLMSG_DEFAULT_SIZE, allocation); + if (!msg) + return -ENOMEM; + + ret = gtp_genl_fill_info(msg, 0, 0, 0, cmd, pctx); + if (ret < 0) { + nlmsg_free(msg); + return ret; + } + + ret = genlmsg_multicast_netns(&gtp_genl_family, dev_net(pctx->dev), msg, + 0, GTP_GENL_MCGRP, GFP_ATOMIC); + return ret; + } + static int gtp_genl_get_pdp(struct sk_buff *skb, struct genl_info *info) { struct pdp_ctx *pctx = NULL; @@@ -1335,6 -1370,8 +1371,8 @@@ static struct genl_family gtp_genl_fami .module = THIS_MODULE, .ops = gtp_genl_ops, .n_ops = ARRAY_SIZE(gtp_genl_ops), + .mcgrps = gtp_genl_mcgrps, + .n_mcgrps = ARRAY_SIZE(gtp_genl_mcgrps), };
static int __net_init gtp_net_init(struct net *net) diff --combined drivers/net/phy/dp83640.c index 79e67f2fe00a,fc3d747eba55..f2caccaf4408 --- a/drivers/net/phy/dp83640.c +++ b/drivers/net/phy/dp83640.c @@@ -766,13 -766,13 +766,13 @@@ static int decode_evnt(struct dp83640_p switch (words) { case 3: dp83640->edata.sec_hi = phy_txts->sec_hi; - /* fall through */ + fallthrough; case 2: dp83640->edata.sec_lo = phy_txts->sec_lo; - /* fall through */ + fallthrough; case 1: dp83640->edata.ns_hi = phy_txts->ns_hi; - /* fall through */ + fallthrough; case 0: dp83640->edata.ns_lo = phy_txts->ns_lo; } @@@ -798,51 -798,32 +798,32 @@@ return parsed; }
- #define DP83640_PACKET_HASH_OFFSET 20 #define DP83640_PACKET_HASH_LEN 10
static int match(struct sk_buff *skb, unsigned int type, struct rxts *rxts) { - unsigned int offset = 0; - u8 *msgtype, *data = skb_mac_header(skb); - __be16 *seqid; + struct ptp_header *hdr; + u8 msgtype; + u16 seqid; u16 hash;
/* check sequenceID, messageType, 12 bit hash of offset 20-29 */
- if (type & PTP_CLASS_VLAN) - offset += VLAN_HLEN; - - switch (type & PTP_CLASS_PMASK) { - case PTP_CLASS_IPV4: - offset += ETH_HLEN + IPV4_HLEN(data + offset) + UDP_HLEN; - break; - case PTP_CLASS_IPV6: - offset += ETH_HLEN + IP6_HLEN + UDP_HLEN; - break; - case PTP_CLASS_L2: - offset += ETH_HLEN; - break; - default: + hdr = ptp_parse_header(skb, type); + if (!hdr) return 0; - }
- if (skb->len + ETH_HLEN < offset + OFF_PTP_SEQUENCE_ID + sizeof(*seqid)) - return 0; + msgtype = ptp_get_msgtype(hdr, type);
- if (unlikely(type & PTP_CLASS_V1)) - msgtype = data + offset + OFF_PTP_CONTROL; - else - msgtype = data + offset; - if (rxts->msgtype != (*msgtype & 0xf)) + if (rxts->msgtype != (msgtype & 0xf)) return 0;
- seqid = (__be16 *)(data + offset + OFF_PTP_SEQUENCE_ID); - if (rxts->seqid != ntohs(*seqid)) + seqid = be16_to_cpu(hdr->sequence_id); + if (rxts->seqid != seqid) return 0;
hash = ether_crc(DP83640_PACKET_HASH_LEN, - data + offset + DP83640_PACKET_HASH_OFFSET) >> 20; + (unsigned char *)&hdr->source_port_identity) >> 20; if (rxts->hash != hash) return 0;
@@@ -982,35 -963,16 +963,16 @@@ static void decode_status_frame(struct
static int is_sync(struct sk_buff *skb, int type) { - u8 *data = skb->data, *msgtype; - unsigned int offset = 0; - - if (type & PTP_CLASS_VLAN) - offset += VLAN_HLEN; - - switch (type & PTP_CLASS_PMASK) { - case PTP_CLASS_IPV4: - offset += ETH_HLEN + IPV4_HLEN(data + offset) + UDP_HLEN; - break; - case PTP_CLASS_IPV6: - offset += ETH_HLEN + IP6_HLEN + UDP_HLEN; - break; - case PTP_CLASS_L2: - offset += ETH_HLEN; - break; - default: - return 0; - } - - if (type & PTP_CLASS_V1) - offset += OFF_PTP_CONTROL; + struct ptp_header *hdr; + u8 msgtype;
- if (skb->len < offset + 1) + hdr = ptp_parse_header(skb, type); + if (!hdr) return 0;
- msgtype = data + offset; + msgtype = ptp_get_msgtype(hdr, type);
- return (*msgtype & 0xf) == 0; + return (msgtype & 0xf) == 0; }
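The same open-coded offset arithmetic was duplicated across several PHY timestamping drivers; this series replaces it with the generic helpers used above. A minimal sketch of the resulting pattern; the wrapper name is hypothetical, the two helpers are the ones the diff itself relies on:

#include <linux/ptp_classify.h>
#include <linux/skbuff.h>

/* skb_is_ptp_sync() is a hypothetical wrapper; ptp_parse_header() does
 * the VLAN/IPv4/IPv6/L2 offset math each driver used to open-code, and
 * ptp_get_msgtype() hides the PTPv1 quirk of reading the control field
 * rather than the message-type nibble.
 */
static bool skb_is_ptp_sync(struct sk_buff *skb, unsigned int type)
{
	struct ptp_header *hdr = ptp_parse_header(skb, type);

	if (!hdr)
		return false;

	/* SYNC is message type 0 in both PTPv1 and PTPv2. */
	return (ptp_get_msgtype(hdr, type) & 0xf) == 0;
}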
static void dp83640_free_clocks(void) @@@ -1409,7 -1371,7 +1371,7 @@@ static void dp83640_txtstamp(struct mii kfree_skb(skb); return; } - /* fall through */ + fallthrough; case HWTSTAMP_TX_ON: skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; skb_info->tmo = jiffies + SKB_TIMESTAMP_TIMEOUT; diff --combined drivers/net/phy/phylink.c index 32f4e8ec96cf,d0738302a958..fe2296fdda19 --- a/drivers/net/phy/phylink.c +++ b/drivers/net/phy/phylink.c @@@ -535,8 -535,10 +535,10 @@@ static void phylink_mac_pcs_get_state(s
if (pl->pcs_ops) pl->pcs_ops->pcs_get_state(pl->pcs, state); - else + else if (pl->mac_ops->mac_pcs_get_state) pl->mac_ops->mac_pcs_get_state(pl->config, state); + else + state->link = 0; }
/* The fixed state is... fixed except for the link state, @@@ -1905,7 -1907,7 +1907,7 @@@ int phylink_mii_ioctl(struct phylink *p switch (cmd) { case SIOCGMIIPHY: mii->phy_id = pl->phydev->mdio.addr; - /* fall through */ + fallthrough;
case SIOCGMIIREG: ret = phylink_phy_read(pl, mii->phy_id, mii->reg_num); @@@ -1928,7 -1930,7 +1930,7 @@@ switch (cmd) { case SIOCGMIIPHY: mii->phy_id = 0; - /* fall through */ + fallthrough;
case SIOCGMIIREG: ret = phylink_mii_read(pl, mii->phy_id, mii->reg_num); @@@ -2318,6 -2320,49 +2320,49 @@@ static void phylink_decode_sgmii_word(s state->duplex = DUPLEX_HALF; }
+ /** + * phylink_decode_usxgmii_word() - decode the USXGMII word from a MAC PCS + * @state: a pointer to a struct phylink_link_state. + * @lpa: a 16 bit value which stores the USXGMII auto-negotiation word + * + * Helper for MAC PCS supporting the USXGMII protocol and the auto-negotiation + * code word. Decode the USXGMII code word and populate the corresponding fields + * (speed, duplex) into the phylink_link_state structure. + */ + void phylink_decode_usxgmii_word(struct phylink_link_state *state, + uint16_t lpa) + { + switch (lpa & MDIO_USXGMII_SPD_MASK) { + case MDIO_USXGMII_10: + state->speed = SPEED_10; + break; + case MDIO_USXGMII_100: + state->speed = SPEED_100; + break; + case MDIO_USXGMII_1000: + state->speed = SPEED_1000; + break; + case MDIO_USXGMII_2500: + state->speed = SPEED_2500; + break; + case MDIO_USXGMII_5000: + state->speed = SPEED_5000; + break; + case MDIO_USXGMII_10G: + state->speed = SPEED_10000; + break; + default: + state->link = false; + return; + } + + if (lpa & MDIO_USXGMII_FULL_DUPLEX) + state->duplex = DUPLEX_FULL; + else + state->duplex = DUPLEX_HALF; + } + EXPORT_SYMBOL_GPL(phylink_decode_usxgmii_word); + /** * phylink_mii_c22_pcs_get_state() - read the MAC PCS state * @pcs: a pointer to a &struct mdio_device. @@@ -2361,6 -2406,7 +2406,7 @@@ void phylink_mii_c22_pcs_get_state(stru break;
case PHY_INTERFACE_MODE_SGMII: + case PHY_INTERFACE_MODE_QSGMII: phylink_decode_sgmii_word(state, lpa); break;
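A hedged sketch of how a MAC PCS driver is expected to consume the new export; all foo_* names and the register index are stand-ins, since vendor PCSes expose the resolved USXGMII word in different places:

/* Only phylink_decode_usxgmii_word() and the MDIO_USXGMII_* encoding
 * come from this patch; everything else below is hypothetical.
 */
static u16 foo_pcs_read(struct phylink_pcs *pcs, unsigned int reg)
{
	return 0; /* stand-in for the vendor MDIO/MMIO access */
}

static void foo_pcs_get_state(struct phylink_pcs *pcs,
			      struct phylink_link_state *state)
{
	u16 lpa = foo_pcs_read(pcs, 0x8008 /* hypothetical LPA register */);

	phylink_decode_usxgmii_word(state, lpa);
}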
diff --combined drivers/net/phy/sfp.c index cf83314c8591,5250dcdf46a4..1d18c10e8f82 --- a/drivers/net/phy/sfp.c +++ b/drivers/net/phy/sfp.c @@@ -7,6 -7,7 +7,7 @@@ #include <linux/i2c.h> #include <linux/interrupt.h> #include <linux/jiffies.h> + #include <linux/mdio/mdio-i2c.h> #include <linux/module.h> #include <linux/mutex.h> #include <linux/of.h> @@@ -16,7 -17,6 +17,6 @@@ #include <linux/slab.h> #include <linux/workqueue.h>
- #include "mdio-i2c.h" #include "sfp.h" #include "swphy.h"
@@@ -552,7 -552,7 +552,7 @@@ static umode_t sfp_hwmon_is_visible(con case hwmon_temp_crit: if (!(sfp->id.ext.enhopts & SFP_ENHOPTS_ALARMWARN)) return 0; - /* fall through */ + fallthrough; case hwmon_temp_input: case hwmon_temp_label: return 0444; @@@ -571,7 -571,7 +571,7 @@@ case hwmon_in_crit: if (!(sfp->id.ext.enhopts & SFP_ENHOPTS_ALARMWARN)) return 0; - /* fall through */ + fallthrough; case hwmon_in_input: case hwmon_in_label: return 0444; @@@ -590,7 -590,7 +590,7 @@@ case hwmon_curr_crit: if (!(sfp->id.ext.enhopts & SFP_ENHOPTS_ALARMWARN)) return 0; - /* fall through */ + fallthrough; case hwmon_curr_input: case hwmon_curr_label: return 0444; @@@ -618,7 -618,7 +618,7 @@@ case hwmon_power_crit: if (!(sfp->id.ext.enhopts & SFP_ENHOPTS_ALARMWARN)) return 0; - /* fall through */ + fallthrough; case hwmon_power_input: case hwmon_power_label: return 0444; @@@ -1872,7 -1872,7 +1872,7 @@@ static void sfp_sm_module(struct sfp *s dev_warn(sfp->dev, "hwmon probe failed: %d\n", err);
sfp_sm_mod_next(sfp, SFP_MOD_WAITDEV, 0); - /* fall through */ + fallthrough; case SFP_MOD_WAITDEV: /* Ensure that the device is attached before proceeding */ if (sfp->sm_dev_state < SFP_DEV_DOWN) @@@ -1890,7 -1890,7 +1890,7 @@@ goto insert;
sfp_sm_mod_next(sfp, SFP_MOD_HPOWER, 0); - /* fall through */ + fallthrough; case SFP_MOD_HPOWER: /* Enable high power mode */ err = sfp_sm_mod_hpower(sfp, true); diff --combined drivers/net/tun.c index 7959b5c2d11f,efaef83b8897..be69d272052f --- a/drivers/net/tun.c +++ b/drivers/net/tun.c @@@ -219,24 -219,6 +219,6 @@@ struct veth __be16 h_vlan_TCI; };
- bool tun_is_xdp_frame(void *ptr) - { - return (unsigned long)ptr & TUN_XDP_FLAG; - } - EXPORT_SYMBOL(tun_is_xdp_frame); - - void *tun_xdp_to_ptr(void *ptr) - { - return (void *)((unsigned long)ptr | TUN_XDP_FLAG); - } - EXPORT_SYMBOL(tun_xdp_to_ptr); - - void *tun_ptr_to_xdp(void *ptr) - { - return (void *)((unsigned long)ptr & ~TUN_XDP_FLAG); - } - EXPORT_SYMBOL(tun_ptr_to_xdp); - static int tun_napi_receive(struct napi_struct *napi, int budget) { struct tun_file *tfile = container_of(napi, struct tun_file, napi); @@@ -1590,10 -1572,10 +1572,10 @@@ static int tun_xdp_act(struct tun_struc break; default: bpf_warn_invalid_xdp_action(act); - /* fall through */ + fallthrough; case XDP_ABORTED: trace_xdp_exception(tun->dev, xdp_prog, act); - /* fall through */ + fallthrough; case XDP_DROP: this_cpu_inc(tun->pcpu_stats->rx_dropped); break; @@@ -2417,7 -2399,7 +2399,7 @@@ static int tun_xdp_one(struct tun_struc switch (err) { case XDP_REDIRECT: *flush = true; - /* fall through */ + fallthrough; case XDP_TX: return 0; case XDP_PASS: diff --combined drivers/net/usb/Kconfig index c7bcfca7d70b,0863f01937b3..b46993d5f997 --- a/drivers/net/usb/Kconfig +++ b/drivers/net/usb/Kconfig @@@ -252,7 -252,6 +252,7 @@@ config USB_NET_CDC_EE config USB_NET_CDC_NCM tristate "CDC NCM support" depends on USB_USBNET + select USB_NET_CDCETHER default y help This driver provides support for CDC NCM (Network Control Model @@@ -346,6 -345,8 +346,8 @@@ config USB_NET_SMSC75X config USB_NET_SMSC95XX tristate "SMSC LAN95XX based USB 2.0 10/100 ethernet devices" depends on USB_USBNET + select PHYLIB + select SMSC_PHY select BITREVERSE select CRC16 select CRC32 diff --combined drivers/net/veth.c index a475f48d43c4,b80cbffeb88e..7de8f0ea3f6b --- a/drivers/net/veth.c +++ b/drivers/net/veth.c @@@ -234,14 -234,14 +234,14 @@@ static bool veth_is_xdp_frame(void *ptr return (unsigned long)ptr & VETH_XDP_FLAG; }
- static void *veth_ptr_to_xdp(void *ptr) + static struct xdp_frame *veth_ptr_to_xdp(void *ptr) { return (void *)((unsigned long)ptr & ~VETH_XDP_FLAG); }
- static void *veth_xdp_to_ptr(void *ptr) + static void *veth_xdp_to_ptr(struct xdp_frame *xdp) { - return (void *)((unsigned long)ptr | VETH_XDP_FLAG); + return (void *)((unsigned long)xdp | VETH_XDP_FLAG); }
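The hunk above only tightens the types; the underlying trick is that sk_buff and xdp_frame pointers share one ptr_ring, with the low bit of the (word-aligned) pointer borrowed as a type tag. A standalone sketch of the scheme, with names local to the example:

#include <stdbool.h>
#include <stdint.h>

#define XDP_FRAME_TAG 1UL	/* low bit is free: allocations are word-aligned */

static inline bool ptr_is_xdp_frame(void *ptr)
{
	return (uintptr_t)ptr & XDP_FRAME_TAG;
}

static inline void *xdp_frame_to_ptr(void *frame)	/* tag before enqueue */
{
	return (void *)((uintptr_t)frame | XDP_FRAME_TAG);
}

static inline void *ptr_to_xdp_frame(void *ptr)		/* untag after dequeue */
{
	return (void *)((uintptr_t)ptr & ~XDP_FRAME_TAG);
}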
static void veth_ptr_free(void *ptr) @@@ -610,10 -610,10 +610,10 @@@ static struct sk_buff *veth_xdp_rcv_one goto xdp_xmit; default: bpf_warn_invalid_xdp_action(act); - /* fall through */ + fallthrough; case XDP_ABORTED: trace_xdp_exception(rq->dev, xdp_prog, act); - /* fall through */ + fallthrough; case XDP_DROP: stats->xdp_drops++; goto err_xdp; @@@ -745,10 -745,10 +745,10 @@@ static struct sk_buff *veth_xdp_rcv_skb goto xdp_xmit; default: bpf_warn_invalid_xdp_action(act); - /* fall through */ + fallthrough; case XDP_ABORTED: trace_xdp_exception(rq->dev, xdp_prog, act); - /* fall through */ + fallthrough; case XDP_DROP: stats->xdp_drops++; goto xdp_drop; diff --combined drivers/net/wireless/mediatek/mt76/mt7615/mcu.c index bd316dbd9041,084982eb6abd..7781530fb3e6 --- a/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c +++ b/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c @@@ -650,12 -650,12 +650,12 @@@ mt7615_mcu_add_beacon_offload(struct mt memcpy(req.pkt + MT_TXD_SIZE, skb->data, skb->len); req.pkt_len = cpu_to_le16(MT_TXD_SIZE + skb->len); req.tim_ie_pos = cpu_to_le16(MT_TXD_SIZE + offs.tim_offset); - if (offs.csa_counter_offs[0]) { + if (offs.cntdwn_counter_offs[0]) { u16 csa_offs;
- csa_offs = MT_TXD_SIZE + offs.csa_counter_offs[0] - 4; + csa_offs = MT_TXD_SIZE + offs.cntdwn_counter_offs[0] - 4; req.csa_ie_pos = cpu_to_le16(csa_offs); - req.csa_cnt = skb->data[offs.csa_counter_offs[0]]; + req.csa_cnt = skb->data[offs.cntdwn_counter_offs[0]]; } dev_kfree_skb(skb);
@@@ -1713,10 -1713,10 +1713,10 @@@ mt7615_mcu_uni_add_beacon_offload(struc req.beacon_tlv.pkt_len = cpu_to_le16(MT_TXD_SIZE + skb->len); req.beacon_tlv.tim_ie_pos = cpu_to_le16(MT_TXD_SIZE + offs.tim_offset);
- if (offs.csa_counter_offs[0]) { + if (offs.cntdwn_counter_offs[0]) { u16 csa_offs;
- csa_offs = MT_TXD_SIZE + offs.csa_counter_offs[0] - 4; + csa_offs = MT_TXD_SIZE + offs.cntdwn_counter_offs[0] - 4; req.beacon_tlv.csa_ie_pos = cpu_to_le16(csa_offs); } dev_kfree_skb(skb); @@@ -2128,8 -2128,7 +2128,8 @@@ static int mt7615_load_n9(struct mt7615 sizeof(dev->mt76.hw->wiphy->fw_version), "%.10s-%.15s", hdr->fw_ver, hdr->build_date);
- if (!strncmp(hdr->fw_ver, "2.0", sizeof(hdr->fw_ver))) { + if (!is_mt7615(&dev->mt76) && + !strncmp(hdr->fw_ver, "2.0", sizeof(hdr->fw_ver))) { dev->fw_ver = MT7615_FIRMWARE_V2; dev->mcu_ops = &sta_update_ops; } else { diff --combined drivers/s390/net/qeth_core_main.c index 6a7398251423,26bc8c15ffb8..e19640bc6daa --- a/drivers/s390/net/qeth_core_main.c +++ b/drivers/s390/net/qeth_core_main.c @@@ -1071,7 -1071,7 +1071,7 @@@ static void qeth_issue_next_read_cb(str break; case -EIO: qeth_schedule_recovery(card); - /* fall through */ + fallthrough; default: qeth_clear_ipacmd_list(card); goto err_idx; @@@ -2702,6 -2702,7 +2702,7 @@@ static int qeth_alloc_qdio_queues(struc card->qdio.out_qs[i] = queue; queue->card = card; queue->queue_no = i; + spin_lock_init(&queue->lock); timer_setup(&queue->timer, qeth_tx_completion_timer, 0); queue->coalesce_usecs = QETH_TX_COALESCE_USECS; queue->max_coalesced_frames = QETH_TX_MAX_COALESCED_FRAMES; @@@ -2886,7 -2887,7 +2887,7 @@@ void qeth_print_status_message(struct q card->info.mcl_level[3]); break; } - /* fallthrough */ + fallthrough; case QETH_CARD_TYPE_IQD: if (IS_VM_NIC(card) || (card->info.mcl_level[0] & 0x80)) { card->info.mcl_level[0] = (char) _ebcasc[(__u8) @@@ -3068,7 -3069,6 +3069,6 @@@ static int qeth_init_qdio_queues(struc queue->bulk_max = qeth_tx_select_bulk_max(card, queue); atomic_set(&queue->used_buffers, 0); atomic_set(&queue->set_pci_flags_count, 0); - atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED); netdev_tx_reset_queue(netdev_get_tx_queue(card->dev, i)); } return 0; @@@ -3549,8 -3549,9 +3549,9 @@@ static unsigned int qeth_rx_refill_queu
static void qeth_buffer_reclaim_work(struct work_struct *work) { - struct qeth_card *card = container_of(work, struct qeth_card, - buffer_reclaim_work.work); + struct qeth_card *card = container_of(to_delayed_work(work), + struct qeth_card, + buffer_reclaim_work);
local_bh_disable(); napi_schedule(&card->napi); @@@ -3740,37 -3741,31 +3741,31 @@@ static void qeth_flush_queue(struct qet
static void qeth_check_outbound_queue(struct qeth_qdio_out_q *queue) { - int index; - int flush_cnt = 0; - int q_was_packing = 0; - /* * check if we have to switch to non-packing mode or if * we have to get a pci flag out on the queue */ if ((atomic_read(&queue->used_buffers) <= QETH_LOW_WATERMARK_PACK) || !atomic_read(&queue->set_pci_flags_count)) { - if (atomic_xchg(&queue->state, QETH_OUT_Q_LOCKED_FLUSH) == - QETH_OUT_Q_UNLOCKED) { - /* - * If we get in here, there was no action in - * do_send_packet. So, we check if there is a - * packing buffer to be flushed here. - */ - index = queue->next_buf_to_fill; - q_was_packing = queue->do_pack; - /* queue->do_pack may change */ - barrier(); - flush_cnt += qeth_switch_to_nonpacking_if_needed(queue); - if (!flush_cnt && - !atomic_read(&queue->set_pci_flags_count)) - flush_cnt += qeth_prep_flush_pack_buffer(queue); + unsigned int index, flush_cnt; + bool q_was_packing; + + spin_lock(&queue->lock); + + index = queue->next_buf_to_fill; + q_was_packing = queue->do_pack; + + flush_cnt = qeth_switch_to_nonpacking_if_needed(queue); + if (!flush_cnt && !atomic_read(&queue->set_pci_flags_count)) + flush_cnt = qeth_prep_flush_pack_buffer(queue); + + if (flush_cnt) { + qeth_flush_buffers(queue, index, flush_cnt); if (q_was_packing) QETH_TXQ_STAT_ADD(queue, bufs_pack, flush_cnt); - if (flush_cnt) - qeth_flush_buffers(queue, index, flush_cnt); - atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED); } + + spin_unlock(&queue->lock); } }
@@@ -4282,29 -4277,22 +4277,22 @@@ int qeth_do_send_packet(struct qeth_car unsigned int offset, unsigned int hd_len, int elements_needed) { + unsigned int start_index = queue->next_buf_to_fill; struct qeth_qdio_out_buffer *buffer; unsigned int next_element; struct netdev_queue *txq; bool stopped = false; - int start_index; int flush_count = 0; int do_pack = 0; - int tmp; int rc = 0;
- /* spin until we get the queue ... */ - while (atomic_cmpxchg(&queue->state, QETH_OUT_Q_UNLOCKED, - QETH_OUT_Q_LOCKED) != QETH_OUT_Q_UNLOCKED); - start_index = queue->next_buf_to_fill; buffer = queue->bufs[queue->next_buf_to_fill];
/* Just a sanity check, the wake/stop logic should ensure that we always * get a free buffer. */ - if (atomic_read(&buffer->state) != QETH_QDIO_BUF_EMPTY) { - atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED); + if (atomic_read(&buffer->state) != QETH_QDIO_BUF_EMPTY) return -EBUSY; - }
txq = netdev_get_tx_queue(card->dev, skb_get_queue_mapping(skb));
@@@ -4327,8 -4315,6 +4315,6 @@@ QETH_QDIO_BUF_EMPTY) { qeth_flush_buffers(queue, start_index, flush_count); - atomic_set(&queue->state, - QETH_OUT_Q_UNLOCKED); rc = -EBUSY; goto out; } @@@ -4360,31 -4346,8 +4346,8 @@@
if (flush_count) qeth_flush_buffers(queue, start_index, flush_count); - else if (!atomic_read(&queue->set_pci_flags_count)) - atomic_xchg(&queue->state, QETH_OUT_Q_LOCKED_FLUSH); - /* - * queue->state will go from LOCKED -> UNLOCKED or from - * LOCKED_FLUSH -> LOCKED if output_handler wanted to 'notify' us - * (switch packing state or flush buffer to get another pci flag out). - * In that case we will enter this loop - */ - while (atomic_dec_return(&queue->state)) { - start_index = queue->next_buf_to_fill; - /* check if we can go back to non-packing state */ - tmp = qeth_switch_to_nonpacking_if_needed(queue); - /* - * check if we need to flush a packing buffer to get a pci - * flag out on the queue - */ - if (!tmp && !atomic_read(&queue->set_pci_flags_count)) - tmp = qeth_prep_flush_pack_buffer(queue); - if (tmp) { - qeth_flush_buffers(queue, start_index, tmp); - flush_count += tmp; - } - } + out: - /* at this point the queue is UNLOCKED again */ if (do_pack) QETH_TXQ_STAT_ADD(queue, bufs_pack, flush_count);
@@@ -4458,8 -4421,10 +4421,10 @@@ int qeth_xmit(struct qeth_card *card, s } else { /* TODO: drop skb_orphan() once TX completion is fast enough */ skb_orphan(skb); + spin_lock(&queue->lock); rc = qeth_do_send_packet(card, queue, skb, hdr, data_offset, hd_len, elements); + spin_unlock(&queue->lock); }
if (rc && !push_len) diff --combined drivers/s390/net/qeth_l2_main.c index 3a94f6cad167,b5bef5345dd6..491578009f12 --- a/drivers/s390/net/qeth_l2_main.c +++ b/drivers/s390/net/qeth_l2_main.c @@@ -273,6 -273,17 +273,17 @@@ static int qeth_l2_vlan_rx_kill_vid(str return qeth_l2_send_setdelvlan(card, vid, IPA_CMD_DELVLAN); }
+ static void qeth_l2_set_pnso_mode(struct qeth_card *card, + enum qeth_pnso_mode mode) + { + spin_lock_irq(get_ccwdev_lock(CARD_RDEV(card))); + WRITE_ONCE(card->info.pnso_mode, mode); + spin_unlock_irq(get_ccwdev_lock(CARD_RDEV(card))); + + if (mode == QETH_PNSO_NONE) + drain_workqueue(card->event_wq); + } + static void qeth_l2_stop_card(struct qeth_card *card) { QETH_CARD_TEXT(card, 2, "stopcard"); @@@ -290,7 -301,7 +301,7 @@@
qeth_qdio_clear_card(card, 0); qeth_clear_working_pool_list(card); - flush_workqueue(card->event_wq); + qeth_l2_set_pnso_mode(card, QETH_PNSO_NONE); qeth_flush_local_addrs(card); card->info.promisc_mode = 0; } @@@ -488,7 -499,7 +499,7 @@@ static void qeth_l2_rx_mode_work(struc kfree(mac); break; } - /* fall through */ + fallthrough; default: /* for next call to set_rx_mode(): */ mac->disp_flag = QETH_DISP_ADDR_DELETE; @@@ -810,8 -821,6 +821,6 @@@ static void qeth_l2_setup_bridgeport_at if (card->options.sbp.hostnotification) { if (qeth_bridgeport_an_set(card, 1)) card->options.sbp.hostnotification = 0; - } else { - qeth_bridgeport_an_set(card, 0); } }
@@@ -1090,15 -1099,14 +1099,14 @@@ static void qeth_bridge_emit_host_event struct qeth_bridge_state_data { struct work_struct worker; struct qeth_card *card; - struct qeth_sbp_state_change qports; + u8 role; + u8 state; };
static void qeth_bridge_state_change_worker(struct work_struct *work) { struct qeth_bridge_state_data *data = container_of(work, struct qeth_bridge_state_data, worker); - /* We are only interested in the first entry - local port */ - struct qeth_sbp_port_entry *entry = &data->qports.entry[0]; char env_locrem[32]; char env_role[32]; char env_state[32]; @@@ -1109,22 -1117,16 +1117,16 @@@ NULL };
- /* Role should not change by itself, but if it did, */ - /* information from the hardware is authoritative. */ - mutex_lock(&data->card->sbp_lock); - data->card->options.sbp.role = entry->role; - mutex_unlock(&data->card->sbp_lock); - snprintf(env_locrem, sizeof(env_locrem), "BRIDGEPORT=statechange"); snprintf(env_role, sizeof(env_role), "ROLE=%s", - (entry->role == QETH_SBP_ROLE_NONE) ? "none" : - (entry->role == QETH_SBP_ROLE_PRIMARY) ? "primary" : - (entry->role == QETH_SBP_ROLE_SECONDARY) ? "secondary" : + (data->role == QETH_SBP_ROLE_NONE) ? "none" : + (data->role == QETH_SBP_ROLE_PRIMARY) ? "primary" : + (data->role == QETH_SBP_ROLE_SECONDARY) ? "secondary" : "<INVALID>"); snprintf(env_state, sizeof(env_state), "STATE=%s", - (entry->state == QETH_SBP_STATE_INACTIVE) ? "inactive" : - (entry->state == QETH_SBP_STATE_STANDBY) ? "standby" : - (entry->state == QETH_SBP_STATE_ACTIVE) ? "active" : + (data->state == QETH_SBP_STATE_INACTIVE) ? "inactive" : + (data->state == QETH_SBP_STATE_STANDBY) ? "standby" : + (data->state == QETH_SBP_STATE_ACTIVE) ? "active" : "<INVALID>"); kobject_uevent_env(&data->card->gdev->dev.kobj, KOBJ_CHANGE, env); @@@ -1134,10 -1136,8 +1136,8 @@@ static void qeth_bridge_state_change(struct qeth_card *card, struct qeth_ipa_cmd *cmd) { - struct qeth_sbp_state_change *qports = - &cmd->data.sbp.data.state_change; + struct qeth_sbp_port_data *qports = &cmd->data.sbp.data.port_data; struct qeth_bridge_state_data *data; - int extrasize;
QETH_CARD_TEXT(card, 2, "brstchng"); if (qports->num_entries == 0) { @@@ -1148,34 -1148,50 +1148,50 @@@ QETH_CARD_TEXT_(card, 2, "BPsz%04x", qports->entry_length); return; } - extrasize = sizeof(struct qeth_sbp_port_entry) * qports->num_entries; - data = kzalloc(sizeof(struct qeth_bridge_state_data) + extrasize, - GFP_ATOMIC); + + data = kzalloc(sizeof(*data), GFP_ATOMIC); if (!data) { QETH_CARD_TEXT(card, 2, "BPSalloc"); return; } INIT_WORK(&data->worker, qeth_bridge_state_change_worker); data->card = card; - memcpy(&data->qports, qports, - sizeof(struct qeth_sbp_state_change) + extrasize); + /* Information for the local port: */ + data->role = qports->entry[0].role; + data->state = qports->entry[0].state; + queue_work(card->event_wq, &data->worker); }
struct qeth_addr_change_data { - struct work_struct worker; + struct delayed_work dwork; struct qeth_card *card; struct qeth_ipacmd_addr_change ac_event; };
static void qeth_addr_change_event_worker(struct work_struct *work) { - struct qeth_addr_change_data *data = - container_of(work, struct qeth_addr_change_data, worker); + struct delayed_work *dwork = to_delayed_work(work); + struct qeth_addr_change_data *data; + struct qeth_card *card; int i;
+ data = container_of(dwork, struct qeth_addr_change_data, dwork); + card = data->card; + QETH_CARD_TEXT(data->card, 4, "adrchgew"); + + if (READ_ONCE(card->info.pnso_mode) == QETH_PNSO_NONE) + goto free; + if (data->ac_event.lost_event_mask) { + /* Potential re-config in progress, try again later: */ + if (!mutex_trylock(&card->sbp_lock)) { + queue_delayed_work(card->event_wq, dwork, + msecs_to_jiffies(100)); + return; + } + dev_info(&data->card->gdev->dev, "Address change notification stopped on %s (%s)\n", data->card->dev->name, @@@ -1184,8 -1200,9 +1200,9 @@@ : (data->ac_event.lost_event_mask == 0x02) ? "Bridge port state change" : "Unknown reason"); - mutex_lock(&data->card->sbp_lock); + data->card->options.sbp.hostnotification = 0; + card->info.pnso_mode = QETH_PNSO_NONE; mutex_unlock(&data->card->sbp_lock); qeth_bridge_emit_host_event(data->card, anev_abort, 0, NULL, NULL); @@@ -1199,6 -1216,8 +1216,8 @@@ &entry->token, &entry->addr_lnid); } + + free: kfree(data); }
@@@ -1210,6 -1229,9 +1229,9 @@@ static void qeth_addr_change_event(stru struct qeth_addr_change_data *data; int extrasize;
+ if (card->info.pnso_mode == QETH_PNSO_NONE) + return; + QETH_CARD_TEXT(card, 4, "adrchgev"); if (cmd->hdr.return_code != 0x0000) { if (cmd->hdr.return_code == 0x0010) { @@@ -1229,11 -1251,11 +1251,11 @@@ QETH_CARD_TEXT(card, 2, "ACNalloc"); return; } - INIT_WORK(&data->worker, qeth_addr_change_event_worker); + INIT_DELAYED_WORK(&data->dwork, qeth_addr_change_event_worker); data->card = card; memcpy(&data->ac_event, hostevs, sizeof(struct qeth_ipacmd_addr_change) + extrasize); - queue_work(card->event_wq, &data->worker); + queue_delayed_work(card->event_wq, &data->dwork, 0); }
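The trylock-or-requeue shape of qeth_addr_change_event_worker() is a generic way to avoid sleeping on a contended mutex inside a workqueue callback. A minimal self-contained sketch (all names local to the example):

#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

struct my_event {
	struct delayed_work dwork;
	struct mutex *cfg_lock;	/* example-local re-configuration lock */
};

static void my_event_worker(struct work_struct *work)
{
	struct delayed_work *dwork = to_delayed_work(work);
	struct my_event *ev = container_of(dwork, struct my_event, dwork);

	/* Re-config in flight? Re-queue instead of blocking the worker. */
	if (!mutex_trylock(ev->cfg_lock)) {
		queue_delayed_work(system_wq, dwork, msecs_to_jiffies(100));
		return;
	}

	/* ... handle the event under the lock ... */

	mutex_unlock(ev->cfg_lock);
	kfree(ev);
}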
/* SETBRIDGEPORT support; sending commands */ @@@ -1418,8 -1440,8 +1440,8 @@@ static int qeth_bridgeport_query_ports_ struct qeth_reply *reply, unsigned long data) { struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data; - struct qeth_sbp_query_ports *qports = &cmd->data.sbp.data.query_ports; struct _qeth_sbp_cbctl *cbctl = (struct _qeth_sbp_cbctl *)reply->param; + struct qeth_sbp_port_data *qports; int rc;
QETH_CARD_TEXT(card, 2, "brqprtcb"); @@@ -1427,6 -1449,7 +1449,7 @@@ if (rc) return rc;
+ qports = &cmd->data.sbp.data.port_data; if (qports->entry_length != sizeof(struct qeth_sbp_port_entry)) { QETH_CARD_TEXT_(card, 2, "SBPs%04x", qports->entry_length); return -EINVAL; @@@ -1554,9 -1577,14 +1577,14 @@@ int qeth_bridgeport_an_set(struct qeth_
if (enable) { qeth_bridge_emit_host_event(card, anev_reset, 0, NULL, NULL); + qeth_l2_set_pnso_mode(card, QETH_PNSO_BRIDGEPORT); rc = qeth_l2_pnso(card, 1, qeth_bridgeport_an_set_cb, card); - } else + if (rc) + qeth_l2_set_pnso_mode(card, QETH_PNSO_NONE); + } else { rc = qeth_l2_pnso(card, 0, NULL, NULL); + qeth_l2_set_pnso_mode(card, QETH_PNSO_NONE); + } return rc; }
diff --combined drivers/s390/net/qeth_l3_main.c index 4d461960370d,95df638de616..767c5bb7c24c --- a/drivers/s390/net/qeth_l3_main.c +++ b/drivers/s390/net/qeth_l3_main.c @@@ -314,7 -314,8 +314,8 @@@ static int qeth_l3_setdelip_cb(struct q }
static int qeth_l3_send_setdelmc(struct qeth_card *card, - struct qeth_ipaddr *addr, int ipacmd) + struct qeth_ipaddr *addr, + enum qeth_ipa_cmds ipacmd) { struct qeth_cmd_buffer *iob; struct qeth_ipa_cmd *cmd; @@@ -1235,7 -1236,7 +1236,7 @@@ static void qeth_l3_rx_mode_work(struc break; } addr->ref_counter = 1; - /* fall through */ + fallthrough; default: /* for next call to set_rx_mode(): */ addr->disp_flag = QETH_DISP_ADDR_DELETE; diff --combined fs/io_uring.c index ce69bd9b0838,1fd03a38400c..94b9b5cf5971 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@@ -1150,7 -1150,7 +1150,7 @@@ static void io_prep_async_work(struct i io_req_init_async(req);
if (req->flags & REQ_F_ISREG) { - if (def->hash_reg_file) + if (def->hash_reg_file || (req->ctx->flags & IORING_SETUP_IOPOLL)) io_wq_hash_work(&req->work, file_inode(req->file)); } else { if (def->unbound_nonreg_file) @@@ -1746,8 -1746,7 +1746,8 @@@ static struct io_kiocb *io_req_find_nex return __io_req_find_next(req); }
-static int io_req_task_work_add(struct io_kiocb *req, struct callback_head *cb) +static int io_req_task_work_add(struct io_kiocb *req, struct callback_head *cb, + bool twa_signal_ok) { struct task_struct *tsk = req->task; struct io_ring_ctx *ctx = req->ctx; @@@ -1760,7 -1759,7 +1760,7 @@@ * will do the job. */ notify = 0; - if (!(ctx->flags & IORING_SETUP_SQPOLL)) + if (!(ctx->flags & IORING_SETUP_SQPOLL) && twa_signal_ok) notify = TWA_SIGNAL;
ret = task_work_add(tsk, cb, notify); @@@ -1820,7 -1819,7 +1820,7 @@@ static void io_req_task_queue(struct io init_task_work(&req->task_work, io_req_task_submit); percpu_ref_get(&req->ctx->refs);
- ret = io_req_task_work_add(req, &req->task_work); + ret = io_req_task_work_add(req, &req->task_work, true); if (unlikely(ret)) { struct task_struct *tsk;
@@@ -2049,7 -2048,6 +2049,7 @@@ static void io_iopoll_complete(struct i
req = list_first_entry(done, struct io_kiocb, inflight_entry); if (READ_ONCE(req->result) == -EAGAIN) { + req->result = 0; req->iopoll_completed = 0; list_move_tail(&req->inflight_entry, &again); continue; @@@ -2295,6 -2293,22 +2295,6 @@@ end_req io_req_complete(req, ret); return false; } - -static void io_rw_resubmit(struct callback_head *cb) -{ - struct io_kiocb *req = container_of(cb, struct io_kiocb, task_work); - struct io_ring_ctx *ctx = req->ctx; - int err; - - err = io_sq_thread_acquire_mm(ctx, req); - - if (io_resubmit_prep(req, err)) { - refcount_inc(&req->refs); - io_queue_async_work(req); - } - - percpu_ref_put(&ctx->refs); -} #endif
static bool io_rw_reissue(struct io_kiocb *req, long res) @@@ -2305,14 -2319,12 +2305,14 @@@ if ((res != -EAGAIN && res != -EOPNOTSUPP) || io_wq_current_is_worker()) return false;
- init_task_work(&req->task_work, io_rw_resubmit); - percpu_ref_get(&req->ctx->refs); + ret = io_sq_thread_acquire_mm(req->ctx, req);
- ret = io_req_task_work_add(req, &req->task_work); - if (!ret) + if (io_resubmit_prep(req, ret)) { + refcount_inc(&req->refs); + io_queue_async_work(req); return true; + } + #endif return false; } @@@ -2551,7 -2563,7 +2551,7 @@@ static inline void io_rw_done(struct ki * IO with EINTR. */ ret = -EINTR; - /* fall through */ + fallthrough; default: kiocb->ki_complete(kiocb, ret, 0); } @@@ -2853,11 -2865,6 +2853,11 @@@ static ssize_t io_import_iovec(int rw, return iov_iter_count(&req->io->rw.iter); }
+static inline loff_t *io_kiocb_ppos(struct kiocb *kiocb) +{ + return kiocb->ki_filp->f_mode & FMODE_STREAM ? NULL : &kiocb->ki_pos; +} + /* * For files that don't have ->read_iter() and ->write_iter(), handle them * by looping over ->read() or ->write() manually. @@@ -2893,10 -2900,10 +2893,10 @@@ static ssize_t loop_rw_iter(int rw, str
if (rw == READ) { nr = file->f_op->read(file, iovec.iov_base, - iovec.iov_len, &kiocb->ki_pos); + iovec.iov_len, io_kiocb_ppos(kiocb)); } else { nr = file->f_op->write(file, iovec.iov_base, - iovec.iov_len, &kiocb->ki_pos); + iovec.iov_len, io_kiocb_ppos(kiocb)); }
if (iov_iter_is_bvec(iter)) @@@ -3037,7 -3044,7 +3037,7 @@@ static int io_async_buf_func(struct wai
/* submit ref gets dropped, acquire a new one */ refcount_inc(&req->refs); - ret = io_req_task_work_add(req, &req->task_work); + ret = io_req_task_work_add(req, &req->task_work, true); if (unlikely(ret)) { struct task_struct *tsk;
@@@ -3118,7 -3125,6 +3118,7 @@@ static int io_read(struct io_kiocb *req ret = io_import_iovec(READ, req, &iovec, iter, !force_nonblock); if (ret < 0) return ret; + iov_count = iov_iter_count(iter); io_size = ret; req->result = io_size; ret = 0; @@@ -3131,7 -3137,8 +3131,7 @@@ if (force_nonblock && !io_file_supports_async(req->file, READ)) goto copy_iov;
- iov_count = iov_iter_count(iter); - ret = rw_verify_area(READ, req->file, &kiocb->ki_pos, iov_count); + ret = rw_verify_area(READ, req->file, io_kiocb_ppos(kiocb), iov_count); if (unlikely(ret)) goto out_free;
@@@ -3143,18 -3150,14 +3143,18 @@@ ret = 0; goto out_free; } else if (ret == -EAGAIN) { - if (!force_nonblock) + /* IOPOLL retry should happen for io-wq threads */ + if (!force_nonblock && !(req->ctx->flags & IORING_SETUP_IOPOLL)) goto done; + /* some cases will consume bytes even on error returns */ + iov_iter_revert(iter, iov_count - iov_iter_count(iter)); ret = io_setup_async_rw(req, iovec, inline_vecs, iter, false); if (ret) goto out_free; return -EAGAIN; } else if (ret < 0) { - goto out_free; + /* make sure -ERESTARTSYS -> -EINTR is done */ + goto done; }
/* read it all, or we did blocking attempt. no retry. */ @@@ -3238,7 -3241,6 +3238,7 @@@ static int io_write(struct io_kiocb *re ret = io_import_iovec(WRITE, req, &iovec, iter, !force_nonblock); if (ret < 0) return ret; + iov_count = iov_iter_count(iter); io_size = ret; req->result = io_size;
@@@ -3255,7 -3257,8 +3255,7 @@@ (req->flags & REQ_F_ISREG)) goto copy_iov;
- iov_count = iov_iter_count(iter); - ret = rw_verify_area(WRITE, req->file, &kiocb->ki_pos, iov_count); + ret = rw_verify_area(WRITE, req->file, io_kiocb_ppos(kiocb), iov_count); if (unlikely(ret)) goto out_free;
@@@ -3288,14 -3291,9 +3288,14 @@@ if (ret2 == -EOPNOTSUPP && (kiocb->ki_flags & IOCB_NOWAIT)) ret2 = -EAGAIN; if (!force_nonblock || ret2 != -EAGAIN) { + /* IOPOLL retry should happen for io-wq threads */ + if ((req->ctx->flags & IORING_SETUP_IOPOLL) && ret2 == -EAGAIN) + goto copy_iov; kiocb_done(kiocb, ret2, cs); } else { copy_iov: + /* some cases will consume bytes even on error returns */ + iov_iter_revert(iter, iov_count - iov_iter_count(iter)); ret = io_setup_async_rw(req, iovec, inline_vecs, iter, false); if (!ret) return -EAGAIN; @@@ -4568,7 -4566,6 +4568,7 @@@ struct io_poll_table static int __io_async_wake(struct io_kiocb *req, struct io_poll_iocb *poll, __poll_t mask, task_work_func_t func) { + bool twa_signal_ok; int ret;
/* for instances that support it check for an event match first: */ @@@ -4583,21 -4580,13 +4583,21 @@@ init_task_work(&req->task_work, func); percpu_ref_get(&req->ctx->refs);
+ /* + * If we're using the signalfd wait_queue_head for this wakeup, then + * it's not safe to use TWA_SIGNAL as we could be recursing on the + * tsk->sighand->siglock on doing the wakeup. Should not be needed + * either, as the normal wakeup will suffice. + */ + twa_signal_ok = (poll->head != &req->task->sighand->signalfd_wqh); + /* * If this fails, then the task is exiting. When a task exits, the * work gets canceled, so just cancel this request as well instead * of executing it. We can't safely execute it anyway, as we may not * have the state needed for it anyway. */ - ret = io_req_task_work_add(req, &req->task_work); + ret = io_req_task_work_add(req, &req->task_work, twa_signal_ok); if (unlikely(ret)) { struct task_struct *tsk;
@@@ -4886,20 -4875,12 +4886,20 @@@ static bool io_arm_poll_handler(struct struct async_poll *apoll; struct io_poll_table ipt; __poll_t mask, ret; + int rw;
if (!req->file || !file_can_poll(req->file)) return false; if (req->flags & REQ_F_POLLED) return false; - if (!def->pollin && !def->pollout) + if (def->pollin) + rw = READ; + else if (def->pollout) + rw = WRITE; + else + return false; + /* if we can't nonblock try, then no point in arming a poll handler */ + if (!io_file_supports_async(req->file, rw)) return false;
apoll = kmalloc(sizeof(*apoll), GFP_ATOMIC); @@@ -4917,6 -4898,12 +4917,12 @@@ mask |= POLLIN | POLLRDNORM; if (def->pollout) mask |= POLLOUT | POLLWRNORM; + + /* If reading from MSG_ERRQUEUE using recvmsg, ignore POLLIN */ + if ((req->opcode == IORING_OP_RECVMSG) && + (req->sr_msg.msg_flags & MSG_ERRQUEUE)) + mask &= ~POLLIN; + mask |= POLLERR | POLLPRI;
ipt.pt._qproc = io_async_queue_proc; @@@ -7452,6 -7439,9 +7458,6 @@@ static int io_sq_offload_start(struct i { int ret;
- mmgrab(current->mm); - ctx->sqo_mm = current->mm; - if (ctx->flags & IORING_SETUP_SQPOLL) { ret = -EPERM; if (!capable(CAP_SYS_ADMIN)) @@@ -7496,6 -7486,10 +7502,6 @@@ return 0; err: io_finish_async(ctx); - if (ctx->sqo_mm) { - mmdrop(ctx->sqo_mm); - ctx->sqo_mm = NULL; - } return ret; }
@@@ -8545,9 -8539,6 +8551,9 @@@ static int io_uring_create(unsigned ent ctx->user = user; ctx->creds = get_current_cred();
+ mmgrab(current->mm); + ctx->sqo_mm = current->mm; + /* * Account memory _before_ installing the file descriptor. Once * the descriptor is installed, it can get closed at any time. Also diff --combined include/linux/filter.h index ebfb7cfb65f1,995625950cc1..05b4052715b9 --- a/include/linux/filter.h +++ b/include/linux/filter.h @@@ -1200,7 -1200,7 +1200,7 @@@ static inline u16 bpf_anc_helper(const BPF_ANCILLARY(RANDOM); BPF_ANCILLARY(VLAN_TPID); } - /* Fallthrough. */ + fallthrough; default: return ftest->code; } @@@ -1236,13 -1236,17 +1236,17 @@@ struct bpf_sock_addr_kern
struct bpf_sock_ops_kern { struct sock *sk; - u32 op; union { u32 args[4]; u32 reply; u32 replylong[4]; }; - u32 is_fullsock; + struct sk_buff *syn_skb; + struct sk_buff *skb; + void *skb_data_end; + u8 op; + u8 is_fullsock; + u8 remaining_opt_len; u64 temp; /* temp and everything after is not * initialized to 0 before calling * the BPF program. New fields that diff --combined init/Kconfig index 2a5df1cf838c,6ecc00e130ff..91456ac0ef20 --- a/init/Kconfig +++ b/init/Kconfig @@@ -682,8 -682,7 +682,8 @@@ config IKHEADER
config LOG_BUF_SHIFT int "Kernel log buffer size (16 => 64KB, 17 => 128KB)" - range 12 25 + range 12 25 if !H8300 + range 12 19 if H8300 default 17 depends on PRINTK help @@@ -1692,6 -1691,7 +1692,7 @@@ config BPF_SYSCAL bool "Enable bpf() system call" select BPF select IRQ_WORK + select TASKS_TRACE_RCU default n help Enable the bpf() system call that allows to manipulate eBPF @@@ -1711,6 -1711,8 +1712,8 @@@ config BPF_JIT_DEFAULT_O def_bool ARCH_WANT_DEFAULT_BPF_JIT || BPF_JIT_ALWAYS_ON depends on HAVE_EBPF_JIT && BPF_JIT
+ source "kernel/bpf/preload/Kconfig" + config USERFAULTFD bool "Enable userfaultfd() system call" depends on MMU diff --combined kernel/bpf/cpumap.c index 6386b7bb98f2,cf548fc88780..7e1a8ad0c32a --- a/kernel/bpf/cpumap.c +++ b/kernel/bpf/cpumap.c @@@ -79,8 -79,6 +79,6 @@@ struct bpf_cpu_map
static DEFINE_PER_CPU(struct list_head, cpu_map_flush_list);
- static int bq_flush_to_queue(struct xdp_bulk_queue *bq); - static struct bpf_map *cpu_map_alloc(union bpf_attr *attr) { u32 value_size = attr->value_size; @@@ -279,7 -277,7 +277,7 @@@ static int cpu_map_bpf_prog_run_xdp(str break; default: bpf_warn_invalid_xdp_action(act); - /* fallthrough */ + fallthrough; case XDP_DROP: xdp_return_frame(xdpf); stats->drop++; @@@ -658,6 -656,7 +656,7 @@@ static int cpu_map_get_next_key(struct
static int cpu_map_btf_id; const struct bpf_map_ops cpu_map_ops = { + .map_meta_equal = bpf_map_meta_equal, .map_alloc = cpu_map_alloc, .map_free = cpu_map_free, .map_delete_elem = cpu_map_delete_elem, @@@ -669,7 -668,7 +668,7 @@@ .map_btf_id = &cpu_map_btf_id, };
- static int bq_flush_to_queue(struct xdp_bulk_queue *bq) + static void bq_flush_to_queue(struct xdp_bulk_queue *bq) { struct bpf_cpu_map_entry *rcpu = bq->obj; unsigned int processed = 0, drops = 0; @@@ -678,7 -677,7 +677,7 @@@ int i;
if (unlikely(!bq->count)) - return 0; + return;
q = rcpu->queue; spin_lock(&q->producer_lock); @@@ -701,13 -700,12 +700,12 @@@
/* Feedback loop via tracepoints */ trace_xdp_cpumap_enqueue(rcpu->map_id, processed, drops, to_cpu); - return 0; }
/* Runs under RCU-read-side, plus in softirq under NAPI protection. * Thus, safe percpu variable access. */ - static int bq_enqueue(struct bpf_cpu_map_entry *rcpu, struct xdp_frame *xdpf) + static void bq_enqueue(struct bpf_cpu_map_entry *rcpu, struct xdp_frame *xdpf) { struct list_head *flush_list = this_cpu_ptr(&cpu_map_flush_list); struct xdp_bulk_queue *bq = this_cpu_ptr(rcpu->bulkq); @@@ -728,8 -726,6 +726,6 @@@
if (!bq->flush_node.prev) list_add(&bq->flush_node, flush_list); - - return 0; }
int cpu_map_enqueue(struct bpf_cpu_map_entry *rcpu, struct xdp_buff *xdp, diff --combined kernel/bpf/syscall.c index b999e7ff2583,4108ef3b828b..178c147350f5 --- a/kernel/bpf/syscall.c +++ b/kernel/bpf/syscall.c @@@ -29,6 -29,7 +29,7 @@@ #include <linux/bpf_lsm.h> #include <linux/poll.h> #include <linux/bpf-netns.h> + #include <linux/rcupdate_trace.h>
#define IS_FD_ARRAY(map) ((map)->map_type == BPF_MAP_TYPE_PERF_EVENT_ARRAY || \ (map)->map_type == BPF_MAP_TYPE_CGROUP_ARRAY || \ @@@ -90,6 -91,7 +91,7 @@@ int bpf_check_uarg_tail_zero(void __use }
const struct bpf_map_ops bpf_map_offload_ops = { + .map_meta_equal = bpf_map_meta_equal, .map_alloc = bpf_map_offload_map_alloc, .map_free = bpf_map_offload_map_free, .map_check_btf = map_check_no_btf, @@@ -157,10 -159,11 +159,11 @@@ static int bpf_map_update_value(struct if (bpf_map_is_dev_bound(map)) { return bpf_map_offload_update_elem(map, key, value, flags); } else if (map->map_type == BPF_MAP_TYPE_CPUMAP || - map->map_type == BPF_MAP_TYPE_SOCKHASH || - map->map_type == BPF_MAP_TYPE_SOCKMAP || map->map_type == BPF_MAP_TYPE_STRUCT_OPS) { return map->ops->map_update_elem(map, key, value, flags); + } else if (map->map_type == BPF_MAP_TYPE_SOCKHASH || + map->map_type == BPF_MAP_TYPE_SOCKMAP) { + return sock_map_update_elem_sys(map, key, value, flags); } else if (IS_FD_PROG_ARRAY(map)) { return bpf_fd_array_map_update_elem(map, f.file, key, value, flags); @@@ -768,7 -771,8 +771,8 @@@ static int map_check_btf(struct bpf_ma if (map->map_type != BPF_MAP_TYPE_HASH && map->map_type != BPF_MAP_TYPE_ARRAY && map->map_type != BPF_MAP_TYPE_CGROUP_STORAGE && - map->map_type != BPF_MAP_TYPE_SK_STORAGE) + map->map_type != BPF_MAP_TYPE_SK_STORAGE && + map->map_type != BPF_MAP_TYPE_INODE_STORAGE) return -ENOTSUPP; if (map->spin_lock_off + sizeof(struct bpf_spin_lock) > map->value_size) { @@@ -1728,10 -1732,14 +1732,14 @@@ static void __bpf_prog_put_noref(struc btf_put(prog->aux->btf); bpf_prog_free_linfo(prog);
- if (deferred) - call_rcu(&prog->aux->rcu, __bpf_prog_put_rcu); - else + if (deferred) { + if (prog->aux->sleepable) + call_rcu_tasks_trace(&prog->aux->rcu, __bpf_prog_put_rcu); + else + call_rcu(&prog->aux->rcu, __bpf_prog_put_rcu); + } else { __bpf_prog_put_rcu(&prog->aux->rcu); + } }
static void __bpf_prog_put(struct bpf_prog *prog, bool do_idr_lock) @@@ -2029,7 -2037,7 +2037,7 @@@ bpf_prog_load_check_attach(enum bpf_pro case BPF_PROG_TYPE_EXT: if (expected_attach_type) return -EINVAL; - /* fallthrough */ + fallthrough; default: return 0; } @@@ -2101,6 -2109,7 +2109,7 @@@ static int bpf_prog_load(union bpf_att if (attr->prog_flags & ~(BPF_F_STRICT_ALIGNMENT | BPF_F_ANY_ALIGNMENT | BPF_F_TEST_STATE_FREQ | + BPF_F_SLEEPABLE | BPF_F_TEST_RND_HI32)) return -EINVAL;
@@@ -2156,6 -2165,7 +2165,7 @@@ }
prog->aux->offload_requested = !!attr->prog_ifindex; + prog->aux->sleepable = attr->prog_flags & BPF_F_SLEEPABLE;
err = security_bpf_prog_alloc(prog->aux); if (err) @@@ -2634,7 -2644,7 +2644,7 @@@ static int bpf_raw_tp_link_fill_link_in u32 ulen = info->raw_tracepoint.tp_name_len; size_t tp_len = strlen(tp_name);
- if (ulen && !ubuf) + if (!ulen ^ !ubuf) return -EINVAL;
info->raw_tracepoint.tp_name_len = tp_len + 1; @@@ -4014,40 -4024,50 +4024,50 @@@ static int link_detach(union bpf_attr * return ret; }
- static int bpf_link_inc_not_zero(struct bpf_link *link) + static struct bpf_link *bpf_link_inc_not_zero(struct bpf_link *link) { - return atomic64_fetch_add_unless(&link->refcnt, 1, 0) ? 0 : -ENOENT; + return atomic64_fetch_add_unless(&link->refcnt, 1, 0) ? link : ERR_PTR(-ENOENT); }
- #define BPF_LINK_GET_FD_BY_ID_LAST_FIELD link_id - - static int bpf_link_get_fd_by_id(const union bpf_attr *attr) + struct bpf_link *bpf_link_by_id(u32 id) { struct bpf_link *link; - u32 id = attr->link_id; - int fd, err;
- if (CHECK_ATTR(BPF_LINK_GET_FD_BY_ID)) - return -EINVAL; - - if (!capable(CAP_SYS_ADMIN)) - return -EPERM; + if (!id) + return ERR_PTR(-ENOENT);
spin_lock_bh(&link_idr_lock); - link = idr_find(&link_idr, id); /* before link is "settled", ID is 0, pretend it doesn't exist yet */ + link = idr_find(&link_idr, id); if (link) { if (link->id) - err = bpf_link_inc_not_zero(link); + link = bpf_link_inc_not_zero(link); else - err = -EAGAIN; + link = ERR_PTR(-EAGAIN); } else { - err = -ENOENT; + link = ERR_PTR(-ENOENT); } spin_unlock_bh(&link_idr_lock); + return link; + }
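bpf_link_by_id() is now usable by other kernel code, not just the fd-by-id syscall path below; a hedged sketch of the intended calling convention (the surrounding function is hypothetical):

/* inspect_link() is hypothetical; bpf_link_by_id() and bpf_link_put()
 * are the real entry points.  On success the helper returns a link with
 * an extra reference that the caller must drop.
 */
static int inspect_link(u32 id)
{
	struct bpf_link *link = bpf_link_by_id(id);

	if (IS_ERR(link))
		return PTR_ERR(link);

	/* ... inspect link->prog, link->ops, ... */

	bpf_link_put(link);
	return 0;
}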
- if (err) - return err; + #define BPF_LINK_GET_FD_BY_ID_LAST_FIELD link_id + + static int bpf_link_get_fd_by_id(const union bpf_attr *attr) + { + struct bpf_link *link; + u32 id = attr->link_id; + int fd; + + if (CHECK_ATTR(BPF_LINK_GET_FD_BY_ID)) + return -EINVAL; + + if (!capable(CAP_SYS_ADMIN)) + return -EPERM; + + link = bpf_link_by_id(id); + if (IS_ERR(link)) + return PTR_ERR(link);
fd = bpf_link_new_fd(link); if (fd < 0) diff --combined kernel/bpf/verifier.c index 47e74f09fa37,b4e9c56b8b32..86fdebb5ffd8 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@@ -21,6 -21,7 +21,7 @@@ #include <linux/ctype.h> #include <linux/error-injection.h> #include <linux/bpf_lsm.h> + #include <linux/btf_ids.h>
#include "disasm.h"
@@@ -2625,11 -2626,19 +2626,19 @@@ static int check_map_access(struct bpf_
#define MAX_PACKET_OFF 0xffff
+ static enum bpf_prog_type resolve_prog_type(struct bpf_prog *prog) + { + return prog->aux->linked_prog ? prog->aux->linked_prog->type + : prog->type; + } + static bool may_access_direct_pkt_data(struct bpf_verifier_env *env, const struct bpf_call_arg_meta *meta, enum bpf_access_type t) { - switch (env->prog->type) { + enum bpf_prog_type prog_type = resolve_prog_type(env->prog); + + switch (prog_type) { /* Program types only with direct read access go here! */ case BPF_PROG_TYPE_LWT_IN: case BPF_PROG_TYPE_LWT_OUT: @@@ -3872,6 -3881,33 +3881,33 @@@ static int int_ptr_type_to_size(enum bp return -EINVAL; }
+ static int resolve_map_arg_type(struct bpf_verifier_env *env, + const struct bpf_call_arg_meta *meta, + enum bpf_arg_type *arg_type) + { + if (!meta->map_ptr) { + /* kernel subsystem misconfigured verifier */ + verbose(env, "invalid map_ptr to access map->type\n"); + return -EACCES; + } + + switch (meta->map_ptr->map_type) { + case BPF_MAP_TYPE_SOCKMAP: + case BPF_MAP_TYPE_SOCKHASH: + if (*arg_type == ARG_PTR_TO_MAP_VALUE) { + *arg_type = ARG_PTR_TO_SOCKET; + } else { + verbose(env, "invalid arg_type for sockmap/sockhash\n"); + return -EINVAL; + } + break; + + default: + break; + } + return 0; + } + static int check_func_arg(struct bpf_verifier_env *env, u32 arg, struct bpf_call_arg_meta *meta, const struct bpf_func_proto *fn) @@@ -3904,6 -3940,14 +3940,14 @@@ return -EACCES; }
+ if (arg_type == ARG_PTR_TO_MAP_VALUE || + arg_type == ARG_PTR_TO_UNINIT_MAP_VALUE || + arg_type == ARG_PTR_TO_MAP_VALUE_OR_NULL) { + err = resolve_map_arg_type(env, meta, &arg_type); + if (err) + return err; + } + if (arg_type == ARG_PTR_TO_MAP_KEY || arg_type == ARG_PTR_TO_MAP_VALUE || arg_type == ARG_PTR_TO_UNINIT_MAP_VALUE || @@@ -3960,16 -4004,21 +4004,21 @@@ goto err_type; } } else if (arg_type == ARG_PTR_TO_BTF_ID) { + bool ids_match = false; + expected_type = PTR_TO_BTF_ID; if (type != expected_type) goto err_type; if (!fn->check_btf_id) { if (reg->btf_id != meta->btf_id) { - verbose(env, "Helper has type %s got %s in R%d\n", - kernel_type_name(meta->btf_id), - kernel_type_name(reg->btf_id), regno); - - return -EACCES; + ids_match = btf_struct_ids_match(&env->log, reg->off, reg->btf_id, + meta->btf_id); + if (!ids_match) { + verbose(env, "Helper has type %s got %s in R%d\n", + kernel_type_name(meta->btf_id), + kernel_type_name(reg->btf_id), regno); + return -EACCES; + } } } else if (!fn->check_btf_id(reg->btf_id, arg)) { verbose(env, "Helper does not support %s in R%d\n", @@@ -3977,7 -4026,7 +4026,7 @@@
return -EACCES; } - if (!tnum_is_const(reg->var_off) || reg->var_off.value || reg->off) { + if ((reg->off && !ids_match) || !tnum_is_const(reg->var_off) || reg->var_off.value) { verbose(env, "R%d is a pointer to in-kernel struct with non-zero offset\n", regno); return -EACCES; @@@ -4143,6 -4192,38 +4192,38 @@@ err_type return -EACCES; }
+ static bool may_update_sockmap(struct bpf_verifier_env *env, int func_id) + { + enum bpf_attach_type eatype = env->prog->expected_attach_type; + enum bpf_prog_type type = resolve_prog_type(env->prog); + + if (func_id != BPF_FUNC_map_update_elem) + return false; + + /* It's not possible to get access to a locked struct sock in these + * contexts, so updating is safe. + */ + switch (type) { + case BPF_PROG_TYPE_TRACING: + if (eatype == BPF_TRACE_ITER) + return true; + break; + case BPF_PROG_TYPE_SOCKET_FILTER: + case BPF_PROG_TYPE_SCHED_CLS: + case BPF_PROG_TYPE_SCHED_ACT: + case BPF_PROG_TYPE_XDP: + case BPF_PROG_TYPE_SK_REUSEPORT: + case BPF_PROG_TYPE_FLOW_DISSECTOR: + case BPF_PROG_TYPE_SK_LOOKUP: + return true; + default: + break; + } + + verbose(env, "cannot update sockmap in this context\n"); + return false; + } + static int check_map_func_compatibility(struct bpf_verifier_env *env, struct bpf_map *map, int func_id) { @@@ -4214,7 -4295,8 +4295,8 @@@ func_id != BPF_FUNC_map_delete_elem && func_id != BPF_FUNC_msg_redirect_map && func_id != BPF_FUNC_sk_select_reuseport && - func_id != BPF_FUNC_map_lookup_elem) + func_id != BPF_FUNC_map_lookup_elem && + !may_update_sockmap(env, func_id)) goto error; break; case BPF_MAP_TYPE_SOCKHASH: @@@ -4223,7 -4305,8 +4305,8 @@@ func_id != BPF_FUNC_map_delete_elem && func_id != BPF_FUNC_msg_redirect_hash && func_id != BPF_FUNC_sk_select_reuseport && - func_id != BPF_FUNC_map_lookup_elem) + func_id != BPF_FUNC_map_lookup_elem && + !may_update_sockmap(env, func_id)) goto error; break; case BPF_MAP_TYPE_REUSEPORT_SOCKARRAY: @@@ -4242,6 -4325,11 +4325,11 @@@ func_id != BPF_FUNC_sk_storage_delete) goto error; break; + case BPF_MAP_TYPE_INODE_STORAGE: + if (func_id != BPF_FUNC_inode_storage_get && + func_id != BPF_FUNC_inode_storage_delete) + goto error; + break; default: break; } @@@ -4315,6 -4403,11 +4403,11 @@@ if (map->map_type != BPF_MAP_TYPE_SK_STORAGE) goto error; break; + case BPF_FUNC_inode_storage_get: + case BPF_FUNC_inode_storage_delete: + if (map->map_type != BPF_MAP_TYPE_INODE_STORAGE) + goto error; + break; default: break; } @@@ -4775,6 -4868,11 +4868,11 @@@ static int check_helper_call(struct bpf return -EINVAL; }
+ if (fn->allowed && !fn->allowed(env->prog)) { + verbose(env, "helper call is not allowed in probe\n"); + return -EINVAL; + } + /* With LD_ABS/IND some JITs save/restore skb from r1. */ changes_data = bpf_helper_changes_pkt_data(fn->func); if (changes_data && fn->arg1_type != ARG_PTR_TO_CTX) { @@@ -5236,7 -5334,7 +5334,7 @@@ static int adjust_ptr_min_max_vals(stru off_reg == dst_reg ? dst : src); return -EACCES; } - /* fall-through */ + fallthrough; default: break; } @@@ -5732,6 -5830,67 +5830,67 @@@ static void scalar_min_max_or(struct bp __update_reg_bounds(dst_reg); }
+ static void scalar32_min_max_xor(struct bpf_reg_state *dst_reg, + struct bpf_reg_state *src_reg) + { + bool src_known = tnum_subreg_is_const(src_reg->var_off); + bool dst_known = tnum_subreg_is_const(dst_reg->var_off); + struct tnum var32_off = tnum_subreg(dst_reg->var_off); + s32 smin_val = src_reg->s32_min_value; + + /* Assuming scalar64_min_max_xor will be called so it is safe + * to skip updating register for known case. + */ + if (src_known && dst_known) + return; + + /* We get both minimum and maximum from the var32_off. */ + dst_reg->u32_min_value = var32_off.value; + dst_reg->u32_max_value = var32_off.value | var32_off.mask; + + if (dst_reg->s32_min_value >= 0 && smin_val >= 0) { + /* XORing two positive sign numbers gives a positive, + * so safe to cast u32 result into s32. + */ + dst_reg->s32_min_value = dst_reg->u32_min_value; + dst_reg->s32_max_value = dst_reg->u32_max_value; + } else { + dst_reg->s32_min_value = S32_MIN; + dst_reg->s32_max_value = S32_MAX; + } + } + + static void scalar_min_max_xor(struct bpf_reg_state *dst_reg, + struct bpf_reg_state *src_reg) + { + bool src_known = tnum_is_const(src_reg->var_off); + bool dst_known = tnum_is_const(dst_reg->var_off); + s64 smin_val = src_reg->smin_value; + + if (src_known && dst_known) { + /* dst_reg->var_off.value has been updated earlier */ + __mark_reg_known(dst_reg, dst_reg->var_off.value); + return; + } + + /* We get both minimum and maximum from the var_off. */ + dst_reg->umin_value = dst_reg->var_off.value; + dst_reg->umax_value = dst_reg->var_off.value | dst_reg->var_off.mask; + + if (dst_reg->smin_value >= 0 && smin_val >= 0) { + /* XORing two positive sign numbers gives a positive, + * so safe to cast u64 result into s64. + */ + dst_reg->smin_value = dst_reg->umin_value; + dst_reg->smax_value = dst_reg->umax_value; + } else { + dst_reg->smin_value = S64_MIN; + dst_reg->smax_value = S64_MAX; + } + + __update_reg_bounds(dst_reg); + } + static void __scalar32_min_max_lsh(struct bpf_reg_state *dst_reg, u64 umin_val, u64 umax_val) { @@@ -6040,6 -6199,11 +6199,11 @@@ static int adjust_scalar_min_max_vals(s scalar32_min_max_or(dst_reg, &src_reg); scalar_min_max_or(dst_reg, &src_reg); break; + case BPF_XOR: + dst_reg->var_off = tnum_xor(dst_reg->var_off, src_reg.var_off); + scalar32_min_max_xor(dst_reg, &src_reg); + scalar_min_max_xor(dst_reg, &src_reg); + break; case BPF_LSH: if (umax_val >= insn_bitness) { /* Shifts greater than 31 or 63 are undefined. @@@ -7287,7 -7451,7 +7451,7 @@@ static int check_ld_abs(struct bpf_veri u8 mode = BPF_MODE(insn->code); int i, err;
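To make the value of the new BPF_XOR tracking concrete, here is a hedged illustration (not from the patch) of restricted C that the verifier can now reason about: (x & 7) ^ 5 provably stays in [0, 7], so the access below needs no extra bounds check. All names are illustrative:

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

__u32 lookup_table[8];	/* illustrative global array */

SEC("socket")
int xor_bounds(struct __sk_buff *skb)
{
	__u32 key = skb->len & 7;	/* verifier: key in [0, 7]         */
	__u32 idx = key ^ 5;		/* with XOR tracking: still [0, 7] */

	return lookup_table[idx];	/* provably in-bounds access */
}

char LICENSE[] SEC("license") = "GPL";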
- if (!may_access_skb(env->prog->type)) { + if (!may_access_skb(resolve_prog_type(env->prog))) { verbose(env, "BPF_LD_[ABS|IND] instructions not allowed for this program type\n"); return -EINVAL; } @@@ -7375,11 -7539,12 +7539,12 @@@ static int check_return_code(struct bpf const struct bpf_prog *prog = env->prog; struct bpf_reg_state *reg; struct tnum range = tnum_range(0, 1); + enum bpf_prog_type prog_type = resolve_prog_type(env->prog); int err;
/* LSM and struct_ops func-ptr's return type could be "void" */ - if ((env->prog->type == BPF_PROG_TYPE_STRUCT_OPS || - env->prog->type == BPF_PROG_TYPE_LSM) && + if ((prog_type == BPF_PROG_TYPE_STRUCT_OPS || + prog_type == BPF_PROG_TYPE_LSM) && !prog->aux->attach_func_proto->type) return 0;
@@@ -7398,7 -7563,7 +7563,7 @@@ return -EACCES; }
- switch (env->prog->type) { + switch (prog_type) { case BPF_PROG_TYPE_CGROUP_SOCK_ADDR: if (env->prog->expected_attach_type == BPF_CGROUP_UDP4_RECVMSG || env->prog->expected_attach_type == BPF_CGROUP_UDP6_RECVMSG || @@@ -9154,6 -9319,7 +9319,7 @@@ static int check_map_prog_compatibility struct bpf_prog *prog)
{ + enum bpf_prog_type prog_type = resolve_prog_type(prog); /* * Validate that trace type programs use preallocated hash maps. * @@@ -9171,8 -9337,8 +9337,8 @@@ * now, but warnings are emitted so developers are made aware of * the unsafety and can fix their programs before this is enforced. */ - if (is_tracing_prog_type(prog->type) && !is_preallocated_map(map)) { - if (prog->type == BPF_PROG_TYPE_PERF_EVENT) { + if (is_tracing_prog_type(prog_type) && !is_preallocated_map(map)) { + if (prog_type == BPF_PROG_TYPE_PERF_EVENT) { verbose(env, "perf_event programs can only use preallocated hash map\n"); return -EINVAL; } @@@ -9184,8 -9350,8 +9350,8 @@@ verbose(env, "trace type programs with run-time allocated hash maps are unsafe. Switch to preallocated hash maps.\n"); }
- if ((is_tracing_prog_type(prog->type) || - prog->type == BPF_PROG_TYPE_SOCKET_FILTER) && + if ((is_tracing_prog_type(prog_type) || + prog_type == BPF_PROG_TYPE_SOCKET_FILTER) && map_value_has_spin_lock(map)) { verbose(env, "tracing progs cannot use bpf_spin_lock yet\n"); return -EINVAL; @@@ -9202,6 -9368,23 +9368,23 @@@ return -EINVAL; }
+ if (prog->aux->sleepable) + switch (map->map_type) { + case BPF_MAP_TYPE_HASH: + case BPF_MAP_TYPE_LRU_HASH: + case BPF_MAP_TYPE_ARRAY: + if (!is_preallocated_map(map)) { + verbose(env, + "Sleepable programs can only use preallocated hash maps\n"); + return -EINVAL; + } + break; + default: + verbose(env, + "Sleepable programs can only use array and hash maps\n"); + return -EINVAL; + } + return 0; }
@@@ -9897,7 -10080,7 +10080,7 @@@ static int convert_ctx_accesses(struct insn->code = BPF_LDX | BPF_PROBE_MEM | BPF_SIZE((insn)->code); env->prog->aux->num_exentries++; - } else if (env->prog->type != BPF_PROG_TYPE_STRUCT_OPS) { + } else if (resolve_prog_type(env->prog) != BPF_PROG_TYPE_STRUCT_OPS) { verbose(env, "Writes through BTF pointers are not allowed\n"); return -EINVAL; } @@@ -10820,6 -11003,37 +11003,37 @@@ static int check_attach_modify_return(s return -EINVAL; }
+ /* non exhaustive list of sleepable bpf_lsm_*() functions */ + BTF_SET_START(btf_sleepable_lsm_hooks) + #ifdef CONFIG_BPF_LSM + BTF_ID(func, bpf_lsm_bprm_committed_creds) + #else + BTF_ID_UNUSED + #endif + BTF_SET_END(btf_sleepable_lsm_hooks) + + static int check_sleepable_lsm_hook(u32 btf_id) + { + return btf_id_set_contains(&btf_sleepable_lsm_hooks, btf_id); + } + + /* list of non-sleepable functions that are otherwise on + * ALLOW_ERROR_INJECTION list + */ + BTF_SET_START(btf_non_sleepable_error_inject) + /* Three functions below can be called from sleepable and non-sleepable context. + * Assume non-sleepable from bpf safety point of view. + */ + BTF_ID(func, __add_to_page_cache_locked) + BTF_ID(func, should_fail_alloc_page) + BTF_ID(func, should_failslab) + BTF_SET_END(btf_non_sleepable_error_inject) + + static int check_non_sleepable_error_inject(u32 btf_id) + { + return btf_id_set_contains(&btf_non_sleepable_error_inject, btf_id); + } + static int check_attach_btf_id(struct bpf_verifier_env *env) { struct bpf_prog *prog = env->prog; @@@ -10837,6 -11051,12 +11051,12 @@@ long addr; u64 key;
+ if (prog->aux->sleepable && prog->type != BPF_PROG_TYPE_TRACING && + prog->type != BPF_PROG_TYPE_LSM) { + verbose(env, "Only fentry/fexit/fmod_ret and lsm programs can be sleepable\n"); + return -EINVAL; + } + if (prog->type == BPF_PROG_TYPE_STRUCT_OPS) return check_struct_ops_btf_id(env);
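A hedged sketch of what a sleepable program looks like from the toolchain side; the ".s" section suffix (the libbpf convention introduced alongside this series) is what sets BPF_F_SLEEPABLE, and the attach point and argument use are illustrative:

#include "vmlinux.h"		/* assumed BTF-generated kernel types */
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

char fname[16];

/* Illustrative sleepable fentry program: bpf_copy_from_user() (added in
 * the same series) may fault and sleep, so it is only allowed here.
 */
SEC("fentry.s/do_sys_open")
int BPF_PROG(trace_open, int dfd, const char *filename)
{
	bpf_copy_from_user(fname, sizeof(fname), filename);
	return 0;
}

char LICENSE[] SEC("license") = "GPL";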
@@@ -10988,7 -11208,7 +11208,7 @@@ default: if (!prog_extension) return -EINVAL; - /* fallthrough */ + fallthrough; case BPF_MODIFY_RETURN: case BPF_LSM_MAC: case BPF_TRACE_FENTRY: @@@ -11045,13 -11265,36 +11265,36 @@@ } }
- if (prog->expected_attach_type == BPF_MODIFY_RETURN) { + if (prog->aux->sleepable) { + ret = -EINVAL; + switch (prog->type) { + case BPF_PROG_TYPE_TRACING: + /* fentry/fexit/fmod_ret progs can be sleepable only if they are + * attached to ALLOW_ERROR_INJECTION and are not in denylist. + */ + if (!check_non_sleepable_error_inject(btf_id) && + within_error_injection_list(addr)) + ret = 0; + break; + case BPF_PROG_TYPE_LSM: + /* LSM progs check that they are attached to bpf_lsm_*() funcs. + * Only some of them are sleepable. + */ + if (check_sleepable_lsm_hook(btf_id)) + ret = 0; + break; + default: + break; + } + if (ret) + verbose(env, "%s is not sleepable\n", + prog->aux->attach_func_name); + } else if (prog->expected_attach_type == BPF_MODIFY_RETURN) { ret = check_attach_modify_return(prog, addr); if (ret) verbose(env, "%s() is not modifiable\n", prog->aux->attach_func_name); } - if (ret) goto out; tr->func.addr = (void *)addr; diff --combined net/batman-adv/bat_v_ogm.c index 717fe657561d,11c3f98ba938..8c1148fc73d7 --- a/net/batman-adv/bat_v_ogm.c +++ b/net/batman-adv/bat_v_ogm.c @@@ -20,6 -20,7 +20,7 @@@ #include <linux/lockdep.h> #include <linux/mutex.h> #include <linux/netdevice.h> + #include <linux/prandom.h> #include <linux/random.h> #include <linux/rculist.h> #include <linux/rcupdate.h> @@@ -881,12 -882,6 +882,12 @@@ static void batadv_v_ogm_process(const ntohl(ogm_packet->seqno), ogm_throughput, ogm_packet->ttl, ogm_packet->version, ntohs(ogm_packet->tvlv_len));
+ if (batadv_is_my_mac(bat_priv, ogm_packet->orig)) { + batadv_dbg(BATADV_DBG_BATMAN, bat_priv, + "Drop packet: originator packet from ourself\n"); + return; + } + /* If the throughput metric is 0, immediately drop the packet. No need * to create orig_node / neigh_node for an unusable route. */ @@@ -1014,6 -1009,11 +1015,6 @@@ int batadv_v_ogm_packet_recv(struct sk_ if (batadv_is_my_mac(bat_priv, ethhdr->h_source)) goto free_skb;
- ogm_packet = (struct batadv_ogm2_packet *)skb->data; - - if (batadv_is_my_mac(bat_priv, ogm_packet->orig)) - goto free_skb; - batadv_inc_counter(bat_priv, BATADV_CNT_MGMT_RX); batadv_add_counter(bat_priv, BATADV_CNT_MGMT_RX_BYTES, skb->len + ETH_HLEN); diff --combined net/batman-adv/bridge_loop_avoidance.c index 8500f56cbd10,5c41cc52bc53..ab6cec3c7586 --- a/net/batman-adv/bridge_loop_avoidance.c +++ b/net/batman-adv/bridge_loop_avoidance.c @@@ -437,10 -437,7 +437,10 @@@ static void batadv_bla_send_claim(struc batadv_add_counter(bat_priv, BATADV_CNT_RX_BYTES, skb->len + ETH_HLEN);
- netif_rx(skb); + if (in_interrupt()) + netif_rx(skb); + else + netif_rx_ni(skb); out: if (primary_if) batadv_hardif_put(primary_if); @@@ -1798,7 -1795,7 +1798,7 @@@ batadv_bla_loopdetect_check(struct bata
ret = queue_work(batadv_event_workqueue, &backbone_gw->report_work);
- /* backbone_gw is unreferenced in the report work function function + /* backbone_gw is unreferenced in the report work function * if queue_work() call was successful */ if (!ret) diff --combined net/core/devlink.c index 80ec1cd81c64,58c8bb07fa19..49e911c19881 --- a/net/core/devlink.c +++ b/net/core/devlink.c @@@ -5895,6 -5895,7 +5895,7 @@@ devlink_nl_cmd_health_reporter_get_dump list_for_each_entry(devlink, &devlink_list, list) { if (!net_eq(devlink_net(devlink), sock_net(msg->sk))) continue; + mutex_lock(&devlink->lock); list_for_each_entry(port, &devlink->port_list, list) { mutex_lock(&port->reporters_lock); list_for_each_entry(reporter, &port->reporter_list, list) { @@@ -5909,12 -5910,14 +5910,14 @@@ NLM_F_MULTI); if (err) { mutex_unlock(&port->reporters_lock); + mutex_unlock(&devlink->lock); goto out; } idx++; } mutex_unlock(&port->reporters_lock); } + mutex_unlock(&devlink->lock); } out: mutex_unlock(&devlink_mutex); @@@ -6196,8 -6199,8 +6199,8 @@@ devlink_trap_action_get_from_info(struc
val = nla_get_u8(info->attrs[DEVLINK_ATTR_TRAP_ACTION]); switch (val) { - case DEVLINK_TRAP_ACTION_DROP: /* fall-through */ - case DEVLINK_TRAP_ACTION_TRAP: /* fall-through */ + case DEVLINK_TRAP_ACTION_DROP: + case DEVLINK_TRAP_ACTION_TRAP: case DEVLINK_TRAP_ACTION_MIRROR: *p_trap_action = val; break; @@@ -7555,11 -7558,11 +7558,11 @@@ int devlink_port_register(struct devlin devlink_port->index = port_index; devlink_port->registered = true; spin_lock_init(&devlink_port->type_lock); + INIT_LIST_HEAD(&devlink_port->reporter_list); + mutex_init(&devlink_port->reporters_lock); list_add_tail(&devlink_port->list, &devlink->port_list); INIT_LIST_HEAD(&devlink_port->param_list); mutex_unlock(&devlink->lock); - INIT_LIST_HEAD(&devlink_port->reporter_list); - mutex_init(&devlink_port->reporters_lock); INIT_DELAYED_WORK(&devlink_port->type_warn_dw, &devlink_port_type_warn); devlink_port_type_warn_schedule(devlink_port); devlink_port_notify(devlink_port, DEVLINK_CMD_PORT_NEW); @@@ -7576,13 -7579,13 +7579,13 @@@ void devlink_port_unregister(struct dev { struct devlink *devlink = devlink_port->devlink;
- WARN_ON(!list_empty(&devlink_port->reporter_list)); - mutex_destroy(&devlink_port->reporters_lock); devlink_port_type_warn_cancel(devlink_port); devlink_port_notify(devlink_port, DEVLINK_CMD_PORT_DEL); mutex_lock(&devlink->lock); list_del(&devlink_port->list); mutex_unlock(&devlink->lock); + WARN_ON(!list_empty(&devlink_port->reporter_list)); + mutex_destroy(&devlink_port->reporters_lock); } EXPORT_SYMBOL_GPL(devlink_port_unregister);
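Both devlink changes are ordering fixes: the health-reporter dump now holds devlink->lock across the port walk, and devlink_port_register()/devlink_port_unregister() initialize the per-port reporter list and mutex before the port becomes visible on devlink->port_list, destroying them only after it has been unlinked. The underlying rule, as an illustrative sketch that is not devlink code:

/* Illustrative only: initialize everything before publishing an
 * object on a shared list, and unlink it before tearing down.
 * The names are made up; only the ordering mirrors the fix above.
 */
#include <linux/bug.h>
#include <linux/list.h>
#include <linux/mutex.h>

struct bus {
	struct mutex lock;
	struct list_head ports;
};

struct port {
	struct list_head node;
	struct list_head reporters;
	struct mutex reporters_lock;
};

static void port_publish(struct bus *b, struct port *p)
{
	INIT_LIST_HEAD(&p->reporters);		/* init first ... */
	mutex_init(&p->reporters_lock);
	mutex_lock(&b->lock);
	list_add_tail(&p->node, &b->ports);	/* ... publish last */
	mutex_unlock(&b->lock);
}

static void port_unpublish(struct bus *b, struct port *p)
{
	mutex_lock(&b->lock);
	list_del(&p->node);			/* unlink first ... */
	mutex_unlock(&b->lock);
	WARN_ON(!list_empty(&p->reporters));	/* ... tear down last */
	mutex_destroy(&p->reporters_lock);
}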
diff --combined net/core/filter.c index 1f647ab986b6,47eef9a0be6a..2ad9c0ef1946 --- a/net/core/filter.c +++ b/net/core/filter.c @@@ -4459,6 -4459,7 +4459,7 @@@ static int _bpf_setsockopt(struct sock } else { struct inet_connection_sock *icsk = inet_csk(sk); struct tcp_sock *tp = tcp_sk(sk); + unsigned long timeout;
if (optlen != sizeof(int)) return -EINVAL; @@@ -4480,6 -4481,20 +4481,20 @@@ tp->snd_ssthresh = val; } break; + case TCP_BPF_DELACK_MAX: + timeout = usecs_to_jiffies(val); + if (timeout > TCP_DELACK_MAX || + timeout < TCP_TIMEOUT_MIN) + return -EINVAL; + inet_csk(sk)->icsk_delack_max = timeout; + break; + case TCP_BPF_RTO_MIN: + timeout = usecs_to_jiffies(val); + if (timeout > TCP_RTO_MIN || + timeout < TCP_TIMEOUT_MIN) + return -EINVAL; + inet_csk(sk)->icsk_rto_min = timeout; + break; case TCP_SAVE_SYN: if (val < 0 || val > 1) ret = -EINVAL; @@@ -4550,9 -4565,9 +4565,9 @@@ static int _bpf_getsockopt(struct sock tp = tcp_sk(sk);
if (optlen <= 0 || !tp->saved_syn || - optlen > tp->saved_syn[0]) + optlen > tcp_saved_syn_len(tp->saved_syn)) goto err_clear; - memcpy(optval, tp->saved_syn + 1, optlen); + memcpy(optval, tp->saved_syn->data, optlen); break; default: goto err_clear; @@@ -4654,9 -4669,99 +4669,99 @@@ static const struct bpf_func_proto bpf_ .arg5_type = ARG_CONST_SIZE, };
+ static int bpf_sock_ops_get_syn(struct bpf_sock_ops_kern *bpf_sock, + int optname, const u8 **start) + { + struct sk_buff *syn_skb = bpf_sock->syn_skb; + const u8 *hdr_start; + int ret; + + if (syn_skb) { + /* sk is a request_sock here */ + + if (optname == TCP_BPF_SYN) { + hdr_start = syn_skb->data; + ret = tcp_hdrlen(syn_skb); + } else if (optname == TCP_BPF_SYN_IP) { + hdr_start = skb_network_header(syn_skb); + ret = skb_network_header_len(syn_skb) + + tcp_hdrlen(syn_skb); + } else { + /* optname == TCP_BPF_SYN_MAC */ + hdr_start = skb_mac_header(syn_skb); + ret = skb_mac_header_len(syn_skb) + + skb_network_header_len(syn_skb) + + tcp_hdrlen(syn_skb); + } + } else { + struct sock *sk = bpf_sock->sk; + struct saved_syn *saved_syn; + + if (sk->sk_state == TCP_NEW_SYN_RECV) + /* synack retransmit. bpf_sock->syn_skb will + * not be available. It has to resort to + * saved_syn (if it is saved). + */ + saved_syn = inet_reqsk(sk)->saved_syn; + else + saved_syn = tcp_sk(sk)->saved_syn; + + if (!saved_syn) + return -ENOENT; + + if (optname == TCP_BPF_SYN) { + hdr_start = saved_syn->data + + saved_syn->mac_hdrlen + + saved_syn->network_hdrlen; + ret = saved_syn->tcp_hdrlen; + } else if (optname == TCP_BPF_SYN_IP) { + hdr_start = saved_syn->data + + saved_syn->mac_hdrlen; + ret = saved_syn->network_hdrlen + + saved_syn->tcp_hdrlen; + } else { + /* optname == TCP_BPF_SYN_MAC */ + + /* TCP_SAVE_SYN may not have saved the mac hdr */ + if (!saved_syn->mac_hdrlen) + return -ENOENT; + + hdr_start = saved_syn->data; + ret = saved_syn->mac_hdrlen + + saved_syn->network_hdrlen + + saved_syn->tcp_hdrlen; + } + } + + *start = hdr_start; + return ret; + } + BPF_CALL_5(bpf_sock_ops_getsockopt, struct bpf_sock_ops_kern *, bpf_sock, int, level, int, optname, char *, optval, int, optlen) { + if (IS_ENABLED(CONFIG_INET) && level == SOL_TCP && + optname >= TCP_BPF_SYN && optname <= TCP_BPF_SYN_MAC) { + int ret, copy_len = 0; + const u8 *start; + + ret = bpf_sock_ops_get_syn(bpf_sock, optname, &start); + if (ret > 0) { + copy_len = ret; + if (optlen < copy_len) { + copy_len = optlen; + ret = -ENOSPC; + } + + memcpy(optval, start, copy_len); + } + + /* Zero out unused buffer at the end */ + memset(optval + copy_len, 0, optlen - copy_len); + + return ret; + } + return _bpf_getsockopt(bpf_sock->sk, level, optname, optval, optlen); }
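Taken together, these filter.c hunks give sockops programs two new setsockopt knobs (TCP_BPF_DELACK_MAX and TCP_BPF_RTO_MIN, microsecond values range-checked against TCP_TIMEOUT_MIN and the respective maximum) and read access to the peer's SYN headers through bpf_sock_ops_get_syn(). A hedged sketch of the BPF side; the TCP_BPF_* and SOL_TCP constants are assumed to be visible from this series' UAPI additions:

/* Hedged sketch: tune the per-connection minimum RTO and read back
 * the SYN's TCP header once a passive connection is established.
 * Assumes the SYN is still available (syn_skb, or saved_syn if
 * TCP_SAVE_SYN was enabled), as handled by the kernel code above.
 */
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>

char LICENSE[] SEC("license") = "GPL";

SEC("sockops")
int syn_tuner(struct bpf_sock_ops *skops)
{
	int rto_min_us = 20000;	/* 20 ms, illustrative value */
	char syn[60];		/* max TCP header incl. options */

	if (skops->op == BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB) {
		bpf_getsockopt(skops, SOL_TCP, TCP_BPF_SYN,
			       syn, sizeof(syn));
		bpf_setsockopt(skops, SOL_TCP, TCP_BPF_RTO_MIN,
			       &rto_min_us, sizeof(rto_min_us));
	}
	return 1;
}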
@@@ -6150,6 -6255,232 +6255,232 @@@ static const struct bpf_func_proto bpf_ .arg3_type = ARG_ANYTHING, };
+ static const u8 *bpf_search_tcp_opt(const u8 *op, const u8 *opend,
+ 				    u8 search_kind, const u8 *magic,
+ 				    u8 magic_len, bool *eol)
+ {
+ 	u8 kind, kind_len;
+ 
+ 	*eol = false;
+ 
+ 	while (op < opend) {
+ 		kind = op[0];
+ 
+ 		if (kind == TCPOPT_EOL) {
+ 			*eol = true;
+ 			return ERR_PTR(-ENOMSG);
+ 		} else if (kind == TCPOPT_NOP) {
+ 			op++;
+ 			continue;
+ 		}
+ 
+ 		if (opend - op < 2 || opend - op < op[1] || op[1] < 2)
+ 			/* Something is wrong in the received header.
+ 			 * Follow the TCP stack's tcp_parse_options()
+ 			 * and just bail here.
+ 			 */
+ 			return ERR_PTR(-EFAULT);
+ 
+ 		kind_len = op[1];
+ 		if (search_kind == kind) {
+ 			if (!magic_len)
+ 				return op;
+ 
+ 			if (magic_len > kind_len - 2)
+ 				return ERR_PTR(-ENOMSG);
+ 
+ 			if (!memcmp(&op[2], magic, magic_len))
+ 				return op;
+ 		}
+ 
+ 		op += kind_len;
+ 	}
+ 
+ 	return ERR_PTR(-ENOMSG);
+ }
+ 
+ BPF_CALL_4(bpf_sock_ops_load_hdr_opt, struct bpf_sock_ops_kern *, bpf_sock,
+ 	   void *, search_res, u32, len, u64, flags)
+ {
+ 	bool eol, load_syn = flags & BPF_LOAD_HDR_OPT_TCP_SYN;
+ 	const u8 *op, *opend, *magic, *search = search_res;
+ 	u8 search_kind, search_len, copy_len, magic_len;
+ 	int ret;
+ 
+ 	/* Two bytes is the minimal option length, except for TCPOPT_NOP
+ 	 * and TCPOPT_EOL, which are useless for the bpf prog to learn
+ 	 * and which this helper therefore disallows loading as well.
+ 	 */
+ 	if (len < 2 || flags & ~BPF_LOAD_HDR_OPT_TCP_SYN)
+ 		return -EINVAL;
+ 
+ 	search_kind = search[0];
+ 	search_len = search[1];
+ 
+ 	if (search_len > len || search_kind == TCPOPT_NOP ||
+ 	    search_kind == TCPOPT_EOL)
+ 		return -EINVAL;
+ 
+ 	if (search_kind == TCPOPT_EXP || search_kind == 253) {
+ 		/* 16 or 32 bit magic. +2 for kind and kind length */
+ 		if (search_len != 4 && search_len != 6)
+ 			return -EINVAL;
+ 		magic = &search[2];
+ 		magic_len = search_len - 2;
+ 	} else {
+ 		if (search_len)
+ 			return -EINVAL;
+ 		magic = NULL;
+ 		magic_len = 0;
+ 	}
+ 
+ 	if (load_syn) {
+ 		ret = bpf_sock_ops_get_syn(bpf_sock, TCP_BPF_SYN, &op);
+ 		if (ret < 0)
+ 			return ret;
+ 
+ 		opend = op + ret;
+ 		op += sizeof(struct tcphdr);
+ 	} else {
+ 		if (!bpf_sock->skb ||
+ 		    bpf_sock->op == BPF_SOCK_OPS_HDR_OPT_LEN_CB)
+ 			/* This bpf_sock->op cannot call this helper */
+ 			return -EPERM;
+ 
+ 		opend = bpf_sock->skb_data_end;
+ 		op = bpf_sock->skb->data + sizeof(struct tcphdr);
+ 	}
+ 
+ 	op = bpf_search_tcp_opt(op, opend, search_kind, magic, magic_len,
+ 				&eol);
+ 	if (IS_ERR(op))
+ 		return PTR_ERR(op);
+ 
+ 	copy_len = op[1];
+ 	ret = copy_len;
+ 	if (copy_len > len) {
+ 		ret = -ENOSPC;
+ 		copy_len = len;
+ 	}
+ 
+ 	memcpy(search_res, op, copy_len);
+ 	return ret;
+ }
+ 
+ static const struct bpf_func_proto bpf_sock_ops_load_hdr_opt_proto = {
+ 	.func		= bpf_sock_ops_load_hdr_opt,
+ 	.gpl_only	= false,
+ 	.ret_type	= RET_INTEGER,
+ 	.arg1_type	= ARG_PTR_TO_CTX,
+ 	.arg2_type	= ARG_PTR_TO_MEM,
+ 	.arg3_type	= ARG_CONST_SIZE,
+ 	.arg4_type	= ARG_ANYTHING,
+ };
+ 
+ BPF_CALL_4(bpf_sock_ops_store_hdr_opt, struct bpf_sock_ops_kern *, bpf_sock,
+ 	   const void *, from, u32, len, u64, flags)
+ {
+ 	u8 new_kind, new_kind_len, magic_len = 0, *opend;
+ 	const u8 *op, *new_op, *magic = NULL;
+ 	struct sk_buff *skb;
+ 	bool eol;
+ 
+ 	if (bpf_sock->op != BPF_SOCK_OPS_WRITE_HDR_OPT_CB)
+ 		return -EPERM;
+ 
+ 	if (len < 2 || flags)
+ 		return -EINVAL;
+ 
+ 	new_op = from;
+ 	new_kind = new_op[0];
+ 	new_kind_len = new_op[1];
+ 
+ 	if (new_kind_len > len || new_kind == TCPOPT_NOP ||
+ 	    new_kind == TCPOPT_EOL)
+ 		return -EINVAL;
+ 
+ 	if (new_kind_len > bpf_sock->remaining_opt_len)
+ 		return -ENOSPC;
+ 
+ 	/* 253 is another experimental kind */
+ 	if (new_kind == TCPOPT_EXP || new_kind == 253) {
+ 		if (new_kind_len < 4)
+ 			return -EINVAL;
+ 		/* Match on the 2-byte magic as well.
+ 		 * RFC 6994: the magic could be 2 or 4 bytes.
+ 		 * Hence, matching by 2 bytes only is on the
+ 		 * conservative side but it is the right
+ 		 * thing to do for the 'search-for-duplication'
+ 		 * purpose.
+ 		 */
+ 		magic = &new_op[2];
+ 		magic_len = 2;
+ 	}
+ 
+ 	/* Check for duplication */
+ 	skb = bpf_sock->skb;
+ 	op = skb->data + sizeof(struct tcphdr);
+ 	opend = bpf_sock->skb_data_end;
+ 
+ 	op = bpf_search_tcp_opt(op, opend, new_kind, magic, magic_len,
+ 				&eol);
+ 	if (!IS_ERR(op))
+ 		return -EEXIST;
+ 
+ 	if (PTR_ERR(op) != -ENOMSG)
+ 		return PTR_ERR(op);
+ 
+ 	if (eol)
+ 		/* The options have ended. Treat it as if no more
+ 		 * header options can be written.
+ 		 */
+ 		return -ENOSPC;
+ 
+ 	/* No duplication found. Store the header option. */
+ 	memcpy(opend, from, new_kind_len);
+ 
+ 	bpf_sock->remaining_opt_len -= new_kind_len;
+ 	bpf_sock->skb_data_end += new_kind_len;
+ 
+ 	return 0;
+ }
+ 
+ static const struct bpf_func_proto bpf_sock_ops_store_hdr_opt_proto = {
+ 	.func		= bpf_sock_ops_store_hdr_opt,
+ 	.gpl_only	= false,
+ 	.ret_type	= RET_INTEGER,
+ 	.arg1_type	= ARG_PTR_TO_CTX,
+ 	.arg2_type	= ARG_PTR_TO_MEM,
+ 	.arg3_type	= ARG_CONST_SIZE,
+ 	.arg4_type	= ARG_ANYTHING,
+ };
+ 
+ BPF_CALL_3(bpf_sock_ops_reserve_hdr_opt, struct bpf_sock_ops_kern *, bpf_sock,
+ 	   u32, len, u64, flags)
+ {
+ 	if (bpf_sock->op != BPF_SOCK_OPS_HDR_OPT_LEN_CB)
+ 		return -EPERM;
+ 
+ 	if (flags || len < 2)
+ 		return -EINVAL;
+ 
+ 	if (len > bpf_sock->remaining_opt_len)
+ 		return -ENOSPC;
+ 
+ 	bpf_sock->remaining_opt_len -= len;
+ 
+ 	return 0;
+ }
+ 
+ static const struct bpf_func_proto bpf_sock_ops_reserve_hdr_opt_proto = {
+ 	.func		= bpf_sock_ops_reserve_hdr_opt,
+ 	.gpl_only	= false,
+ 	.ret_type	= RET_INTEGER,
+ 	.arg1_type	= ARG_PTR_TO_CTX,
+ 	.arg2_type	= ARG_ANYTHING,
+ 	.arg3_type	= ARG_ANYTHING,
+ };
+ #endif /* CONFIG_INET */
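The three helpers above split TCP header-option writing across the new sock_ops callbacks: bpf_reserve_hdr_opt() is only legal under BPF_SOCK_OPS_HDR_OPT_LEN_CB (it merely shrinks remaining_opt_len), bpf_store_hdr_opt() only under BPF_SOCK_OPS_WRITE_HDR_OPT_CB (after the duplication check through bpf_search_tcp_opt()), and bpf_load_hdr_opt() parses options out of the current skb or the saved SYN. A hedged sketch of the write side; kind 253 with a 2-byte 0xeB9F magic follows the RFC 6994 experimental-option convention, and the option payload here is invented for illustration:

/* Hedged sketch: write one experimental TCP option from sockops. */
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_endian.h>

char LICENSE[] SEC("license") = "GPL";

struct my_exp_opt {
	__u8 kind;	/* 253: RFC 6994 experimental kind */
	__u8 len;	/* total option length */
	__u16 magic;	/* 2-byte ExID, illustrative */
	__u8 data;	/* made-up payload */
} __attribute__((packed));

SEC("sockops")
int hdr_opt_writer(struct bpf_sock_ops *skops)
{
	struct my_exp_opt opt = {
		.kind = 253,
		.len = sizeof(opt),
		.magic = bpf_htons(0xeB9F),
		.data = 1,
	};

	switch (skops->op) {
	case BPF_SOCK_OPS_HDR_OPT_LEN_CB:
		/* Ask the stack to leave room for our option. */
		bpf_reserve_hdr_opt(skops, sizeof(opt), 0);
		break;
	case BPF_SOCK_OPS_WRITE_HDR_OPT_CB:
		/* Fails with -EEXIST if an option with this kind and
		 * magic is already present (the duplication check).
		 */
		bpf_store_hdr_opt(skops, &opt, sizeof(opt), 0);
		break;
	}
	return 1;
}

For these callbacks to fire at all, the program would first have to enable the corresponding flags (e.g. BPF_SOCK_OPS_WRITE_HDR_OPT_CB_FLAG) via bpf_sock_ops_cb_flags_set(), per the rest of this series.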
bool bpf_helper_changes_pkt_data(void *func) @@@ -6178,6 -6509,9 +6509,9 @@@ func == bpf_lwt_seg6_store_bytes || func == bpf_lwt_seg6_adjust_srh || func == bpf_lwt_seg6_action || + #endif + #ifdef CONFIG_INET + func == bpf_sock_ops_store_hdr_opt || #endif func == bpf_lwt_in_push_encap || func == bpf_lwt_xmit_push_encap) @@@ -6550,6 -6884,12 +6884,12 @@@ sock_ops_func_proto(enum bpf_func_id fu case BPF_FUNC_sk_storage_delete: return &bpf_sk_storage_delete_proto; #ifdef CONFIG_INET + case BPF_FUNC_load_hdr_opt: + return &bpf_sock_ops_load_hdr_opt_proto; + case BPF_FUNC_store_hdr_opt: + return &bpf_sock_ops_store_hdr_opt_proto; + case BPF_FUNC_reserve_hdr_opt: + return &bpf_sock_ops_reserve_hdr_opt_proto; case BPF_FUNC_tcp_sock: return &bpf_tcp_sock_proto; #endif /* CONFIG_INET */ @@@ -7349,6 -7689,20 +7689,20 @@@ static bool sock_ops_is_valid_access(in return false; info->reg_type = PTR_TO_SOCKET_OR_NULL; break; + case offsetof(struct bpf_sock_ops, skb_data): + if (size != sizeof(__u64)) + return false; + info->reg_type = PTR_TO_PACKET; + break; + case offsetof(struct bpf_sock_ops, skb_data_end): + if (size != sizeof(__u64)) + return false; + info->reg_type = PTR_TO_PACKET_END; + break; + case offsetof(struct bpf_sock_ops, skb_tcp_flags): + bpf_ctx_record_field_size(info, size_default); + return bpf_ctx_narrow_access_ok(off, size, + size_default); default: if (size != size_default) return false; @@@ -8450,17 -8804,22 +8804,22 @@@ static u32 sock_ops_convert_ctx_access( return insn - insn_buf;
switch (si->off) { - case offsetof(struct bpf_sock_ops, op) ... + case offsetof(struct bpf_sock_ops, op): + *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_sock_ops_kern, + op), + si->dst_reg, si->src_reg, + offsetof(struct bpf_sock_ops_kern, op)); + break; + + case offsetof(struct bpf_sock_ops, replylong[0]) ... offsetof(struct bpf_sock_ops, replylong[3]): - BUILD_BUG_ON(sizeof_field(struct bpf_sock_ops, op) != - sizeof_field(struct bpf_sock_ops_kern, op)); BUILD_BUG_ON(sizeof_field(struct bpf_sock_ops, reply) != sizeof_field(struct bpf_sock_ops_kern, reply)); BUILD_BUG_ON(sizeof_field(struct bpf_sock_ops, replylong) != sizeof_field(struct bpf_sock_ops_kern, replylong)); off = si->off; - off -= offsetof(struct bpf_sock_ops, op); - off += offsetof(struct bpf_sock_ops_kern, op); + off -= offsetof(struct bpf_sock_ops, replylong[0]); + off += offsetof(struct bpf_sock_ops_kern, replylong[0]); if (type == BPF_WRITE) *insn++ = BPF_STX_MEM(BPF_W, si->dst_reg, si->src_reg, off); @@@ -8681,6 -9040,49 +9040,49 @@@ case offsetof(struct bpf_sock_ops, sk): SOCK_OPS_GET_SK(); break; + case offsetof(struct bpf_sock_ops, skb_data_end): + *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_sock_ops_kern, + skb_data_end), + si->dst_reg, si->src_reg, + offsetof(struct bpf_sock_ops_kern, + skb_data_end)); + break; + case offsetof(struct bpf_sock_ops, skb_data): + *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_sock_ops_kern, + skb), + si->dst_reg, si->src_reg, + offsetof(struct bpf_sock_ops_kern, + skb)); + *insn++ = BPF_JMP_IMM(BPF_JEQ, si->dst_reg, 0, 1); + *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, data), + si->dst_reg, si->dst_reg, + offsetof(struct sk_buff, data)); + break; + case offsetof(struct bpf_sock_ops, skb_len): + *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_sock_ops_kern, + skb), + si->dst_reg, si->src_reg, + offsetof(struct bpf_sock_ops_kern, + skb)); + *insn++ = BPF_JMP_IMM(BPF_JEQ, si->dst_reg, 0, 1); + *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, len), + si->dst_reg, si->dst_reg, + offsetof(struct sk_buff, len)); + break; + case offsetof(struct bpf_sock_ops, skb_tcp_flags): + off = offsetof(struct sk_buff, cb); + off += offsetof(struct tcp_skb_cb, tcp_flags); + *target_size = sizeof_field(struct tcp_skb_cb, tcp_flags); + *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_sock_ops_kern, + skb), + si->dst_reg, si->src_reg, + offsetof(struct bpf_sock_ops_kern, + skb)); + *insn++ = BPF_JMP_IMM(BPF_JEQ, si->dst_reg, 0, 1); + *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct tcp_skb_cb, + tcp_flags), + si->dst_reg, si->dst_reg, off); + break; } return insn - insn_buf; } @@@ -9223,7 -9625,7 +9625,7 @@@ sk_reuseport_is_valid_access(int off, i case bpf_ctx_range(struct sk_reuseport_md, eth_protocol): if (size < sizeof_field(struct sk_buff, protocol)) return false; - /* fall through */ + fallthrough; case bpf_ctx_range(struct sk_reuseport_md, ip_protocol): case bpf_ctx_range(struct sk_reuseport_md, bind_inany): case bpf_ctx_range(struct sk_reuseport_md, len): diff --combined net/core/skbuff.c index 6faf73d6a0f7,a5c11aae9c89..bfd748346f20 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c @@@ -820,7 -820,6 +820,7 @@@ void skb_tx_error(struct sk_buff *skb } EXPORT_SYMBOL(skb_tx_error);
+#ifdef CONFIG_TRACEPOINTS /** * consume_skb - free an skbuff * @skb: buffer to free @@@ -838,7 -837,6 +838,7 @@@ void consume_skb(struct sk_buff *skb __kfree_skb(skb); } EXPORT_SYMBOL(consume_skb); +#endif
/** * consume_stateless_skb - free an skbuff, assuming it is stateless @@@ -5955,8 -5953,7 +5955,7 @@@ static int pskb_carve_inside_nonlinear( size = SKB_WITH_OVERHEAD(ksize(data));
memcpy((struct skb_shared_info *)(data + size), - skb_shinfo(skb), offsetof(struct skb_shared_info, - frags[skb_shinfo(skb)->nr_frags])); + skb_shinfo(skb), offsetof(struct skb_shared_info, frags[0])); if (skb_orphan_frags(skb, gfp_mask)) { kfree(data); return -ENOMEM; diff --combined net/core/skmsg.c index 649583158983,1c81caf9630f..4b5f7c8fecd1 --- a/net/core/skmsg.c +++ b/net/core/skmsg.c @@@ -494,14 -494,34 +494,34 @@@ end
struct sk_psock *sk_psock_init(struct sock *sk, int node) { - struct sk_psock *psock = kzalloc_node(sizeof(*psock), - GFP_ATOMIC | __GFP_NOWARN, - node); - if (!psock) - return NULL; + struct sk_psock *psock; + struct proto *prot;
+ write_lock_bh(&sk->sk_callback_lock); + + if (inet_csk_has_ulp(sk)) { + psock = ERR_PTR(-EINVAL); + goto out; + } + + if (sk->sk_user_data) { + psock = ERR_PTR(-EBUSY); + goto out; + } + + psock = kzalloc_node(sizeof(*psock), GFP_ATOMIC | __GFP_NOWARN, node); + if (!psock) { + psock = ERR_PTR(-ENOMEM); + goto out; + } + + prot = READ_ONCE(sk->sk_prot); psock->sk = sk; - psock->eval = __SK_NONE; + psock->eval = __SK_NONE; + psock->sk_proto = prot; + psock->saved_unhash = prot->unhash; + psock->saved_close = prot->close; + psock->saved_write_space = sk->sk_write_space;
INIT_LIST_HEAD(&psock->link); spin_lock_init(&psock->link_lock); @@@ -516,6 -536,8 +536,8 @@@ rcu_assign_sk_user_data_nocopy(sk, psock); sock_hold(sk);
+ out: + write_unlock_bh(&sk->sk_callback_lock); return psock; } EXPORT_SYMBOL_GPL(sk_psock_init); @@@ -772,6 -794,7 +794,6 @@@ static void sk_psock_verdict_apply(stru sk_psock_skb_redirect(skb); break; case __SK_DROP: - /* fall-through */ default: out_free: kfree_skb(skb); diff --combined net/core/sock.c index 6c5c6b18eff4,64d2aec5ed45..ba9e7d91e2ef --- a/net/core/sock.c +++ b/net/core/sock.c @@@ -413,18 -413,6 +413,6 @@@ static int sock_set_timeout(long *timeo return 0; }
- static void sock_warn_obsolete_bsdism(const char *name) - { - static int warned; - static char warncomm[TASK_COMM_LEN]; - if (strcmp(warncomm, current->comm) && warned < 5) { - strcpy(warncomm, current->comm); - pr_warn("process `%s' is using obsolete %s SO_BSDCOMPAT\n", - warncomm, name); - warned++; - } - } - static bool sock_needs_netstamp(const struct sock *sk) { switch (sk->sk_family) { @@@ -984,7 -972,6 +972,6 @@@ set_sndbuf break;
case SO_BSDCOMPAT: - sock_warn_obsolete_bsdism("setsockopt"); break;
case SO_PASSCRED: @@@ -1008,7 -995,7 +995,7 @@@ break; case SO_TIMESTAMPING_NEW: sock_set_flag(sk, SOCK_TSTAMP_NEW); - /* fall through */ + fallthrough; case SO_TIMESTAMPING_OLD: if (val & ~SOF_TIMESTAMPING_MASK) { ret = -EINVAL; @@@ -1387,7 -1374,6 +1374,6 @@@ int sock_getsockopt(struct socket *sock break;
case SO_BSDCOMPAT: - sock_warn_obsolete_bsdism("getsockopt"); break;
case SO_TIMESTAMP_OLD: @@@ -3254,7 -3240,7 +3240,7 @@@ void sk_common_release(struct sock *sk sk->sk_prot->destroy(sk);
/* - * Observation: when sock_common_release is called, processes have + * Observation: when sk_common_release is called, processes have * no access to socket. But net still has. * Step one, detach it from networking: * diff --combined net/mptcp/protocol.c index 365ba96c84b0,e6216c4f308c..683196225f91 --- a/net/mptcp/protocol.c +++ b/net/mptcp/protocol.c @@@ -24,8 -24,6 +24,6 @@@ #include "protocol.h" #include "mib.h"
- #define MPTCP_SAME_STATE TCP_MAX_STATES - #if IS_ENABLED(CONFIG_MPTCP_IPV6) struct mptcp6_sock { struct mptcp_sock msk; @@@ -193,6 -191,7 +191,6 @@@ static void mptcp_check_data_fin_ack(st sk->sk_state_change(sk); break; case TCP_CLOSING: - fallthrough; case TCP_LAST_ACK: inet_sk_state_store(sk, TCP_CLOSE); sk->sk_state_change(sk); @@@ -891,6 -890,7 +889,6 @@@ restart goto out; }
-wait_for_sndbuf: __mptcp_flush_join_list(msk); ssk = mptcp_subflow_get_send(msk); while (!sk_stream_memory_free(sk) || @@@ -980,7 -980,7 +978,7 @@@ */ mptcp_set_timeout(sk, ssk); release_sock(ssk); - goto wait_for_sndbuf; + goto restart; } } } @@@ -1539,7 -1539,7 +1537,7 @@@ static void mptcp_subflow_shutdown(stru case TCP_LISTEN: if (!(how & RCV_SHUTDOWN)) break; - /* fall through */ + fallthrough; case TCP_SYN_SENT: tcp_disconnect(ssk, O_NONBLOCK); break; diff --combined net/netlabel/netlabel_domainhash.c index f73a8382c275,38aaeadec13d..dc8c39f51f7d --- a/net/netlabel/netlabel_domainhash.c +++ b/net/netlabel/netlabel_domainhash.c @@@ -85,7 -85,6 +85,7 @@@ static void netlbl_domhsh_free_entry(st kfree(netlbl_domhsh_addr6_entry(iter6)); } #endif /* IPv6 */ + kfree(ptr->def.addrsel); } kfree(ptr->domain); kfree(ptr); @@@ -538,8 -537,6 +538,8 @@@ int netlbl_domhsh_add(struct netlbl_dom goto add_return; } #endif /* IPv6 */ + /* cleanup the new entry since we've moved everything over */ + netlbl_domhsh_free_entry(&entry->rcu); } else ret_val = -EINVAL;
@@@ -583,12 -580,6 +583,12 @@@ int netlbl_domhsh_remove_entry(struct n { int ret_val = 0; struct audit_buffer *audit_buf; + struct netlbl_af4list *iter4; + struct netlbl_domaddr4_map *map4; +#if IS_ENABLED(CONFIG_IPV6) + struct netlbl_af6list *iter6; + struct netlbl_domaddr6_map *map6; +#endif /* IPv6 */
if (entry == NULL) return -ENOENT; @@@ -606,41 -597,48 +606,40 @@@ ret_val = -ENOENT; spin_unlock(&netlbl_domhsh_lock);
+ if (ret_val) + return ret_val; + audit_buf = netlbl_audit_start_common(AUDIT_MAC_MAP_DEL, audit_info); if (audit_buf != NULL) { audit_log_format(audit_buf, - " nlbl_domain=%s res=%u", - entry->domain ? entry->domain : "(default)", - ret_val == 0 ? 1 : 0); + " nlbl_domain=%s res=1", + entry->domain ? entry->domain : "(default)"); audit_log_end(audit_buf); }
- if (ret_val == 0) { - struct netlbl_af4list *iter4; - struct netlbl_domaddr4_map *map4; -#if IS_ENABLED(CONFIG_IPV6) - struct netlbl_af6list *iter6; - struct netlbl_domaddr6_map *map6; -#endif /* IPv6 */ - - switch (entry->def.type) { - case NETLBL_NLTYPE_ADDRSELECT: - netlbl_af4list_foreach_rcu(iter4, - &entry->def.addrsel->list4) { - map4 = netlbl_domhsh_addr4_entry(iter4); - cipso_v4_doi_putdef(map4->def.cipso); - } + switch (entry->def.type) { + case NETLBL_NLTYPE_ADDRSELECT: + netlbl_af4list_foreach_rcu(iter4, &entry->def.addrsel->list4) { + map4 = netlbl_domhsh_addr4_entry(iter4); + cipso_v4_doi_putdef(map4->def.cipso); + } #if IS_ENABLED(CONFIG_IPV6) - netlbl_af6list_foreach_rcu(iter6, - &entry->def.addrsel->list6) { - map6 = netlbl_domhsh_addr6_entry(iter6); - calipso_doi_putdef(map6->def.calipso); - } + netlbl_af6list_foreach_rcu(iter6, &entry->def.addrsel->list6) { + map6 = netlbl_domhsh_addr6_entry(iter6); + calipso_doi_putdef(map6->def.calipso); + } #endif /* IPv6 */ - break; - case NETLBL_NLTYPE_CIPSOV4: - cipso_v4_doi_putdef(entry->def.cipso); - break; + break; + case NETLBL_NLTYPE_CIPSOV4: + cipso_v4_doi_putdef(entry->def.cipso); + break; #if IS_ENABLED(CONFIG_IPV6) - case NETLBL_NLTYPE_CALIPSO: - calipso_doi_putdef(entry->def.calipso); - break; + case NETLBL_NLTYPE_CALIPSO: + calipso_doi_putdef(entry->def.calipso); + break; #endif /* IPv6 */ - } - call_rcu(&entry->rcu, netlbl_domhsh_free_entry); } + call_rcu(&entry->rcu, netlbl_domhsh_free_entry);
return ret_val; } diff --combined net/netlink/af_netlink.c index d2d1448274f5,5cee1d0eaebe..f9efd2c1cb50 --- a/net/netlink/af_netlink.c +++ b/net/netlink/af_netlink.c @@@ -353,7 -353,7 +353,7 @@@ static void netlink_rcv_wake(struct soc { struct netlink_sock *nlk = nlk_sk(sk);
- if (skb_queue_empty(&sk->sk_receive_queue)) + if (skb_queue_empty_lockless(&sk->sk_receive_queue)) clear_bit(NETLINK_S_CONGESTED, &nlk->state); if (!test_bit(NETLINK_S_CONGESTED, &nlk->state)) wake_up_interruptible(&nlk->wait); @@@ -848,7 -848,7 +848,7 @@@ retry * * Test to see if the opener of the socket we received the message * from had when the netlink socket was created and the sender of the - * message has has the capability @cap in the user namespace @user_ns. + * message has the capability @cap in the user namespace @user_ns. */ bool __netlink_ns_capable(const struct netlink_skb_parms *nsp, struct user_namespace *user_ns, int cap) @@@ -867,7 -867,7 +867,7 @@@ EXPORT_SYMBOL(__netlink_ns_capable) * * Test to see if the opener of the socket we received the message * from had when the netlink socket was created and the sender of the - * message has has the capability @cap in the user namespace @user_ns. + * message has the capability @cap in the user namespace @user_ns. */ bool netlink_ns_capable(const struct sk_buff *skb, struct user_namespace *user_ns, int cap) @@@ -883,7 -883,7 +883,7 @@@ EXPORT_SYMBOL(netlink_ns_capable) * * Test to see if the opener of the socket we received the message * from had when the netlink socket was created and the sender of the - * message has has the capability @cap in all user namespaces. + * message has the capability @cap in all user namespaces. */ bool netlink_capable(const struct sk_buff *skb, int cap) { @@@ -898,7 -898,7 +898,7 @@@ EXPORT_SYMBOL(netlink_capable) * * Test to see if the opener of the socket we received the message * from had when the netlink socket was created and the sender of the - * message has has the capability @cap over the network namespace of + * message has the capability @cap over the network namespace of * the socket we received the message from. 
*/ bool netlink_net_capable(const struct sk_buff *skb, int cap) diff --combined net/netlink/policy.c index 641ffbdd977a,5c9e7530865f..62f977fa645a --- a/net/netlink/policy.c +++ b/net/netlink/policy.c @@@ -188,7 -188,7 +188,7 @@@ send_attribute goto next; case NLA_NESTED: type = NL_ATTR_TYPE_NESTED; - /* fall through */ + fallthrough; case NLA_NESTED_ARRAY: if (pt->type == NLA_NESTED_ARRAY) type = NL_ATTR_TYPE_NESTED_ARRAY; @@@ -254,12 -254,6 +254,6 @@@ pt->bitfield32_valid)) goto nla_put_failure; break; - case NLA_EXACT_LEN: - type = NL_ATTR_TYPE_BINARY; - if (nla_put_u32(skb, NL_POLICY_TYPE_ATTR_MIN_LENGTH, pt->len) || - nla_put_u32(skb, NL_POLICY_TYPE_ATTR_MAX_LENGTH, pt->len)) - goto nla_put_failure; - break; case NLA_STRING: case NLA_NUL_STRING: case NLA_BINARY: @@@ -269,14 -263,27 +263,27 @@@ type = NL_ATTR_TYPE_NUL_STRING; else type = NL_ATTR_TYPE_BINARY; - if (pt->len && nla_put_u32(skb, NL_POLICY_TYPE_ATTR_MAX_LENGTH, - pt->len)) - goto nla_put_failure; - break; - case NLA_MIN_LEN: - type = NL_ATTR_TYPE_BINARY; - if (nla_put_u32(skb, NL_POLICY_TYPE_ATTR_MIN_LENGTH, pt->len)) + + if (pt->validation_type == NLA_VALIDATE_RANGE || + pt->validation_type == NLA_VALIDATE_RANGE_WARN_TOO_LONG) { + struct netlink_range_validation range; + + nla_get_range_unsigned(pt, &range); + + if (range.min && + nla_put_u32(skb, NL_POLICY_TYPE_ATTR_MIN_LENGTH, + range.min)) + goto nla_put_failure; + + if (range.max < U16_MAX && + nla_put_u32(skb, NL_POLICY_TYPE_ATTR_MAX_LENGTH, + range.max)) + goto nla_put_failure; + } else if (pt->len && + nla_put_u32(skb, NL_POLICY_TYPE_ATTR_MAX_LENGTH, + pt->len)) { goto nla_put_failure; + } break; case NLA_FLAG: type = NL_ATTR_TYPE_FLAG; diff --combined net/sctp/sm_make_chunk.c index c11c24524652,467bd77b6986..9a56ae2f3651 --- a/net/sctp/sm_make_chunk.c +++ b/net/sctp/sm_make_chunk.c @@@ -1235,7 -1235,7 +1235,7 @@@ nodata
/* Create an Operation Error chunk of a fixed size, specifically, * min(asoc->pathmtu, SCTP_DEFAULT_MAXSEGMENT) - overheads. - * This is a helper function to allocate an error chunk for for those + * This is a helper function to allocate an error chunk for those * invalid parameter codes in which we may not want to report all the * errors, if the incoming chunk is large. If it can't fit in a single * packet, we ignore it. @@@ -1780,7 -1780,7 +1780,7 @@@ no_hmac * for init collision case of lost COOKIE ACK. * If skb has been timestamped, then use the stamp, otherwise * use current time. This introduces a small possibility that - * that a cookie may be considered expired, but his would only slow + * a cookie may be considered expired, but this would only slow * down the new association establishment instead of every packet. */ if (sock_flag(ep->base.sk, SOCK_TIMESTAMP)) @@@ -2077,7 -2077,7 +2077,7 @@@ static enum sctp_ierror sctp_process_un break; case SCTP_PARAM_ACTION_DISCARD_ERR: retval = SCTP_IERROR_ERROR; - /* Fall through */ + fallthrough; case SCTP_PARAM_ACTION_SKIP_ERR: /* Make an ERROR chunk, preparing enough room for * returning multiple unknown parameters. @@@ -2319,7 -2319,7 +2319,7 @@@ int sctp_process_init(struct sctp_assoc
/* This implementation defaults to making the first transport * added as the primary transport. The source address seems to - * be a a better choice than any of the embedded addresses. + * be a better choice than any of the embedded addresses. */ if (!sctp_assoc_add_peer(asoc, peer_addr, gfp, SCTP_ACTIVE)) goto nomem; diff --combined net/socket.c index 0c0144604f81,e84a8e281b4c..82262e1922f9 --- a/net/socket.c +++ b/net/socket.c @@@ -2628,9 -2628,11 +2628,11 @@@ long __sys_recvmsg_sock(struct socket * struct user_msghdr __user *umsg, struct sockaddr __user *uaddr, unsigned int flags) { - /* disallow ancillary data requests from this path */ - if (msg->msg_control || msg->msg_controllen) - return -EINVAL; + if (msg->msg_control || msg->msg_controllen) { + /* disallow ancillary data reqs unless cmsg is plain data */ + if (!(sock->ops->flags & PROTO_CMSG_DATA_ONLY)) + return -EINVAL; + }
return ____sys_recvmsg(sock, msg, umsg, uaddr, flags, 0); } @@@ -3610,7 -3612,7 +3612,7 @@@ int kernel_getsockname(struct socket *s EXPORT_SYMBOL(kernel_getsockname);
/** - * kernel_peername - get the address which the socket is connected (kernel space) + * kernel_getpeername - get the address which the socket is connected (kernel space) * @sock: socket * @addr: address holder * @@@ -3671,7 -3673,7 +3673,7 @@@ int kernel_sendpage_locked(struct sock EXPORT_SYMBOL(kernel_sendpage_locked);
/** - * kernel_shutdown - shut down part of a full-duplex connection (kernel space) + * kernel_sock_shutdown - shut down part of a full-duplex connection (kernel space) * @sock: socket * @how: connection part * diff --combined net/wireless/nl80211.c index 2c9e9a2d1688,201d029687cc..52a35e788547 --- a/net/wireless/nl80211.c +++ b/net/wireless/nl80211.c @@@ -336,6 -336,13 +336,13 @@@ static const struct nla_policy nl80211_ .len = NL80211_MAX_SUPP_HT_RATES }, [NL80211_TXRATE_VHT] = NLA_POLICY_EXACT_LEN_WARN(sizeof(struct nl80211_txrate_vht)), [NL80211_TXRATE_GI] = { .type = NLA_U8 }, + [NL80211_TXRATE_HE] = NLA_POLICY_EXACT_LEN(sizeof(struct nl80211_txrate_he)), + [NL80211_TXRATE_HE_GI] = NLA_POLICY_RANGE(NLA_U8, + NL80211_RATE_INFO_HE_GI_0_8, + NL80211_RATE_INFO_HE_GI_3_2), + [NL80211_TXRATE_HE_LTF] = NLA_POLICY_RANGE(NLA_U8, + NL80211_RATE_INFO_HE_1XLTF, + NL80211_RATE_INFO_HE_4XLTF), };
static const struct nla_policy @@@ -539,7 -546,10 +546,10 @@@ static const struct nla_policy nl80211_ [NL80211_ATTR_BG_SCAN_PERIOD] = { .type = NLA_U16 }, [NL80211_ATTR_WDEV] = { .type = NLA_U64 }, [NL80211_ATTR_USER_REG_HINT_TYPE] = { .type = NLA_U32 }, - [NL80211_ATTR_AUTH_DATA] = { .type = NLA_BINARY, }, + + /* need to include at least Auth Transaction and Status Code */ + [NL80211_ATTR_AUTH_DATA] = NLA_POLICY_MIN_LEN(4), + [NL80211_ATTR_VHT_CAPABILITY] = NLA_POLICY_EXACT_LEN_WARN(NL80211_VHT_CAPABILITY_LEN), [NL80211_ATTR_SCAN_FLAGS] = { .type = NLA_U32 }, [NL80211_ATTR_P2P_CTWINDOW] = NLA_POLICY_MAX(NLA_U8, 127), @@@ -561,23 -571,30 +571,30 @@@ [NL80211_ATTR_IE_RIC] = { .type = NLA_BINARY, .len = IEEE80211_MAX_DATA_LEN }, [NL80211_ATTR_CRIT_PROT_ID] = { .type = NLA_U16 }, - [NL80211_ATTR_MAX_CRIT_PROT_DURATION] = { .type = NLA_U16 }, + [NL80211_ATTR_MAX_CRIT_PROT_DURATION] = + NLA_POLICY_MAX(NLA_U16, NL80211_CRIT_PROTO_MAX_DURATION), [NL80211_ATTR_PEER_AID] = NLA_POLICY_RANGE(NLA_U16, 1, IEEE80211_MAX_AID), [NL80211_ATTR_CH_SWITCH_COUNT] = { .type = NLA_U32 }, [NL80211_ATTR_CH_SWITCH_BLOCK_TX] = { .type = NLA_FLAG }, [NL80211_ATTR_CSA_IES] = { .type = NLA_NESTED }, - [NL80211_ATTR_CSA_C_OFF_BEACON] = { .type = NLA_BINARY }, - [NL80211_ATTR_CSA_C_OFF_PRESP] = { .type = NLA_BINARY }, - [NL80211_ATTR_STA_SUPPORTED_CHANNELS] = { .type = NLA_BINARY }, - [NL80211_ATTR_STA_SUPPORTED_OPER_CLASSES] = { .type = NLA_BINARY }, + [NL80211_ATTR_CNTDWN_OFFS_BEACON] = { .type = NLA_BINARY }, + [NL80211_ATTR_CNTDWN_OFFS_PRESP] = { .type = NLA_BINARY }, + [NL80211_ATTR_STA_SUPPORTED_CHANNELS] = NLA_POLICY_MIN_LEN(2), + /* + * The value of the Length field of the Supported Operating + * Classes element is between 2 and 253. + */ + [NL80211_ATTR_STA_SUPPORTED_OPER_CLASSES] = + NLA_POLICY_RANGE(NLA_BINARY, 2, 253), [NL80211_ATTR_HANDLE_DFS] = { .type = NLA_FLAG }, [NL80211_ATTR_OPMODE_NOTIF] = { .type = NLA_U8 }, [NL80211_ATTR_VENDOR_ID] = { .type = NLA_U32 }, [NL80211_ATTR_VENDOR_SUBCMD] = { .type = NLA_U32 }, [NL80211_ATTR_VENDOR_DATA] = { .type = NLA_BINARY }, - [NL80211_ATTR_QOS_MAP] = { .type = NLA_BINARY, - .len = IEEE80211_QOS_MAP_LEN_MAX }, + [NL80211_ATTR_QOS_MAP] = NLA_POLICY_RANGE(NLA_BINARY, + IEEE80211_QOS_MAP_LEN_MIN, + IEEE80211_QOS_MAP_LEN_MAX), [NL80211_ATTR_MAC_HINT] = NLA_POLICY_EXACT_LEN_WARN(ETH_ALEN), [NL80211_ATTR_WIPHY_FREQ_HINT] = { .type = NLA_U32 }, [NL80211_ATTR_TDLS_PEER_CAPABILITY] = { .type = NLA_U32 }, @@@ -625,15 -642,17 +642,17 @@@ .len = FILS_ERP_MAX_RRK_LEN }, [NL80211_ATTR_FILS_CACHE_ID] = NLA_POLICY_EXACT_LEN_WARN(2), [NL80211_ATTR_PMK] = { .type = NLA_BINARY, .len = PMK_MAX_LEN }, + [NL80211_ATTR_PMKR0_NAME] = NLA_POLICY_EXACT_LEN(WLAN_PMK_NAME_LEN), [NL80211_ATTR_SCHED_SCAN_MULTI] = { .type = NLA_FLAG }, [NL80211_ATTR_EXTERNAL_AUTH_SUPPORT] = { .type = NLA_FLAG },
[NL80211_ATTR_TXQ_LIMIT] = { .type = NLA_U32 }, [NL80211_ATTR_TXQ_MEMORY_LIMIT] = { .type = NLA_U32 }, [NL80211_ATTR_TXQ_QUANTUM] = { .type = NLA_U32 }, - [NL80211_ATTR_HE_CAPABILITY] = { .type = NLA_BINARY, - .len = NL80211_HE_MAX_CAPABILITY_LEN }, - + [NL80211_ATTR_HE_CAPABILITY] = + NLA_POLICY_RANGE(NLA_BINARY, + NL80211_HE_MIN_CAPABILITY_LEN, + NL80211_HE_MAX_CAPABILITY_LEN), [NL80211_ATTR_FTM_RESPONDER] = NLA_POLICY_NESTED(nl80211_ftm_responder_policy), [NL80211_ATTR_TIMEOUT] = NLA_POLICY_MIN(NLA_U32, 1), @@@ -654,10 -673,8 +673,8 @@@ [NL80211_ATTR_RECEIVE_MULTICAST] = { .type = NLA_FLAG }, [NL80211_ATTR_WIPHY_FREQ_OFFSET] = NLA_POLICY_RANGE(NLA_U32, 0, 999), [NL80211_ATTR_SCAN_FREQ_KHZ] = { .type = NLA_NESTED }, - [NL80211_ATTR_HE_6GHZ_CAPABILITY] = { - .type = NLA_EXACT_LEN, - .len = sizeof(struct ieee80211_he_6ghz_capa), - }, + [NL80211_ATTR_HE_6GHZ_CAPABILITY] = + NLA_POLICY_EXACT_LEN(sizeof(struct ieee80211_he_6ghz_capa)), };
/* policy for the key attributes */ @@@ -703,7 -720,7 +720,7 @@@ nl80211_wowlan_tcp_policy[NUM_NL80211_W [NL80211_WOWLAN_TCP_DST_MAC] = NLA_POLICY_EXACT_LEN_WARN(ETH_ALEN), [NL80211_WOWLAN_TCP_SRC_PORT] = { .type = NLA_U16 }, [NL80211_WOWLAN_TCP_DST_PORT] = { .type = NLA_U16 }, - [NL80211_WOWLAN_TCP_DATA_PAYLOAD] = { .type = NLA_MIN_LEN, .len = 1 }, + [NL80211_WOWLAN_TCP_DATA_PAYLOAD] = NLA_POLICY_MIN_LEN(1), [NL80211_WOWLAN_TCP_DATA_PAYLOAD_SEQ] = { .len = sizeof(struct nl80211_wowlan_tcp_data_seq) }, @@@ -711,8 -728,8 +728,8 @@@ .len = sizeof(struct nl80211_wowlan_tcp_data_token) }, [NL80211_WOWLAN_TCP_DATA_INTERVAL] = { .type = NLA_U32 }, - [NL80211_WOWLAN_TCP_WAKE_PAYLOAD] = { .type = NLA_MIN_LEN, .len = 1 }, - [NL80211_WOWLAN_TCP_WAKE_MASK] = { .type = NLA_MIN_LEN, .len = 1 }, + [NL80211_WOWLAN_TCP_WAKE_PAYLOAD] = NLA_POLICY_MIN_LEN(1), + [NL80211_WOWLAN_TCP_WAKE_MASK] = NLA_POLICY_MIN_LEN(1), }; #endif /* CONFIG_PM */
@@@ -738,7 -755,7 +755,7 @@@ nl80211_rekey_policy[NUM_NL80211_REKEY_ .type = NLA_BINARY, .len = NL80211_KCK_EXT_LEN }, - [NL80211_REKEY_DATA_REPLAY_CTR] = NLA_POLICY_EXACT_LEN_WARN(NL80211_REPLAY_CTR_LEN), + [NL80211_REKEY_DATA_REPLAY_CTR] = NLA_POLICY_EXACT_LEN(NL80211_REPLAY_CTR_LEN), [NL80211_REKEY_DATA_AKM] = { .type = NLA_U32 }, };
@@@ -778,7 -795,8 +795,8 @@@ nl80211_bss_select_policy[NL80211_BSS_S /* policy for NAN function attributes */ static const struct nla_policy nl80211_nan_func_policy[NL80211_NAN_FUNC_ATTR_MAX + 1] = { - [NL80211_NAN_FUNC_TYPE] = { .type = NLA_U8 }, + [NL80211_NAN_FUNC_TYPE] = + NLA_POLICY_MAX(NLA_U8, NL80211_NAN_FUNC_MAX_TYPE), [NL80211_NAN_FUNC_SERVICE_ID] = { .len = NL80211_NAN_FUNC_SERVICE_ID_LEN }, [NL80211_NAN_FUNC_PUBLISH_TYPE] = { .type = NLA_U8 }, @@@ -4419,21 -4437,106 +4437,106 @@@ static bool vht_set_mcs_mask(struct iee return true; }
+ static u16 he_mcs_map_to_mcs_mask(u8 he_mcs_map) + { + switch (he_mcs_map) { + case IEEE80211_HE_MCS_NOT_SUPPORTED: + return 0; + case IEEE80211_HE_MCS_SUPPORT_0_7: + return 0x00FF; + case IEEE80211_HE_MCS_SUPPORT_0_9: + return 0x03FF; + case IEEE80211_HE_MCS_SUPPORT_0_11: + return 0xFFF; + default: + break; + } + return 0; + } + + static void he_build_mcs_mask(u16 he_mcs_map, + u16 he_mcs_mask[NL80211_HE_NSS_MAX]) + { + u8 nss; + + for (nss = 0; nss < NL80211_HE_NSS_MAX; nss++) { + he_mcs_mask[nss] = he_mcs_map_to_mcs_mask(he_mcs_map & 0x03); + he_mcs_map >>= 2; + } + } + + static u16 he_get_txmcsmap(struct genl_info *info, + const struct ieee80211_sta_he_cap *he_cap) + { + struct net_device *dev = info->user_ptr[1]; + struct wireless_dev *wdev = dev->ieee80211_ptr; + __le16 tx_mcs; + + switch (wdev->chandef.width) { + case NL80211_CHAN_WIDTH_80P80: + tx_mcs = he_cap->he_mcs_nss_supp.tx_mcs_80p80; + break; + case NL80211_CHAN_WIDTH_160: + tx_mcs = he_cap->he_mcs_nss_supp.tx_mcs_160; + break; + default: + tx_mcs = he_cap->he_mcs_nss_supp.tx_mcs_80; + break; + } + return le16_to_cpu(tx_mcs); + } + + static bool he_set_mcs_mask(struct genl_info *info, + struct wireless_dev *wdev, + struct ieee80211_supported_band *sband, + struct nl80211_txrate_he *txrate, + u16 mcs[NL80211_HE_NSS_MAX]) + { + const struct ieee80211_sta_he_cap *he_cap; + u16 tx_mcs_mask[NL80211_HE_NSS_MAX] = {}; + u16 tx_mcs_map = 0; + u8 i; + + he_cap = ieee80211_get_he_iftype_cap(sband, wdev->iftype); + if (!he_cap) + return false; + + memset(mcs, 0, sizeof(u16) * NL80211_HE_NSS_MAX); + + tx_mcs_map = he_get_txmcsmap(info, he_cap); + + /* Build he_mcs_mask from HE capabilities */ + he_build_mcs_mask(tx_mcs_map, tx_mcs_mask); + + for (i = 0; i < NL80211_HE_NSS_MAX; i++) { + if ((tx_mcs_mask[i] & txrate->mcs[i]) == txrate->mcs[i]) + mcs[i] = txrate->mcs[i]; + else + return false; + } + + return true; + } + static int nl80211_parse_tx_bitrate_mask(struct genl_info *info, struct nlattr *attrs[], enum nl80211_attrs attr, - struct cfg80211_bitrate_mask *mask) + struct cfg80211_bitrate_mask *mask, + struct net_device *dev) { struct nlattr *tb[NL80211_TXRATE_MAX + 1]; struct cfg80211_registered_device *rdev = info->user_ptr[0]; + struct wireless_dev *wdev = dev->ieee80211_ptr; int rem, i; struct nlattr *tx_rates; struct ieee80211_supported_band *sband; - u16 vht_tx_mcs_map; + u16 vht_tx_mcs_map, he_tx_mcs_map;
memset(mask, 0, sizeof(*mask)); /* Default to all rates enabled */ for (i = 0; i < NUM_NL80211_BANDS; i++) { + const struct ieee80211_sta_he_cap *he_cap; + sband = rdev->wiphy.bands[i];
if (!sband) @@@ -4449,6 -4552,16 +4552,16 @@@
vht_tx_mcs_map = le16_to_cpu(sband->vht_cap.vht_mcs.tx_mcs_map); vht_build_mcs_mask(vht_tx_mcs_map, mask->control[i].vht_mcs); + + he_cap = ieee80211_get_he_iftype_cap(sband, wdev->iftype); + if (!he_cap) + continue; + + he_tx_mcs_map = he_get_txmcsmap(info, he_cap); + he_build_mcs_mask(he_tx_mcs_map, mask->control[i].he_mcs); + + mask->control[i].he_gi = 0xFF; + mask->control[i].he_ltf = 0xFF; }
/* if no rates are given set it back to the defaults */ @@@ -4504,13 -4617,25 +4617,25 @@@ if (mask->control[band].gi > NL80211_TXRATE_FORCE_LGI) return -EINVAL; } + if (tb[NL80211_TXRATE_HE] && + !he_set_mcs_mask(info, wdev, sband, + nla_data(tb[NL80211_TXRATE_HE]), + mask->control[band].he_mcs)) + return -EINVAL; + if (tb[NL80211_TXRATE_HE_GI]) + mask->control[band].he_gi = + nla_get_u8(tb[NL80211_TXRATE_HE_GI]); + if (tb[NL80211_TXRATE_HE_LTF]) + mask->control[band].he_ltf = + nla_get_u8(tb[NL80211_TXRATE_HE_LTF]);
if (mask->control[band].legacy == 0) { - /* don't allow empty legacy rates if HT or VHT + /* don't allow empty legacy rates if HT, VHT or HE * are not even supported. */ if (!(rdev->wiphy.bands[band]->ht_cap.ht_supported || - rdev->wiphy.bands[band]->vht_cap.vht_supported)) + rdev->wiphy.bands[band]->vht_cap.vht_supported || + ieee80211_get_he_iftype_cap(sband, wdev->iftype))) return -EINVAL;
for (i = 0; i < IEEE80211_HT_MCS_MASK_LEN; i++) @@@ -4521,6 -4646,10 +4646,10 @@@ if (mask->control[band].vht_mcs[i]) goto out;
+ for (i = 0; i < NL80211_HE_NSS_MAX; i++) + if (mask->control[band].he_mcs[i]) + goto out; + /* legacy and mcs rates may not be both empty */ return -EINVAL; } @@@ -4831,8 -4960,9 +4960,9 @@@ static bool nl80211_valid_auth_type(str return false; return true; case NL80211_CMD_START_AP: - /* SAE not supported yet */ - if (auth_type == NL80211_AUTHTYPE_SAE) + if (!wiphy_ext_feature_isset(&rdev->wiphy, + NL80211_EXT_FEATURE_SAE_OFFLOAD_AP) && + auth_type == NL80211_AUTHTYPE_SAE) return false; /* FILS not supported yet */ if (auth_type == NL80211_AUTHTYPE_FILS_SK || @@@ -4896,8 -5026,7 +5026,7 @@@ static int nl80211_start_ap(struct sk_b params.ssid = nla_data(info->attrs[NL80211_ATTR_SSID]); params.ssid_len = nla_len(info->attrs[NL80211_ATTR_SSID]); - if (params.ssid_len == 0 || - params.ssid_len > IEEE80211_MAX_SSID_LEN) + if (params.ssid_len == 0) return -EINVAL; }
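nl80211_valid_auth_type() used to reject SAE for NL80211_CMD_START_AP unconditionally; it now allows it when the driver advertises NL80211_EXT_FEATURE_SAE_OFFLOAD_AP (and nl80211_crypto_settings(), further down, accepts NL80211_ATTR_SAE_PASSWORD for either the STA or the AP offload feature). On the driver side, opting in is a one-liner at wiphy setup time; a hedged sketch with the surrounding probe code assumed:

/* Hedged sketch: a driver opting in to AP-side SAE offload, so
 * userspace may pass NL80211_ATTR_SAE_PASSWORD in CMD_START_AP.
 * The foo_ function and its caller are assumed, not real code.
 */
#include <net/cfg80211.h>

static void foo_wiphy_init(struct wiphy *wiphy)
{
	wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_SAE_OFFLOAD_AP);
}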
@@@ -4966,7 -5095,8 +5095,8 @@@ if (info->attrs[NL80211_ATTR_TX_RATES]) { err = nl80211_parse_tx_bitrate_mask(info, info->attrs, NL80211_ATTR_TX_RATES, - ¶ms.beacon_rate); + ¶ms.beacon_rate, + dev); if (err) return err;
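For the HE rate-mask support above: he_build_mcs_mask() expands the 16-bit HE MCS map, 2 bits per spatial stream (0 = MCS 0-7, 1 = MCS 0-9, 2 = MCS 0-11, 3 = not supported), into one u16 bitmask per NSS, and he_set_mcs_mask() accepts a requested mask only if it is a subset of the capability-derived one. A standalone demonstration of the expansion, with illustrative values:

/* Standalone demo of the 2-bits-per-NSS expansion performed by
 * he_build_mcs_mask(); compiles and runs in userspace.
 */
#include <stdio.h>
#include <stdint.h>

#define HE_NSS_MAX 8

static uint16_t mcs_map_to_mask(uint8_t v)
{
	switch (v) {
	case 0: return 0x00FF;	/* MCS 0-7  */
	case 1: return 0x03FF;	/* MCS 0-9  */
	case 2: return 0x0FFF;	/* MCS 0-11 */
	default: return 0;	/* not supported */
	}
}

int main(void)
{
	/* Two streams of MCS 0-11, the rest unsupported:
	 * 0xFFFA = ...11 11 10 10 (NSS1, NSS2 = 2; others = 3).
	 */
	uint16_t map = 0xFFFA;

	for (int nss = 0; nss < HE_NSS_MAX; nss++) {
		printf("NSS%d mask 0x%04x\n", nss + 1,
		       mcs_map_to_mask(map & 0x3));
		map >>= 2;
	}
	return 0;
}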
@@@ -5837,11 -5967,9 +5967,9 @@@ static int nl80211_parse_sta_channel_in nla_len(info->attrs[NL80211_ATTR_STA_SUPPORTED_CHANNELS]); /* * Need to include at least one (first channel, number of - * channels) tuple for each subband, and must have proper - * tuples for the rest of the data as well. + * channels) tuple for each subband (checked in policy), + * and must have proper tuples for the rest of the data as well. */ - if (params->supported_channels_len < 2) - return -EINVAL; if (params->supported_channels_len % 2) return -EINVAL; } @@@ -5851,13 -5979,6 +5979,6 @@@ nla_data(info->attrs[NL80211_ATTR_STA_SUPPORTED_OPER_CLASSES]); params->supported_oper_classes_len = nla_len(info->attrs[NL80211_ATTR_STA_SUPPORTED_OPER_CLASSES]); - /* - * The value of the Length field of the Supported Operating - * Classes element is between 2 and 253. - */ - if (params->supported_oper_classes_len < 2 || - params->supported_oper_classes_len > 253) - return -EINVAL; } return 0; } @@@ -5880,9 -6001,6 +6001,6 @@@ static int nl80211_set_station_tdls(str nla_data(info->attrs[NL80211_ATTR_HE_CAPABILITY]); params->he_capa_len = nla_len(info->attrs[NL80211_ATTR_HE_CAPABILITY]); - - if (params->he_capa_len < NL80211_HE_MIN_CAPABILITY_LEN) - return -EINVAL; }
err = nl80211_parse_sta_channel_info(info, params); @@@ -6011,7 -6129,7 +6129,7 @@@ static int nl80211_set_station(struct s
if (info->attrs[NL80211_ATTR_HE_6GHZ_CAPABILITY]) params.he_6ghz_capa = - nla_data(info->attrs[NL80211_ATTR_HE_CAPABILITY]); + nla_data(info->attrs[NL80211_ATTR_HE_6GHZ_CAPABILITY]);
if (info->attrs[NL80211_ATTR_AIRTIME_WEIGHT]) params.airtime_weight = @@@ -6141,10 -6259,6 +6259,6 @@@ static int nl80211_new_station(struct s nla_data(info->attrs[NL80211_ATTR_HE_CAPABILITY]); params.he_capa_len = nla_len(info->attrs[NL80211_ATTR_HE_CAPABILITY]); - - /* max len is validated in nla policy */ - if (params.he_capa_len < NL80211_HE_MIN_CAPABILITY_LEN) - return -EINVAL; }
if (info->attrs[NL80211_ATTR_HE_6GHZ_CAPABILITY]) @@@ -8416,23 -8530,14 +8530,14 @@@ nl80211_parse_sched_scan(struct wiphy * }
if (ssid) { - if (nla_len(ssid) > IEEE80211_MAX_SSID_LEN) { - err = -EINVAL; - goto out_free; - } memcpy(request->match_sets[i].ssid.ssid, nla_data(ssid), nla_len(ssid)); request->match_sets[i].ssid.ssid_len = nla_len(ssid); } - if (bssid) { - if (nla_len(bssid) != ETH_ALEN) { - err = -EINVAL; - goto out_free; - } + if (bssid) memcpy(request->match_sets[i].bssid, nla_data(bssid), ETH_ALEN); - }
/* special attribute - old implementation w/a */ request->match_sets[i].rssi_thold = default_match_rssi; @@@ -8787,10 -8892,10 +8892,10 @@@ static int nl80211_channel_switch(struc if (err) return err;
- if (!csa_attrs[NL80211_ATTR_CSA_C_OFF_BEACON]) + if (!csa_attrs[NL80211_ATTR_CNTDWN_OFFS_BEACON]) return -EINVAL;
- len = nla_len(csa_attrs[NL80211_ATTR_CSA_C_OFF_BEACON]); + len = nla_len(csa_attrs[NL80211_ATTR_CNTDWN_OFFS_BEACON]); if (!len || (len % sizeof(u16))) return -EINVAL;
@@@ -8801,7 -8906,7 +8906,7 @@@ return -EINVAL;
params.counter_offsets_beacon = - nla_data(csa_attrs[NL80211_ATTR_CSA_C_OFF_BEACON]); + nla_data(csa_attrs[NL80211_ATTR_CNTDWN_OFFS_BEACON]);
/* sanity checks - counters should fit and be the same */ for (i = 0; i < params.n_counter_offsets_beacon; i++) { @@@ -8814,8 -8919,8 +8919,8 @@@ return -EINVAL; }
- if (csa_attrs[NL80211_ATTR_CSA_C_OFF_PRESP]) { - len = nla_len(csa_attrs[NL80211_ATTR_CSA_C_OFF_PRESP]); + if (csa_attrs[NL80211_ATTR_CNTDWN_OFFS_PRESP]) { + len = nla_len(csa_attrs[NL80211_ATTR_CNTDWN_OFFS_PRESP]); if (!len || (len % sizeof(u16))) return -EINVAL;
@@@ -8826,7 -8931,7 +8931,7 @@@ return -EINVAL;
params.counter_offsets_presp = - nla_data(csa_attrs[NL80211_ATTR_CSA_C_OFF_PRESP]); + nla_data(csa_attrs[NL80211_ATTR_CNTDWN_OFFS_PRESP]);
/* sanity checks - counters should fit and be the same */ for (i = 0; i < params.n_counter_offsets_presp; i++) { @@@ -9309,9 -9414,6 +9414,6 @@@ static int nl80211_authenticate(struct return -EINVAL; auth_data = nla_data(info->attrs[NL80211_ATTR_AUTH_DATA]); auth_data_len = nla_len(info->attrs[NL80211_ATTR_AUTH_DATA]); - /* need to include at least Auth Transaction and Status Code */ - if (auth_data_len < 4) - return -EINVAL; }
local_state_change = !!info->attrs[NL80211_ATTR_LOCAL_STATE_CHANGE]; @@@ -9451,7 -9553,9 +9553,9 @@@ static int nl80211_crypto_settings(stru
if (info->attrs[NL80211_ATTR_SAE_PASSWORD]) { if (!wiphy_ext_feature_isset(&rdev->wiphy, - NL80211_EXT_FEATURE_SAE_OFFLOAD)) + NL80211_EXT_FEATURE_SAE_OFFLOAD) && + !wiphy_ext_feature_isset(&rdev->wiphy, + NL80211_EXT_FEATURE_SAE_OFFLOAD_AP)) return -EINVAL; settings->sae_pwd = nla_data(info->attrs[NL80211_ATTR_SAE_PASSWORD]); @@@ -10798,7 -10902,8 +10902,8 @@@ static int nl80211_set_tx_bitrate_mask( return -EOPNOTSUPP;
err = nl80211_parse_tx_bitrate_mask(info, info->attrs, - NL80211_ATTR_TX_RATES, &mask); + NL80211_ATTR_TX_RATES, &mask, + dev); if (err) return err;
@@@ -11406,7 -11511,8 +11511,8 @@@ static int nl80211_join_mesh(struct sk_ if (info->attrs[NL80211_ATTR_TX_RATES]) { err = nl80211_parse_tx_bitrate_mask(info, info->attrs, NL80211_ATTR_TX_RATES, - &setup.beacon_rate); + &setup.beacon_rate, + dev); if (err) return err;
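The dev argument threaded through nl80211_parse_tx_bitrate_mask() in these hunks exists so the parser can look up the interface-type-specific HE capability (ieee80211_get_he_iftype_cap() needs wdev->iftype). The acceptance rule itself is a plain subset test, shown standalone below as a sketch rather than the kernel code:

/* Standalone version of the subset test in he_set_mcs_mask(): a
 * requested per-NSS MCS mask passes only if every bit is also in
 * the capability-derived mask.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool mcs_subset_ok(const uint16_t cap[], const uint16_t req[], int n)
{
	for (int i = 0; i < n; i++)
		if ((cap[i] & req[i]) != req[i])
			return false;	/* requested beyond capability */
	return true;
}

int main(void)
{
	uint16_t cap[2] = { 0x0FFF, 0x03FF };	/* NSS1: 0-11, NSS2: 0-9 */
	uint16_t ok[2]  = { 0x00FF, 0x00FF };	/* within capability */
	uint16_t bad[2] = { 0x0FFF, 0x0FFF };	/* NSS2 exceeds 0-9 */

	printf("%d %d\n", mcs_subset_ok(cap, ok, 2),
	       mcs_subset_ok(cap, bad, 2));
	return 0;
}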
@@@ -12358,8 -12464,6 +12464,6 @@@ static int nl80211_set_rekey_data(struc if (!tb[NL80211_REKEY_DATA_REPLAY_CTR] || !tb[NL80211_REKEY_DATA_KEK] || !tb[NL80211_REKEY_DATA_KCK]) return -EINVAL; - if (nla_len(tb[NL80211_REKEY_DATA_REPLAY_CTR]) != NL80211_REPLAY_CTR_LEN) - return -ERANGE; if (nla_len(tb[NL80211_REKEY_DATA_KEK]) != NL80211_KEK_LEN && !(rdev->wiphy.flags & WIPHY_FLAG_SUPPORTS_EXT_KEK_KCK && nla_len(tb[NL80211_REKEY_DATA_KEK]) == NL80211_KEK_EXT_LEN)) @@@ -12684,8 -12788,7 +12788,7 @@@ static int nl80211_nan_add_func(struct
func->cookie = cfg80211_assign_cookie(rdev);
- if (!tb[NL80211_NAN_FUNC_TYPE] || - nla_get_u8(tb[NL80211_NAN_FUNC_TYPE]) > NL80211_NAN_FUNC_MAX_TYPE) { + if (!tb[NL80211_NAN_FUNC_TYPE]) { err = -EINVAL; goto out; } @@@ -13175,9 -13278,6 +13278,6 @@@ static int nl80211_crit_protocol_start( duration = nla_get_u16(info->attrs[NL80211_ATTR_MAX_CRIT_PROT_DURATION]);
- if (duration > NL80211_CRIT_PROTO_MAX_DURATION) - return -ERANGE; - ret = rdev_crit_proto_start(rdev, wdev, proto, duration); if (!ret) rdev->crit_proto_nlportid = info->snd_portid; @@@ -13562,8 -13662,7 +13662,7 @@@ static int nl80211_set_qos_map(struct s pos = nla_data(info->attrs[NL80211_ATTR_QOS_MAP]); len = nla_len(info->attrs[NL80211_ATTR_QOS_MAP]);
- if (len % 2 || len < IEEE80211_QOS_MAP_LEN_MIN || - len > IEEE80211_QOS_MAP_LEN_MAX) + if (len % 2) return -EINVAL;
qos_map = kzalloc(sizeof(struct cfg80211_qos_map), GFP_KERNEL); @@@ -13831,17 -13930,9 +13930,9 @@@ static int nl80211_set_pmk(struct sk_bu goto out; }
- if (info->attrs[NL80211_ATTR_PMKR0_NAME]) { - int r0_name_len = nla_len(info->attrs[NL80211_ATTR_PMKR0_NAME]); - - if (r0_name_len != WLAN_PMK_NAME_LEN) { - ret = -EINVAL; - goto out; - } - + if (info->attrs[NL80211_ATTR_PMKR0_NAME]) pmk_conf.pmk_r0_name = nla_data(info->attrs[NL80211_ATTR_PMKR0_NAME]); - }
ret = rdev_set_pmk(rdev, dev, &pmk_conf); out: @@@ -13900,8 -13991,7 +13991,7 @@@ static int nl80211_external_auth(struc
if (info->attrs[NL80211_ATTR_SSID]) { params.ssid.ssid_len = nla_len(info->attrs[NL80211_ATTR_SSID]); - if (params.ssid.ssid_len == 0 || - params.ssid.ssid_len > IEEE80211_MAX_SSID_LEN) + if (params.ssid.ssid_len == 0) return -EINVAL; memcpy(params.ssid.ssid, nla_data(info->attrs[NL80211_ATTR_SSID]), @@@ -14202,7 -14292,7 +14292,7 @@@ static int parse_tid_conf(struct cfg802 if (tid_conf->txrate_type != NL80211_TX_RATE_AUTOMATIC) { attr = NL80211_TID_CONFIG_ATTR_TX_RATE; err = nl80211_parse_tx_bitrate_mask(info, attrs, attr, - &tid_conf->txrate_mask); + &tid_conf->txrate_mask, dev); if (err) return err;
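One rename in this file is easy to miss among the validation moves: the channel-switch countdown offsets are now NL80211_ATTR_CNTDWN_OFFS_BEACON/_PRESP instead of NL80211_ATTR_CSA_C_OFF_BEACON/_PRESP, reflecting that the countdown is no longer CSA-specific. Existing userspace keeps compiling only if the UAPI header carries aliases along these lines (assumed here; the header hunk is not part of this excerpt):

/* Assumed compatibility aliases in nl80211.h (not shown in this
 * diff): the old CSA names mapped onto the generic countdown names.
 */
#define NL80211_ATTR_CSA_C_OFF_BEACON NL80211_ATTR_CNTDWN_OFFS_BEACON
#define NL80211_ATTR_CSA_C_OFF_PRESP  NL80211_ATTR_CNTDWN_OFFS_PRESP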
diff --combined net/wireless/reg.c index d8a90d397423,dcd3d39a5372..0ab7808fcec8 --- a/net/wireless/reg.c +++ b/net/wireless/reg.c @@@ -1594,7 -1594,7 +1594,7 @@@ freq_reg_info_regd(u32 center_freq
/* * We only need to know if one frequency rule was - * was in center_freq's band, that's enough, so lets + * in center_freq's band, that's enough, so let's * not overwrite it once found */ if (!band_rule_found) @@@ -1691,57 -1691,18 +1691,18 @@@ static uint32_t reg_rule_to_chan_bw_fla return bw_flags; }
- /* - * Note that right now we assume the desired channel bandwidth - * is always 20 MHz for each individual channel (HT40 uses 20 MHz - * per channel, the primary and the extension channel). - */ - static void handle_channel(struct wiphy *wiphy, - enum nl80211_reg_initiator initiator, - struct ieee80211_channel *chan) + static void handle_channel_single_rule(struct wiphy *wiphy, + enum nl80211_reg_initiator initiator, + struct ieee80211_channel *chan, + u32 flags, + struct regulatory_request *lr, + struct wiphy *request_wiphy, + const struct ieee80211_reg_rule *reg_rule) { - u32 flags, bw_flags = 0; - const struct ieee80211_reg_rule *reg_rule = NULL; + u32 bw_flags = 0; const struct ieee80211_power_rule *power_rule = NULL; - struct wiphy *request_wiphy = NULL; - struct regulatory_request *lr = get_last_request(); const struct ieee80211_regdomain *regd;
- request_wiphy = wiphy_idx_to_wiphy(lr->wiphy_idx); - - flags = chan->orig_flags; - - reg_rule = freq_reg_info(wiphy, ieee80211_channel_to_khz(chan)); - if (IS_ERR(reg_rule)) { - /* - * We will disable all channels that do not match our - * received regulatory rule unless the hint is coming - * from a Country IE and the Country IE had no information - * about a band. The IEEE 802.11 spec allows for an AP - * to send only a subset of the regulatory rules allowed, - * so an AP in the US that only supports 2.4 GHz may only send - * a country IE with information for the 2.4 GHz band - * while 5 GHz is still supported. - */ - if (initiator == NL80211_REGDOM_SET_BY_COUNTRY_IE && - PTR_ERR(reg_rule) == -ERANGE) - return; - - if (lr->initiator == NL80211_REGDOM_SET_BY_DRIVER && - request_wiphy && request_wiphy == wiphy && - request_wiphy->regulatory_flags & REGULATORY_STRICT_REG) { - pr_debug("Disabling freq %d.%03d MHz for good\n", - chan->center_freq, chan->freq_offset); - chan->orig_flags |= IEEE80211_CHAN_DISABLED; - chan->flags = chan->orig_flags; - } else { - pr_debug("Disabling freq %d.%03d MHz\n", - chan->center_freq, chan->freq_offset); - chan->flags |= IEEE80211_CHAN_DISABLED; - } - return; - } - regd = reg_get_regdomain(wiphy);
power_rule = ®_rule->power_rule; @@@ -1803,6 -1764,204 +1764,204 @@@ chan->max_power = chan->max_reg_power; }
+ static void handle_channel_adjacent_rules(struct wiphy *wiphy,
+ 					  enum nl80211_reg_initiator initiator,
+ 					  struct ieee80211_channel *chan,
+ 					  u32 flags,
+ 					  struct regulatory_request *lr,
+ 					  struct wiphy *request_wiphy,
+ 					  const struct ieee80211_reg_rule *rrule1,
+ 					  const struct ieee80211_reg_rule *rrule2,
+ 					  struct ieee80211_freq_range *comb_range)
+ {
+ 	u32 bw_flags1 = 0;
+ 	u32 bw_flags2 = 0;
+ 	const struct ieee80211_power_rule *power_rule1 = NULL;
+ 	const struct ieee80211_power_rule *power_rule2 = NULL;
+ 	const struct ieee80211_regdomain *regd;
+ 
+ 	regd = reg_get_regdomain(wiphy);
+ 
+ 	power_rule1 = &rrule1->power_rule;
+ 	power_rule2 = &rrule2->power_rule;
+ 	bw_flags1 = reg_rule_to_chan_bw_flags(regd, rrule1, chan);
+ 	bw_flags2 = reg_rule_to_chan_bw_flags(regd, rrule2, chan);
+ 
+ 	if (lr->initiator == NL80211_REGDOM_SET_BY_DRIVER &&
+ 	    request_wiphy && request_wiphy == wiphy &&
+ 	    request_wiphy->regulatory_flags & REGULATORY_STRICT_REG) {
+ 		/* This guarantees the driver's requested regulatory domain
+ 		 * will always be used as a base for further regulatory
+ 		 * settings.
+ 		 */
+ 		chan->flags =
+ 			map_regdom_flags(rrule1->flags) |
+ 			map_regdom_flags(rrule2->flags) |
+ 			bw_flags1 |
+ 			bw_flags2;
+ 		chan->orig_flags = chan->flags;
+ 		chan->max_antenna_gain =
+ 			min_t(int, MBI_TO_DBI(power_rule1->max_antenna_gain),
+ 			      MBI_TO_DBI(power_rule2->max_antenna_gain));
+ 		chan->orig_mag = chan->max_antenna_gain;
+ 		chan->max_reg_power =
+ 			min_t(int, MBM_TO_DBM(power_rule1->max_eirp),
+ 			      MBM_TO_DBM(power_rule2->max_eirp));
+ 		chan->max_power = chan->max_reg_power;
+ 		chan->orig_mpwr = chan->max_reg_power;
+ 
+ 		if (chan->flags & IEEE80211_CHAN_RADAR) {
+ 			chan->dfs_cac_ms = IEEE80211_DFS_MIN_CAC_TIME_MS;
+ 			if (rrule1->dfs_cac_ms || rrule2->dfs_cac_ms)
+ 				chan->dfs_cac_ms = max_t(unsigned int,
+ 							 rrule1->dfs_cac_ms,
+ 							 rrule2->dfs_cac_ms);
+ 		}
+ 
+ 		return;
+ 	}
+ 
+ 	chan->dfs_state = NL80211_DFS_USABLE;
+ 	chan->dfs_state_entered = jiffies;
+ 
+ 	chan->beacon_found = false;
+ 	chan->flags = flags | bw_flags1 | bw_flags2 |
+ 		      map_regdom_flags(rrule1->flags) |
+ 		      map_regdom_flags(rrule2->flags);
+ 
+ 	/* reg_rule_to_chan_bw_flags may forbid 10 and 20 MHz
+ 	 * (otherwise this would not be an adjacent-rule case),
+ 	 * so recheck here
+ 	 */
+ 	if (cfg80211_does_bw_fit_range(comb_range,
+ 				       ieee80211_channel_to_khz(chan),
+ 				       MHZ_TO_KHZ(10)))
+ 		chan->flags &= ~IEEE80211_CHAN_NO_10MHZ;
+ 	if (cfg80211_does_bw_fit_range(comb_range,
+ 				       ieee80211_channel_to_khz(chan),
+ 				       MHZ_TO_KHZ(20)))
+ 		chan->flags &= ~IEEE80211_CHAN_NO_20MHZ;
+ 
+ 	chan->max_antenna_gain =
+ 		min_t(int, chan->orig_mag,
+ 		      min_t(int,
+ 			    MBI_TO_DBI(power_rule1->max_antenna_gain),
+ 			    MBI_TO_DBI(power_rule2->max_antenna_gain)));
+ 	chan->max_reg_power = min_t(int,
+ 				    MBM_TO_DBM(power_rule1->max_eirp),
+ 				    MBM_TO_DBM(power_rule2->max_eirp));
+ 
+ 	if (chan->flags & IEEE80211_CHAN_RADAR) {
+ 		if (rrule1->dfs_cac_ms || rrule2->dfs_cac_ms)
+ 			chan->dfs_cac_ms = max_t(unsigned int,
+ 						 rrule1->dfs_cac_ms,
+ 						 rrule2->dfs_cac_ms);
+ 		else
+ 			chan->dfs_cac_ms = IEEE80211_DFS_MIN_CAC_TIME_MS;
+ 	}
+ 
+ 	if (chan->orig_mpwr) {
+ 		/* Devices that use REGULATORY_COUNTRY_IE_FOLLOW_POWER
+ 		 * will always follow the passed country IE power settings.
+ */ + if (initiator == NL80211_REGDOM_SET_BY_COUNTRY_IE && + wiphy->regulatory_flags & REGULATORY_COUNTRY_IE_FOLLOW_POWER) + chan->max_power = chan->max_reg_power; + else + chan->max_power = min(chan->orig_mpwr, + chan->max_reg_power); + } else { + chan->max_power = chan->max_reg_power; + } + } + + /* Note that right now we assume the desired channel bandwidth + * is always 20 MHz for each individual channel (HT40 uses 20 MHz + * per channel, the primary and the extension channel). + */ + static void handle_channel(struct wiphy *wiphy, + enum nl80211_reg_initiator initiator, + struct ieee80211_channel *chan) + { + const u32 orig_chan_freq = ieee80211_channel_to_khz(chan); + struct regulatory_request *lr = get_last_request(); + struct wiphy *request_wiphy = wiphy_idx_to_wiphy(lr->wiphy_idx); + const struct ieee80211_reg_rule *rrule = NULL; + const struct ieee80211_reg_rule *rrule1 = NULL; + const struct ieee80211_reg_rule *rrule2 = NULL; + + u32 flags = chan->orig_flags; + + rrule = freq_reg_info(wiphy, orig_chan_freq); + if (IS_ERR(rrule)) { + /* check for adjacent match, therefore get rules for + * chan - 20 MHz and chan + 20 MHz and test + * if reg rules are adjacent + */ + rrule1 = freq_reg_info(wiphy, + orig_chan_freq - MHZ_TO_KHZ(20)); + rrule2 = freq_reg_info(wiphy, + orig_chan_freq + MHZ_TO_KHZ(20)); + if (!IS_ERR(rrule1) && !IS_ERR(rrule2)) { + struct ieee80211_freq_range comb_range; + + if (rrule1->freq_range.end_freq_khz != + rrule2->freq_range.start_freq_khz) + goto disable_chan; + + comb_range.start_freq_khz = + rrule1->freq_range.start_freq_khz; + comb_range.end_freq_khz = + rrule2->freq_range.end_freq_khz; + comb_range.max_bandwidth_khz = + min_t(u32, + rrule1->freq_range.max_bandwidth_khz, + rrule2->freq_range.max_bandwidth_khz); + + if (!cfg80211_does_bw_fit_range(&comb_range, + orig_chan_freq, + MHZ_TO_KHZ(20))) + goto disable_chan; + + handle_channel_adjacent_rules(wiphy, initiator, chan, + flags, lr, request_wiphy, + rrule1, rrule2, + &comb_range); + return; + } + + disable_chan: + /* We will disable all channels that do not match our + * received regulatory rule unless the hint is coming + * from a Country IE and the Country IE had no information + * about a band. The IEEE 802.11 spec allows for an AP + * to send only a subset of the regulatory rules allowed, + * so an AP in the US that only supports 2.4 GHz may only send + * a country IE with information for the 2.4 GHz band + * while 5 GHz is still supported. + */ + if (initiator == NL80211_REGDOM_SET_BY_COUNTRY_IE && + PTR_ERR(rrule) == -ERANGE) + return; + + if (lr->initiator == NL80211_REGDOM_SET_BY_DRIVER && + request_wiphy && request_wiphy == wiphy && + request_wiphy->regulatory_flags & REGULATORY_STRICT_REG) { + pr_debug("Disabling freq %d.%03d MHz for good\n", + chan->center_freq, chan->freq_offset); + chan->orig_flags |= IEEE80211_CHAN_DISABLED; + chan->flags = chan->orig_flags; + } else { + pr_debug("Disabling freq %d.%03d MHz\n", + chan->center_freq, chan->freq_offset); + chan->flags |= IEEE80211_CHAN_DISABLED; + } + return; + } + + handle_channel_single_rule(wiphy, initiator, chan, flags, lr, + request_wiphy, rrule); + } + static void handle_band(struct wiphy *wiphy, enum nl80211_reg_initiator initiator, struct ieee80211_supported_band *sband) @@@ -2946,9 -3105,6 +3105,9 @@@ int regulatory_hint_user(const char *al if (WARN_ON(!alpha2)) return -EINVAL;
+ if (!is_world_regdom(alpha2) && !is_an_alpha2(alpha2)) + return -EINVAL; + request = kzalloc(sizeof(struct regulatory_request), GFP_KERNEL); if (!request) return -ENOMEM; @@@ -3170,7 -3326,7 +3329,7 @@@ static void restore_custom_reg_settings * - send a user regulatory hint if applicable * * Device drivers that send a regulatory hint for a specific country - * keep their own regulatory domain on wiphy->regd so that does does + * keep their own regulatory domain on wiphy->regd so that does * not need to be remembered. */ static void restore_regulatory_settings(bool reset_user, bool cached) diff --combined tools/lib/bpf/libbpf.c index 7253b833576c,b688aadf09c5..46d727b45c81 --- a/tools/lib/bpf/libbpf.c +++ b/tools/lib/bpf/libbpf.c @@@ -44,7 -44,6 +44,6 @@@ #include <sys/vfs.h> #include <sys/utsname.h> #include <sys/resource.h> - #include <tools/libc_compat.h> #include <libelf.h> #include <gelf.h> #include <zlib.h> @@@ -56,9 -55,6 +55,6 @@@ #include "libbpf_internal.h" #include "hashmap.h"
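Back in the net/wireless/reg.c hunk above, regulatory_hint_user() now validates the hint before allocating a request: it must be either the world regulatory domain ("00") or a plausible two-letter alpha2 code. A standalone sketch of that guard, assuming the usual definitions behind the in-tree is_world_regdom()/is_an_alpha2() helpers:

    #include <ctype.h>
    #include <stdbool.h>

    /* Accept "00" (world regdom) or two ASCII letters; reject the rest. */
    static bool user_hint_is_valid(const char *alpha2)
    {
            if (!alpha2 || !alpha2[0] || !alpha2[1])
                    return false;
            if (alpha2[0] == '0' && alpha2[1] == '0')
                    return true;
            return isalpha((unsigned char)alpha2[0]) &&
                   isalpha((unsigned char)alpha2[1]);
    }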
- /* make sure libbpf doesn't use kernel-only integer typedefs */ - #pragma GCC poison u8 u16 u32 u64 s8 s16 s32 s64 - #ifndef EM_BPF #define EM_BPF 247 #endif @@@ -67,6 -63,8 +63,8 @@@ #define BPF_FS_MAGIC 0xcafe4a11 #endif
+ #define BPF_INSN_SZ (sizeof(struct bpf_insn)) + /* vsprintf() in __base_pr() uses nonliteral format string. It may break * compilation if user enables corresponding warning. Disable it explicitly. */ @@@ -154,34 -152,35 +152,35 @@@ static void pr_perm_msg(int err ___err; }) #endif
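BPF_INSN_SZ gives the scattered sizeof(struct bpf_insn) computations a name; later hunks use it to check that relocation offsets land on instruction boundaries before converting them to instruction indices. A sketch of that conversion (the macro and function names here are illustrative, not libbpf's):

    #include <errno.h>
    #include <linux/bpf.h>          /* UAPI struct bpf_insn, 8 bytes */

    #define INSN_SZ (sizeof(struct bpf_insn))

    static int offset_to_insn_idx(unsigned long off, unsigned int *idx)
    {
            if (off % INSN_SZ)
                    return -EINVAL; /* not on an instruction boundary */
            *idx = off / INSN_SZ;
            return 0;
    }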
- #ifdef HAVE_LIBELF_MMAP_SUPPORT - # define LIBBPF_ELF_C_READ_MMAP ELF_C_READ_MMAP - #else - # define LIBBPF_ELF_C_READ_MMAP ELF_C_READ - #endif - static inline __u64 ptr_to_u64(const void *ptr) { return (__u64) (unsigned long) ptr; }
- struct bpf_capabilities { + enum kern_feature_id { /* v4.14: kernel support for program & map names. */ - __u32 name:1; + FEAT_PROG_NAME, /* v5.2: kernel support for global data sections. */ - __u32 global_data:1; + FEAT_GLOBAL_DATA, + /* BTF support */ + FEAT_BTF, /* BTF_KIND_FUNC and BTF_KIND_FUNC_PROTO support */ - __u32 btf_func:1; + FEAT_BTF_FUNC, /* BTF_KIND_VAR and BTF_KIND_DATASEC support */ - __u32 btf_datasec:1; - /* BPF_F_MMAPABLE is supported for arrays */ - __u32 array_mmap:1; + FEAT_BTF_DATASEC, /* BTF_FUNC_GLOBAL is supported */ - __u32 btf_func_global:1; + FEAT_BTF_GLOBAL_FUNC, + /* BPF_F_MMAPABLE is supported for arrays */ + FEAT_ARRAY_MMAP, /* kernel support for expected_attach_type in BPF_PROG_LOAD */ - __u32 exp_attach_type:1; + FEAT_EXP_ATTACH_TYPE, + /* bpf_probe_read_{kernel,user}[_str] helpers */ + FEAT_PROBE_READ_KERN, + __FEAT_CNT, };
+ static bool kernel_supports(enum kern_feature_id feat_id); + enum reloc_type { RELO_LD64, RELO_CALL, @@@ -209,6 -208,7 +208,7 @@@ struct bpf_sec_def bool is_exp_attach_type_optional; bool is_attachable; bool is_attach_btf; + bool is_sleepable; attach_fn_t attach_fn; };
@@@ -253,8 -253,6 +253,6 @@@ struct bpf_program __u32 func_info_rec_size; __u32 func_info_cnt;
- struct bpf_capabilities *caps; - void *line_info; __u32 line_info_rec_size; __u32 line_info_cnt; @@@ -403,6 -401,7 +401,7 @@@ struct bpf_object Elf_Data *rodata; Elf_Data *bss; Elf_Data *st_ops_data; + size_t shstrndx; /* section index for section name strings */ size_t strtabidx; struct { GElf_Shdr shdr; @@@ -436,12 -435,18 +435,18 @@@ void *priv; bpf_object_clear_priv_t clear_priv;
- struct bpf_capabilities caps; - char path[]; }; #define obj_elf_valid(o) ((o)->efile.elf)
+ static const char *elf_sym_str(const struct bpf_object *obj, size_t off); + static const char *elf_sec_str(const struct bpf_object *obj, size_t off); + static Elf_Scn *elf_sec_by_idx(const struct bpf_object *obj, size_t idx); + static Elf_Scn *elf_sec_by_name(const struct bpf_object *obj, const char *name); + static int elf_sec_hdr(const struct bpf_object *obj, Elf_Scn *scn, GElf_Shdr *hdr); + static const char *elf_sec_name(const struct bpf_object *obj, Elf_Scn *scn); + static Elf_Data *elf_sec_data(const struct bpf_object *obj, Elf_Scn *scn); + void bpf_program__unload(struct bpf_program *prog) { int i; @@@ -503,7 -508,7 +508,7 @@@ static char *__bpf_program__pin_name(st }
static int - bpf_program__init(void *data, size_t size, char *section_name, int idx, + bpf_program__init(void *data, size_t size, const char *section_name, int idx, struct bpf_program *prog) { const size_t bpf_insn_sz = sizeof(struct bpf_insn); @@@ -552,7 -557,7 +557,7 @@@ errout
static int bpf_object__add_program(struct bpf_object *obj, void *data, size_t size, - char *section_name, int idx) + const char *section_name, int idx) { struct bpf_program prog, *progs; int nr_progs, err; @@@ -561,11 -566,10 +566,10 @@@ if (err) return err;
- prog.caps = &obj->caps; progs = obj->programs; nr_progs = obj->nr_programs;
- progs = reallocarray(progs, nr_progs + 1, sizeof(progs[0])); + progs = libbpf_reallocarray(progs, nr_progs + 1, sizeof(progs[0])); if (!progs) { /* * In this case the original obj->programs @@@ -578,7 -582,7 +582,7 @@@ return -ENOMEM; }
- pr_debug("found program %s\n", prog.section_name); + pr_debug("elf: found program '%s'\n", prog.section_name); obj->programs = progs; obj->nr_programs = nr_progs + 1; prog.obj = obj; @@@ -598,8 -602,7 +602,7 @@@ bpf_object__init_prog_names(struct bpf_
prog = &obj->programs[pi];
- for (si = 0; si < symbols->d_size / sizeof(GElf_Sym) && !name; - si++) { + for (si = 0; si < symbols->d_size / sizeof(GElf_Sym) && !name; si++) { GElf_Sym sym;
if (!gelf_getsym(symbols, si, &sym)) @@@ -609,11 -612,9 +612,9 @@@ if (GELF_ST_BIND(sym.st_info) != STB_GLOBAL) continue;
- name = elf_strptr(obj->efile.elf, - obj->efile.strtabidx, - sym.st_name); + name = elf_sym_str(obj, sym.st_name); if (!name) { - pr_warn("failed to get sym name string for prog %s\n", + pr_warn("prog '%s': failed to get symbol name\n", prog->section_name); return -LIBBPF_ERRNO__LIBELF; } @@@ -623,17 -624,14 +624,14 @@@ name = ".text";
if (!name) { - pr_warn("failed to find sym for prog %s\n", + pr_warn("prog '%s': failed to find program symbol\n", prog->section_name); return -EINVAL; }
prog->name = strdup(name); - if (!prog->name) { - pr_warn("failed to allocate memory for prog sym %s\n", - name); + if (!prog->name) return -ENOMEM; - } }
return 0; @@@ -1066,13 -1064,18 +1064,18 @@@ static void bpf_object__elf_finish(stru obj->efile.obj_buf_sz = 0; }
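A change that recurs through the rest of this diff: plain realloc()/reallocarray() calls become libbpf_reallocarray(), which is what lets the tools/libc_compat.h include go away earlier on. Presumably the helper keeps reallocarray()'s overflow-checked semantics; a standalone approximation of what such a helper must do:

    #include <stdlib.h>

    /* realloc() an nmemb * size buffer, refusing multiplications
     * that would wrap around SIZE_MAX. */
    static void *xreallocarray(void *ptr, size_t nmemb, size_t size)
    {
            if (size && nmemb > (size_t)-1 / size)
                    return NULL;    /* nmemb * size would overflow */
            return realloc(ptr, nmemb * size);
    }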
+ /* if libelf is old and doesn't support mmap(), fall back to read() */ + #ifndef ELF_C_READ_MMAP + #define ELF_C_READ_MMAP ELF_C_READ + #endif + static int bpf_object__elf_init(struct bpf_object *obj) { int err = 0; GElf_Ehdr *ep;
if (obj_elf_valid(obj)) { - pr_warn("elf init: internal error\n"); + pr_warn("elf: init internal error\n"); return -LIBBPF_ERRNO__LIBELF; }
@@@ -1090,31 -1093,44 +1093,44 @@@
err = -errno; cp = libbpf_strerror_r(err, errmsg, sizeof(errmsg)); - pr_warn("failed to open %s: %s\n", obj->path, cp); + pr_warn("elf: failed to open %s: %s\n", obj->path, cp); return err; }
- obj->efile.elf = elf_begin(obj->efile.fd, - LIBBPF_ELF_C_READ_MMAP, NULL); + obj->efile.elf = elf_begin(obj->efile.fd, ELF_C_READ_MMAP, NULL); }
if (!obj->efile.elf) { - pr_warn("failed to open %s as ELF file\n", obj->path); + pr_warn("elf: failed to open %s as ELF file: %s\n", obj->path, elf_errmsg(-1)); err = -LIBBPF_ERRNO__LIBELF; goto errout; }
if (!gelf_getehdr(obj->efile.elf, &obj->efile.ehdr)) { - pr_warn("failed to get EHDR from %s\n", obj->path); + pr_warn("elf: failed to get ELF header from %s: %s\n", obj->path, elf_errmsg(-1)); err = -LIBBPF_ERRNO__FORMAT; goto errout; } ep = &obj->efile.ehdr;
+ if (elf_getshdrstrndx(obj->efile.elf, &obj->efile.shstrndx)) { + pr_warn("elf: failed to get section names section index for %s: %s\n", + obj->path, elf_errmsg(-1)); + err = -LIBBPF_ERRNO__FORMAT; + goto errout; + } + + /* Elf is corrupted/truncated, avoid calling elf_strptr. */ + if (!elf_rawdata(elf_getscn(obj->efile.elf, obj->efile.shstrndx), NULL)) { + pr_warn("elf: failed to get section names strings from %s: %s\n", + obj->path, elf_errmsg(-1)); + return -LIBBPF_ERRNO__FORMAT; + } + /* Old LLVM set e_machine to EM_NONE */ if (ep->e_type != ET_REL || (ep->e_machine && ep->e_machine != EM_BPF)) { - pr_warn("%s is not an eBPF object file\n", obj->path); + pr_warn("elf: %s is not a valid eBPF object file\n", obj->path); err = -LIBBPF_ERRNO__FORMAT; goto errout; } @@@ -1136,7 -1152,7 +1152,7 @@@ static int bpf_object__check_endianness #else # error "Unrecognized __BYTE_ORDER__" #endif - pr_warn("endianness mismatch.\n"); + pr_warn("elf: endianness mismatch in %s.\n", obj->path); return -LIBBPF_ERRNO__ENDIAN; }
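The two new checks above harden ELF open: the section-name string table index is resolved once via elf_getshdrstrndx() and its raw data is verified up front, so later elf_strptr() lookups cannot trip over a truncated file. The same validation in isolation, using only documented libelf calls:

    #include <gelf.h>

    static int check_shstrtab(Elf *elf)
    {
            size_t shstrndx;

            if (elf_getshdrstrndx(elf, &shstrndx))
                    return -1;      /* no usable section-name index */
            if (!elf_rawdata(elf_getscn(elf, shstrndx), NULL))
                    return -1;      /* corrupted/truncated ELF */
            return 0;
    }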
@@@ -1171,55 -1187,10 +1187,10 @@@ static bool bpf_map_type__is_map_in_map return false; }
- static int bpf_object_search_section_size(const struct bpf_object *obj, - const char *name, size_t *d_size) - { - const GElf_Ehdr *ep = &obj->efile.ehdr; - Elf *elf = obj->efile.elf; - Elf_Scn *scn = NULL; - int idx = 0; - - while ((scn = elf_nextscn(elf, scn)) != NULL) { - const char *sec_name; - Elf_Data *data; - GElf_Shdr sh; - - idx++; - if (gelf_getshdr(scn, &sh) != &sh) { - pr_warn("failed to get section(%d) header from %s\n", - idx, obj->path); - return -EIO; - } - - sec_name = elf_strptr(elf, ep->e_shstrndx, sh.sh_name); - if (!sec_name) { - pr_warn("failed to get section(%d) name from %s\n", - idx, obj->path); - return -EIO; - } - - if (strcmp(name, sec_name)) - continue; - - data = elf_getdata(scn, 0); - if (!data) { - pr_warn("failed to get section(%d) data from %s(%s)\n", - idx, name, obj->path); - return -EIO; - } - - *d_size = data->d_size; - return 0; - } - - return -ENOENT; - } - int bpf_object__section_size(const struct bpf_object *obj, const char *name, __u32 *size) { int ret = -ENOENT; - size_t d_size;
*size = 0; if (!name) { @@@ -1237,9 -1208,13 +1208,13 @@@ if (obj->efile.st_ops_data) *size = obj->efile.st_ops_data->d_size; } else { - ret = bpf_object_search_section_size(obj, name, &d_size); - if (!ret) - *size = d_size; + Elf_Scn *scn = elf_sec_by_name(obj, name); + Elf_Data *data = elf_sec_data(obj, scn); + + if (data) { + ret = 0; /* found it */ + *size = data->d_size; + } }
return *size ? 0 : ret; @@@ -1264,8 -1239,7 +1239,7 @@@ int bpf_object__variable_offset(const s GELF_ST_TYPE(sym.st_info) != STT_OBJECT) continue;
- sname = elf_strptr(obj->efile.elf, obj->efile.strtabidx, - sym.st_name); + sname = elf_sym_str(obj, sym.st_name); if (!sname) { pr_warn("failed to get sym name string for var %s\n", name); @@@ -1290,7 -1264,7 +1264,7 @@@ static struct bpf_map *bpf_object__add_ return &obj->maps[obj->nr_maps++];
new_cap = max((size_t)4, obj->maps_cap * 3 / 2); - new_maps = realloc(obj->maps, new_cap * sizeof(*obj->maps)); + new_maps = libbpf_reallocarray(obj->maps, new_cap, sizeof(*obj->maps)); if (!new_maps) { pr_warn("alloc maps for object failed\n"); return ERR_PTR(-ENOMEM); @@@ -1742,12 -1716,12 +1716,12 @@@ static int bpf_object__init_user_maps(s if (!symbols) return -EINVAL;
- scn = elf_getscn(obj->efile.elf, obj->efile.maps_shndx); - if (scn) - data = elf_getdata(scn, NULL); + + scn = elf_sec_by_idx(obj, obj->efile.maps_shndx); + data = elf_sec_data(obj, scn); if (!scn || !data) { - pr_warn("failed to get Elf_Data from map section %d\n", - obj->efile.maps_shndx); + pr_warn("elf: failed to get legacy map definitions for %s\n", + obj->path); return -EINVAL; }
@@@ -1769,12 -1743,12 +1743,12 @@@ nr_maps++; } /* Assume equally sized map definitions */ - pr_debug("maps in %s: %d maps in %zd bytes\n", - obj->path, nr_maps, data->d_size); + pr_debug("elf: found %d legacy map definitions (%zd bytes) in %s\n", + nr_maps, data->d_size, obj->path);
if (!data->d_size || nr_maps == 0 || (data->d_size % nr_maps) != 0) { - pr_warn("unable to determine map definition size section %s, %d maps in %zd bytes\n", - obj->path, nr_maps, data->d_size); + pr_warn("elf: unable to determine legacy map definition size in %s\n", + obj->path); return -EINVAL; } map_def_sz = data->d_size / nr_maps; @@@ -1795,8 -1769,7 +1769,7 @@@ if (IS_ERR(map)) return PTR_ERR(map);
- map_name = elf_strptr(obj->efile.elf, obj->efile.strtabidx, - sym.st_name); + map_name = elf_sym_str(obj, sym.st_name); if (!map_name) { pr_warn("failed to get map #%d name sym string for obj %s\n", i, obj->path); @@@ -1884,6 -1857,29 +1857,29 @@@ resolve_func_ptr(const struct btf *btf return btf_is_func_proto(t) ? t : NULL; }
+ static const char *btf_kind_str(const struct btf_type *t) + { + switch (btf_kind(t)) { + case BTF_KIND_UNKN: return "void"; + case BTF_KIND_INT: return "int"; + case BTF_KIND_PTR: return "ptr"; + case BTF_KIND_ARRAY: return "array"; + case BTF_KIND_STRUCT: return "struct"; + case BTF_KIND_UNION: return "union"; + case BTF_KIND_ENUM: return "enum"; + case BTF_KIND_FWD: return "fwd"; + case BTF_KIND_TYPEDEF: return "typedef"; + case BTF_KIND_VOLATILE: return "volatile"; + case BTF_KIND_CONST: return "const"; + case BTF_KIND_RESTRICT: return "restrict"; + case BTF_KIND_FUNC: return "func"; + case BTF_KIND_FUNC_PROTO: return "func_proto"; + case BTF_KIND_VAR: return "var"; + case BTF_KIND_DATASEC: return "datasec"; + default: return "unknown"; + } + } + /* * Fetch integer attribute of BTF map definition. Such attributes are * represented using a pointer to an array, in which dimensionality of array @@@ -1900,8 -1896,8 +1896,8 @@@ static bool get_map_field_int(const cha const struct btf_type *arr_t;
if (!btf_is_ptr(t)) { - pr_warn("map '%s': attr '%s': expected PTR, got %u.\n", - map_name, name, btf_kind(t)); + pr_warn("map '%s': attr '%s': expected PTR, got %s.\n", + map_name, name, btf_kind_str(t)); return false; }
@@@ -1912,8 -1908,8 +1908,8 @@@ return false; } if (!btf_is_array(arr_t)) { - pr_warn("map '%s': attr '%s': expected ARRAY, got %u.\n", - map_name, name, btf_kind(arr_t)); + pr_warn("map '%s': attr '%s': expected ARRAY, got %s.\n", + map_name, name, btf_kind_str(arr_t)); return false; } arr_info = btf_array(arr_t); @@@ -1924,7 -1920,7 +1920,7 @@@ static int build_map_pin_path(struct bpf_map *map, const char *path) { char buf[PATH_MAX]; - int err, len; + int len;
if (!path) path = "/sys/fs/bpf"; @@@ -1935,11 -1931,7 +1931,7 @@@ else if (len >= PATH_MAX) return -ENAMETOOLONG;
- err = bpf_map__set_pin_path(map, buf); - if (err) - return err; - - return 0; + return bpf_map__set_pin_path(map, buf); }
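build_map_pin_path() collapses to a single call above because snprintf() already reports both failure and truncation. A sketch of the same construction outside libbpf (names are illustrative):

    #include <errno.h>
    #include <stdio.h>

    static int make_pin_path(char *buf, size_t sz,
                             const char *dir, const char *name)
    {
            int len = snprintf(buf, sz, "%s/%s",
                               dir ? dir : "/sys/fs/bpf", name);

            if (len < 0)
                    return -EINVAL;
            if ((size_t)len >= sz)
                    return -ENAMETOOLONG;   /* result was truncated */
            return 0;
    }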
@@@ -2007,8 -1999,8 +1999,8 @@@ static int parse_btf_map_def(struct bpf return -EINVAL; } if (!btf_is_ptr(t)) { - pr_warn("map '%s': key spec is not PTR: %u.\n", - map->name, btf_kind(t)); + pr_warn("map '%s': key spec is not PTR: %s.\n", + map->name, btf_kind_str(t)); return -EINVAL; } sz = btf__resolve_size(obj->btf, t->type); @@@ -2049,8 -2041,8 +2041,8 @@@ return -EINVAL; } if (!btf_is_ptr(t)) { - pr_warn("map '%s': value spec is not PTR: %u.\n", - map->name, btf_kind(t)); + pr_warn("map '%s': value spec is not PTR: %s.\n", + map->name, btf_kind_str(t)); return -EINVAL; } sz = btf__resolve_size(obj->btf, t->type); @@@ -2107,14 -2099,14 +2099,14 @@@ t = skip_mods_and_typedefs(obj->btf, btf_array(t)->type, NULL); if (!btf_is_ptr(t)) { - pr_warn("map '%s': map-in-map inner def is of unexpected kind %u.\n", - map->name, btf_kind(t)); + pr_warn("map '%s': map-in-map inner def is of unexpected kind %s.\n", + map->name, btf_kind_str(t)); return -EINVAL; } t = skip_mods_and_typedefs(obj->btf, t->type, NULL); if (!btf_is_struct(t)) { - pr_warn("map '%s': map-in-map inner def is of unexpected kind %u.\n", - map->name, btf_kind(t)); + pr_warn("map '%s': map-in-map inner def is of unexpected kind %s.\n", + map->name, btf_kind_str(t)); return -EINVAL; }
@@@ -2205,8 -2197,8 +2197,8 @@@ static int bpf_object__init_user_btf_ma return -EINVAL; } if (!btf_is_var(var)) { - pr_warn("map '%s': unexpected var kind %u.\n", - map_name, btf_kind(var)); + pr_warn("map '%s': unexpected var kind %s.\n", + map_name, btf_kind_str(var)); return -EINVAL; } if (var_extra->linkage != BTF_VAR_GLOBAL_ALLOCATED && @@@ -2218,8 -2210,8 +2210,8 @@@
def = skip_mods_and_typedefs(obj->btf, var->type, NULL); if (!btf_is_struct(def)) { - pr_warn("map '%s': unexpected def kind %u.\n", - map_name, btf_kind(var)); + pr_warn("map '%s': unexpected def kind %s.\n", + map_name, btf_kind_str(var)); return -EINVAL; } if (def->size > vi->size) { @@@ -2259,12 -2251,11 +2251,11 @@@ static int bpf_object__init_user_btf_ma if (obj->efile.btf_maps_shndx < 0) return 0;
- scn = elf_getscn(obj->efile.elf, obj->efile.btf_maps_shndx); - if (scn) - data = elf_getdata(scn, NULL); + scn = elf_sec_by_idx(obj, obj->efile.btf_maps_shndx); + data = elf_sec_data(obj, scn); if (!scn || !data) { - pr_warn("failed to get Elf_Data from map section %d (%s)\n", - obj->efile.btf_maps_shndx, MAPS_ELF_SEC); + pr_warn("elf: failed to get %s map definitions for %s\n", + MAPS_ELF_SEC, obj->path); return -EINVAL; }
@@@ -2322,36 -2313,28 +2313,28 @@@ static int bpf_object__init_maps(struc
static bool section_have_execinstr(struct bpf_object *obj, int idx) { - Elf_Scn *scn; GElf_Shdr sh;
- scn = elf_getscn(obj->efile.elf, idx); - if (!scn) - return false; - - if (gelf_getshdr(scn, &sh) != &sh) + if (elf_sec_hdr(obj, elf_sec_by_idx(obj, idx), &sh)) return false;
- if (sh.sh_flags & SHF_EXECINSTR) - return true; - - return false; + return sh.sh_flags & SHF_EXECINSTR; }
static bool btf_needs_sanitization(struct bpf_object *obj) { - bool has_func_global = obj->caps.btf_func_global; - bool has_datasec = obj->caps.btf_datasec; - bool has_func = obj->caps.btf_func; + bool has_func_global = kernel_supports(FEAT_BTF_GLOBAL_FUNC); + bool has_datasec = kernel_supports(FEAT_BTF_DATASEC); + bool has_func = kernel_supports(FEAT_BTF_FUNC);
return !has_func || !has_datasec || !has_func_global; }
static void bpf_object__sanitize_btf(struct bpf_object *obj, struct btf *btf) { - bool has_func_global = obj->caps.btf_func_global; - bool has_datasec = obj->caps.btf_datasec; - bool has_func = obj->caps.btf_func; + bool has_func_global = kernel_supports(FEAT_BTF_GLOBAL_FUNC); + bool has_datasec = kernel_supports(FEAT_BTF_DATASEC); + bool has_func = kernel_supports(FEAT_BTF_FUNC); struct btf_type *t; int i, j, vlen;
@@@ -2499,7 -2482,7 +2482,7 @@@ static int bpf_object__load_vmlinux_btf int err;
/* CO-RE relocations need kernel BTF */ - if (obj->btf_ext && obj->btf_ext->field_reloc_info.len) + if (obj->btf_ext && obj->btf_ext->core_relo_info.len) need_vmlinux_btf = true;
bpf_object__for_each_program(prog, obj) { @@@ -2533,6 -2516,15 +2516,15 @@@ static int bpf_object__sanitize_and_loa if (!obj->btf) return 0;
+ if (!kernel_supports(FEAT_BTF)) { + if (kernel_needs_btf(obj)) { + err = -EOPNOTSUPP; + goto report; + } + pr_debug("Kernel doesn't support BTF, skipping uploading it.\n"); + return 0; + } + sanitize = btf_needs_sanitization(obj); if (sanitize) { const void *raw_data; @@@ -2558,6 -2550,7 +2550,7 @@@ } btf__free(kern_btf); } + report: if (err) { btf_mandatory = kernel_needs_btf(obj); pr_warn("Error loading .BTF into kernel: %d. %s\n", err, @@@ -2569,61 -2562,199 +2562,199 @@@ return err; }
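The new FEAT_BTF gate above means a kernel without BTF support is fatal only when the object genuinely needs its BTF uploaded; otherwise the upload is skipped quietly. Compressed into a sketch (upload_btf() is a hypothetical stand-in for the sanitize-and-load path):

    #include <errno.h>
    #include <stdbool.h>

    static int upload_btf(void);    /* hypothetical: sanitize + load */

    static int maybe_upload_btf(bool kern_has_btf, bool btf_mandatory)
    {
            if (!kern_has_btf)
                    return btf_mandatory ? -EOPNOTSUPP : 0; /* skip quietly */
            return upload_btf();
    }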
+ static const char *elf_sym_str(const struct bpf_object *obj, size_t off) + { + const char *name; + + name = elf_strptr(obj->efile.elf, obj->efile.strtabidx, off); + if (!name) { + pr_warn("elf: failed to get section name string at offset %zu from %s: %s\n", + off, obj->path, elf_errmsg(-1)); + return NULL; + } + + return name; + } + + static const char *elf_sec_str(const struct bpf_object *obj, size_t off) + { + const char *name; + + name = elf_strptr(obj->efile.elf, obj->efile.shstrndx, off); + if (!name) { + pr_warn("elf: failed to get section name string at offset %zu from %s: %s\n", + off, obj->path, elf_errmsg(-1)); + return NULL; + } + + return name; + } + + static Elf_Scn *elf_sec_by_idx(const struct bpf_object *obj, size_t idx) + { + Elf_Scn *scn; + + scn = elf_getscn(obj->efile.elf, idx); + if (!scn) { + pr_warn("elf: failed to get section(%zu) from %s: %s\n", + idx, obj->path, elf_errmsg(-1)); + return NULL; + } + return scn; + } + + static Elf_Scn *elf_sec_by_name(const struct bpf_object *obj, const char *name) + { + Elf_Scn *scn = NULL; + Elf *elf = obj->efile.elf; + const char *sec_name; + + while ((scn = elf_nextscn(elf, scn)) != NULL) { + sec_name = elf_sec_name(obj, scn); + if (!sec_name) + return NULL; + + if (strcmp(sec_name, name) != 0) + continue; + + return scn; + } + return NULL; + } + + static int elf_sec_hdr(const struct bpf_object *obj, Elf_Scn *scn, GElf_Shdr *hdr) + { + if (!scn) + return -EINVAL; + + if (gelf_getshdr(scn, hdr) != hdr) { + pr_warn("elf: failed to get section(%zu) header from %s: %s\n", + elf_ndxscn(scn), obj->path, elf_errmsg(-1)); + return -EINVAL; + } + + return 0; + } + + static const char *elf_sec_name(const struct bpf_object *obj, Elf_Scn *scn) + { + const char *name; + GElf_Shdr sh; + + if (!scn) + return NULL; + + if (elf_sec_hdr(obj, scn, &sh)) + return NULL; + + name = elf_sec_str(obj, sh.sh_name); + if (!name) { + pr_warn("elf: failed to get section(%zu) name from %s: %s\n", + elf_ndxscn(scn), obj->path, elf_errmsg(-1)); + return NULL; + } + + return name; + } + + static Elf_Data *elf_sec_data(const struct bpf_object *obj, Elf_Scn *scn) + { + Elf_Data *data; + + if (!scn) + return NULL; + + data = elf_getdata(scn, 0); + if (!data) { + pr_warn("elf: failed to get section(%zu) %s data from %s: %s\n", + elf_ndxscn(scn), elf_sec_name(obj, scn) ?: "<?>", + obj->path, elf_errmsg(-1)); + return NULL; + } + + return data; + } + + static bool is_sec_name_dwarf(const char *name) + { + /* approximation, but the actual list is too long */ + return strncmp(name, ".debug_", sizeof(".debug_") - 1) == 0; + } + + static bool ignore_elf_section(GElf_Shdr *hdr, const char *name) + { + /* no special handling of .strtab */ + if (hdr->sh_type == SHT_STRTAB) + return true; + + /* ignore .llvm_addrsig section as well */ + if (hdr->sh_type == 0x6FFF4C03 /* SHT_LLVM_ADDRSIG */) + return true; + + /* no subprograms will lead to an empty .text section, ignore it */ + if (hdr->sh_type == SHT_PROGBITS && hdr->sh_size == 0 && + strcmp(name, ".text") == 0) + return true; + + /* DWARF sections */ + if (is_sec_name_dwarf(name)) + return true; + + if (strncmp(name, ".rel", sizeof(".rel") - 1) == 0) { + name += sizeof(".rel") - 1; + /* DWARF section relocations */ + if (is_sec_name_dwarf(name)) + return true; + + /* .BTF and .BTF.ext don't need relocations */ + if (strcmp(name, BTF_ELF_SEC) == 0 || + strcmp(name, BTF_EXT_ELF_SEC) == 0) + return true; + } + + return false; + } + static int bpf_object__elf_collect(struct bpf_object *obj) { Elf *elf = obj->efile.elf; - 
GElf_Ehdr *ep = &obj->efile.ehdr; Elf_Data *btf_ext_data = NULL; Elf_Data *btf_data = NULL; Elf_Scn *scn = NULL; int idx = 0, err = 0;
- /* Elf is corrupted/truncated, avoid calling elf_strptr. */ - if (!elf_rawdata(elf_getscn(elf, ep->e_shstrndx), NULL)) { - pr_warn("failed to get e_shstrndx from %s\n", obj->path); - return -LIBBPF_ERRNO__FORMAT; - } - while ((scn = elf_nextscn(elf, scn)) != NULL) { - char *name; + const char *name; GElf_Shdr sh; Elf_Data *data;
idx++; - if (gelf_getshdr(scn, &sh) != &sh) { - pr_warn("failed to get section(%d) header from %s\n", - idx, obj->path); + + if (elf_sec_hdr(obj, scn, &sh)) return -LIBBPF_ERRNO__FORMAT; - }
- name = elf_strptr(elf, ep->e_shstrndx, sh.sh_name); - if (!name) { - pr_warn("failed to get section(%d) name from %s\n", - idx, obj->path); + name = elf_sec_str(obj, sh.sh_name); + if (!name) return -LIBBPF_ERRNO__FORMAT; - }
- data = elf_getdata(scn, 0); - if (!data) { - pr_warn("failed to get section(%d) data from %s(%s)\n", - idx, name, obj->path); + if (ignore_elf_section(&sh, name)) + continue; + + data = elf_sec_data(obj, scn); + if (!data) return -LIBBPF_ERRNO__FORMAT; - } - pr_debug("section(%d) %s, size %ld, link %d, flags %lx, type=%d\n", + + pr_debug("elf: section(%d) %s, size %ld, link %d, flags %lx, type=%d\n", idx, name, (unsigned long)data->d_size, (int)sh.sh_link, (unsigned long)sh.sh_flags, (int)sh.sh_type);
if (strcmp(name, "license") == 0) { - err = bpf_object__init_license(obj, - data->d_buf, - data->d_size); + err = bpf_object__init_license(obj, data->d_buf, data->d_size); if (err) return err; } else if (strcmp(name, "version") == 0) { - err = bpf_object__init_kversion(obj, - data->d_buf, - data->d_size); + err = bpf_object__init_kversion(obj, data->d_buf, data->d_size); if (err) return err; } else if (strcmp(name, "maps") == 0) { @@@ -2636,8 -2767,7 +2767,7 @@@ btf_ext_data = data; } else if (sh.sh_type == SHT_SYMTAB) { if (obj->efile.symbols) { - pr_warn("bpf: multiple SYMTAB in %s\n", - obj->path); + pr_warn("elf: multiple symbol tables in %s\n", obj->path); return -LIBBPF_ERRNO__FORMAT; } obj->efile.symbols = data; @@@ -2650,16 -2780,8 +2780,8 @@@ err = bpf_object__add_program(obj, data->d_buf, data->d_size, name, idx); - if (err) { - char errmsg[STRERR_BUFSIZE]; - char *cp; - - cp = libbpf_strerror_r(-err, errmsg, - sizeof(errmsg)); - pr_warn("failed to alloc program %s (%s): %s", - name, obj->path, cp); + if (err) return err; - } } else if (strcmp(name, DATA_SEC) == 0) { obj->efile.data = data; obj->efile.data_shndx = idx; @@@ -2670,7 -2792,8 +2792,8 @@@ obj->efile.st_ops_data = data; obj->efile.st_ops_shndx = idx; } else { - pr_debug("skip section(%d) %s\n", idx, name); + pr_info("elf: skipping unrecognized data section(%d) %s\n", + idx, name); } } else if (sh.sh_type == SHT_REL) { int nr_sects = obj->efile.nr_reloc_sects; @@@ -2681,34 -2804,33 +2804,33 @@@ if (!section_have_execinstr(obj, sec) && strcmp(name, ".rel" STRUCT_OPS_SEC) && strcmp(name, ".rel" MAPS_ELF_SEC)) { - pr_debug("skip relo %s(%d) for section(%d)\n", - name, idx, sec); + pr_info("elf: skipping relo section(%d) %s for section(%d) %s\n", + idx, name, sec, + elf_sec_name(obj, elf_sec_by_idx(obj, sec)) ?: "<?>"); continue; }
- sects = reallocarray(sects, nr_sects + 1, - sizeof(*obj->efile.reloc_sects)); - if (!sects) { - pr_warn("reloc_sects realloc failed\n"); + sects = libbpf_reallocarray(sects, nr_sects + 1, + sizeof(*obj->efile.reloc_sects)); + if (!sects) return -ENOMEM; - }
obj->efile.reloc_sects = sects; obj->efile.nr_reloc_sects++;
obj->efile.reloc_sects[nr_sects].shdr = sh; obj->efile.reloc_sects[nr_sects].data = data; - } else if (sh.sh_type == SHT_NOBITS && - strcmp(name, BSS_SEC) == 0) { + } else if (sh.sh_type == SHT_NOBITS && strcmp(name, BSS_SEC) == 0) { obj->efile.bss = data; obj->efile.bss_shndx = idx; } else { - pr_debug("skip section(%d) %s\n", idx, name); + pr_info("elf: skipping section(%d) %s (size %zu)\n", idx, name, + (size_t)sh.sh_size); } }
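With the elf_sec_*() wrappers and ignore_elf_section() in place, the section walk above reduces to the shape below. This is a sketch of the control flow only, reusing the file-internal helpers introduced earlier in the diff, so it only makes sense in the context of libbpf.c:

    static void walk_sections(const struct bpf_object *obj)
    {
            Elf_Scn *scn = NULL;
            const char *name;
            GElf_Shdr sh;

            while ((scn = elf_nextscn(obj->efile.elf, scn)) != NULL) {
                    if (elf_sec_hdr(obj, scn, &sh))
                            continue;       /* wrapper already warned */
                    name = elf_sec_str(obj, sh.sh_name);
                    if (!name || ignore_elf_section(&sh, name))
                            continue;
                    /* dispatch on name / sh.sh_type here */
            }
    }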
if (!obj->efile.strtabidx || obj->efile.strtabidx > idx) { - pr_warn("Corrupted ELF file: index of strtab invalid\n"); + pr_warn("elf: symbol strings section missing or invalid in %s\n", obj->path); return -LIBBPF_ERRNO__FORMAT; } return bpf_object__init_btf(obj, btf_data, btf_ext_data); @@@ -2869,14 -2991,13 +2991,13 @@@ static int bpf_object__collect_externs( if (!obj->efile.symbols) return 0;
- scn = elf_getscn(obj->efile.elf, obj->efile.symbols_shndx); - if (!scn) - return -LIBBPF_ERRNO__FORMAT; - if (gelf_getshdr(scn, &sh) != &sh) + scn = elf_sec_by_idx(obj, obj->efile.symbols_shndx); + if (elf_sec_hdr(obj, scn, &sh)) return -LIBBPF_ERRNO__FORMAT; - n = sh.sh_size / sh.sh_entsize;
+ n = sh.sh_size / sh.sh_entsize; pr_debug("looking for externs among %d symbols...\n", n); + for (i = 0; i < n; i++) { GElf_Sym sym;
@@@ -2884,13 -3005,12 +3005,12 @@@ return -LIBBPF_ERRNO__FORMAT; if (!sym_is_extern(&sym)) continue; - ext_name = elf_strptr(obj->efile.elf, obj->efile.strtabidx, - sym.st_name); + ext_name = elf_sym_str(obj, sym.st_name); if (!ext_name || !ext_name[0]) continue;
ext = obj->externs; - ext = reallocarray(ext, obj->nr_extern + 1, sizeof(*ext)); + ext = libbpf_reallocarray(ext, obj->nr_extern + 1, sizeof(*ext)); if (!ext) return -ENOMEM; obj->externs = ext; @@@ -3109,7 -3229,7 +3229,7 @@@ bpf_object__section_to_libbpf_map_type(
static int bpf_program__record_reloc(struct bpf_program *prog, struct reloc_desc *reloc_desc, - __u32 insn_idx, const char *name, + __u32 insn_idx, const char *sym_name, const GElf_Sym *sym, const GElf_Rel *rel) { struct bpf_insn *insn = &prog->insns[insn_idx]; @@@ -3117,22 -3237,25 +3237,25 @@@ struct bpf_object *obj = prog->obj; __u32 shdr_idx = sym->st_shndx; enum libbpf_map_type type; + const char *sym_sec_name; struct bpf_map *map;
/* sub-program call relocation */ if (insn->code == (BPF_JMP | BPF_CALL)) { if (insn->src_reg != BPF_PSEUDO_CALL) { - pr_warn("incorrect bpf_call opcode\n"); + pr_warn("prog '%s': incorrect bpf_call opcode\n", prog->name); return -LIBBPF_ERRNO__RELOC; } /* text_shndx can be 0, if no default "main" program exists */ if (!shdr_idx || shdr_idx != obj->efile.text_shndx) { - pr_warn("bad call relo against section %u\n", shdr_idx); + sym_sec_name = elf_sec_name(obj, elf_sec_by_idx(obj, shdr_idx)); + pr_warn("prog '%s': bad call relo against '%s' in section '%s'\n", + prog->name, sym_name, sym_sec_name); return -LIBBPF_ERRNO__RELOC; } - if (sym->st_value % 8) { - pr_warn("bad call relo offset: %zu\n", - (size_t)sym->st_value); + if (sym->st_value % BPF_INSN_SZ) { + pr_warn("prog '%s': bad call relo against '%s' at offset %zu\n", + prog->name, sym_name, (size_t)sym->st_value); return -LIBBPF_ERRNO__RELOC; } reloc_desc->type = RELO_CALL; @@@ -3143,8 -3266,8 +3266,8 @@@ }
if (insn->code != (BPF_LD | BPF_IMM | BPF_DW)) { - pr_warn("invalid relo for insns[%d].code 0x%x\n", - insn_idx, insn->code); + pr_warn("prog '%s': invalid relo against '%s' for insns[%d].code 0x%x\n", + prog->name, sym_name, insn_idx, insn->code); return -LIBBPF_ERRNO__RELOC; }
@@@ -3159,12 -3282,12 +3282,12 @@@ break; } if (i >= n) { - pr_warn("extern relo failed to find extern for sym %d\n", - sym_idx); + pr_warn("prog '%s': extern relo failed to find extern for '%s' (%d)\n", + prog->name, sym_name, sym_idx); return -LIBBPF_ERRNO__RELOC; } - pr_debug("found extern #%d '%s' (sym %d) for insn %u\n", - i, ext->name, ext->sym_idx, insn_idx); + pr_debug("prog '%s': found extern #%d '%s' (sym %d) for insn #%u\n", + prog->name, i, ext->name, ext->sym_idx, insn_idx); reloc_desc->type = RELO_EXTERN; reloc_desc->insn_idx = insn_idx; reloc_desc->sym_off = i; /* sym_off stores extern index */ @@@ -3172,18 -3295,19 +3295,19 @@@ }
if (!shdr_idx || shdr_idx >= SHN_LORESERVE) { - pr_warn("invalid relo for '%s' in special section 0x%x; forgot to initialize global var?..\n", - name, shdr_idx); + pr_warn("prog '%s': invalid relo against '%s' in special section 0x%x; forgot to initialize global var?..\n", + prog->name, sym_name, shdr_idx); return -LIBBPF_ERRNO__RELOC; }
type = bpf_object__section_to_libbpf_map_type(obj, shdr_idx); + sym_sec_name = elf_sec_name(obj, elf_sec_by_idx(obj, shdr_idx));
/* generic map reference relocation */ if (type == LIBBPF_MAP_UNSPEC) { if (!bpf_object__shndx_is_maps(obj, shdr_idx)) { - pr_warn("bad map relo against section %u\n", - shdr_idx); + pr_warn("prog '%s': bad map relo against '%s' in section '%s'\n", + prog->name, sym_name, sym_sec_name); return -LIBBPF_ERRNO__RELOC; } for (map_idx = 0; map_idx < nr_maps; map_idx++) { @@@ -3192,14 -3316,14 +3316,14 @@@ map->sec_idx != sym->st_shndx || map->sec_offset != sym->st_value) continue; - pr_debug("found map %zd (%s, sec %d, off %zu) for insn %u\n", - map_idx, map->name, map->sec_idx, + pr_debug("prog '%s': found map %zd (%s, sec %d, off %zu) for insn #%u\n", + prog->name, map_idx, map->name, map->sec_idx, map->sec_offset, insn_idx); break; } if (map_idx >= nr_maps) { - pr_warn("map relo failed to find map for sec %u, off %zu\n", - shdr_idx, (size_t)sym->st_value); + pr_warn("prog '%s': map relo failed to find map for section '%s', off %zu\n", + prog->name, sym_sec_name, (size_t)sym->st_value); return -LIBBPF_ERRNO__RELOC; } reloc_desc->type = RELO_LD64; @@@ -3211,21 -3335,22 +3335,22 @@@
/* global data map relocation */ if (!bpf_object__shndx_is_data(obj, shdr_idx)) { - pr_warn("bad data relo against section %u\n", shdr_idx); + pr_warn("prog '%s': bad data relo against section '%s'\n", + prog->name, sym_sec_name); return -LIBBPF_ERRNO__RELOC; } for (map_idx = 0; map_idx < nr_maps; map_idx++) { map = &obj->maps[map_idx]; if (map->libbpf_type != type) continue; - pr_debug("found data map %zd (%s, sec %d, off %zu) for insn %u\n", - map_idx, map->name, map->sec_idx, map->sec_offset, - insn_idx); + pr_debug("prog '%s': found data map %zd (%s, sec %d, off %zu) for insn %u\n", + prog->name, map_idx, map->name, map->sec_idx, + map->sec_offset, insn_idx); break; } if (map_idx >= nr_maps) { - pr_warn("data relo failed to find map for sec %u\n", - shdr_idx); + pr_warn("prog '%s': data relo failed to find map for section '%s'\n", + prog->name, sym_sec_name); return -LIBBPF_ERRNO__RELOC; }
@@@ -3241,9 -3366,17 +3366,17 @@@ bpf_program__collect_reloc(struct bpf_p Elf_Data *data, struct bpf_object *obj) { Elf_Data *symbols = obj->efile.symbols; + const char *relo_sec_name, *sec_name; + size_t sec_idx = shdr->sh_info; int err, i, nrels;
- pr_debug("collecting relocating info for: '%s'\n", prog->section_name); + relo_sec_name = elf_sec_str(obj, shdr->sh_name); + sec_name = elf_sec_name(obj, elf_sec_by_idx(obj, sec_idx)); + if (!relo_sec_name || !sec_name) + return -EINVAL; + + pr_debug("sec '%s': collecting relocation for section(%zu) '%s'\n", + relo_sec_name, sec_idx, sec_name); nrels = shdr->sh_size / shdr->sh_entsize;
prog->reloc_desc = malloc(sizeof(*prog->reloc_desc) * nrels); @@@ -3254,35 -3387,34 +3387,34 @@@ prog->nr_reloc = nrels;
for (i = 0; i < nrels; i++) { - const char *name; + const char *sym_name; __u32 insn_idx; GElf_Sym sym; GElf_Rel rel;
if (!gelf_getrel(data, i, &rel)) { - pr_warn("relocation: failed to get %d reloc\n", i); + pr_warn("sec '%s': failed to get relo #%d\n", relo_sec_name, i); return -LIBBPF_ERRNO__FORMAT; } if (!gelf_getsym(symbols, GELF_R_SYM(rel.r_info), &sym)) { - pr_warn("relocation: symbol %"PRIx64" not found\n", - GELF_R_SYM(rel.r_info)); + pr_warn("sec '%s': symbol 0x%zx not found for relo #%d\n", + relo_sec_name, (size_t)GELF_R_SYM(rel.r_info), i); return -LIBBPF_ERRNO__FORMAT; } - if (rel.r_offset % sizeof(struct bpf_insn)) + if (rel.r_offset % BPF_INSN_SZ) { + pr_warn("sec '%s': invalid offset 0x%zx for relo #%d\n", + relo_sec_name, (size_t)GELF_R_SYM(rel.r_info), i); return -LIBBPF_ERRNO__FORMAT; + }
- insn_idx = rel.r_offset / sizeof(struct bpf_insn); - name = elf_strptr(obj->efile.elf, obj->efile.strtabidx, - sym.st_name) ? : "<?>"; + insn_idx = rel.r_offset / BPF_INSN_SZ; + sym_name = elf_sym_str(obj, sym.st_name) ?: "<?>";
- pr_debug("relo for shdr %u, symb %zu, value %zu, type %d, bind %d, name %d ('%s'), insn %u\n", - (__u32)sym.st_shndx, (size_t)GELF_R_SYM(rel.r_info), - (size_t)sym.st_value, GELF_ST_TYPE(sym.st_info), - GELF_ST_BIND(sym.st_info), sym.st_name, name, - insn_idx); + pr_debug("sec '%s': relo #%d: insn #%u against '%s'\n", + relo_sec_name, i, insn_idx, sym_name);
err = bpf_program__record_reloc(prog, &prog->reloc_desc[i], - insn_idx, name, &sym, &rel); + insn_idx, sym_name, &sym, &rel); if (err) return err; } @@@ -3433,8 -3565,14 +3565,14 @@@ bpf_object__probe_loading(struct bpf_ob return 0; }
- static int - bpf_object__probe_name(struct bpf_object *obj) + static int probe_fd(int fd) + { + if (fd >= 0) + close(fd); + return fd >= 0; + } + + static int probe_kern_prog_name(void) { struct bpf_load_program_attr attr; struct bpf_insn insns[] = { @@@ -3452,16 -3590,10 +3590,10 @@@ attr.license = "GPL"; attr.name = "test"; ret = bpf_load_program_xattr(&attr, NULL, 0); - if (ret >= 0) { - obj->caps.name = 1; - close(ret); - } - - return 0; + return probe_fd(ret); }
- static int - bpf_object__probe_global_data(struct bpf_object *obj) + static int probe_kern_global_data(void) { struct bpf_load_program_attr prg_attr; struct bpf_create_map_attr map_attr; @@@ -3498,16 -3630,23 +3630,23 @@@ prg_attr.license = "GPL";
ret = bpf_load_program_xattr(&prg_attr, NULL, 0); - if (ret >= 0) { - obj->caps.global_data = 1; - close(ret); - } - close(map); - return 0; + return probe_fd(ret); + } + + static int probe_kern_btf(void) + { + static const char strs[] = "\0int"; + __u32 types[] = { + /* int */ + BTF_TYPE_INT_ENC(1, BTF_INT_SIGNED, 0, 32, 4), + }; + + return probe_fd(libbpf__load_raw_btf((char *)types, sizeof(types), + strs, sizeof(strs))); }
- static int bpf_object__probe_btf_func(struct bpf_object *obj) + static int probe_kern_btf_func(void) { static const char strs[] = "\0int\0x\0a"; /* void x(int a) {} */ @@@ -3520,20 -3659,12 +3659,12 @@@ /* FUNC x */ /* [3] */ BTF_TYPE_ENC(5, BTF_INFO_ENC(BTF_KIND_FUNC, 0, 0), 2), }; - int btf_fd;
- btf_fd = libbpf__load_raw_btf((char *)types, sizeof(types), - strs, sizeof(strs)); - if (btf_fd >= 0) { - obj->caps.btf_func = 1; - close(btf_fd); - return 1; - } - - return 0; + return probe_fd(libbpf__load_raw_btf((char *)types, sizeof(types), + strs, sizeof(strs))); }
- static int bpf_object__probe_btf_func_global(struct bpf_object *obj) + static int probe_kern_btf_func_global(void) { static const char strs[] = "\0int\0x\0a"; /* static void x(int a) {} */ @@@ -3546,20 -3677,12 +3677,12 @@@ /* FUNC x BTF_FUNC_GLOBAL */ /* [3] */ BTF_TYPE_ENC(5, BTF_INFO_ENC(BTF_KIND_FUNC, 0, BTF_FUNC_GLOBAL), 2), }; - int btf_fd;
- btf_fd = libbpf__load_raw_btf((char *)types, sizeof(types), - strs, sizeof(strs)); - if (btf_fd >= 0) { - obj->caps.btf_func_global = 1; - close(btf_fd); - return 1; - } - - return 0; + return probe_fd(libbpf__load_raw_btf((char *)types, sizeof(types), + strs, sizeof(strs))); }
- static int bpf_object__probe_btf_datasec(struct bpf_object *obj) + static int probe_kern_btf_datasec(void) { static const char strs[] = "\0x\0.data"; /* static int a; */ @@@ -3573,20 -3696,12 +3696,12 @@@ BTF_TYPE_ENC(3, BTF_INFO_ENC(BTF_KIND_DATASEC, 0, 1), 4), BTF_VAR_SECINFO_ENC(2, 0, 4), }; - int btf_fd; - - btf_fd = libbpf__load_raw_btf((char *)types, sizeof(types), - strs, sizeof(strs)); - if (btf_fd >= 0) { - obj->caps.btf_datasec = 1; - close(btf_fd); - return 1; - }
- return 0; + return probe_fd(libbpf__load_raw_btf((char *)types, sizeof(types), + strs, sizeof(strs))); }
- static int bpf_object__probe_array_mmap(struct bpf_object *obj) + static int probe_kern_array_mmap(void) { struct bpf_create_map_attr attr = { .map_type = BPF_MAP_TYPE_ARRAY, @@@ -3595,27 -3710,17 +3710,17 @@@ .value_size = sizeof(int), .max_entries = 1, }; - int fd; - - fd = bpf_create_map_xattr(&attr); - if (fd >= 0) { - obj->caps.array_mmap = 1; - close(fd); - return 1; - }
- return 0; + return probe_fd(bpf_create_map_xattr(&attr)); }
- static int - bpf_object__probe_exp_attach_type(struct bpf_object *obj) + static int probe_kern_exp_attach_type(void) { struct bpf_load_program_attr attr; struct bpf_insn insns[] = { BPF_MOV64_IMM(BPF_REG_0, 0), BPF_EXIT_INSN(), }; - int fd;
memset(&attr, 0, sizeof(attr)); /* use any valid combination of program type and (optional) @@@ -3629,36 -3734,91 +3734,91 @@@ attr.insns_cnt = ARRAY_SIZE(insns); attr.license = "GPL";
- fd = bpf_load_program_xattr(&attr, NULL, 0); - if (fd >= 0) { - obj->caps.exp_attach_type = 1; - close(fd); - return 1; - } - return 0; + return probe_fd(bpf_load_program_xattr(&attr, NULL, 0)); }
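Each load-based probe follows the same recipe: the smallest valid program (r0 = 0; exit) plus exactly one attribute under test, reduced to a boolean by probe_fd(). A self-contained sketch using libbpf's public bpf_load_program() wrapper and raw instruction initializers (the BPF_MOV64_IMM()-style macros used in the diff come from the tools tree, not public headers):

    #include <unistd.h>
    #include <linux/bpf.h>
    #include <bpf/bpf.h>

    static int probe_prog_type(enum bpf_prog_type type)
    {
            struct bpf_insn insns[] = {
                    /* r0 = 0 */
                    { .code = BPF_ALU64 | BPF_MOV | BPF_K,
                      .dst_reg = BPF_REG_0, .imm = 0 },
                    /* exit */
                    { .code = BPF_JMP | BPF_EXIT },
            };
            int fd = bpf_load_program(type, insns, 2, "GPL", 0, NULL, 0);

            if (fd >= 0)
                    close(fd);
            return fd >= 0;
    }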
- static int - bpf_object__probe_caps(struct bpf_object *obj) - { - int (*probe_fn[])(struct bpf_object *obj) = { - bpf_object__probe_name, - bpf_object__probe_global_data, - bpf_object__probe_btf_func, - bpf_object__probe_btf_func_global, - bpf_object__probe_btf_datasec, - bpf_object__probe_array_mmap, - bpf_object__probe_exp_attach_type, + static int probe_kern_probe_read_kernel(void) + { + struct bpf_load_program_attr attr; + struct bpf_insn insns[] = { + BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), /* r1 = r10 (fp) */ + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8), /* r1 += -8 */ + BPF_MOV64_IMM(BPF_REG_2, 8), /* r2 = 8 */ + BPF_MOV64_IMM(BPF_REG_3, 0), /* r3 = 0 */ + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_probe_read_kernel), + BPF_EXIT_INSN(), }; - int i, ret;
- for (i = 0; i < ARRAY_SIZE(probe_fn); i++) { - ret = probe_fn[i](obj); - if (ret < 0) - pr_debug("Probe #%d failed with %d.\n", i, ret); + memset(&attr, 0, sizeof(attr)); + attr.prog_type = BPF_PROG_TYPE_KPROBE; + attr.insns = insns; + attr.insns_cnt = ARRAY_SIZE(insns); + attr.license = "GPL"; + + return probe_fd(bpf_load_program_xattr(&attr, NULL, 0)); + } + + enum kern_feature_result { + FEAT_UNKNOWN = 0, + FEAT_SUPPORTED = 1, + FEAT_MISSING = 2, + }; + + typedef int (*feature_probe_fn)(void); + + static struct kern_feature_desc { + const char *desc; + feature_probe_fn probe; + enum kern_feature_result res; + } feature_probes[__FEAT_CNT] = { + [FEAT_PROG_NAME] = { + "BPF program name", probe_kern_prog_name, + }, + [FEAT_GLOBAL_DATA] = { + "global variables", probe_kern_global_data, + }, + [FEAT_BTF] = { + "minimal BTF", probe_kern_btf, + }, + [FEAT_BTF_FUNC] = { + "BTF functions", probe_kern_btf_func, + }, + [FEAT_BTF_GLOBAL_FUNC] = { + "BTF global function", probe_kern_btf_func_global, + }, + [FEAT_BTF_DATASEC] = { + "BTF data section and variable", probe_kern_btf_datasec, + }, + [FEAT_ARRAY_MMAP] = { + "ARRAY map mmap()", probe_kern_array_mmap, + }, + [FEAT_EXP_ATTACH_TYPE] = { + "BPF_PROG_LOAD expected_attach_type attribute", + probe_kern_exp_attach_type, + }, + [FEAT_PROBE_READ_KERN] = { + "bpf_probe_read_kernel() helper", probe_kern_probe_read_kernel, } + };
- return 0; + static bool kernel_supports(enum kern_feature_id feat_id) + { + struct kern_feature_desc *feat = &feature_probes[feat_id]; + int ret; + + if (READ_ONCE(feat->res) == FEAT_UNKNOWN) { + ret = feat->probe(); + if (ret > 0) { + WRITE_ONCE(feat->res, FEAT_SUPPORTED); + } else if (ret == 0) { + WRITE_ONCE(feat->res, FEAT_MISSING); + } else { + pr_warn("Detection of kernel %s support failed: %d\n", feat->desc, ret); + WRITE_ONCE(feat->res, FEAT_MISSING); + } + } + + return READ_ONCE(feat->res) == FEAT_SUPPORTED; }
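kernel_supports() memoizes each probe in a process-wide table: FEAT_UNKNOWN means "not probed yet", and READ_ONCE()/WRITE_ONCE() keep concurrent callers from tearing the cached value; a racing duplicate probe is harmless because every racer stores the same answer. The same pattern in a standalone sketch, with compiler atomics standing in for READ_ONCE()/WRITE_ONCE() and a hypothetical run_probe():

    enum probe_res { RES_UNKNOWN = 0, RES_YES, RES_NO };

    static int run_probe(void);     /* hypothetical: >0 yes, 0 no, <0 error */

    static int feature_supported(void)
    {
            static int res;         /* RES_UNKNOWN at program start */
            int cur = __atomic_load_n(&res, __ATOMIC_RELAXED);

            if (cur == RES_UNKNOWN) {
                    cur = run_probe() > 0 ? RES_YES : RES_NO;
                    __atomic_store_n(&res, cur, __ATOMIC_RELAXED);
            }
            return cur == RES_YES;
    }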
static bool map_is_reuse_compat(const struct bpf_map *map, int map_fd) @@@ -3760,7 -3920,7 +3920,7 @@@ static int bpf_object__create_map(struc
memset(&create_attr, 0, sizeof(create_attr));
- if (obj->caps.name) + if (kernel_supports(FEAT_PROG_NAME)) create_attr.name = map->name; create_attr.map_ifindex = map->map_ifindex; create_attr.map_type = def->type; @@@ -4011,6 -4171,10 +4171,10 @@@ struct bpf_core_spec const struct btf *btf; /* high-level spec: named fields and array indices only */ struct bpf_core_accessor spec[BPF_CORE_SPEC_MAX_LEN]; + /* original unresolved (no skip_mods_or_typedefs) root type ID */ + __u32 root_type_id; + /* CO-RE relocation kind */ + enum bpf_core_relo_kind relo_kind; /* high-level spec length */ int len; /* raw, low-level spec: 1-to-1 with accessor spec string */ @@@ -4041,8 -4205,66 +4205,66 @@@ static bool is_flex_arr(const struct bt return acc->idx == btf_vlen(t) - 1; }
+ static const char *core_relo_kind_str(enum bpf_core_relo_kind kind) + { + switch (kind) { + case BPF_FIELD_BYTE_OFFSET: return "byte_off"; + case BPF_FIELD_BYTE_SIZE: return "byte_sz"; + case BPF_FIELD_EXISTS: return "field_exists"; + case BPF_FIELD_SIGNED: return "signed"; + case BPF_FIELD_LSHIFT_U64: return "lshift_u64"; + case BPF_FIELD_RSHIFT_U64: return "rshift_u64"; + case BPF_TYPE_ID_LOCAL: return "local_type_id"; + case BPF_TYPE_ID_TARGET: return "target_type_id"; + case BPF_TYPE_EXISTS: return "type_exists"; + case BPF_TYPE_SIZE: return "type_size"; + case BPF_ENUMVAL_EXISTS: return "enumval_exists"; + case BPF_ENUMVAL_VALUE: return "enumval_value"; + default: return "unknown"; + } + } + + static bool core_relo_is_field_based(enum bpf_core_relo_kind kind) + { + switch (kind) { + case BPF_FIELD_BYTE_OFFSET: + case BPF_FIELD_BYTE_SIZE: + case BPF_FIELD_EXISTS: + case BPF_FIELD_SIGNED: + case BPF_FIELD_LSHIFT_U64: + case BPF_FIELD_RSHIFT_U64: + return true; + default: + return false; + } + } + + static bool core_relo_is_type_based(enum bpf_core_relo_kind kind) + { + switch (kind) { + case BPF_TYPE_ID_LOCAL: + case BPF_TYPE_ID_TARGET: + case BPF_TYPE_EXISTS: + case BPF_TYPE_SIZE: + return true; + default: + return false; + } + } + + static bool core_relo_is_enumval_based(enum bpf_core_relo_kind kind) + { + switch (kind) { + case BPF_ENUMVAL_EXISTS: + case BPF_ENUMVAL_VALUE: + return true; + default: + return false; + } + } + /* - * Turn bpf_field_reloc into a low- and high-level spec representation, + * Turn bpf_core_relo into a low- and high-level spec representation, * validating correctness along the way, as well as calculating resulting * field bit offset, specified by accessor string. Low-level spec captures * every single level of nestedness, including traversing anonymous @@@ -4071,10 -4293,17 +4293,17 @@@ * - field 'a' access (corresponds to '2' in low-level spec); * - array element #3 access (corresponds to '3' in low-level spec). * + * Type-based relocations (TYPE_EXISTS/TYPE_SIZE, + * TYPE_ID_LOCAL/TYPE_ID_TARGET) don't capture any field information. Their + * spec and raw_spec are kept empty. + * + * Enum value-based relocations (ENUMVAL_EXISTS/ENUMVAL_VALUE) use access + * string to specify enumerator's value index that need to be relocated. */ - static int bpf_core_spec_parse(const struct btf *btf, + static int bpf_core_parse_spec(const struct btf *btf, __u32 type_id, const char *spec_str, + enum bpf_core_relo_kind relo_kind, struct bpf_core_spec *spec) { int access_idx, parsed_len, i; @@@ -4089,6 -4318,15 +4318,15 @@@
memset(spec, 0, sizeof(*spec)); spec->btf = btf; + spec->root_type_id = type_id; + spec->relo_kind = relo_kind; + + /* type-based relocations don't have a field access string */ + if (core_relo_is_type_based(relo_kind)) { + if (strcmp(spec_str, "0")) + return -EINVAL; + return 0; + }
/* parse spec_str="0:1:2:3:4" into array raw_spec=[0, 1, 2, 3, 4] */ while (*spec_str) { @@@ -4105,16 -4343,28 +4343,28 @@@ if (spec->raw_len == 0) return -EINVAL;
- /* first spec value is always reloc type array index */ t = skip_mods_and_typedefs(btf, type_id, &id); if (!t) return -EINVAL;
access_idx = spec->raw_spec[0]; - spec->spec[0].type_id = id; - spec->spec[0].idx = access_idx; + acc = &spec->spec[0]; + acc->type_id = id; + acc->idx = access_idx; spec->len++;
+ if (core_relo_is_enumval_based(relo_kind)) { + if (!btf_is_enum(t) || spec->raw_len > 1 || access_idx >= btf_vlen(t)) + return -EINVAL; + + /* record enumerator name in a first accessor */ + acc->name = btf__name_by_offset(btf, btf_enum(t)[access_idx].name_off); + return 0; + } + + if (!core_relo_is_field_based(relo_kind)) + return -EINVAL; + sz = btf__resolve_size(btf, id); if (sz < 0) return sz; @@@ -4172,8 -4422,8 +4422,8 @@@ return sz; spec->bit_offset += access_idx * sz * 8; } else { - pr_warn("relo for [%u] %s (at idx %d) captures type [%d] of unexpected kind %d\n", - type_id, spec_str, i, id, btf_kind(t)); + pr_warn("relo for [%u] %s (at idx %d) captures type [%d] of unexpected kind %s\n", + type_id, spec_str, i, id, btf_kind_str(t)); return -EINVAL; } } @@@ -4223,16 -4473,16 +4473,16 @@@ static struct ids_vec *bpf_core_find_ca { size_t local_essent_len, targ_essent_len; const char *local_name, *targ_name; - const struct btf_type *t; + const struct btf_type *t, *local_t; struct ids_vec *cand_ids; __u32 *new_ids; int i, err, n;
- t = btf__type_by_id(local_btf, local_type_id); - if (!t) + local_t = btf__type_by_id(local_btf, local_type_id); + if (!local_t) return ERR_PTR(-EINVAL);
- local_name = btf__name_by_offset(local_btf, t->name_off); + local_name = btf__name_by_offset(local_btf, local_t->name_off); if (str_is_empty(local_name)) return ERR_PTR(-EINVAL); local_essent_len = bpf_core_essential_name_len(local_name); @@@ -4244,12 -4494,11 +4494,11 @@@ n = btf__get_nr_types(targ_btf); for (i = 1; i <= n; i++) { t = btf__type_by_id(targ_btf, i); - targ_name = btf__name_by_offset(targ_btf, t->name_off); - if (str_is_empty(targ_name)) + if (btf_kind(t) != btf_kind(local_t)) continue;
- t = skip_mods_and_typedefs(targ_btf, i, NULL); - if (!btf_is_composite(t) && !btf_is_array(t)) + targ_name = btf__name_by_offset(targ_btf, t->name_off); + if (str_is_empty(targ_name)) continue;
targ_essent_len = bpf_core_essential_name_len(targ_name); @@@ -4257,11 -4506,12 +4506,12 @@@ continue;
if (strncmp(local_name, targ_name, local_essent_len) == 0) { - pr_debug("[%d] %s: found candidate [%d] %s\n", - local_type_id, local_name, i, targ_name); - new_ids = reallocarray(cand_ids->data, - cand_ids->len + 1, - sizeof(*cand_ids->data)); + pr_debug("CO-RE relocating [%d] %s %s: found target candidate [%d] %s %s\n", + local_type_id, btf_kind_str(local_t), + local_name, i, btf_kind_str(t), targ_name); + new_ids = libbpf_reallocarray(cand_ids->data, + cand_ids->len + 1, + sizeof(*cand_ids->data)); if (!new_ids) { err = -ENOMEM; goto err_out; @@@ -4276,8 -4526,9 +4526,9 @@@ err_out return ERR_PTR(err); }
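Candidate lookup above compares "essential" names: a "___" separator starts a flavor suffix that is ignored for matching, so a local task_struct___old still matches a target task_struct. A sketch of that comparison (the in-tree bpf_core_essential_name_len() implements the same idea):

    #include <string.h>

    static size_t essential_name_len(const char *name)
    {
            const char *flavor = strstr(name, "___");

            return flavor ? (size_t)(flavor - name) : strlen(name);
    }

    static int essential_names_match(const char *local, const char *targ)
    {
            size_t len = essential_name_len(local);

            return len == essential_name_len(targ) &&
                   strncmp(local, targ, len) == 0;
    }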
- /* Check two types for compatibility, skipping const/volatile/restrict and - * typedefs, to ensure we are relocating compatible entities: + /* Check two types for compatibility for the purpose of field access + * relocation. const/volatile/restrict and typedefs are skipped to ensure we + * are relocating semantically compatible entities: * - any two STRUCTs/UNIONs are compatible and can be mixed; * - any two FWDs are compatible, if their names match (modulo flavor suffix); * - any two PTRs are always compatible; @@@ -4411,25 -4662,119 +4662,119 @@@ static int bpf_core_match_member(const /* matching named field */ struct bpf_core_accessor *targ_acc;
- targ_acc = &spec->spec[spec->len++]; - targ_acc->type_id = targ_id; - targ_acc->idx = i; - targ_acc->name = targ_name; + targ_acc = &spec->spec[spec->len++]; + targ_acc->type_id = targ_id; + targ_acc->idx = i; + targ_acc->name = targ_name; + + *next_targ_id = m->type; + found = bpf_core_fields_are_compat(local_btf, + local_member->type, + targ_btf, m->type); + if (!found) + spec->len--; /* pop accessor */ + return found; + } + /* member turned out not to be what we looked for */ + spec->bit_offset -= bit_offset; + spec->raw_len--; + } + + return 0; + } + + /* Check local and target types for compatibility. This check is used for + * type-based CO-RE relocations and follow slightly different rules than + * field-based relocations. This function assumes that root types were already + * checked for name match. Beyond that initial root-level name check, names + * are completely ignored. Compatibility rules are as follows: + * - any two STRUCTs/UNIONs/FWDs/ENUMs/INTs are considered compatible, but + * kind should match for local and target types (i.e., STRUCT is not + * compatible with UNION); + * - for ENUMs, the size is ignored; + * - for INT, size and signedness are ignored; + * - for ARRAY, dimensionality is ignored, element types are checked for + * compatibility recursively; + * - CONST/VOLATILE/RESTRICT modifiers are ignored; + * - TYPEDEFs/PTRs are compatible if types they pointing to are compatible; + * - FUNC_PROTOs are compatible if they have compatible signature: same + * number of input args and compatible return and argument types. + * These rules are not set in stone and probably will be adjusted as we get + * more experience with using BPF CO-RE relocations. + */ + static int bpf_core_types_are_compat(const struct btf *local_btf, __u32 local_id, + const struct btf *targ_btf, __u32 targ_id) + { + const struct btf_type *local_type, *targ_type; + int depth = 32; /* max recursion depth */ + + /* caller made sure that names match (ignoring flavor suffix) */ + local_type = btf__type_by_id(local_btf, local_id); + targ_type = btf__type_by_id(targ_btf, targ_id); + if (btf_kind(local_type) != btf_kind(targ_type)) + return 0; + + recur: + depth--; + if (depth < 0) + return -EINVAL;
- *next_targ_id = m->type; - found = bpf_core_fields_are_compat(local_btf, - local_member->type, - targ_btf, m->type); - if (!found) - spec->len--; /* pop accessor */ - return found; + local_type = skip_mods_and_typedefs(local_btf, local_id, &local_id); + targ_type = skip_mods_and_typedefs(targ_btf, targ_id, &targ_id); + if (!local_type || !targ_type) + return -EINVAL; + + if (btf_kind(local_type) != btf_kind(targ_type)) + return 0; + + switch (btf_kind(local_type)) { + case BTF_KIND_UNKN: + case BTF_KIND_STRUCT: + case BTF_KIND_UNION: + case BTF_KIND_ENUM: + case BTF_KIND_FWD: + return 1; + case BTF_KIND_INT: + /* just reject deprecated bitfield-like integers; all other + * integers are by default compatible between each other + */ + return btf_int_offset(local_type) == 0 && btf_int_offset(targ_type) == 0; + case BTF_KIND_PTR: + local_id = local_type->type; + targ_id = targ_type->type; + goto recur; + case BTF_KIND_ARRAY: + local_id = btf_array(local_type)->type; + targ_id = btf_array(targ_type)->type; + goto recur; + case BTF_KIND_FUNC_PROTO: { + struct btf_param *local_p = btf_params(local_type); + struct btf_param *targ_p = btf_params(targ_type); + __u16 local_vlen = btf_vlen(local_type); + __u16 targ_vlen = btf_vlen(targ_type); + int i, err; + + if (local_vlen != targ_vlen) + return 0; + + for (i = 0; i < local_vlen; i++, local_p++, targ_p++) { + skip_mods_and_typedefs(local_btf, local_p->type, &local_id); + skip_mods_and_typedefs(targ_btf, targ_p->type, &targ_id); + err = bpf_core_types_are_compat(local_btf, local_id, targ_btf, targ_id); + if (err <= 0) + return err; } - /* member turned out not to be what we looked for */ - spec->bit_offset -= bit_offset; - spec->raw_len--; - }
- return 0; + /* tail recurse for return type check */ + skip_mods_and_typedefs(local_btf, local_type->type, &local_id); + skip_mods_and_typedefs(targ_btf, targ_type->type, &targ_id); + goto recur; + } + default: + pr_warn("unexpected kind %s relocated, local [%d], target [%d]\n", + btf_kind_str(local_type), local_id, targ_id); + return 0; + } }
/* @@@ -4447,10 -4792,51 +4792,51 @@@ static int bpf_core_spec_match(struct b
memset(targ_spec, 0, sizeof(*targ_spec)); targ_spec->btf = targ_btf; + targ_spec->root_type_id = targ_id; + targ_spec->relo_kind = local_spec->relo_kind; + + if (core_relo_is_type_based(local_spec->relo_kind)) { + return bpf_core_types_are_compat(local_spec->btf, + local_spec->root_type_id, + targ_btf, targ_id); + }
local_acc = &local_spec->spec[0]; targ_acc = &targ_spec->spec[0];
+ if (core_relo_is_enumval_based(local_spec->relo_kind)) { + size_t local_essent_len, targ_essent_len; + const struct btf_enum *e; + const char *targ_name; + + /* has to resolve to an enum */ + targ_type = skip_mods_and_typedefs(targ_spec->btf, targ_id, &targ_id); + if (!btf_is_enum(targ_type)) + return 0; + + local_essent_len = bpf_core_essential_name_len(local_acc->name); + + for (i = 0, e = btf_enum(targ_type); i < btf_vlen(targ_type); i++, e++) { + targ_name = btf__name_by_offset(targ_spec->btf, e->name_off); + targ_essent_len = bpf_core_essential_name_len(targ_name); + if (targ_essent_len != local_essent_len) + continue; + if (strncmp(local_acc->name, targ_name, local_essent_len) == 0) { + targ_acc->type_id = targ_id; + targ_acc->idx = i; + targ_acc->name = targ_name; + targ_spec->len++; + targ_spec->raw_spec[targ_spec->raw_len] = targ_acc->idx; + targ_spec->raw_len++; + return 1; + } + } + return 0; + } + + if (!core_relo_is_field_based(local_spec->relo_kind)) + return -EINVAL; + for (i = 0; i < local_spec->len; i++, local_acc++, targ_acc++) { targ_type = skip_mods_and_typedefs(targ_spec->btf, targ_id, &targ_id); @@@ -4507,18 -4893,29 +4893,29 @@@ }
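Since the essential-name comparison above strips libbpf's "___" flavor suffix from the enumerator name, a program can declare a local flavor and still match the plain name in target BTF. A hedged usage sketch, assuming the bpf_core_enum_value_exists()/bpf_core_enum_value() macros from bpf_core_read.h that pair with this relocation kind (enum and values here are hypothetical):

	enum pid_type___local {
		PIDTYPE_MAX___local = 1,	/* local value is just a placeholder */
	};

	static __u64 kernel_pidtype_max(void)
	{
		/* "___local" is ignored during matching, so this resolves
		 * against the target kernel's PIDTYPE_MAX, whatever its value
		 */
		if (bpf_core_enum_value_exists(enum pid_type___local, PIDTYPE_MAX___local))
			return bpf_core_enum_value(enum pid_type___local, PIDTYPE_MAX___local);
		return 0;	/* enumerator absent in this kernel */
	}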
static int bpf_core_calc_field_relo(const struct bpf_program *prog, - const struct bpf_field_reloc *relo, + const struct bpf_core_relo *relo, const struct bpf_core_spec *spec, __u32 *val, bool *validate) { - const struct bpf_core_accessor *acc = &spec->spec[spec->len - 1]; - const struct btf_type *t = btf__type_by_id(spec->btf, acc->type_id); + const struct bpf_core_accessor *acc; + const struct btf_type *t; __u32 byte_off, byte_sz, bit_off, bit_sz; const struct btf_member *m; const struct btf_type *mt; bool bitfield; __s64 sz;
+ if (relo->kind == BPF_FIELD_EXISTS) { + *val = spec ? 1 : 0; + return 0; + } + + if (!spec) + return -EUCLEAN; /* request instruction poisoning */ + + acc = &spec->spec[spec->len - 1]; + t = btf__type_by_id(spec->btf, acc->type_id); + /* a[n] accessor needs special handling */ if (!acc->name) { if (relo->kind == BPF_FIELD_BYTE_OFFSET) { @@@ -4604,21 -5001,158 +5001,158 @@@ break; case BPF_FIELD_EXISTS: default: - pr_warn("prog '%s': unknown relo %d at insn #%d\n", - bpf_program__title(prog, false), - relo->kind, relo->insn_off / 8); - return -EINVAL; + return -EOPNOTSUPP; + } + + return 0; + } + + static int bpf_core_calc_type_relo(const struct bpf_core_relo *relo, + const struct bpf_core_spec *spec, + __u32 *val) + { + __s64 sz; + + /* type-based relos return zero when target type is not found */ + if (!spec) { + *val = 0; + return 0; + } + + switch (relo->kind) { + case BPF_TYPE_ID_TARGET: + *val = spec->root_type_id; + break; + case BPF_TYPE_EXISTS: + *val = 1; + break; + case BPF_TYPE_SIZE: + sz = btf__resolve_size(spec->btf, spec->root_type_id); + if (sz < 0) + return -EINVAL; + *val = sz; + break; + case BPF_TYPE_ID_LOCAL: + /* BPF_TYPE_ID_LOCAL is handled specially and shouldn't get here */ + default: + return -EOPNOTSUPP; + } + + return 0; + } + + static int bpf_core_calc_enumval_relo(const struct bpf_core_relo *relo, + const struct bpf_core_spec *spec, + __u32 *val) + { + const struct btf_type *t; + const struct btf_enum *e; + + switch (relo->kind) { + case BPF_ENUMVAL_EXISTS: + *val = spec ? 1 : 0; + break; + case BPF_ENUMVAL_VALUE: + if (!spec) + return -EUCLEAN; /* request instruction poisoning */ + t = btf__type_by_id(spec->btf, spec->spec[0].type_id); + e = btf_enum(t) + spec->spec[0].idx; + *val = e->val; + break; + default: + return -EOPNOTSUPP; }
return 0; }
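On the program side these type-relo values correspond to the type-level CO-RE macros; a hedged sketch, assuming bpf_core_type_exists(), bpf_core_type_size() and bpf_core_type_id_kernel() from bpf_core_read.h (the struct is hypothetical):

	struct fancy_dev___x { int id; };	/* hypothetical local flavor */

	static int describe_type(void)
	{
		/* BPF_TYPE_EXISTS: resolves to 0 rather than poisoning */
		if (!bpf_core_type_exists(struct fancy_dev___x))
			return 0;
		/* BPF_TYPE_SIZE and BPF_TYPE_ID_TARGET against kernel BTF */
		return bpf_core_type_size(struct fancy_dev___x) +
		       bpf_core_type_id_kernel(struct fancy_dev___x);
	}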
+ struct bpf_core_relo_res
+ {
+ /* expected value in the instruction, unless validate == false */
+ __u32 orig_val;
+ /* new value that needs to be patched up to */
+ __u32 new_val;
+ /* relocation unsuccessful, poison instruction, but don't fail load */
+ bool poison;
+ /* some relocations can't be validated against orig_val */
+ bool validate;
+ };
+
+ /* Calculate original and target relocation values, given local and target
+ * specs and relocation kind. These values are calculated for each candidate.
+ * If there are multiple candidates, resulting values should all be consistent
+ * with each other. Otherwise, libbpf will refuse to proceed due to ambiguity.
+ * If the instruction has to be poisoned, *poison will be set to true.
+ */
+ static int bpf_core_calc_relo(const struct bpf_program *prog,
+ const struct bpf_core_relo *relo,
+ int relo_idx,
+ const struct bpf_core_spec *local_spec,
+ const struct bpf_core_spec *targ_spec,
+ struct bpf_core_relo_res *res)
+ {
+ int err = -EOPNOTSUPP;
+
+ res->orig_val = 0;
+ res->new_val = 0;
+ res->poison = false;
+ res->validate = true;
+
+ if (core_relo_is_field_based(relo->kind)) {
+ err = bpf_core_calc_field_relo(prog, relo, local_spec, &res->orig_val, &res->validate);
+ err = err ?: bpf_core_calc_field_relo(prog, relo, targ_spec, &res->new_val, NULL);
+ } else if (core_relo_is_type_based(relo->kind)) {
+ err = bpf_core_calc_type_relo(relo, local_spec, &res->orig_val);
+ err = err ?: bpf_core_calc_type_relo(relo, targ_spec, &res->new_val);
+ } else if (core_relo_is_enumval_based(relo->kind)) {
+ err = bpf_core_calc_enumval_relo(relo, local_spec, &res->orig_val);
+ err = err ?: bpf_core_calc_enumval_relo(relo, targ_spec, &res->new_val);
+ }
+
+ if (err == -EUCLEAN) {
+ /* EUCLEAN is used to signal instruction poisoning request */
+ res->poison = true;
+ err = 0;
+ } else if (err == -EOPNOTSUPP) {
+ /* EOPNOTSUPP means unknown/unsupported relocation */
+ pr_warn("prog '%s': relo #%d: unrecognized CO-RE relocation %s (%d) at insn #%d\n",
+ bpf_program__title(prog, false), relo_idx,
+ core_relo_kind_str(relo->kind), relo->kind, relo->insn_off / 8);
+ }
+
+ return err;
+ }
+
+ /*
+ * Turn an instruction for which CO-RE relocation failed into an invalid one
+ * with a distinct signature.
+ */
+ static void bpf_core_poison_insn(struct bpf_program *prog, int relo_idx,
+ int insn_idx, struct bpf_insn *insn)
+ {
+ pr_debug("prog '%s': relo #%d: substituting insn #%d w/ invalid insn\n",
+ bpf_program__title(prog, false), relo_idx, insn_idx);
+ insn->code = BPF_JMP | BPF_CALL;
+ insn->dst_reg = 0;
+ insn->src_reg = 0;
+ insn->off = 0;
+ /* if this instruction is reachable (not dead code),
+ * verifier will complain with the following message:
+ * invalid func unknown#195896080
+ */
+ insn->imm = 195896080; /* => 0xbad2310 => "bad relo" */
+ }
+
+ static bool is_ldimm64(struct bpf_insn *insn)
+ {
+ return insn->code == (BPF_LD | BPF_IMM | BPF_DW);
+ }
+ /*
 * Patch relocatable BPF instruction.
 *
 * Patched value is determined by relocation kind and target specification.
- * For field existence relocation target spec will be NULL if field is not
- * found.
+ * For existence relocations, target spec will be NULL if the field/type is
+ * not found.
 * Expected insn->imm value is determined using relocation kind and local
 * spec, and is checked before patching instruction. If actual insn->imm value
 * is wrong, bail out with error.
@@@ -4626,58 -5160,43 +5160,43 @@@
 * Currently three kinds of BPF instructions are supported:
 * 1. 
rX = <imm> (assignment with immediate operand); * 2. rX += <imm> (arithmetic operations with immediate operand); + * 3. rX = <imm64> (load with 64-bit immediate value). */ - static int bpf_core_reloc_insn(struct bpf_program *prog, - const struct bpf_field_reloc *relo, + static int bpf_core_patch_insn(struct bpf_program *prog, + const struct bpf_core_relo *relo, int relo_idx, - const struct bpf_core_spec *local_spec, - const struct bpf_core_spec *targ_spec) + const struct bpf_core_relo_res *res) { __u32 orig_val, new_val; struct bpf_insn *insn; - bool validate = true; - int insn_idx, err; + int insn_idx; __u8 class;
- if (relo->insn_off % sizeof(struct bpf_insn)) + if (relo->insn_off % BPF_INSN_SZ) return -EINVAL; - insn_idx = relo->insn_off / sizeof(struct bpf_insn); + insn_idx = relo->insn_off / BPF_INSN_SZ; insn = &prog->insns[insn_idx]; class = BPF_CLASS(insn->code);
- if (relo->kind == BPF_FIELD_EXISTS) { - orig_val = 1; /* can't generate EXISTS relo w/o local field */ - new_val = targ_spec ? 1 : 0; - } else if (!targ_spec) { - pr_debug("prog '%s': relo #%d: substituting insn #%d w/ invalid insn\n", - bpf_program__title(prog, false), relo_idx, insn_idx); - insn->code = BPF_JMP | BPF_CALL; - insn->dst_reg = 0; - insn->src_reg = 0; - insn->off = 0; - /* if this instruction is reachable (not a dead code), - * verifier will complain with the following message: - * invalid func unknown#195896080 + if (res->poison) { + /* poison second part of ldimm64 to avoid confusing error from + * verifier about "unknown opcode 00" */ - insn->imm = 195896080; /* => 0xbad2310 => "bad relo" */ + if (is_ldimm64(insn)) + bpf_core_poison_insn(prog, relo_idx, insn_idx + 1, insn + 1); + bpf_core_poison_insn(prog, relo_idx, insn_idx, insn); return 0; - } else { - err = bpf_core_calc_field_relo(prog, relo, local_spec, - &orig_val, &validate); - if (err) - return err; - err = bpf_core_calc_field_relo(prog, relo, targ_spec, - &new_val, NULL); - if (err) - return err; }
+ orig_val = res->orig_val; + new_val = res->new_val; + switch (class) { case BPF_ALU: case BPF_ALU64: if (BPF_SRC(insn->code) != BPF_K) return -EINVAL; - if (validate && insn->imm != orig_val) { + if (res->validate && insn->imm != orig_val) { pr_warn("prog '%s': relo #%d: unexpected insn #%d (ALU/ALU64) value: got %u, exp %u -> %u\n", bpf_program__title(prog, false), relo_idx, insn_idx, insn->imm, orig_val, new_val); @@@ -4692,8 -5211,8 +5211,8 @@@ case BPF_LDX: case BPF_ST: case BPF_STX: - if (validate && insn->off != orig_val) { - pr_warn("prog '%s': relo #%d: unexpected insn #%d (LD/LDX/ST/STX) value: got %u, exp %u -> %u\n", + if (res->validate && insn->off != orig_val) { + pr_warn("prog '%s': relo #%d: unexpected insn #%d (LDX/ST/STX) value: got %u, exp %u -> %u\n", bpf_program__title(prog, false), relo_idx, insn_idx, insn->off, orig_val, new_val); return -EINVAL; @@@ -4710,8 -5229,37 +5229,37 @@@ bpf_program__title(prog, false), relo_idx, insn_idx, orig_val, new_val); break; + case BPF_LD: { + __u64 imm; + + if (!is_ldimm64(insn) || + insn[0].src_reg != 0 || insn[0].off != 0 || + insn_idx + 1 >= prog->insns_cnt || + insn[1].code != 0 || insn[1].dst_reg != 0 || + insn[1].src_reg != 0 || insn[1].off != 0) { + pr_warn("prog '%s': relo #%d: insn #%d (LDIMM64) has unexpected form\n", + bpf_program__title(prog, false), relo_idx, insn_idx); + return -EINVAL; + } + + imm = insn[0].imm + ((__u64)insn[1].imm << 32); + if (res->validate && imm != orig_val) { + pr_warn("prog '%s': relo #%d: unexpected insn #%d (LDIMM64) value: got %llu, exp %u -> %u\n", + bpf_program__title(prog, false), relo_idx, + insn_idx, (unsigned long long)imm, + orig_val, new_val); + return -EINVAL; + } + + insn[0].imm = new_val; + insn[1].imm = 0; /* currently only 32-bit values are supported */ + pr_debug("prog '%s': relo #%d: patched insn #%d (LDIMM64) imm64 %llu -> %u\n", + bpf_program__title(prog, false), relo_idx, insn_idx, + (unsigned long long)imm, new_val); + break; + } default: - pr_warn("prog '%s': relo #%d: trying to relocate unrecognized insn #%d, code:%x, src:%x, dst:%x, off:%x, imm:%x\n", + pr_warn("prog '%s': relo #%d: trying to relocate unrecognized insn #%d, code:0x%x, src:0x%x, dst:0x%x, off:0x%x, imm:0x%x\n", bpf_program__title(prog, false), relo_idx, insn_idx, insn->code, insn->src_reg, insn->dst_reg, insn->off, insn->imm); @@@ -4728,29 -5276,48 +5276,48 @@@ static void bpf_core_dump_spec(int level, const struct bpf_core_spec *spec) { const struct btf_type *t; + const struct btf_enum *e; const char *s; __u32 type_id; int i;
- type_id = spec->spec[0].type_id; + type_id = spec->root_type_id; t = btf__type_by_id(spec->btf, type_id); s = btf__name_by_offset(spec->btf, t->name_off); - libbpf_print(level, "[%u] %s + ", type_id, s);
- for (i = 0; i < spec->raw_len; i++) - libbpf_print(level, "%d%s", spec->raw_spec[i], - i == spec->raw_len - 1 ? " => " : ":"); + libbpf_print(level, "[%u] %s %s", type_id, btf_kind_str(t), str_is_empty(s) ? "<anon>" : s);
- libbpf_print(level, "%u.%u @ &x", - spec->bit_offset / 8, spec->bit_offset % 8); + if (core_relo_is_type_based(spec->relo_kind)) + return;
- for (i = 0; i < spec->len; i++) { - if (spec->spec[i].name) - libbpf_print(level, ".%s", spec->spec[i].name); - else - libbpf_print(level, "[%u]", spec->spec[i].idx); + if (core_relo_is_enumval_based(spec->relo_kind)) { + t = skip_mods_and_typedefs(spec->btf, type_id, NULL); + e = btf_enum(t) + spec->raw_spec[0]; + s = btf__name_by_offset(spec->btf, e->name_off); + + libbpf_print(level, "::%s = %u", s, e->val); + return; }
+ if (core_relo_is_field_based(spec->relo_kind)) { + for (i = 0; i < spec->len; i++) { + if (spec->spec[i].name) + libbpf_print(level, ".%s", spec->spec[i].name); + else if (i > 0 || spec->spec[i].idx > 0) + libbpf_print(level, "[%u]", spec->spec[i].idx); + } + + libbpf_print(level, " ("); + for (i = 0; i < spec->raw_len; i++) + libbpf_print(level, "%s%d", i == 0 ? "" : ":", spec->raw_spec[i]); + + if (spec->bit_offset % 8) + libbpf_print(level, " @ offset %u.%u)", + spec->bit_offset / 8, spec->bit_offset % 8); + else + libbpf_print(level, " @ offset %u)", spec->bit_offset / 8); + return; + } }
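Assembled from the format strings above, the three spec flavors print roughly as follows (type IDs, names and offsets below are invented purely to show the shape of the output):

	[138] STRUCT task_struct.group_leader (0:23 @ offset 2216)
	[52] ENUM pid_type::PIDTYPE_MAX = 4
	[97] TYPEDEF u64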
static size_t bpf_core_hash_fn(const void *key, void *ctx)
@@@ -4814,22 -5381,23 +5381,23 @@@ static void *u32_as_hash_key(__u32 x
 * CPU-wise compared to prebuilding a map from all local type names to
 * a list of candidate type names. It's also sped up by caching resolved
 * list of matching candidates for each local "root" type ID that has at
- least one bpf_field_reloc associated with it. This list is shared
+ least one bpf_core_relo associated with it. This list is shared
 * between multiple relocations for the same type ID and is updated as some
 * of the candidates are pruned due to structural incompatibility.
 */
- static int bpf_core_reloc_field(struct bpf_program *prog,
- const struct bpf_field_reloc *relo,
- int relo_idx,
- const struct btf *local_btf,
- const struct btf *targ_btf,
- struct hashmap *cand_cache)
+ static int bpf_core_apply_relo(struct bpf_program *prog,
+ const struct bpf_core_relo *relo,
+ int relo_idx,
+ const struct btf *local_btf,
+ const struct btf *targ_btf,
+ struct hashmap *cand_cache)
 {
 const char *prog_name = bpf_program__title(prog, false);
- struct bpf_core_spec local_spec, cand_spec, targ_spec;
+ struct bpf_core_spec local_spec, cand_spec, targ_spec = {};
 const void *type_key = u32_as_hash_key(relo->type_id);
- const struct btf_type *local_type, *cand_type;
- const char *local_name, *cand_name;
+ struct bpf_core_relo_res cand_res, targ_res;
+ const struct btf_type *local_type;
+ const char *local_name;
 struct ids_vec *cand_ids;
 __u32 local_id, cand_id;
 const char *spec_str;
@@@ -4841,32 -5409,49 +5409,49 @@@
 return -EINVAL;
local_name = btf__name_by_offset(local_btf, local_type->name_off); - if (str_is_empty(local_name)) + if (!local_name) return -EINVAL;
spec_str = btf__name_by_offset(local_btf, relo->access_str_off); if (str_is_empty(spec_str)) return -EINVAL;
- err = bpf_core_spec_parse(local_btf, local_id, spec_str, &local_spec); + err = bpf_core_parse_spec(local_btf, local_id, spec_str, relo->kind, &local_spec); if (err) { - pr_warn("prog '%s': relo #%d: parsing [%d] %s + %s failed: %d\n", - prog_name, relo_idx, local_id, local_name, spec_str, - err); + pr_warn("prog '%s': relo #%d: parsing [%d] %s %s + %s failed: %d\n", + prog_name, relo_idx, local_id, btf_kind_str(local_type), + str_is_empty(local_name) ? "<anon>" : local_name, + spec_str, err); return -EINVAL; }
- pr_debug("prog '%s': relo #%d: kind %d, spec is ", prog_name, relo_idx, - relo->kind); + pr_debug("prog '%s': relo #%d: kind <%s> (%d), spec is ", prog_name, + relo_idx, core_relo_kind_str(relo->kind), relo->kind); bpf_core_dump_spec(LIBBPF_DEBUG, &local_spec); libbpf_print(LIBBPF_DEBUG, "\n");
+ /* TYPE_ID_LOCAL relo is special and doesn't need candidate search */ + if (relo->kind == BPF_TYPE_ID_LOCAL) { + targ_res.validate = true; + targ_res.poison = false; + targ_res.orig_val = local_spec.root_type_id; + targ_res.new_val = local_spec.root_type_id; + goto patch_insn; + } + + /* libbpf doesn't support candidate search for anonymous types */ + if (str_is_empty(spec_str)) { + pr_warn("prog '%s': relo #%d: <%s> (%d) relocation doesn't support anonymous types\n", + prog_name, relo_idx, core_relo_kind_str(relo->kind), relo->kind); + return -EOPNOTSUPP; + } + if (!hashmap__find(cand_cache, type_key, (void **)&cand_ids)) { cand_ids = bpf_core_find_cands(local_btf, local_id, targ_btf); if (IS_ERR(cand_ids)) { - pr_warn("prog '%s': relo #%d: target candidate search failed for [%d] %s: %ld", - prog_name, relo_idx, local_id, local_name, - PTR_ERR(cand_ids)); + pr_warn("prog '%s': relo #%d: target candidate search failed for [%d] %s %s: %ld", + prog_name, relo_idx, local_id, btf_kind_str(local_type), + local_name, PTR_ERR(cand_ids)); return PTR_ERR(cand_ids); } err = hashmap__set(cand_cache, type_key, cand_ids, NULL, NULL); @@@ -4878,36 -5463,51 +5463,51 @@@
for (i = 0, j = 0; i < cand_ids->len; i++) { cand_id = cand_ids->data[i]; - cand_type = btf__type_by_id(targ_btf, cand_id); - cand_name = btf__name_by_offset(targ_btf, cand_type->name_off); - - err = bpf_core_spec_match(&local_spec, targ_btf, - cand_id, &cand_spec); - pr_debug("prog '%s': relo #%d: matching candidate #%d %s against spec ", - prog_name, relo_idx, i, cand_name); - bpf_core_dump_spec(LIBBPF_DEBUG, &cand_spec); - libbpf_print(LIBBPF_DEBUG, ": %d\n", err); + err = bpf_core_spec_match(&local_spec, targ_btf, cand_id, &cand_spec); if (err < 0) { - pr_warn("prog '%s': relo #%d: matching error: %d\n", - prog_name, relo_idx, err); + pr_warn("prog '%s': relo #%d: error matching candidate #%d ", + prog_name, relo_idx, i); + bpf_core_dump_spec(LIBBPF_WARN, &cand_spec); + libbpf_print(LIBBPF_WARN, ": %d\n", err); return err; } + + pr_debug("prog '%s': relo #%d: %s candidate #%d ", prog_name, + relo_idx, err == 0 ? "non-matching" : "matching", i); + bpf_core_dump_spec(LIBBPF_DEBUG, &cand_spec); + libbpf_print(LIBBPF_DEBUG, "\n"); + if (err == 0) continue;
+ err = bpf_core_calc_relo(prog, relo, relo_idx, &local_spec, &cand_spec, &cand_res); + if (err) + return err; + if (j == 0) { + targ_res = cand_res; targ_spec = cand_spec; } else if (cand_spec.bit_offset != targ_spec.bit_offset) { - /* if there are many candidates, they should all - * resolve to the same bit offset + /* if there are many field relo candidates, they + * should all resolve to the same bit offset */ - pr_warn("prog '%s': relo #%d: offset ambiguity: %u != %u\n", + pr_warn("prog '%s': relo #%d: field offset ambiguity: %u != %u\n", prog_name, relo_idx, cand_spec.bit_offset, targ_spec.bit_offset); return -EINVAL; + } else if (cand_res.poison != targ_res.poison || cand_res.new_val != targ_res.new_val) { + /* all candidates should result in the same relocation + * decision and value, otherwise it's dangerous to + * proceed due to ambiguity + */ + pr_warn("prog '%s': relo #%d: relocation decision ambiguity: %s %u != %s %u\n", + prog_name, relo_idx, + cand_res.poison ? "failure" : "success", cand_res.new_val, + targ_res.poison ? "failure" : "success", targ_res.new_val); + return -EINVAL; }
- cand_ids->data[j++] = cand_spec.spec[0].type_id; + cand_ids->data[j++] = cand_spec.root_type_id; }
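To make the ambiguity checks in the loop above concrete: if target BTF carried two candidates with the same essential name but different layouts, libbpf now fails instead of silently picking one. A hedged, hypothetical example:

	/* two hypothetical target candidates for local 'struct pkt { int len; }' */
	struct pkt { int len; };			/* len at bit_offset 0  */
	struct pkt___v2 { long pad; int len; };		/* len at bit_offset 64 */
	/* both match the local spec, but cand_spec.bit_offset differs
	 * (0 != 64), so bpf_core_apply_relo() returns -EINVAL
	 */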
/*
@@@ -4926,19 -5526,25 +5526,25 @@@
 * as well as an expected case, depending on whether the instruction w/
 * relocation is guarded in some way that makes it unreachable (dead
 * code) if relocation can't be resolved. This is handled in
- * bpf_core_reloc_insn() uniformly by replacing that instruction with
+ * bpf_core_patch_insn() uniformly by replacing that instruction with
 * BPF helper call insn (using invalid helper ID). If that instruction
 * is indeed unreachable, then it will be ignored and eliminated by
 * verifier. If it was an error, then verifier will complain and point
 * to a specific instruction number in its log.
 */
- if (j == 0)
- pr_debug("prog '%s': relo #%d: no matching targets found for [%d] %s + %s\n",
- prog_name, relo_idx, local_id, local_name, spec_str);
+ if (j == 0) {
+ pr_debug("prog '%s': relo #%d: no matching targets found\n",
+ prog_name, relo_idx);
+
+ /* calculate single target relo result explicitly */
+ err = bpf_core_calc_relo(prog, relo, relo_idx, &local_spec, NULL, &targ_res);
+ if (err)
+ return err;
+ }
- /* bpf_core_reloc_insn should know how to handle missing targ_spec */ - err = bpf_core_reloc_insn(prog, relo, relo_idx, &local_spec, - j ? &targ_spec : NULL); + patch_insn: + /* bpf_core_patch_insn() should know how to handle missing targ_spec */ + err = bpf_core_patch_insn(prog, relo, relo_idx, &targ_res); if (err) { pr_warn("prog '%s': relo #%d: failed to patch insn at offset %d: %d\n", prog_name, relo_idx, relo->insn_off, err); @@@ -4949,10 -5555,10 +5555,10 @@@ }
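The poisoning flow handled above is what makes guarded field accesses safe in practice; a hedged sketch, assuming bpf_core_field_exists() and BPF_CORE_READ() from bpf_core_read.h (struct and field are hypothetical):

	struct sk_buff___old { int fancy_field; };	/* hypothetical flavor */

	static int read_fancy(struct sk_buff___old *skb)
	{
		int val = 0;

		if (bpf_core_field_exists(skb->fancy_field)) {
			/* on kernels lacking the field this load is poisoned
			 * into invalid call #195896080, but the branch is then
			 * dead code and the verifier prunes it
			 */
			val = BPF_CORE_READ(skb, fancy_field);
		}
		return val;
	}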
static int - bpf_core_reloc_fields(struct bpf_object *obj, const char *targ_btf_path) + bpf_object__relocate_core(struct bpf_object *obj, const char *targ_btf_path) { const struct btf_ext_info_sec *sec; - const struct bpf_field_reloc *rec; + const struct bpf_core_relo *rec; const struct btf_ext_info *seg; struct hashmap_entry *entry; struct hashmap *cand_cache = NULL; @@@ -4961,6 -5567,9 +5567,9 @@@ const char *sec_name; int i, err = 0;
+ if (obj->btf_ext->core_relo_info.len == 0) + return 0; + if (targ_btf_path) targ_btf = btf__parse_elf(targ_btf_path, NULL); else @@@ -4976,7 -5585,7 +5585,7 @@@ goto out; }
- seg = &obj->btf_ext->field_reloc_info; + seg = &obj->btf_ext->core_relo_info; for_each_btf_ext_sec(seg, sec) { sec_name = btf__name_by_offset(obj->btf, sec->sec_name_off); if (str_is_empty(sec_name)) { @@@ -4997,15 -5606,15 +5606,15 @@@ goto out; }
- pr_debug("prog '%s': performing %d CO-RE offset relocs\n", + pr_debug("sec '%s': found %d CO-RE relocations\n", sec_name, sec->num_info);
for_each_btf_ext_rec(seg, sec, i, rec) { - err = bpf_core_reloc_field(prog, rec, i, obj->btf, - targ_btf, cand_cache); + err = bpf_core_apply_relo(prog, rec, i, obj->btf, + targ_btf, cand_cache); if (err) { pr_warn("prog '%s': relo #%d: failed to relocate: %d\n", - sec_name, i, err); + prog->name, i, err); goto out; } } @@@ -5024,17 -5633,6 +5633,6 @@@ out return err; }
- static int - bpf_object__relocate_core(struct bpf_object *obj, const char *targ_btf_path) - { - int err = 0; - - if (obj->btf_ext->field_reloc_info.len) - err = bpf_core_reloc_fields(obj, targ_btf_path); - - return err; - } - static int bpf_program__reloc_text(struct bpf_program *prog, struct bpf_object *obj, struct reloc_desc *relo) @@@ -5051,7 -5649,7 +5649,7 @@@ return -LIBBPF_ERRNO__RELOC; } new_cnt = prog->insns_cnt + text->insns_cnt; - new_insn = reallocarray(prog->insns, new_cnt, sizeof(*insn)); + new_insn = libbpf_reallocarray(prog->insns, new_cnt, sizeof(*insn)); if (!new_insn) { pr_warn("oom in prog realloc\n"); return -ENOMEM; @@@ -5136,7 -5734,8 +5734,8 @@@ bpf_program__relocate(struct bpf_progra return err; break; default: - pr_warn("relo #%d: bad relo type %d\n", i, relo->type); + pr_warn("prog '%s': relo #%d: bad relo type %d\n", + prog->name, i, relo->type); return -EINVAL; } } @@@ -5171,7 -5770,8 +5770,8 @@@ bpf_object__relocate(struct bpf_object
err = bpf_program__relocate(prog, obj); if (err) { - pr_warn("failed to relocate '%s'\n", prog->section_name); + pr_warn("prog '%s': failed to relocate data references: %d\n", + prog->name, err); return err; } break; @@@ -5186,7 -5786,8 +5786,8 @@@
err = bpf_program__relocate(prog, obj); if (err) { - pr_warn("failed to relocate '%s'\n", prog->section_name); + pr_warn("prog '%s': failed to relocate calls: %d\n", + prog->name, err); return err; } } @@@ -5203,8 -5804,8 +5804,8 @@@ static int bpf_object__collect_map_relo int i, j, nrels, new_sz; const struct btf_var_secinfo *vi = NULL; const struct btf_type *sec, *var, *def; + struct bpf_map *map = NULL, *targ_map; const struct btf_member *member; - struct bpf_map *map, *targ_map; const char *name, *mname; Elf_Data *symbols; unsigned int moff; @@@ -5230,8 -5831,7 +5831,7 @@@ i, (size_t)GELF_R_SYM(rel.r_info)); return -LIBBPF_ERRNO__FORMAT; } - name = elf_strptr(obj->efile.elf, obj->efile.strtabidx, - sym.st_name) ? : "<?>"; + name = elf_sym_str(obj, sym.st_name) ?: "<?>"; if (sym.st_shndx != obj->efile.btf_maps_shndx) { pr_warn(".maps relo #%d: '%s' isn't a BTF-defined map\n", i, name); @@@ -5293,7 -5893,7 +5893,7 @@@ moff /= bpf_ptr_sz; if (moff >= map->init_slots_sz) { new_sz = moff + 1; - tmp = realloc(map->init_slots, new_sz * host_ptr_sz); + tmp = libbpf_reallocarray(map->init_slots, new_sz, host_ptr_sz); if (!tmp) return -ENOMEM; map->init_slots = tmp; @@@ -5348,6 -5948,51 +5948,51 @@@ static int bpf_object__collect_reloc(st return 0; }
+ static bool insn_is_helper_call(struct bpf_insn *insn, enum bpf_func_id *func_id) + { + if (BPF_CLASS(insn->code) == BPF_JMP && + BPF_OP(insn->code) == BPF_CALL && + BPF_SRC(insn->code) == BPF_K && + insn->src_reg == 0 && + insn->dst_reg == 0) { + *func_id = insn->imm; + return true; + } + return false; + } + + static int bpf_object__sanitize_prog(struct bpf_object* obj, struct bpf_program *prog) + { + struct bpf_insn *insn = prog->insns; + enum bpf_func_id func_id; + int i; + + for (i = 0; i < prog->insns_cnt; i++, insn++) { + if (!insn_is_helper_call(insn, &func_id)) + continue; + + /* on kernels that don't yet support + * bpf_probe_read_{kernel,user}[_str] helpers, fall back + * to bpf_probe_read() which works well for old kernels + */ + switch (func_id) { + case BPF_FUNC_probe_read_kernel: + case BPF_FUNC_probe_read_user: + if (!kernel_supports(FEAT_PROBE_READ_KERN)) + insn->imm = BPF_FUNC_probe_read; + break; + case BPF_FUNC_probe_read_kernel_str: + case BPF_FUNC_probe_read_user_str: + if (!kernel_supports(FEAT_PROBE_READ_KERN)) + insn->imm = BPF_FUNC_probe_read_str; + break; + default: + break; + } + } + return 0; + } + static int load_program(struct bpf_program *prog, struct bpf_insn *insns, int insns_cnt, char *license, __u32 kern_version, int *pfd) @@@ -5364,12 -6009,12 +6009,12 @@@ memset(&load_attr, 0, sizeof(struct bpf_load_program_attr)); load_attr.prog_type = prog->type; /* old kernels might not support specifying expected_attach_type */ - if (!prog->caps->exp_attach_type && prog->sec_def && + if (!kernel_supports(FEAT_EXP_ATTACH_TYPE) && prog->sec_def && prog->sec_def->is_exp_attach_type_optional) load_attr.expected_attach_type = 0; else load_attr.expected_attach_type = prog->expected_attach_type; - if (prog->caps->name) + if (kernel_supports(FEAT_PROG_NAME)) load_attr.name = prog->name; load_attr.insns = insns; load_attr.insns_cnt = insns_cnt; @@@ -5387,7 -6032,7 +6032,7 @@@ } /* specify func_info/line_info only if kernel supports them */ btf_fd = bpf_object__btf_fd(prog->obj); - if (btf_fd >= 0 && prog->obj->caps.btf_func) { + if (btf_fd >= 0 && kernel_supports(FEAT_BTF_FUNC)) { load_attr.prog_btf_fd = btf_fd; load_attr.func_info = prog->func_info; load_attr.func_info_rec_size = prog->func_info_rec_size; @@@ -5425,7 -6070,7 +6070,7 @@@ retry_load free(log_buf); goto retry_load; } - ret = -errno; + ret = errno ? -errno : -LIBBPF_ERRNO__LOAD; cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg)); pr_warn("load bpf program failed: %s\n", cp); pr_perm_msg(ret); @@@ -5562,13 -6207,19 +6207,19 @@@ bpf_object__load_progs(struct bpf_objec size_t i; int err;
+ for (i = 0; i < obj->nr_programs; i++) { + prog = &obj->programs[i]; + err = bpf_object__sanitize_prog(obj, prog); + if (err) + return err; + } + for (i = 0; i < obj->nr_programs; i++) { prog = &obj->programs[i]; if (bpf_program__is_function_storage(prog, obj)) continue; if (!prog->load) { - pr_debug("prog '%s'('%s'): skipped loading\n", - prog->name, prog->section_name); + pr_debug("prog '%s': skipped loading\n", prog->name); continue; } prog->log_level |= log_level; @@@ -5641,6 -6292,8 +6292,8 @@@ __bpf_object__open(const char *path, co /* couldn't guess, but user might manually specify */ continue;
+ if (prog->sec_def->is_sleepable) + prog->prog_flags |= BPF_F_SLEEPABLE; bpf_program__set_type(prog, prog->sec_def->prog_type); bpf_program__set_expected_attach_type(prog, prog->sec_def->expected_attach_type); @@@ -5750,11 -6403,11 +6403,11 @@@ static int bpf_object__sanitize_maps(st bpf_object__for_each_map(m, obj) { if (!bpf_map__is_internal(m)) continue; - if (!obj->caps.global_data) { + if (!kernel_supports(FEAT_GLOBAL_DATA)) { pr_warn("kernel doesn't support global data\n"); return -ENOTSUP; } - if (!obj->caps.array_mmap) + if (!kernel_supports(FEAT_ARRAY_MMAP)) m->def.map_flags ^= BPF_F_MMAPABLE; }
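The bpf_object__sanitize_prog() pass shown earlier degrades helper calls by rewriting the call immediate in place; its effect on one instruction, sketched with the enum bpf_func_id values from linux/bpf.h:

	static void sketch_probe_read_fallback(struct bpf_insn *insn)
	{
		/* what libbpf does when FEAT_PROBE_READ_KERN is not supported */
		if (insn->imm == BPF_FUNC_probe_read_kernel ||
		    insn->imm == BPF_FUNC_probe_read_user)
			insn->imm = BPF_FUNC_probe_read;
	}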
@@@ -5904,7 -6557,6 +6557,6 @@@ int bpf_object__load_xattr(struct bpf_o }
err = bpf_object__probe_loading(obj); - err = err ? : bpf_object__probe_caps(obj); err = err ? : bpf_object__resolve_externs(obj, obj->kconfig); err = err ? : bpf_object__sanitize_and_load_btf(obj); err = err ? : bpf_object__sanitize_maps(obj); @@@ -6713,7 -7365,7 +7365,7 @@@ int bpf_program__fd(const struct bpf_pr
size_t bpf_program__size(const struct bpf_program *prog) { - return prog->insns_cnt * sizeof(struct bpf_insn); + return prog->insns_cnt * BPF_INSN_SZ; }
int bpf_program__set_prep(struct bpf_program *prog, int nr_instances, @@@ -6910,6 -7562,21 +7562,21 @@@ static const struct bpf_sec_def section .expected_attach_type = BPF_TRACE_FEXIT, .is_attach_btf = true, .attach_fn = attach_trace), + SEC_DEF("fentry.s/", TRACING, + .expected_attach_type = BPF_TRACE_FENTRY, + .is_attach_btf = true, + .is_sleepable = true, + .attach_fn = attach_trace), + SEC_DEF("fmod_ret.s/", TRACING, + .expected_attach_type = BPF_MODIFY_RETURN, + .is_attach_btf = true, + .is_sleepable = true, + .attach_fn = attach_trace), + SEC_DEF("fexit.s/", TRACING, + .expected_attach_type = BPF_TRACE_FEXIT, + .is_attach_btf = true, + .is_sleepable = true, + .attach_fn = attach_trace), SEC_DEF("freplace/", EXT, .is_attach_btf = true, .attach_fn = attach_trace), @@@ -6917,6 -7584,11 +7584,11 @@@ .is_attach_btf = true, .expected_attach_type = BPF_LSM_MAC, .attach_fn = attach_lsm), + SEC_DEF("lsm.s/", LSM, + .is_attach_btf = true, + .is_sleepable = true, + .expected_attach_type = BPF_LSM_MAC, + .attach_fn = attach_lsm), SEC_DEF("iter/", TRACING, .expected_attach_type = BPF_TRACE_ITER, .is_attach_btf = true, @@@ -7122,8 -7794,7 +7794,7 @@@ static int bpf_object__collect_st_ops_r return -LIBBPF_ERRNO__FORMAT; }
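With the new *.s section definitions above, sleepability is expressed purely through the section name; a hedged sketch of a sleepable LSM program, assuming the usual SEC()/BPF_PROG() macros from bpf_helpers.h and bpf_tracing.h (the hook choice is illustrative):

	SEC("lsm.s/file_open")	/* libbpf sets BPF_F_SLEEPABLE automatically */
	int BPF_PROG(check_file_open, struct file *file)
	{
		return 0;	/* body may now use sleepable-only helpers */
	}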
- name = elf_strptr(obj->efile.elf, obj->efile.strtabidx, - sym.st_name) ? : "<?>"; + name = elf_sym_str(obj, sym.st_name) ?: "<?>"; map = find_struct_ops_map_by_offset(obj, rel.r_offset); if (!map) { pr_warn("struct_ops reloc: cannot find map at rel.r_offset %zu\n", @@@ -7640,7 -8311,7 +8311,7 @@@ int bpf_prog_load_xattr(const struct bp
prog->prog_ifindex = attr->ifindex; prog->log_level = attr->log_level; - prog->prog_flags = attr->prog_flags; + prog->prog_flags |= attr->prog_flags; if (!first_prog) first_prog = prog; } @@@ -8594,7 -9265,7 +9265,7 @@@ struct perf_buffer *perf_buffer__new(in struct perf_buffer_params p = {}; struct perf_event_attr attr = { 0, };
- attr.config = PERF_COUNT_SW_BPF_OUTPUT, + attr.config = PERF_COUNT_SW_BPF_OUTPUT; attr.type = PERF_TYPE_SOFTWARE; attr.sample_type = PERF_SAMPLE_RAW; attr.sample_period = 1; @@@ -8832,6 -9503,11 +9503,11 @@@ static int perf_buffer__process_records return 0; }
+ int perf_buffer__epoll_fd(const struct perf_buffer *pb) + { + return pb->epoll_fd; + } + int perf_buffer__poll(struct perf_buffer *pb, int timeout_ms) { int i, cnt, err; @@@ -8849,6 -9525,55 +9525,55 @@@ return cnt < 0 ? -errno : cnt; }
+ /* Return number of PERF_EVENT_ARRAY map slots set up by this perf_buffer + * manager. + */ + size_t perf_buffer__buffer_cnt(const struct perf_buffer *pb) + { + return pb->cpu_cnt; + } + + /* + * Return perf_event FD of a ring buffer in *buf_idx* slot of + * PERF_EVENT_ARRAY BPF map. This FD can be polled for new data using + * select()/poll()/epoll() Linux syscalls. + */ + int perf_buffer__buffer_fd(const struct perf_buffer *pb, size_t buf_idx) + { + struct perf_cpu_buf *cpu_buf; + + if (buf_idx >= pb->cpu_cnt) + return -EINVAL; + + cpu_buf = pb->cpu_bufs[buf_idx]; + if (!cpu_buf) + return -ENOENT; + + return cpu_buf->fd; + } + + /* + * Consume data from perf ring buffer corresponding to slot *buf_idx* in + * PERF_EVENT_ARRAY BPF map without waiting/polling. If there is no data to + * consume, do nothing and return success. + * Returns: + * - 0 on success; + * - <0 on failure. + */ + int perf_buffer__consume_buffer(struct perf_buffer *pb, size_t buf_idx) + { + struct perf_cpu_buf *cpu_buf; + + if (buf_idx >= pb->cpu_cnt) + return -EINVAL; + + cpu_buf = pb->cpu_bufs[buf_idx]; + if (!cpu_buf) + return -ENOENT; + + return perf_buffer__process_records(pb, cpu_buf); + } + int perf_buffer__consume(struct perf_buffer *pb) { int i, err; @@@ -8861,7 -9586,7 +9586,7 @@@
err = perf_buffer__process_records(pb, cpu_buf); if (err) { - pr_warn("error while processing records: %d\n", err); + pr_warn("perf_buffer: failed to process records in buffer #%d: %d\n", i, err); return err; } }
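The new per-buffer accessors above allow driving the ring buffers from a caller-owned event loop rather than perf_buffer__poll(); a hedged usage sketch (pb is an already-created struct perf_buffer *, creation and callbacks elided):

	static void drain_all_buffers(struct perf_buffer *pb)
	{
		/* perf_buffer__epoll_fd(pb) could instead be plugged into an
		 * application-owned epoll loop
		 */
		size_t i, n = perf_buffer__buffer_cnt(pb);

		for (i = 0; i < n; i++) {
			if (perf_buffer__buffer_fd(pb, i) < 0)
				continue;	/* no buffer in this slot */
			perf_buffer__consume_buffer(pb, i);
		}
	}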