The following commit has been merged in the master branch:

commit 44a8c4f33c0073ca614db79f22e023811bdd0f3c
Merge: 3ab1270bfa1e8ae7db0d46fee90c5db2935ac91b c70672d8d316ebd46ea447effadfe57ab7a30a50
Author: Jakub Kicinski <kuba@kernel.org>
Date:   Fri Sep 4 21:18:58 2020 -0700
Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net
We got slightly different patches removing a double word in a comment in net/ipv4/raw.c - picked the version from net.
Simple conflict in drivers/net/ethernet/ibm/ibmvnic.c. Use cached values instead of VNIC login response buffer (following what commit 507ebe6444a4 ("ibmvnic: Fix use-after-free of VNIC login response buffer") did).
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
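For background on the ibmvnic resolution: commit 507ebe6444a4 moved readers off the login response buffer, which a parallel reset can free, and onto values cached on the adapter, and the merge resolution follows the same pattern. A minimal sketch of the idea; the field names are illustrative rather than the exact ibmvnic definitions:

/* Sketch only: prefer fields cached at login time over dereferencing
 * the response buffer, which may already have been freed. Field names
 * below are illustrative, not the exact ibmvnic structure layout.
 */
static u64 example_num_tx_queues(struct ibmvnic_adapter *adapter)
{
	/* Racy: login_rsp_buf can be freed under us by a reset.
	 * return be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs);
	 */

	/* Safe: cached while the response buffer was known valid. */
	return adapter->num_active_tx_scrqs;
}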
diff --combined MAINTAINERS
index a1c15b6714a0,dca9bfd8c888..cd4ce7977b6c
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@@ -1286,7 -1286,7 +1286,7 @@@ S: Supporte
 F: Documentation/devicetree/bindings/net/apm-xgene-enet.txt
 F: Documentation/devicetree/bindings/net/apm-xgene-mdio.txt
 F: drivers/net/ethernet/apm/xgene/
-F: drivers/net/phy/mdio-xgene.c
+F: drivers/net/mdio/mdio-xgene.c
APPLIED MICRO (APM) X-GENE SOC PMU M: Khuong Dinh khuong@os.amperecomputing.com @@@ -3205,6 -3205,7 +3205,7 @@@ S: Maintaine T: git git://git.kernel.org/pub/scm/linux/kernel/git/axboe/linux-block.git F: block/ F: drivers/block/ + F: include/linux/blk* F: kernel/trace/blktrace.c F: lib/sbitmap.c
@@@ -3388,6 -3389,7 +3389,7 @@@ M: Florian Fainelli <f.fainelli@gmail.c L: netdev@vger.kernel.org L: openwrt-devel@lists.openwrt.org (subscribers-only) S: Supported + F: Documentation/devicetree/bindings/net/dsa/b53.txt F: drivers/net/dsa/b53/* F: include/linux/platform_data/b53.h
@@@ -3573,13 -3575,28 +3575,28 @@@ L: bcm-kernel-feedback-list@broadcom.co S: Maintained F: drivers/phy/broadcom/phy-brcm-usb*
+ BROADCOM ETHERNET PHY DRIVERS + M: Florian Fainelli f.fainelli@gmail.com + L: bcm-kernel-feedback-list@broadcom.com + L: netdev@vger.kernel.org + S: Supported + F: Documentation/devicetree/bindings/net/broadcom-bcm87xx.txt + F: drivers/net/phy/bcm*.[ch] + F: drivers/net/phy/broadcom.c + F: include/linux/brcmphy.h + BROADCOM GENET ETHERNET DRIVER M: Doug Berger opendmb@gmail.com M: Florian Fainelli f.fainelli@gmail.com L: bcm-kernel-feedback-list@broadcom.com L: netdev@vger.kernel.org S: Supported + F: Documentation/devicetree/bindings/net/brcm,bcmgenet.txt + F: Documentation/devicetree/bindings/net/brcm,unimac-mdio.txt F: drivers/net/ethernet/broadcom/genet/ + F: drivers/net/mdio/mdio-bcm-unimac.c + F: include/linux/platform_data/bcmgenet.h + F: include/linux/platform_data/mdio-bcm-unimac.h
BROADCOM IPROC ARM ARCHITECTURE M: Ray Jui rjui@broadcom.com @@@ -4692,15 -4709,6 +4709,15 @@@ S: Supporte W: http://www.chelsio.com F: drivers/crypto/chelsio
+CXGB4 INLINE CRYPTO DRIVER +M: Ayush Sawal ayush.sawal@chelsio.com +M: Vinay Kumar Yadav vinay.yadav@chelsio.com +M: Rohit Maheshwari rohitm@chelsio.com +L: netdev@vger.kernel.org +S: Supported +W: http://www.chelsio.com +F: drivers/net/ethernet/chelsio/inline_crypto/ + CXGB4 ETHERNET DRIVER (CXGB4) M: Vishal Kulkarni vishal@chelsio.com L: netdev@vger.kernel.org @@@ -5248,6 -5256,7 +5265,7 @@@ DOCUMENTATIO M: Jonathan Corbet corbet@lwn.net L: linux-doc@vger.kernel.org S: Maintained + P: Documentation/doc-guide/maintainer-profile.rst T: git git://git.lwn.net/linux.git docs-next F: Documentation/ F: scripts/documentation-file-ref-check @@@ -6503,7 -6512,6 +6521,6 @@@ F: net/bridge
ETHERNET PHY LIBRARY M: Andrew Lunn andrew@lunn.ch - M: Florian Fainelli f.fainelli@gmail.com M: Heiner Kallweit hkallweit1@gmail.com R: Russell King linux@armlinux.org.uk L: netdev@vger.kernel.org @@@ -6513,14 -6521,11 +6530,14 @@@ F: Documentation/devicetree/bindings/ne F: Documentation/devicetree/bindings/net/mdio* F: Documentation/devicetree/bindings/net/qca,ar803x.yaml F: Documentation/networking/phy.rst +F: drivers/net/mdio/ +F: drivers/net/pcs/ F: drivers/net/phy/ F: drivers/of/of_mdio.c F: drivers/of/of_net.c F: include/dt-bindings/net/qca-ar803x.h F: include/linux/*mdio*.h +F: include/linux/mdio/*.h F: include/linux/of_net.h F: include/linux/phy.h F: include/linux/phy_fixed.h @@@ -10293,13 -10298,6 +10310,13 @@@ S: Maintaine W: http://linux-test-project.github.io/ T: git git://github.com/linux-test-project/ltp.git
+LYNX PCS MODULE +M: Ioana Ciornei ioana.ciornei@nxp.com +L: netdev@vger.kernel.org +S: Supported +F: drivers/net/phy/pcs-lynx.c +F: include/linux/pcs-lynx.h + M68K ARCHITECTURE M: Geert Uytterhoeven geert@linux-m68k.org L: linux-m68k@lists.linux-m68k.org @@@ -10507,7 -10505,7 +10524,7 @@@ M: Tobias Waldekranz <tobias@waldekranz L: netdev@vger.kernel.org S: Maintained F: Documentation/devicetree/bindings/net/marvell,mvusb.yaml -F: drivers/net/phy/mdio-mvusb.c +F: drivers/net/mdio/mdio-mvusb.c
MARVELL XENON MMC/SD/SDIO HOST CONTROLLER DRIVER M: Hu Ziji huziji@marvell.com @@@ -13588,12 -13586,18 +13605,18 @@@ F: kernel/events/ F: tools/lib/perf/ F: tools/perf/
- PERFORMANCE EVENTS SUBSYSTEM ARM64 PMU EVENTS + PERFORMANCE EVENTS TOOLING ARM64 R: John Garry john.garry@huawei.com R: Will Deacon will@kernel.org + R: Mathieu Poirier mathieu.poirier@linaro.org + R: Leo Yan leo.yan@linaro.org L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) S: Supported + F: tools/build/feature/test-libopencsd.c + F: tools/perf/arch/arm*/ F: tools/perf/pmu-events/arch/arm64/ + F: tools/perf/util/arm-spe* + F: tools/perf/util/cs-etm*
PERSONALITY HANDLING M: Christoph Hellwig hch@infradead.org @@@ -15656,7 -15660,6 +15679,7 @@@ L: netdev@vger.kernel.or S: Maintained F: drivers/net/phy/phylink.c F: drivers/net/phy/sfp* +F: include/linux/mdio/mdio-i2c.h F: include/linux/phylink.h F: include/linux/sfp.h K: phylink.h|struct\s+phylink|.phylink|>phylink_|phylink_(autoneg|clear|connect|create|destroy|disconnect|ethtool|helper|mac|mii|of|set|start|stop|test|validate) @@@ -16741,8 -16744,8 +16764,8 @@@ SYNOPSYS DESIGNWARE ETHERNET XPCS DRIVE M: Jose Abreu Jose.Abreu@synopsys.com L: netdev@vger.kernel.org S: Supported -F: drivers/net/phy/mdio-xpcs.c -F: include/linux/mdio-xpcs.h +F: drivers/net/pcs/pcs-xpcs.c +F: include/linux/pcs/pcs-xpcs.h
SYNOPSYS DESIGNWARE I2C DRIVER M: Jarkko Nikula jarkko.nikula@linux.intel.com @@@ -17136,8 -17139,8 +17159,8 @@@ S: Maintaine F: Documentation/devicetree/bindings/arm/keystone/ti,k3-sci-common.yaml F: Documentation/devicetree/bindings/arm/keystone/ti,sci.txt F: Documentation/devicetree/bindings/clock/ti,sci-clk.txt - F: Documentation/devicetree/bindings/interrupt-controller/ti,sci-inta.txt - F: Documentation/devicetree/bindings/interrupt-controller/ti,sci-intr.txt + F: Documentation/devicetree/bindings/interrupt-controller/ti,sci-inta.yaml + F: Documentation/devicetree/bindings/interrupt-controller/ti,sci-intr.yaml F: Documentation/devicetree/bindings/reset/ti,sci-reset.txt F: Documentation/devicetree/bindings/soc/ti/sci-pm-domain.txt F: drivers/clk/keystone/sci-clk.c @@@ -18894,6 -18897,15 +18917,15 @@@ S: Maintaine T: git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git x86/core F: arch/x86/platform
+ X86 PLATFORM UV HPE SUPERDOME FLEX + M: Steve Wahl steve.wahl@hpe.com + R: Dimitri Sivanich dimitri.sivanich@hpe.com + R: Russ Anderson russ.anderson@hpe.com + S: Supported + F: arch/x86/include/asm/uv/ + F: arch/x86/kernel/apic/x2apic_uv_x.c + F: arch/x86/platform/uv/ + X86 VDSO M: Andy Lutomirski luto@kernel.org L: linux-kernel@vger.kernel.org diff --combined drivers/net/dsa/b53/b53_common.c index 5d0618e99156,e731db900ee0..26fcff85d881 --- a/drivers/net/dsa/b53/b53_common.c +++ b/drivers/net/dsa/b53/b53_common.c @@@ -17,6 -17,8 +17,6 @@@ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
 #include <linux/delay.h>
 #include <linux/export.h>
 #include <linux/gpio.h>
@@@ -765,11 -767,8 +765,11 @@@ static int b53_switch_reset(struct b53_
 			usleep_range(1000, 2000);
 		} while (timeout-- > 0);
-		if (timeout == 0)
+		if (timeout == 0) {
+			dev_err(dev->dev,
+				"Timeout waiting for SW_RST to clear!\n");
 			return -ETIMEDOUT;
+		}
 	}
 	b53_read8(dev, B53_CTRL_PAGE, B53_SWITCH_MODE, &mgmt);
@@@ -1062,7 -1061,7 +1062,7 @@@ static void b53_force_port_config(struc
 	switch (speed) {
 	case 2000:
 		reg |= PORT_OVERRIDE_SPEED_2000M;
-		/* fallthrough */
+		fallthrough;
 	case SPEED_1000:
 		reg |= PORT_OVERRIDE_SPEED_1000M;
 		break;
@@@ -2621,9 -2620,8 +2621,9 @@@ int b53_switch_detect(struct b53_devic
 			dev->chip_id = id32;
 			break;
 		default:
-			pr_err("unsupported switch detected (BCM53%02x/BCM%x)\n",
-			       id8, id32);
+			dev_err(dev->dev,
+				"unsupported switch detected (BCM53%02x/BCM%x)\n",
+				id8, id32);
 			return -ENODEV;
 		}
 	}
@@@ -2653,8 -2651,7 +2653,8 @@@ int b53_switch_register(struct b53_devi
 	if (ret)
 		return ret;
- pr_info("found switch: %s, rev %i\n", dev->name, dev->core_rev); + dev_info(dev->dev, "found switch: %s, rev %i\n", + dev->name, dev->core_rev);
 	return dsa_register_switch(dev->ds);
 }
diff --combined drivers/net/dsa/bcm_sf2.c
index 1c7fbb6f0447,5ebff986a1ac..7a74e4d73415
--- a/drivers/net/dsa/bcm_sf2.c
+++ b/drivers/net/dsa/bcm_sf2.c
@@@ -14,7 -14,6 +14,7 @@@
 #include <linux/phy_fixed.h>
 #include <linux/phylink.h>
 #include <linux/mii.h>
+#include <linux/clk.h>
 #include <linux/of.h>
 #include <linux/of_irq.h>
 #include <linux/of_address.h>
@@@ -32,49 -31,6 +32,49 @@@
 #include "b53/b53_priv.h"
 #include "b53/b53_regs.h"
+/* Return the number of active ports, not counting the IMP (CPU) port */
+static unsigned int bcm_sf2_num_active_ports(struct dsa_switch *ds)
+{
+	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
+	unsigned int port, count = 0;
+
+	for (port = 0; port < ARRAY_SIZE(priv->port_sts); port++) {
+		if (dsa_is_cpu_port(ds, port))
+			continue;
+		if (priv->port_sts[port].enabled)
+			count++;
+	}
+
+	return count;
+}
+
+static void bcm_sf2_recalc_clock(struct dsa_switch *ds)
+{
+	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
+	unsigned long new_rate;
+	unsigned int ports_active;
+	/* Frequency in Hz */
+	const unsigned long rate_table[] = {
+		59220000,
+		60820000,
+		62500000,
+		62500000,
+	};
+
+	ports_active = bcm_sf2_num_active_ports(ds);
+	if (ports_active == 0 || !priv->clk_mdiv)
+		return;
+
+	/* If we overflow our table, just use the recommended operational
+	 * frequency
+	 */
+	if (ports_active > ARRAY_SIZE(rate_table))
+		new_rate = 90000000;
+	else
+		new_rate = rate_table[ports_active - 1];
+	clk_set_rate(priv->clk_mdiv, new_rate);
+}
+
 static void bcm_sf2_imp_setup(struct dsa_switch *ds, int port)
 {
 	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
@@@ -126,8 -82,6 +126,8 @@@
 		reg &= ~(RX_DIS | TX_DIS);
 		core_writel(priv, reg, CORE_G_PCTL_PORT(port));
 	}
+
+	priv->port_sts[port].enabled = true;
 }
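Worked through, bcm_sf2_recalc_clock() above maps the number of active (non-IMP) ports to an MDIV clock rate via rate_table, falling back to 90 MHz when more ports are active than the table covers. A stand-alone userspace re-implementation of just the lookup, for illustration only:

#include <stdio.h>

/* Re-implementation of the table lookup in bcm_sf2_recalc_clock(),
 * detached from the clk framework so it can be run and inspected. */
static unsigned long sf2_mdiv_rate(unsigned int ports_active)
{
	static const unsigned long rate_table[] = {
		59220000, 60820000, 62500000, 62500000,
	};

	if (ports_active == 0)
		return 0;	/* the driver skips clk_set_rate() entirely */
	if (ports_active > sizeof(rate_table) / sizeof(rate_table[0]))
		return 90000000;	/* recommended operational frequency */
	return rate_table[ports_active - 1];
}

int main(void)
{
	unsigned int n;

	for (n = 0; n <= 5; n++)
		printf("%u active ports -> %lu Hz\n", n, sf2_mdiv_rate(n));
	return 0;
}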
static void bcm_sf2_gphy_enable_set(struct dsa_switch *ds, bool enable) @@@ -213,10 -167,6 +213,10 @@@ static int bcm_sf2_port_setup(struct ds if (!dsa_is_user_port(ds, port)) return 0;
+ priv->port_sts[port].enabled = true; + + bcm_sf2_recalc_clock(ds); + /* Clear the memory power down */ reg = core_readl(priv, CORE_MEM_PSM_VDD_CTRL); reg &= ~P_TXQ_PSM_VDD(port); @@@ -310,10 -260,6 +310,10 @@@ static void bcm_sf2_port_disable(struc reg = core_readl(priv, CORE_MEM_PSM_VDD_CTRL); reg |= P_TXQ_PSM_VDD(port); core_writel(priv, reg, CORE_MEM_PSM_VDD_CTRL); + + priv->port_sts[port].enabled = false; + + bcm_sf2_recalc_clock(ds); }
@@@ -620,7 -566,7 +620,7 @@@ static void bcm_sf2_sw_mac_config(struc switch (state->interface) { case PHY_INTERFACE_MODE_RGMII: id_mode_dis = 1; - /* fallthrough */ + fallthrough; case PHY_INTERFACE_MODE_RGMII_TXID: port_mode = EXT_GPHY; break; @@@ -804,9 -750,6 +804,9 @@@ static int bcm_sf2_sw_suspend(struct ds bcm_sf2_port_disable(ds, port); }
+ if (!priv->wol_ports_mask) + clk_disable_unprepare(priv->clk); + return 0; }
@@@ -815,9 -758,6 +815,9 @@@ static int bcm_sf2_sw_resume(struct dsa struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds); int ret;
+ if (!priv->wol_ports_mask) + clk_prepare_enable(priv->clk); + ret = bcm_sf2_sw_rst(priv); if (ret) { pr_err("%s: failed to software reset switch\n", __func__); @@@ -1249,24 -1189,10 +1249,24 @@@ static int bcm_sf2_sw_probe(struct plat base++; }
+ priv->clk = devm_clk_get_optional(&pdev->dev, "sw_switch"); + if (IS_ERR(priv->clk)) + return PTR_ERR(priv->clk); + + clk_prepare_enable(priv->clk); + + priv->clk_mdiv = devm_clk_get_optional(&pdev->dev, "sw_switch_mdiv"); + if (IS_ERR(priv->clk_mdiv)) { + ret = PTR_ERR(priv->clk_mdiv); + goto out_clk; + } + + clk_prepare_enable(priv->clk_mdiv); + ret = bcm_sf2_sw_rst(priv); if (ret) { pr_err("unable to software reset switch: %d\n", ret); - return ret; + goto out_clk_mdiv; }
bcm_sf2_gphy_enable_set(priv->dev->ds, true); @@@ -1274,7 -1200,7 +1274,7 @@@ ret = bcm_sf2_mdio_register(ds); if (ret) { pr_err("failed to register MDIO bus\n"); - return ret; + goto out_clk_mdiv; }
bcm_sf2_gphy_enable_set(priv->dev->ds, false); @@@ -1341,10 -1267,6 +1341,10 @@@
out_mdio: bcm_sf2_mdio_unregister(priv); +out_clk_mdiv: + clk_disable_unprepare(priv->clk_mdiv); +out_clk: + clk_disable_unprepare(priv->clk); return ret; }
@@@ -1358,8 -1280,6 +1358,8 @@@ static int bcm_sf2_sw_remove(struct pla dsa_unregister_switch(priv->dev->ds); bcm_sf2_cfp_exit(priv->dev->ds); bcm_sf2_mdio_unregister(priv); + clk_disable_unprepare(priv->clk_mdiv); + clk_disable_unprepare(priv->clk); if (priv->type == BCM7278_DEVICE_ID && !IS_ERR(priv->rcdev)) reset_control_assert(priv->rcdev);
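The bcm_sf2 probe/remove hunks follow the usual optional-clock pattern: devm_clk_get_optional() returns NULL rather than an error when the clock is not described in the device tree, and the clk_* calls accept a NULL clock, so the driver works with or without "sw_switch"/"sw_switch_mdiv". A condensed sketch of the probe-path handling; example_reset_switch() is a hypothetical stand-in for bcm_sf2_sw_rst() and the rest of probe:

/* Condensed sketch of the probe-path clock handling added above. */
static int example_probe(struct platform_device *pdev)
{
	struct clk *clk, *clk_mdiv;
	int ret;

	clk = devm_clk_get_optional(&pdev->dev, "sw_switch");
	if (IS_ERR(clk))
		return PTR_ERR(clk);
	clk_prepare_enable(clk);

	clk_mdiv = devm_clk_get_optional(&pdev->dev, "sw_switch_mdiv");
	if (IS_ERR(clk_mdiv)) {
		ret = PTR_ERR(clk_mdiv);
		goto out_clk;
	}
	clk_prepare_enable(clk_mdiv);

	ret = example_reset_switch(pdev);	/* hypothetical stand-in */
	if (ret)
		goto out_clk_mdiv;

	return 0;

out_clk_mdiv:
	clk_disable_unprepare(clk_mdiv);
out_clk:
	clk_disable_unprepare(clk);
	return ret;
}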
diff --combined drivers/net/dsa/mv88e6xxx/chip.c index 895d7b6dba2d,f0dbc05e30a4..15b97a4f8d93 --- a/drivers/net/dsa/mv88e6xxx/chip.c +++ b/drivers/net/dsa/mv88e6xxx/chip.c @@@ -875,7 -875,7 +875,7 @@@ static uint64_t _mv88e6xxx_get_ethtool_ break; case STATS_TYPE_BANK1: reg = bank1_select; - /* fall through */ + fallthrough; case STATS_TYPE_BANK0: reg |= s->reg | histogram; mv88e6xxx_g1_stats_read(chip, reg, &low); @@@ -3329,6 -3329,12 +3329,6 @@@ static int mv88e6xxx_mdio_register(stru return 0; }
-static const struct of_device_id mv88e6xxx_mdio_external_match[] = { - { .compatible = "marvell,mv88e6xxx-mdio-external", - .data = (void *)true }, - { }, -}; - static void mv88e6xxx_mdios_unregister(struct mv88e6xxx_chip *chip)
{ @@@ -3348,6 -3354,7 +3348,6 @@@ static int mv88e6xxx_mdios_register(struct mv88e6xxx_chip *chip, struct device_node *np) { - const struct of_device_id *match; struct device_node *child; int err;
@@@ -3365,8 -3372,8 +3365,8 @@@ * bus. */ for_each_available_child_of_node(np, child) { - match = of_match_node(mv88e6xxx_mdio_external_match, child); - if (match) { + if (of_device_is_compatible( + child, "marvell,mv88e6xxx-mdio-external")) { err = mv88e6xxx_mdio_register(chip, child, true); if (err) { mv88e6xxx_mdios_unregister(chip); diff --combined drivers/net/dsa/ocelot/felix.c index ccc0427faf02,04bfa6e465ff..a1e1d3824110 --- a/drivers/net/dsa/ocelot/felix.c +++ b/drivers/net/dsa/ocelot/felix.c @@@ -19,7 -19,6 +19,7 @@@ #include <linux/of_net.h> #include <linux/pci.h> #include <linux/of.h> +#include <linux/pcs-lynx.h> #include <net/pkt_sched.h> #include <net/dsa.h> #include "felix.h" @@@ -197,16 -196,27 +197,16 @@@ static void felix_phylink_validate(stru felix->info->phylink_validate(ocelot, port, supported, state); }
-static int felix_phylink_mac_pcs_get_state(struct dsa_switch *ds, int port, - struct phylink_link_state *state) -{ - struct ocelot *ocelot = ds->priv; - struct felix *felix = ocelot_to_felix(ocelot); - - if (felix->info->pcs_link_state) - felix->info->pcs_link_state(ocelot, port, state); - - return 0; -} - static void felix_phylink_mac_config(struct dsa_switch *ds, int port, unsigned int link_an_mode, const struct phylink_link_state *state) { struct ocelot *ocelot = ds->priv; struct felix *felix = ocelot_to_felix(ocelot); + struct dsa_port *dp = dsa_to_port(ds, port);
- if (felix->info->pcs_config) - felix->info->pcs_config(ocelot, port, link_an_mode, state); + if (felix->pcs[port]) + phylink_set_pcs(dp->pl, &felix->pcs[port]->pcs); }
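felix now hands PCS handling to phylink: rather than driver-level pcs_config/pcs_link_state/pcs_link_up hooks, it registers a phylink_pcs object (here the Lynx PCS from the new pcs-lynx module) and phylink invokes the PCS operations itself. That is also why the ds->pcs_poll workaround disappears in the felix_setup() hunk below. A sketch of the attachment; struct example_priv is illustrative, standing in for felix's per-port array of struct lynx_pcs pointers:

/* Sketch: attach a PCS so phylink drives its configuration and
 * link-state polling. struct lynx_pcs (from the pcs-lynx.h include
 * added above) embeds the struct phylink_pcs handed to phylink. */
struct example_priv {
	struct lynx_pcs *pcs[8];	/* illustrative port count */
};

static void example_mac_config(struct dsa_switch *ds, int port,
			       unsigned int mode,
			       const struct phylink_link_state *state)
{
	struct example_priv *priv = ds->priv;
	struct dsa_port *dp = dsa_to_port(ds, port);

	if (priv->pcs[port])
		phylink_set_pcs(dp->pl, &priv->pcs[port]->pcs);
}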
static void felix_phylink_mac_link_down(struct dsa_switch *ds, int port, @@@ -296,6 -306,10 +296,6 @@@ static void felix_phylink_mac_link_up(s ocelot_fields_write(ocelot, port, QSYS_SWITCH_PORT_MODE_PORT_ENA, 1);
- if (felix->info->pcs_link_up) - felix->info->pcs_link_up(ocelot, port, link_an_mode, interface, - speed, duplex); - if (felix->info->port_sched_speed_set) felix->info->port_sched_speed_set(ocelot, port, speed); } @@@ -386,6 -400,7 +386,7 @@@ static int felix_parse_ports_node(struc if (err < 0) { dev_err(dev, "Unsupported PHY mode %s on port %d\n", phy_modes(phy_mode), port); + of_node_put(child); return err; }
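The of_node_put() added above fixes a device-node refcount leak: each node handed out by for_each_available_child_of_node() carries a reference that the loop normally drops on the next iteration, so an early return must drop it by hand. Shape of the rule, with example_parse_one() as a hypothetical helper:

/* Sketch of the refcount rule behind the fix above. */
static int example_parse_children(struct device_node *np)
{
	struct device_node *child;
	int err;

	for_each_available_child_of_node(np, child) {
		err = example_parse_one(child);	/* hypothetical helper */
		if (err) {
			of_node_put(child);	/* balance the loop's get */
			return err;
		}
	}
	return 0;
}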
@@@ -612,6 -627,11 +613,6 @@@ static int felix_setup(struct dsa_switc
ds->mtu_enforcement_ingress = true; ds->configure_vlan_while_not_filtering = true; - /* It looks like the MAC/PCS interrupt register - PM0_IEVENT (0x8040) - * isn't instantiated for the Felix PF. - * In-band AN may take a few ms to complete, so we need to poll. - */ - ds->pcs_poll = true;
return 0; } @@@ -767,6 -787,7 +768,6 @@@ const struct dsa_switch_ops felix_switc .get_sset_count = felix_get_sset_count, .get_ts_info = felix_get_ts_info, .phylink_validate = felix_phylink_validate, - .phylink_mac_link_state = felix_phylink_mac_pcs_get_state, .phylink_mac_config = felix_phylink_mac_config, .phylink_mac_link_down = felix_phylink_mac_link_down, .phylink_mac_link_up = felix_phylink_mac_link_up, diff --combined drivers/net/ethernet/8390/axnet_cs.c index a001bc902359,a00b36f91d9f..2488bfdb9133 --- a/drivers/net/ethernet/8390/axnet_cs.c +++ b/drivers/net/ethernet/8390/axnet_cs.c @@@ -610,7 -610,7 +610,7 @@@ static int axnet_ioctl(struct net_devic switch (cmd) { case SIOCGMIIPHY: data->phy_id = info->phy_id; - /* Fall through */ + fallthrough; case SIOCGMIIREG: /* Read MII PHY register. */ data->val_out = mdio_read(mii_addr, data->phy_id, data->reg_num & 0x1f); return 0; @@@ -657,10 -657,8 +657,10 @@@ static void block_input(struct net_devi outb_p(E8390_RREAD+E8390_START, nic_base + AXNET_CMD);
 	insw(nic_base + AXNET_DATAPORT,buf,count>>1);
-	if (count & 0x01)
-		buf[count-1] = inb(nic_base + AXNET_DATAPORT), xfer_count++;
+	if (count & 0x01) {
+		buf[count-1] = inb(nic_base + AXNET_DATAPORT);
+		xfer_count++;
+	}
}
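Both 8390 hunks replace comma-operator statements with braced blocks. The comma form was load-bearing: it kept two side effects inside one guarded statement, and braces preserve those semantics while making the guard explicit. A stand-alone illustration of the pitfall the braces avoid:

#include <stdio.h>

int main(void)
{
	int a = 0, b = 0;

	/* Comma operator: one statement, so both effects are guarded. */
	if (a == 0)
		a = 1, b = 1;

	/* A naive split without braces would unguard the second effect:
	 *
	 *	if (cond)
	 *		a = 1;
	 *		b = 1;		// always runs, guard or not
	 *
	 * hence the braced blocks in the hunks above.
	 */
	printf("a=%d b=%d\n", a, b);	/* prints a=1 b=1 */
	return 0;
}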
@@@ -1272,12 -1270,10 +1272,12 @@@ static void ei_tx_intr(struct net_devic
 			ei_local->txing = 1;
 			NS8390_trigger_send(dev, ei_local->tx2, ei_local->tx_start_page + 6);
 			netif_trans_update(dev);
-			ei_local->tx2 = -1,
+			ei_local->tx2 = -1;
 			ei_local->lasttx = 2;
+		} else {
+			ei_local->lasttx = 20;
+			ei_local->txing = 0;
 		}
-		else ei_local->lasttx = 20, ei_local->txing = 0;
 	}
 	else if (ei_local->tx2 < 0) {
@@@ -1293,10 -1289,9 +1293,10 @@@
 			netif_trans_update(dev);
 			ei_local->tx1 = -1;
 			ei_local->lasttx = 1;
+		} else {
+			ei_local->lasttx = 10;
+			ei_local->txing = 0;
 		}
-		else
-			ei_local->lasttx = 10, ei_local->txing = 0;
 	}
 //	else
 //		netdev_warn(dev, "unexpected TX-done interrupt, lasttx=%d\n",
diff --combined drivers/net/ethernet/8390/pcnet_cs.c
index c383f16889f4,164c3ed550bf..9d3b1e0e425c
--- a/drivers/net/ethernet/8390/pcnet_cs.c
+++ b/drivers/net/ethernet/8390/pcnet_cs.c
@@@ -1108,7 -1108,7 +1108,7 @@@ static int ei_ioctl(struct net_device *
 	switch (cmd) {
 	case SIOCGMIIPHY:
 		data->phy_id = info->phy_id;
-		/* fall through */
+		fallthrough;
 	case SIOCGMIIREG:		/* Read MII PHY register. */
 		data->val_out = mdio_read(mii_addr, data->phy_id, data->reg_num & 0x1f);
 		return 0;
@@@ -1178,10 -1178,8 +1178,10 @@@ static void dma_block_input(struct net_
 	outb_p(E8390_RREAD+E8390_START, nic_base + PCNET_CMD);
 	insw(nic_base + PCNET_DATAPORT,buf,count>>1);
-	if (count & 0x01)
-		buf[count-1] = inb(nic_base + PCNET_DATAPORT), xfer_count++;
+	if (count & 0x01) {
+		buf[count-1] = inb(nic_base + PCNET_DATAPORT);
+		xfer_count++;
+	}
/* This was for the ALPHA version only, but enough people have been encountering problems that it is still here. */ diff --combined drivers/net/ethernet/broadcom/bcmsysport.c index b25c70b74c92,0762d5d1a810..0fdd19d99d99 --- a/drivers/net/ethernet/broadcom/bcmsysport.c +++ b/drivers/net/ethernet/broadcom/bcmsysport.c @@@ -20,7 -20,6 +20,7 @@@ #include <linux/phy.h> #include <linux/phy_fixed.h> #include <net/dsa.h> +#include <linux/clk.h> #include <net/ip.h> #include <net/ipv6.h>
@@@ -187,11 -186,6 +187,11 @@@ static int bcm_sysport_set_features(str netdev_features_t features) { struct bcm_sysport_priv *priv = netdev_priv(dev); + int ret; + + ret = clk_prepare_enable(priv->clk); + if (ret) + return ret;
/* Read CRC forward */ if (!priv->is_lite) @@@ -203,8 -197,6 +203,8 @@@ bcm_sysport_set_rx_csum(dev, features); bcm_sysport_set_tx_csum(dev, features);
+ clk_disable_unprepare(priv->clk); + return 0; }
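bcm_sysport_set_features() now brackets its register accesses with clock enable/disable, since with the newly managed clocks the block may be gated when the callback runs; the same bracketing appears in open/stop and probe below. The general shape, sketched with a hypothetical example_priv:

/* Sketch of the clock bracketing pattern above: keep the block
 * clocked for the duration of the MMIO access, then release it. */
static int example_touch_registers(struct example_priv *priv)
{
	int ret;

	ret = clk_prepare_enable(priv->clk);
	if (ret)
		return ret;

	/* ... register reads/writes that need the clock running ... */

	clk_disable_unprepare(priv->clk);
	return 0;
}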
@@@ -1948,8 -1940,6 +1948,8 @@@ static int bcm_sysport_open(struct net_ unsigned int i; int ret;
+ clk_prepare_enable(priv->clk); + /* Reset UniMAC */ umac_reset(priv);
@@@ -1980,8 -1970,7 +1980,8 @@@ 0, priv->phy_interface); if (!phydev) { netdev_err(dev, "could not attach to PHY\n"); - return -ENODEV; + ret = -ENODEV; + goto out_clk_disable; }
/* Reset house keeping link status */ @@@ -2059,8 -2048,6 +2059,8 @@@ out_free_irq0 free_irq(priv->irq0, dev); out_phy_disconnect: phy_disconnect(phydev); +out_clk_disable: + clk_disable_unprepare(priv->clk); return ret; }
@@@ -2119,8 -2106,6 +2119,8 @@@ static int bcm_sysport_stop(struct net_ /* Disconnect from PHY */ phy_disconnect(dev->phydev);
+ clk_disable_unprepare(priv->clk); + return 0; }
@@@ -2502,16 -2487,14 +2502,18 @@@ static int bcm_sysport_probe(struct pla /* Initialize private members */ priv = netdev_priv(dev);
+ priv->clk = devm_clk_get_optional(&pdev->dev, "sw_sysport"); + if (IS_ERR(priv->clk)) + return PTR_ERR(priv->clk); + /* Allocate number of TX rings */ priv->tx_rings = devm_kcalloc(&pdev->dev, txq, sizeof(struct bcm_sysport_tx_ring), GFP_KERNEL); - if (!priv->tx_rings) - return -ENOMEM; + if (!priv->tx_rings) { + ret = -ENOMEM; + goto err_free_netdev; + }
priv->is_lite = params->is_lite; priv->num_rx_desc_words = params->num_rx_desc_words; @@@ -2583,10 -2566,6 +2585,10 @@@ if (!ret) device_set_wakeup_capable(&pdev->dev, 1);
+ priv->wol_clk = devm_clk_get_optional(&pdev->dev, "sw_sysportwol"); + if (IS_ERR(priv->wol_clk)) + return PTR_ERR(priv->wol_clk); + /* Set the needed headroom once and for all */ BUILD_BUG_ON(sizeof(struct bcm_tsb) != 8); dev->needed_headroom += sizeof(struct bcm_tsb); @@@ -2611,8 -2590,6 +2613,8 @@@ goto err_deregister_notifier; }
+ clk_prepare_enable(priv->clk); + priv->rev = topctrl_readl(priv, REV_CNTL) & REV_MASK; dev_info(&pdev->dev, "Broadcom SYSTEMPORT%s " REV_FMT @@@ -2621,8 -2598,6 +2623,8 @@@ (priv->rev >> 8) & 0xff, priv->rev & 0xff, priv->irq0, priv->irq1, txq, rxq);
+ clk_disable_unprepare(priv->clk); + return 0;
err_deregister_notifier: @@@ -2776,12 -2751,8 +2778,12 @@@ static int __maybe_unused bcm_sysport_s bcm_sysport_fini_rx_ring(priv);
/* Get prepared for Wake-on-LAN */ - if (device_may_wakeup(d) && priv->wolopts) + if (device_may_wakeup(d) && priv->wolopts) { + clk_prepare_enable(priv->wol_clk); ret = bcm_sysport_suspend_to_wol(priv); + } + + clk_disable_unprepare(priv->clk);
return ret; } @@@ -2796,10 -2767,6 +2798,10 @@@ static int __maybe_unused bcm_sysport_r if (!netif_running(dev)) return 0;
+ clk_prepare_enable(priv->clk); + if (priv->wolopts) + clk_disable_unprepare(priv->wol_clk); + umac_reset(priv);
/* Disable the UniMAC RX/TX */ @@@ -2879,7 -2846,6 +2881,7 @@@ out_free_rx_ring out_free_tx_rings: for (i = 0; i < dev->num_tx_queues; i++) bcm_sysport_fini_tx_ring(priv, i); + clk_disable_unprepare(priv->clk); return ret; }
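Across suspend/resume the driver hands off between its two clocks: with Wake-on-LAN armed it enables the dedicated WoL clock before dropping the main one, so only the wake-up logic stays clocked, and resume reverses the order. A schematic sketch (names follow the hunks, error handling elided):

/* Schematic suspend/resume clock hand-off, illustration only. */
static int example_suspend(struct example_priv *priv, bool wol_armed)
{
	if (wol_armed)
		clk_prepare_enable(priv->wol_clk);
	clk_disable_unprepare(priv->clk);
	return 0;
}

static int example_resume(struct example_priv *priv, bool wol_armed)
{
	clk_prepare_enable(priv->clk);
	if (wol_armed)
		clk_disable_unprepare(priv->wol_clk);
	return 0;
}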
diff --combined drivers/net/ethernet/chelsio/cxgb4/t4_hw.c index e49370f9d59b,fa3367966f4b..98d01a7497ec --- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c @@@ -4745,11 -4745,9 +4745,11 @@@ static void le_intr_handler(struct adap static struct intr_info t6_le_intr_info[] = { { T6_LIPMISS_F, "LE LIP miss", -1, 0 }, { T6_LIP0_F, "LE 0 LIP error", -1, 0 }, + { CMDTIDERR_F, "LE cmd tid error", -1, 1 }, { TCAMINTPERR_F, "LE parity error", -1, 1 }, { T6_UNKNOWNCMD_F, "LE unknown command", -1, 1 }, { SSRAMINTPERR_F, "LE request queue parity error", -1, 1 }, + { HASHTBLMEMCRCERR_F, "LE hash table mem crc error", -1, 0 }, { 0 } };
@@@ -7658,13 -7656,13 +7658,13 @@@ int t4_alloc_vi(struct adapter *adap, u switch (nmac) { case 5: memcpy(mac + 24, c.nmac3, sizeof(c.nmac3)); - /* Fall through */ + fallthrough; case 4: memcpy(mac + 18, c.nmac2, sizeof(c.nmac2)); - /* Fall through */ + fallthrough; case 3: memcpy(mac + 12, c.nmac1, sizeof(c.nmac1)); - /* Fall through */ + fallthrough; case 2: memcpy(mac + 6, c.nmac0, sizeof(c.nmac0)); } diff --combined drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c index cb3083d2b4ab,cf5383bb8331..ceaf76158e23 --- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c +++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c @@@ -40,9 -40,9 +40,9 @@@ static void *dpaa2_iova_to_virt(struct return phys_to_virt(phys_addr); }
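The scattered /* fall through */ conversions in this merge use the fallthrough pseudo-keyword from <linux/compiler_attributes.h>, which expands to __attribute__((__fallthrough__)) on compilers that support it; unlike the comment form, it is recognized by clang's -Wimplicit-fallthrough as well as gcc's. The t4_alloc_vi() switch above, as converted:

/* fallthrough marks an intentional drop-through so the compiler only
 * flags the unintentional ones; comment annotations are invisible to
 * clang's -Wimplicit-fallthrough. */
switch (nmac) {
case 5:
	memcpy(mac + 24, c.nmac3, sizeof(c.nmac3));
	fallthrough;
case 4:
	memcpy(mac + 18, c.nmac2, sizeof(c.nmac2));
	fallthrough;
case 3:
	memcpy(mac + 12, c.nmac1, sizeof(c.nmac1));
	fallthrough;
case 2:
	memcpy(mac + 6, c.nmac0, sizeof(c.nmac0));
}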
-static void validate_rx_csum(struct dpaa2_eth_priv *priv, - u32 fd_status, - struct sk_buff *skb) +static void dpaa2_eth_validate_rx_csum(struct dpaa2_eth_priv *priv, + u32 fd_status, + struct sk_buff *skb) { skb_checksum_none_assert(skb);
@@@ -62,9 -62,9 +62,9 @@@ /* Free a received FD. * Not to be used for Tx conf FDs or on any other paths. */ -static void free_rx_fd(struct dpaa2_eth_priv *priv, - const struct dpaa2_fd *fd, - void *vaddr) +static void dpaa2_eth_free_rx_fd(struct dpaa2_eth_priv *priv, + const struct dpaa2_fd *fd, + void *vaddr) { struct device *dev = priv->net_dev->dev.parent; dma_addr_t addr = dpaa2_fd_get_addr(fd); @@@ -100,9 -100,9 +100,9 @@@ free_buf }
/* Build a linear skb based on a single-buffer frame descriptor */ -static struct sk_buff *build_linear_skb(struct dpaa2_eth_channel *ch, - const struct dpaa2_fd *fd, - void *fd_vaddr) +static struct sk_buff *dpaa2_eth_build_linear_skb(struct dpaa2_eth_channel *ch, + const struct dpaa2_fd *fd, + void *fd_vaddr) { struct sk_buff *skb = NULL; u16 fd_offset = dpaa2_fd_get_offset(fd); @@@ -121,9 -121,9 +121,9 @@@ }
/* Build a non linear (fragmented) skb based on a S/G table */ -static struct sk_buff *build_frag_skb(struct dpaa2_eth_priv *priv, - struct dpaa2_eth_channel *ch, - struct dpaa2_sg_entry *sgt) +static struct sk_buff *dpaa2_eth_build_frag_skb(struct dpaa2_eth_priv *priv, + struct dpaa2_eth_channel *ch, + struct dpaa2_sg_entry *sgt) { struct sk_buff *skb = NULL; struct device *dev = priv->net_dev->dev.parent; @@@ -204,8 -204,7 +204,8 @@@ /* Free buffers acquired from the buffer pool or which were meant to * be released in the pool */ -static void free_bufs(struct dpaa2_eth_priv *priv, u64 *buf_array, int count) +static void dpaa2_eth_free_bufs(struct dpaa2_eth_priv *priv, u64 *buf_array, + int count) { struct device *dev = priv->net_dev->dev.parent; void *vaddr; @@@ -219,9 -218,9 +219,9 @@@ } }
-static void xdp_release_buf(struct dpaa2_eth_priv *priv, - struct dpaa2_eth_channel *ch, - dma_addr_t addr) +static void dpaa2_eth_xdp_release_buf(struct dpaa2_eth_priv *priv, + struct dpaa2_eth_channel *ch, + dma_addr_t addr) { int retries = 0; int err; @@@ -239,7 -238,7 +239,7 @@@ }
if (err) { - free_bufs(priv, ch->xdp.drop_bufs, ch->xdp.drop_cnt); + dpaa2_eth_free_bufs(priv, ch->xdp.drop_bufs, ch->xdp.drop_cnt); ch->buf_count -= ch->xdp.drop_cnt; }
@@@ -275,9 -274,9 +275,9 @@@ static int dpaa2_eth_xdp_flush(struct d return total_enqueued; }
-static void xdp_tx_flush(struct dpaa2_eth_priv *priv, - struct dpaa2_eth_channel *ch, - struct dpaa2_eth_fq *fq) +static void dpaa2_eth_xdp_tx_flush(struct dpaa2_eth_priv *priv, + struct dpaa2_eth_channel *ch, + struct dpaa2_eth_fq *fq) { struct rtnl_link_stats64 *percpu_stats; struct dpaa2_fd *fds; @@@ -296,17 -295,17 +296,17 @@@ ch->stats.xdp_tx++; } for (i = enqueued; i < fq->xdp_tx_fds.num; i++) { - xdp_release_buf(priv, ch, dpaa2_fd_get_addr(&fds[i])); + dpaa2_eth_xdp_release_buf(priv, ch, dpaa2_fd_get_addr(&fds[i])); percpu_stats->tx_errors++; ch->stats.xdp_tx_err++; } fq->xdp_tx_fds.num = 0; }
-static void xdp_enqueue(struct dpaa2_eth_priv *priv, - struct dpaa2_eth_channel *ch, - struct dpaa2_fd *fd, - void *buf_start, u16 queue_id) +static void dpaa2_eth_xdp_enqueue(struct dpaa2_eth_priv *priv, + struct dpaa2_eth_channel *ch, + struct dpaa2_fd *fd, + void *buf_start, u16 queue_id) { struct dpaa2_faead *faead; struct dpaa2_fd *dest_fd; @@@ -334,13 -333,13 +334,13 @@@ if (fq->xdp_tx_fds.num < DEV_MAP_BULK_SIZE) return;
- xdp_tx_flush(priv, ch, fq); + dpaa2_eth_xdp_tx_flush(priv, ch, fq); }
-static u32 run_xdp(struct dpaa2_eth_priv *priv, - struct dpaa2_eth_channel *ch, - struct dpaa2_eth_fq *rx_fq, - struct dpaa2_fd *fd, void *vaddr) +static u32 dpaa2_eth_run_xdp(struct dpaa2_eth_priv *priv, + struct dpaa2_eth_channel *ch, + struct dpaa2_eth_fq *rx_fq, + struct dpaa2_fd *fd, void *vaddr) { dma_addr_t addr = dpaa2_fd_get_addr(fd); struct bpf_prog *xdp_prog; @@@ -373,16 -372,16 +373,16 @@@ case XDP_PASS: break; case XDP_TX: - xdp_enqueue(priv, ch, fd, vaddr, rx_fq->flowid); + dpaa2_eth_xdp_enqueue(priv, ch, fd, vaddr, rx_fq->flowid); break; default: bpf_warn_invalid_xdp_action(xdp_act); - /* fall through */ + fallthrough; case XDP_ABORTED: trace_xdp_exception(priv->net_dev, xdp_prog, xdp_act); - /* fall through */ + fallthrough; case XDP_DROP: - xdp_release_buf(priv, ch, addr); + dpaa2_eth_xdp_release_buf(priv, ch, addr); ch->stats.xdp_drop++; break; case XDP_REDIRECT: @@@ -442,7 -441,7 +442,7 @@@ static void dpaa2_eth_rx(struct dpaa2_e percpu_extras = this_cpu_ptr(priv->percpu_extras);
if (fd_format == dpaa2_fd_single) { - xdp_act = run_xdp(priv, ch, fq, (struct dpaa2_fd *)fd, vaddr); + xdp_act = dpaa2_eth_run_xdp(priv, ch, fq, (struct dpaa2_fd *)fd, vaddr); if (xdp_act != XDP_PASS) { percpu_stats->rx_packets++; percpu_stats->rx_bytes += dpaa2_fd_get_len(fd); @@@ -451,13 -450,13 +451,13 @@@
dma_unmap_page(dev, addr, priv->rx_buf_size, DMA_BIDIRECTIONAL); - skb = build_linear_skb(ch, fd, vaddr); + skb = dpaa2_eth_build_linear_skb(ch, fd, vaddr); } else if (fd_format == dpaa2_fd_sg) { WARN_ON(priv->xdp_prog);
dma_unmap_page(dev, addr, priv->rx_buf_size, DMA_BIDIRECTIONAL); - skb = build_frag_skb(priv, ch, buf_data); + skb = dpaa2_eth_build_frag_skb(priv, ch, buf_data); free_pages((unsigned long)vaddr, 0); percpu_extras->rx_sg_frames++; percpu_extras->rx_sg_bytes += dpaa2_fd_get_len(fd); @@@ -486,7 -485,7 +486,7 @@@ /* Check if we need to validate the L4 csum */ if (likely(dpaa2_fd_get_frc(fd) & DPAA2_FD_FRC_FASV)) { status = le32_to_cpu(fas->status); - validate_rx_csum(priv, status, skb); + dpaa2_eth_validate_rx_csum(priv, status, skb); }
skb->protocol = eth_type_trans(skb, priv->net_dev); @@@ -500,7 -499,7 +500,7 @@@ return;
err_build_skb: - free_rx_fd(priv, fd, vaddr); + dpaa2_eth_free_rx_fd(priv, fd, vaddr); err_frame_format: percpu_stats->rx_dropped++; } @@@ -511,8 -510,8 +511,8 @@@ * * Observance of NAPI budget is not our concern, leaving that to the caller. */ -static int consume_frames(struct dpaa2_eth_channel *ch, - struct dpaa2_eth_fq **src) +static int dpaa2_eth_consume_frames(struct dpaa2_eth_channel *ch, + struct dpaa2_eth_fq **src) { struct dpaa2_eth_priv *priv = ch->priv; struct dpaa2_eth_fq *fq = NULL; @@@ -561,7 -560,7 +561,7 @@@ }
/* Configure the egress frame annotation for timestamp update */ -static void enable_tx_tstamp(struct dpaa2_fd *fd, void *buf_start) +static void dpaa2_eth_enable_tx_tstamp(struct dpaa2_fd *fd, void *buf_start) { struct dpaa2_faead *faead; u32 ctrl, frc; @@@ -583,9 -582,9 +583,9 @@@ }
/* Create a frame descriptor based on a fragmented skb */ -static int build_sg_fd(struct dpaa2_eth_priv *priv, - struct sk_buff *skb, - struct dpaa2_fd *fd) +static int dpaa2_eth_build_sg_fd(struct dpaa2_eth_priv *priv, + struct sk_buff *skb, + struct dpaa2_fd *fd) { struct device *dev = priv->net_dev->dev.parent; void *sgt_buf = NULL; @@@ -674,7 -673,7 +674,7 @@@ dpaa2_fd_set_ctrl(fd, FD_CTRL_PTA);
if (priv->tx_tstamp && skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) - enable_tx_tstamp(fd, sgt_buf); + dpaa2_eth_enable_tx_tstamp(fd, sgt_buf);
return 0;
@@@ -693,9 -692,9 +693,9 @@@ dma_map_sg_failed * enough for the HW requirements, thus instead of realloc-ing the skb we * create a SG frame descriptor with only one entry. */ -static int build_sg_fd_single_buf(struct dpaa2_eth_priv *priv, - struct sk_buff *skb, - struct dpaa2_fd *fd) +static int dpaa2_eth_build_sg_fd_single_buf(struct dpaa2_eth_priv *priv, + struct sk_buff *skb, + struct dpaa2_fd *fd) { struct device *dev = priv->net_dev->dev.parent; struct dpaa2_eth_sgt_cache *sgt_cache; @@@ -752,7 -751,7 +752,7 @@@ dpaa2_fd_set_ctrl(fd, FD_CTRL_PTA);
if (priv->tx_tstamp && skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) - enable_tx_tstamp(fd, sgt_buf); + dpaa2_eth_enable_tx_tstamp(fd, sgt_buf);
return 0;
@@@ -768,9 -767,9 +768,9 @@@ data_map_failed }
/* Create a frame descriptor based on a linear skb */ -static int build_single_fd(struct dpaa2_eth_priv *priv, - struct sk_buff *skb, - struct dpaa2_fd *fd) +static int dpaa2_eth_build_single_fd(struct dpaa2_eth_priv *priv, + struct sk_buff *skb, + struct dpaa2_fd *fd) { struct device *dev = priv->net_dev->dev.parent; u8 *buffer_start, *aligned_start; @@@ -808,7 -807,7 +808,7 @@@ dpaa2_fd_set_ctrl(fd, FD_CTRL_PTA);
if (priv->tx_tstamp && skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) - enable_tx_tstamp(fd, buffer_start); + dpaa2_eth_enable_tx_tstamp(fd, buffer_start);
return 0; } @@@ -820,9 -819,9 +820,9 @@@ * This can be called either from dpaa2_eth_tx_conf() or on the error path of * dpaa2_eth_tx(). */ -static void free_tx_fd(const struct dpaa2_eth_priv *priv, - struct dpaa2_eth_fq *fq, - const struct dpaa2_fd *fd, bool in_napi) +static void dpaa2_eth_free_tx_fd(const struct dpaa2_eth_priv *priv, + struct dpaa2_eth_fq *fq, + const struct dpaa2_fd *fd, bool in_napi) { struct device *dev = priv->net_dev->dev.parent; dma_addr_t fd_addr, sg_addr; @@@ -955,17 -954,17 +955,17 @@@ static netdev_tx_t dpaa2_eth_tx(struct memset(&fd, 0, sizeof(fd));
if (skb_is_nonlinear(skb)) { - err = build_sg_fd(priv, skb, &fd); + err = dpaa2_eth_build_sg_fd(priv, skb, &fd); percpu_extras->tx_sg_frames++; percpu_extras->tx_sg_bytes += skb->len; } else if (skb_headroom(skb) < needed_headroom) { - err = build_sg_fd_single_buf(priv, skb, &fd); + err = dpaa2_eth_build_sg_fd_single_buf(priv, skb, &fd); percpu_extras->tx_sg_frames++; percpu_extras->tx_sg_bytes += skb->len; percpu_extras->tx_converted_sg_frames++; percpu_extras->tx_converted_sg_bytes += skb->len; } else { - err = build_single_fd(priv, skb, &fd); + err = dpaa2_eth_build_single_fd(priv, skb, &fd); }
if (unlikely(err)) { @@@ -1011,7 -1010,7 +1011,7 @@@ if (unlikely(err < 0)) { percpu_stats->tx_errors++; /* Clean up everything, including freeing the skb */ - free_tx_fd(priv, fq, &fd, false); + dpaa2_eth_free_tx_fd(priv, fq, &fd, false); netdev_tx_completed_queue(nq, 1, fd_len); } else { percpu_stats->tx_packets++; @@@ -1046,7 -1045,7 +1046,7 @@@ static void dpaa2_eth_tx_conf(struct dp
/* Check frame errors in the FD field */ fd_errors = dpaa2_fd_get_ctrl(fd) & DPAA2_FD_TX_ERR_MASK; - free_tx_fd(priv, fq, fd, true); + dpaa2_eth_free_tx_fd(priv, fq, fd, true);
if (likely(!fd_errors)) return; @@@ -1060,7 -1059,7 +1060,7 @@@ percpu_stats->tx_errors++; }
-static int set_rx_csum(struct dpaa2_eth_priv *priv, bool enable) +static int dpaa2_eth_set_rx_csum(struct dpaa2_eth_priv *priv, bool enable) { int err;
@@@ -1083,7 -1082,7 +1083,7 @@@ return 0; }
-static int set_tx_csum(struct dpaa2_eth_priv *priv, bool enable) +static int dpaa2_eth_set_tx_csum(struct dpaa2_eth_priv *priv, bool enable) { int err;
@@@ -1107,8 -1106,8 +1107,8 @@@ /* Perform a single release command to add buffers * to the specified buffer pool */ -static int add_bufs(struct dpaa2_eth_priv *priv, - struct dpaa2_eth_channel *ch, u16 bpid) +static int dpaa2_eth_add_bufs(struct dpaa2_eth_priv *priv, + struct dpaa2_eth_channel *ch, u16 bpid) { struct device *dev = priv->net_dev->dev.parent; u64 buf_array[DPAA2_ETH_BUFS_PER_CMD]; @@@ -1156,7 -1155,7 +1156,7 @@@ release_bufs * not much else we can do about it */ if (err) { - free_bufs(priv, buf_array, i); + dpaa2_eth_free_bufs(priv, buf_array, i); return 0; }
@@@ -1174,7 -1173,7 +1174,7 @@@ err_alloc return 0; }
-static int seed_pool(struct dpaa2_eth_priv *priv, u16 bpid) +static int dpaa2_eth_seed_pool(struct dpaa2_eth_priv *priv, u16 bpid) { int i, j; int new_count; @@@ -1182,7 -1181,7 +1182,7 @@@ for (j = 0; j < priv->num_channels; j++) { for (i = 0; i < DPAA2_ETH_NUM_BUFS; i += DPAA2_ETH_BUFS_PER_CMD) { - new_count = add_bufs(priv, priv->channel[j], bpid); + new_count = dpaa2_eth_add_bufs(priv, priv->channel[j], bpid); priv->channel[j]->buf_count += new_count;
 		if (new_count < DPAA2_ETH_BUFS_PER_CMD) {
@@@ -1198,7 -1197,7 +1198,7 @@@
  * Drain the specified number of buffers from the DPNI's private buffer pool.
  * @count must not exceed DPAA2_ETH_BUFS_PER_CMD
  */
-static void drain_bufs(struct dpaa2_eth_priv *priv, int count)
+static void dpaa2_eth_drain_bufs(struct dpaa2_eth_priv *priv, int count)
 {
 	u64 buf_array[DPAA2_ETH_BUFS_PER_CMD];
 	int retries = 0;
@@@ -1214,17 -1213,17 +1214,17 @@@
 			netdev_err(priv->net_dev, "dpaa2_io_service_acquire() failed\n");
 			return;
 		}
-		free_bufs(priv, buf_array, ret);
+		dpaa2_eth_free_bufs(priv, buf_array, ret);
 		retries = 0;
 	} while (ret);
 }
-static void drain_pool(struct dpaa2_eth_priv *priv) +static void dpaa2_eth_drain_pool(struct dpaa2_eth_priv *priv) { int i;
- drain_bufs(priv, DPAA2_ETH_BUFS_PER_CMD); - drain_bufs(priv, 1); + dpaa2_eth_drain_bufs(priv, DPAA2_ETH_BUFS_PER_CMD); + dpaa2_eth_drain_bufs(priv, 1);
for (i = 0; i < priv->num_channels; i++) priv->channel[i]->buf_count = 0; @@@ -1233,9 -1232,9 +1233,9 @@@ /* Function is called from softirq context only, so we don't need to guard * the access to percpu count */ -static int refill_pool(struct dpaa2_eth_priv *priv, - struct dpaa2_eth_channel *ch, - u16 bpid) +static int dpaa2_eth_refill_pool(struct dpaa2_eth_priv *priv, + struct dpaa2_eth_channel *ch, + u16 bpid) { int new_count;
@@@ -1243,7 -1242,7 +1243,7 @@@ return 0;
do { - new_count = add_bufs(priv, ch, bpid); + new_count = dpaa2_eth_add_bufs(priv, ch, bpid); if (unlikely(!new_count)) { /* Out of memory; abort for now, we'll try later on */ break; @@@ -1273,7 -1272,7 +1273,7 @@@ static void dpaa2_eth_sgt_cache_drain(s } }
-static int pull_channel(struct dpaa2_eth_channel *ch) +static int dpaa2_eth_pull_channel(struct dpaa2_eth_channel *ch) { int err; int dequeues = -1; @@@ -1320,14 -1319,14 +1320,14 @@@ static int dpaa2_eth_poll(struct napi_s ch->rx_list = &rx_list;
do { - err = pull_channel(ch); + err = dpaa2_eth_pull_channel(ch); if (unlikely(err)) break;
/* Refill pool if appropriate */ - refill_pool(priv, ch, priv->bpid); + dpaa2_eth_refill_pool(priv, ch, priv->bpid);
- store_cleaned = consume_frames(ch, &fq); + store_cleaned = dpaa2_eth_consume_frames(ch, &fq); if (store_cleaned <= 0) break; if (fq->type == DPAA2_RX_FQ) { @@@ -1376,12 -1375,12 +1376,12 @@@ out if (ch->xdp.res & XDP_REDIRECT) xdp_do_flush_map(); else if (rx_cleaned && ch->xdp.res & XDP_TX) - xdp_tx_flush(priv, ch, &priv->fq[flowid]); + dpaa2_eth_xdp_tx_flush(priv, ch, &priv->fq[flowid]);
return work_done; }
-static void enable_ch_napi(struct dpaa2_eth_priv *priv) +static void dpaa2_eth_enable_ch_napi(struct dpaa2_eth_priv *priv) { struct dpaa2_eth_channel *ch; int i; @@@ -1392,7 -1391,7 +1392,7 @@@ } }
-static void disable_ch_napi(struct dpaa2_eth_priv *priv) +static void dpaa2_eth_disable_ch_napi(struct dpaa2_eth_priv *priv) { struct dpaa2_eth_channel *ch; int i; @@@ -1466,7 -1465,7 +1466,7 @@@ set_cgtd priv->rx_cgtd_enabled = td.enable; }
-static int link_state_update(struct dpaa2_eth_priv *priv) +static int dpaa2_eth_link_state_update(struct dpaa2_eth_priv *priv) { struct dpni_link_state state = {0}; bool tx_pause; @@@ -1518,7 -1517,7 +1518,7 @@@ static int dpaa2_eth_open(struct net_de struct dpaa2_eth_priv *priv = netdev_priv(net_dev); int err;
- err = seed_pool(priv, priv->bpid); + err = dpaa2_eth_seed_pool(priv, priv->bpid); if (err) { /* Not much to do; the buffer pool, though not filled up, * may still contain some buffers which would enable us @@@ -1542,7 -1541,7 +1542,7 @@@ */ netif_carrier_off(net_dev); } - enable_ch_napi(priv); + dpaa2_eth_enable_ch_napi(priv);
err = dpni_enable(priv->mc_io, 0, priv->mc_token); if (err < 0) { @@@ -1554,7 -1553,7 +1554,7 @@@ /* If the DPMAC object has already processed the link up * interrupt, we have to learn the link state ourselves. */ - err = link_state_update(priv); + err = dpaa2_eth_link_state_update(priv); if (err < 0) { netdev_err(net_dev, "Can't update link state\n"); goto link_state_err; @@@ -1567,13 -1566,13 +1567,13 @@@
link_state_err: enable_err: - disable_ch_napi(priv); - drain_pool(priv); + dpaa2_eth_disable_ch_napi(priv); + dpaa2_eth_drain_pool(priv); return err; }
/* Total number of in-flight frames on ingress queues */ -static u32 ingress_fq_count(struct dpaa2_eth_priv *priv) +static u32 dpaa2_eth_ingress_fq_count(struct dpaa2_eth_priv *priv) { struct dpaa2_eth_fq *fq; u32 fcnt = 0, bcnt = 0, total = 0; @@@ -1592,13 -1591,13 +1592,13 @@@ return total; }
-static void wait_for_ingress_fq_empty(struct dpaa2_eth_priv *priv) +static void dpaa2_eth_wait_for_ingress_fq_empty(struct dpaa2_eth_priv *priv) { int retries = 10; u32 pending;
do { - pending = ingress_fq_count(priv); + pending = dpaa2_eth_ingress_fq_count(priv); if (pending) msleep(100); } while (pending && --retries); @@@ -1606,7 -1605,7 +1606,7 @@@
#define DPNI_TX_PENDING_VER_MAJOR 7 #define DPNI_TX_PENDING_VER_MINOR 13 -static void wait_for_egress_fq_empty(struct dpaa2_eth_priv *priv) +static void dpaa2_eth_wait_for_egress_fq_empty(struct dpaa2_eth_priv *priv) { union dpni_statistics stats; int retries = 10; @@@ -1652,7 -1651,7 +1652,7 @@@ static int dpaa2_eth_stop(struct net_de * on WRIOP. After it finishes, wait until all remaining frames on Rx * and Tx conf queues are consumed on NAPI poll. */ - wait_for_egress_fq_empty(priv); + dpaa2_eth_wait_for_egress_fq_empty(priv);
do { dpni_disable(priv->mc_io, 0, priv->mc_token); @@@ -1668,11 -1667,11 +1668,11 @@@ */ }
- wait_for_ingress_fq_empty(priv); - disable_ch_napi(priv); + dpaa2_eth_wait_for_ingress_fq_empty(priv); + dpaa2_eth_disable_ch_napi(priv);
/* Empty the buffer pool */ - drain_pool(priv); + dpaa2_eth_drain_pool(priv);
/* Empty the Scatter-Gather Buffer cache */ dpaa2_eth_sgt_cache_drain(priv); @@@ -1726,8 -1725,8 +1726,8 @@@ static void dpaa2_eth_get_stats(struct /* Copy mac unicast addresses from @net_dev to @priv. * Its sole purpose is to make dpaa2_eth_set_rx_mode() more readable. */ -static void add_uc_hw_addr(const struct net_device *net_dev, - struct dpaa2_eth_priv *priv) +static void dpaa2_eth_add_uc_hw_addr(const struct net_device *net_dev, + struct dpaa2_eth_priv *priv) { struct netdev_hw_addr *ha; int err; @@@ -1745,8 -1744,8 +1745,8 @@@ /* Copy mac multicast addresses from @net_dev to @priv * Its sole purpose is to make dpaa2_eth_set_rx_mode() more readable. */ -static void add_mc_hw_addr(const struct net_device *net_dev, - struct dpaa2_eth_priv *priv) +static void dpaa2_eth_add_mc_hw_addr(const struct net_device *net_dev, + struct dpaa2_eth_priv *priv) { struct netdev_hw_addr *ha; int err; @@@ -1811,7 -1810,7 +1811,7 @@@ static void dpaa2_eth_set_rx_mode(struc err = dpni_clear_mac_filters(mc_io, 0, mc_token, 1, 0); if (err) netdev_warn(net_dev, "Can't clear uc filters\n"); - add_uc_hw_addr(net_dev, priv); + dpaa2_eth_add_uc_hw_addr(net_dev, priv);
/* Finally, clear uc promisc and set mc promisc as requested. */ err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 0); @@@ -1834,8 -1833,8 +1834,8 @@@ err = dpni_clear_mac_filters(mc_io, 0, mc_token, 1, 1); if (err) netdev_warn(net_dev, "Can't clear mac filters\n"); - add_mc_hw_addr(net_dev, priv); - add_uc_hw_addr(net_dev, priv); + dpaa2_eth_add_mc_hw_addr(net_dev, priv); + dpaa2_eth_add_uc_hw_addr(net_dev, priv);
/* Now we can clear both ucast and mcast promisc, without risking * to drop legitimate frames anymore. @@@ -1869,14 -1868,14 +1869,14 @@@ static int dpaa2_eth_set_features(struc
if (changed & NETIF_F_RXCSUM) { enable = !!(features & NETIF_F_RXCSUM); - err = set_rx_csum(priv, enable); + err = dpaa2_eth_set_rx_csum(priv, enable); if (err) return err; }
if (changed & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)) { enable = !!(features & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)); - err = set_tx_csum(priv, enable); + err = dpaa2_eth_set_tx_csum(priv, enable); if (err) return err; } @@@ -1945,7 -1944,7 +1945,7 @@@ static bool xdp_mtu_valid(struct dpaa2_ return true; }
-static int set_rx_mfl(struct dpaa2_eth_priv *priv, int mtu, bool has_xdp) +static int dpaa2_eth_set_rx_mfl(struct dpaa2_eth_priv *priv, int mtu, bool has_xdp) { int mfl, err;
@@@ -1979,7 -1978,7 +1979,7 @@@ static int dpaa2_eth_change_mtu(struct if (!xdp_mtu_valid(priv, new_mtu)) return -EINVAL;
- err = set_rx_mfl(priv, new_mtu, true); + err = dpaa2_eth_set_rx_mfl(priv, new_mtu, true); if (err) return err;
@@@ -1988,7 -1987,7 +1988,7 @@@ out return 0; }
-static int update_rx_buffer_headroom(struct dpaa2_eth_priv *priv, bool has_xdp) +static int dpaa2_eth_update_rx_buffer_headroom(struct dpaa2_eth_priv *priv, bool has_xdp) { struct dpni_buffer_layout buf_layout = {0}; int err; @@@ -2014,7 -2013,7 +2014,7 @@@ return 0; }
-static int setup_xdp(struct net_device *dev, struct bpf_prog *prog) +static int dpaa2_eth_setup_xdp(struct net_device *dev, struct bpf_prog *prog) { struct dpaa2_eth_priv *priv = netdev_priv(dev); struct dpaa2_eth_channel *ch; @@@ -2040,10 -2039,10 +2040,10 @@@ * so we are sure no old format buffers will be used from now on. */ if (need_update) { - err = set_rx_mfl(priv, dev->mtu, !!prog); + err = dpaa2_eth_set_rx_mfl(priv, dev->mtu, !!prog); if (err) goto out_err; - err = update_rx_buffer_headroom(priv, !!prog); + err = dpaa2_eth_update_rx_buffer_headroom(priv, !!prog); if (err) goto out_err; } @@@ -2080,7 -2079,7 +2080,7 @@@ static int dpaa2_eth_xdp(struct net_dev { switch (xdp->command) { case XDP_SETUP_PROG: - return setup_xdp(dev, xdp->prog); + return dpaa2_eth_setup_xdp(dev, xdp->prog); default: return -EINVAL; } @@@ -2317,7 -2316,7 +2317,7 @@@ static const struct net_device_ops dpaa .ndo_setup_tc = dpaa2_eth_setup_tc, };
-static void cdan_cb(struct dpaa2_io_notification_ctx *ctx) +static void dpaa2_eth_cdan_cb(struct dpaa2_io_notification_ctx *ctx) { struct dpaa2_eth_channel *ch;
@@@ -2330,7 -2329,7 +2330,7 @@@ }
/* Allocate and configure a DPCON object */ -static struct fsl_mc_device *setup_dpcon(struct dpaa2_eth_priv *priv) +static struct fsl_mc_device *dpaa2_eth_setup_dpcon(struct dpaa2_eth_priv *priv) { struct fsl_mc_device *dpcon; struct device *dev = priv->net_dev->dev.parent; @@@ -2374,15 -2373,16 +2374,15 @@@ free return ERR_PTR(err); }
-static void free_dpcon(struct dpaa2_eth_priv *priv, - struct fsl_mc_device *dpcon) +static void dpaa2_eth_free_dpcon(struct dpaa2_eth_priv *priv, + struct fsl_mc_device *dpcon) { dpcon_disable(priv->mc_io, 0, dpcon->mc_handle); dpcon_close(priv->mc_io, 0, dpcon->mc_handle); fsl_mc_object_free(dpcon); }
-static struct dpaa2_eth_channel * -alloc_channel(struct dpaa2_eth_priv *priv) +static struct dpaa2_eth_channel *dpaa2_eth_alloc_channel(struct dpaa2_eth_priv *priv) { struct dpaa2_eth_channel *channel; struct dpcon_attr attr; @@@ -2393,7 -2393,7 +2393,7 @@@ if (!channel) return NULL;
- channel->dpcon = setup_dpcon(priv); + channel->dpcon = dpaa2_eth_setup_dpcon(priv); if (IS_ERR(channel->dpcon)) { err = PTR_ERR(channel->dpcon); goto err_setup; @@@ -2413,23 -2413,23 +2413,23 @@@ return channel;
err_get_attr: - free_dpcon(priv, channel->dpcon); + dpaa2_eth_free_dpcon(priv, channel->dpcon); err_setup: kfree(channel); return ERR_PTR(err); }
-static void free_channel(struct dpaa2_eth_priv *priv, - struct dpaa2_eth_channel *channel) +static void dpaa2_eth_free_channel(struct dpaa2_eth_priv *priv, + struct dpaa2_eth_channel *channel) { - free_dpcon(priv, channel->dpcon); + dpaa2_eth_free_dpcon(priv, channel->dpcon); kfree(channel); }
/* DPIO setup: allocate and configure QBMan channels, setup core affinity * and register data availability notifications */ -static int setup_dpio(struct dpaa2_eth_priv *priv) +static int dpaa2_eth_setup_dpio(struct dpaa2_eth_priv *priv) { struct dpaa2_io_notification_ctx *nctx; struct dpaa2_eth_channel *channel; @@@ -2449,7 -2449,7 +2449,7 @@@ cpumask_clear(&priv->dpio_cpumask); for_each_online_cpu(i) { /* Try to allocate a channel */ - channel = alloc_channel(priv); + channel = dpaa2_eth_alloc_channel(priv); if (IS_ERR_OR_NULL(channel)) { err = PTR_ERR_OR_ZERO(channel); if (err != -EPROBE_DEFER) @@@ -2462,7 -2462,7 +2462,7 @@@
nctx = &channel->nctx; nctx->is_cdan = 1; - nctx->cb = cdan_cb; + nctx->cb = dpaa2_eth_cdan_cb; nctx->id = channel->ch_id; nctx->desired_cpu = i;
@@@ -2510,14 -2510,14 +2510,14 @@@ err_set_cdan: dpaa2_io_service_deregister(channel->dpio, nctx, dev); err_service_reg: - free_channel(priv, channel); + dpaa2_eth_free_channel(priv, channel); err_alloc_ch: if (err == -EPROBE_DEFER) { for (i = 0; i < priv->num_channels; i++) { channel = priv->channel[i]; nctx = &channel->nctx; dpaa2_io_service_deregister(channel->dpio, nctx, dev); - free_channel(priv, channel); + dpaa2_eth_free_channel(priv, channel); } priv->num_channels = 0; return err; @@@ -2534,7 -2534,7 +2534,7 @@@ return 0; }
-static void free_dpio(struct dpaa2_eth_priv *priv) +static void dpaa2_eth_free_dpio(struct dpaa2_eth_priv *priv) { struct device *dev = priv->net_dev->dev.parent; struct dpaa2_eth_channel *ch; @@@ -2544,12 -2544,12 +2544,12 @@@ for (i = 0; i < priv->num_channels; i++) { ch = priv->channel[i]; dpaa2_io_service_deregister(ch->dpio, &ch->nctx, dev); - free_channel(priv, ch); + dpaa2_eth_free_channel(priv, ch); } }
-static struct dpaa2_eth_channel *get_affine_channel(struct dpaa2_eth_priv *priv, - int cpu) +static struct dpaa2_eth_channel *dpaa2_eth_get_affine_channel(struct dpaa2_eth_priv *priv, + int cpu) { struct device *dev = priv->net_dev->dev.parent; int i; @@@ -2566,7 -2566,7 +2566,7 @@@ return priv->channel[0]; }
-static void set_fq_affinity(struct dpaa2_eth_priv *priv) +static void dpaa2_eth_set_fq_affinity(struct dpaa2_eth_priv *priv) { struct device *dev = priv->net_dev->dev.parent; struct dpaa2_eth_fq *fq; @@@ -2597,13 -2597,13 +2597,13 @@@ default: dev_err(dev, "Unknown FQ type: %d\n", fq->type); } - fq->channel = get_affine_channel(priv, fq->target_cpu); + fq->channel = dpaa2_eth_get_affine_channel(priv, fq->target_cpu); }
update_xps(priv); }
-static void setup_fqs(struct dpaa2_eth_priv *priv) +static void dpaa2_eth_setup_fqs(struct dpaa2_eth_priv *priv) { int i, j;
@@@ -2627,11 -2627,11 +2627,11 @@@ }
/* For each FQ, decide on which core to process incoming frames */ - set_fq_affinity(priv); + dpaa2_eth_set_fq_affinity(priv); }
/* Allocate and configure one buffer pool for each interface */ -static int setup_dpbp(struct dpaa2_eth_priv *priv) +static int dpaa2_eth_setup_dpbp(struct dpaa2_eth_priv *priv) { int err; struct fsl_mc_device *dpbp_dev; @@@ -2690,15 -2690,15 +2690,15 @@@ err_open return err; }
-static void free_dpbp(struct dpaa2_eth_priv *priv) +static void dpaa2_eth_free_dpbp(struct dpaa2_eth_priv *priv) { - drain_pool(priv); + dpaa2_eth_drain_pool(priv); dpbp_disable(priv->mc_io, 0, priv->dpbp_dev->mc_handle); dpbp_close(priv->mc_io, 0, priv->dpbp_dev->mc_handle); fsl_mc_object_free(priv->dpbp_dev); }
-static int set_buffer_layout(struct dpaa2_eth_priv *priv) +static int dpaa2_eth_set_buffer_layout(struct dpaa2_eth_priv *priv) { struct device *dev = priv->net_dev->dev.parent; struct dpni_buffer_layout buf_layout = {0}; @@@ -2815,7 -2815,7 +2815,7 @@@ static inline int dpaa2_eth_enqueue_fq_ return 0; }
-static void set_enqueue_mode(struct dpaa2_eth_priv *priv) +static void dpaa2_eth_set_enqueue_mode(struct dpaa2_eth_priv *priv) { if (dpaa2_eth_cmp_dpni_ver(priv, DPNI_ENQUEUE_FQID_VER_MAJOR, DPNI_ENQUEUE_FQID_VER_MINOR) < 0) @@@ -2824,7 -2824,7 +2824,7 @@@ priv->enqueue = dpaa2_eth_enqueue_fq_multiple; }
-static int set_pause(struct dpaa2_eth_priv *priv) +static int dpaa2_eth_set_pause(struct dpaa2_eth_priv *priv) { struct device *dev = priv->net_dev->dev.parent; struct dpni_link_cfg link_cfg = {0}; @@@ -2851,7 -2851,7 +2851,7 @@@ return 0; }
-static void update_tx_fqids(struct dpaa2_eth_priv *priv) +static void dpaa2_eth_update_tx_fqids(struct dpaa2_eth_priv *priv) { struct dpni_queue_id qid = {0}; struct dpaa2_eth_fq *fq; @@@ -2893,7 -2893,7 +2893,7 @@@ out_err }
/* Configure ingress classification based on VLAN PCP */ -static int set_vlan_qos(struct dpaa2_eth_priv *priv) +static int dpaa2_eth_set_vlan_qos(struct dpaa2_eth_priv *priv) { struct device *dev = priv->net_dev->dev.parent; struct dpkg_profile_cfg kg_cfg = {0}; @@@ -3005,7 -3005,7 +3005,7 @@@ out_free_tbl }
/* Configure the DPNI object this interface is associated with */ -static int setup_dpni(struct fsl_mc_device *ls_dev) +static int dpaa2_eth_setup_dpni(struct fsl_mc_device *ls_dev) { struct device *dev = &ls_dev->dev; struct dpaa2_eth_priv *priv; @@@ -3053,20 -3053,20 +3053,20 @@@ goto close; }
- err = set_buffer_layout(priv); + err = dpaa2_eth_set_buffer_layout(priv); if (err) goto close;
- set_enqueue_mode(priv); + dpaa2_eth_set_enqueue_mode(priv);
/* Enable pause frame support */ if (dpaa2_eth_has_pause_support(priv)) { - err = set_pause(priv); + err = dpaa2_eth_set_pause(priv); if (err) goto close; }
- err = set_vlan_qos(priv); + err = dpaa2_eth_set_vlan_qos(priv); if (err && err != -EOPNOTSUPP) goto close;
@@@ -3086,7 -3086,7 +3086,7 @@@ close return err; }
-static void free_dpni(struct dpaa2_eth_priv *priv) +static void dpaa2_eth_free_dpni(struct dpaa2_eth_priv *priv) { int err;
@@@ -3098,8 -3098,8 +3098,8 @@@ dpni_close(priv->mc_io, 0, priv->mc_token); }
-static int setup_rx_flow(struct dpaa2_eth_priv *priv, - struct dpaa2_eth_fq *fq) +static int dpaa2_eth_setup_rx_flow(struct dpaa2_eth_priv *priv, + struct dpaa2_eth_fq *fq) { struct device *dev = priv->net_dev->dev.parent; struct dpni_queue queue; @@@ -3150,8 -3150,8 +3150,8 @@@ return 0; }
-static int setup_tx_flow(struct dpaa2_eth_priv *priv, - struct dpaa2_eth_fq *fq) +static int dpaa2_eth_setup_tx_flow(struct dpaa2_eth_priv *priv, + struct dpaa2_eth_fq *fq) { struct device *dev = priv->net_dev->dev.parent; struct dpni_queue queue; @@@ -3266,7 -3266,7 +3266,7 @@@ static const struct dpaa2_eth_dist_fiel };
/* Configure the Rx hash key using the legacy API */ -static int config_legacy_hash_key(struct dpaa2_eth_priv *priv, dma_addr_t key) +static int dpaa2_eth_config_legacy_hash_key(struct dpaa2_eth_priv *priv, dma_addr_t key) { struct device *dev = priv->net_dev->dev.parent; struct dpni_rx_tc_dist_cfg dist_cfg; @@@ -3291,7 -3291,7 +3291,7 @@@ }
/* Configure the Rx hash key using the new API */ -static int config_hash_key(struct dpaa2_eth_priv *priv, dma_addr_t key) +static int dpaa2_eth_config_hash_key(struct dpaa2_eth_priv *priv, dma_addr_t key) { struct device *dev = priv->net_dev->dev.parent; struct dpni_rx_dist_cfg dist_cfg; @@@ -3317,7 -3317,7 +3317,7 @@@ }
/* Configure the Rx flow classification key */ -static int config_cls_key(struct dpaa2_eth_priv *priv, dma_addr_t key) +static int dpaa2_eth_config_cls_key(struct dpaa2_eth_priv *priv, dma_addr_t key) { struct device *dev = priv->net_dev->dev.parent; struct dpni_rx_dist_cfg dist_cfg; @@@ -3452,11 -3452,11 +3452,11 @@@ static int dpaa2_eth_set_dist_key(struc
if (type == DPAA2_ETH_RX_DIST_HASH) { if (dpaa2_eth_has_legacy_dist(priv)) - err = config_legacy_hash_key(priv, key_iova); + err = dpaa2_eth_config_legacy_hash_key(priv, key_iova); else - err = config_hash_key(priv, key_iova); + err = dpaa2_eth_config_hash_key(priv, key_iova); } else { - err = config_cls_key(priv, key_iova); + err = dpaa2_eth_config_cls_key(priv, key_iova); }
dma_unmap_single(dev, key_iova, DPAA2_CLASSIFIER_DMA_SIZE, @@@ -3531,7 -3531,7 +3531,7 @@@ out /* Bind the DPNI to its needed objects and resources: buffer pool, DPIOs, * frame queues and channels */ -static int bind_dpni(struct dpaa2_eth_priv *priv) +static int dpaa2_eth_bind_dpni(struct dpaa2_eth_priv *priv) { struct net_device *net_dev = priv->net_dev; struct device *dev = net_dev->dev.parent; @@@ -3579,10 -3579,10 +3579,10 @@@ for (i = 0; i < priv->num_fqs; i++) { switch (priv->fq[i].type) { case DPAA2_RX_FQ: - err = setup_rx_flow(priv, &priv->fq[i]); + err = dpaa2_eth_setup_rx_flow(priv, &priv->fq[i]); break; case DPAA2_TX_CONF_FQ: - err = setup_tx_flow(priv, &priv->fq[i]); + err = dpaa2_eth_setup_tx_flow(priv, &priv->fq[i]); break; default: dev_err(dev, "Invalid FQ type %d\n", priv->fq[i].type); @@@ -3603,7 -3603,7 +3603,7 @@@ }
/* Allocate rings for storing incoming frame descriptors */ -static int alloc_rings(struct dpaa2_eth_priv *priv) +static int dpaa2_eth_alloc_rings(struct dpaa2_eth_priv *priv) { struct net_device *net_dev = priv->net_dev; struct device *dev = net_dev->dev.parent; @@@ -3630,7 -3630,7 +3630,7 @@@ err_ring return -ENOMEM; }
-static void free_rings(struct dpaa2_eth_priv *priv) +static void dpaa2_eth_free_rings(struct dpaa2_eth_priv *priv) { int i;
@@@ -3638,7 -3638,7 +3638,7 @@@ dpaa2_io_store_destroy(priv->channel[i]->store); }
-static int set_mac_addr(struct dpaa2_eth_priv *priv) +static int dpaa2_eth_set_mac_addr(struct dpaa2_eth_priv *priv) { struct net_device *net_dev = priv->net_dev; struct device *dev = net_dev->dev.parent; @@@ -3703,7 -3703,7 +3703,7 @@@ return 0; }
-static int netdev_init(struct net_device *net_dev) +static int dpaa2_eth_netdev_init(struct net_device *net_dev) { struct device *dev = net_dev->dev.parent; struct dpaa2_eth_priv *priv = netdev_priv(net_dev); @@@ -3716,7 -3716,7 +3716,7 @@@ net_dev->netdev_ops = &dpaa2_eth_ops; net_dev->ethtool_ops = &dpaa2_ethtool_ops;
- err = set_mac_addr(priv); + err = dpaa2_eth_set_mac_addr(priv); if (err) return err;
@@@ -3771,13 -3771,13 +3771,13 @@@ return 0; }
-static int poll_link_state(void *arg) +static int dpaa2_eth_poll_link_state(void *arg) { struct dpaa2_eth_priv *priv = (struct dpaa2_eth_priv *)arg; int err;
while (!kthread_should_stop()) { - err = link_state_update(priv); + err = dpaa2_eth_link_state_update(priv); if (unlikely(err)) return err;
@@@ -3847,11 -3847,11 +3847,11 @@@ static irqreturn_t dpni_irq0_handler_th }
if (status & DPNI_IRQ_EVENT_LINK_CHANGED) - link_state_update(netdev_priv(net_dev)); + dpaa2_eth_link_state_update(netdev_priv(net_dev));
if (status & DPNI_IRQ_EVENT_ENDPOINT_CHANGED) { - set_mac_addr(netdev_priv(net_dev)); - update_tx_fqids(priv); + dpaa2_eth_set_mac_addr(netdev_priv(net_dev)); + dpaa2_eth_update_tx_fqids(priv);
rtnl_lock(); if (priv->mac) @@@ -3864,7 -3864,7 +3864,7 @@@ return IRQ_HANDLED; }
-static int setup_irqs(struct fsl_mc_device *ls_dev) +static int dpaa2_eth_setup_irqs(struct fsl_mc_device *ls_dev) { int err = 0; struct fsl_mc_device_irq *irq; @@@ -3910,7 -3910,7 +3910,7 @@@ free_mc_irq return err; }
-static void add_ch_napi(struct dpaa2_eth_priv *priv) +static void dpaa2_eth_add_ch_napi(struct dpaa2_eth_priv *priv) { int i; struct dpaa2_eth_channel *ch; @@@ -3923,7 -3923,7 +3923,7 @@@ } }
-static void del_ch_napi(struct dpaa2_eth_priv *priv) +static void dpaa2_eth_del_ch_napi(struct dpaa2_eth_priv *priv) { int i; struct dpaa2_eth_channel *ch; @@@ -3970,26 -3970,26 +3970,26 @@@ static int dpaa2_eth_probe(struct fsl_m }
/* MC objects initialization and configuration */ - err = setup_dpni(dpni_dev); + err = dpaa2_eth_setup_dpni(dpni_dev); if (err) goto err_dpni_setup;
- err = setup_dpio(priv); + err = dpaa2_eth_setup_dpio(priv); if (err) goto err_dpio_setup;
- setup_fqs(priv); + dpaa2_eth_setup_fqs(priv);
- err = setup_dpbp(priv); + err = dpaa2_eth_setup_dpbp(priv); if (err) goto err_dpbp_setup;
- err = bind_dpni(priv); + err = dpaa2_eth_bind_dpni(priv); if (err) goto err_bind;
/* Add a NAPI context for each channel */ - add_ch_napi(priv); + dpaa2_eth_add_ch_napi(priv);
/* Percpu statistics */ priv->percpu_stats = alloc_percpu(*priv->percpu_stats); @@@ -4012,21 -4012,21 +4012,21 @@@ goto err_alloc_sgt_cache; }
- err = netdev_init(net_dev); + err = dpaa2_eth_netdev_init(net_dev); if (err) goto err_netdev_init;
/* Configure checksum offload based on current interface flags */ - err = set_rx_csum(priv, !!(net_dev->features & NETIF_F_RXCSUM)); + err = dpaa2_eth_set_rx_csum(priv, !!(net_dev->features & NETIF_F_RXCSUM)); if (err) goto err_csum;
- err = set_tx_csum(priv, !!(net_dev->features & - (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM))); + err = dpaa2_eth_set_tx_csum(priv, + !!(net_dev->features & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM))); if (err) goto err_csum;
- err = alloc_rings(priv); + err = dpaa2_eth_alloc_rings(priv); if (err) goto err_alloc_rings;
@@@ -4039,10 -4039,10 +4039,10 @@@ } #endif
- err = setup_irqs(dpni_dev); + err = dpaa2_eth_setup_irqs(dpni_dev); if (err) { netdev_warn(net_dev, "Failed to set link interrupt, fall back to polling\n"); - priv->poll_thread = kthread_run(poll_link_state, priv, + priv->poll_thread = kthread_run(dpaa2_eth_poll_link_state, priv, "%s_poll_link", net_dev->name); if (IS_ERR(priv->poll_thread)) { dev_err(dev, "Error starting polling thread\n"); @@@ -4076,7 -4076,7 +4076,7 @@@ err_connect_mac else fsl_mc_free_irqs(dpni_dev); err_poll_thread: - free_rings(priv); + dpaa2_eth_free_rings(priv); err_alloc_rings: err_csum: err_netdev_init: @@@ -4086,13 -4086,13 +4086,13 @@@ err_alloc_sgt_cache err_alloc_percpu_extras: free_percpu(priv->percpu_stats); err_alloc_percpu_stats: - del_ch_napi(priv); + dpaa2_eth_del_ch_napi(priv); err_bind: - free_dpbp(priv); + dpaa2_eth_free_dpbp(priv); err_dpbp_setup: - free_dpio(priv); + dpaa2_eth_free_dpio(priv); err_dpio_setup: - free_dpni(priv); + dpaa2_eth_free_dpni(priv); err_dpni_setup: fsl_mc_portal_free(priv->mc_io); err_portal_alloc: @@@ -4126,15 -4126,15 +4126,15 @@@ static int dpaa2_eth_remove(struct fsl_ else fsl_mc_free_irqs(ls_dev);
- free_rings(priv); + dpaa2_eth_free_rings(priv); free_percpu(priv->sgt_cache); free_percpu(priv->percpu_stats); free_percpu(priv->percpu_extras);
- del_ch_napi(priv); - free_dpbp(priv); - free_dpio(priv); - free_dpni(priv); + dpaa2_eth_del_ch_napi(priv); + dpaa2_eth_free_dpbp(priv); + dpaa2_eth_free_dpio(priv); + dpaa2_eth_free_dpni(priv);
fsl_mc_portal_free(priv->mc_io);
diff --combined drivers/net/ethernet/hisilicon/hns/hns_enet.c index 3af33ade7b60,22522f8a5299..b13f3a5cdf59 --- a/drivers/net/ethernet/hisilicon/hns/hns_enet.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.c @@@ -557,7 -557,10 +557,7 @@@ static int hns_nic_poll_rx_skb(struct h va = (unsigned char *)desc_cb->buf + desc_cb->page_offset;
/* prefetch first cache line of first page */ - prefetch(va); -#if L1_CACHE_BYTES < 128 - prefetch(va + L1_CACHE_BYTES); -#endif + net_prefetch(va);
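hns, hns3 and igb in this merge all drop the same open-coded two-line prefetch in favour of the net_prefetch() helper. A rough sketch of what the helper is assumed to look like, mirroring exactly the pattern it replaces (the authoritative definition lives in include/linux/netdevice.h; prefetch() and L1_CACHE_BYTES come from linux/prefetch.h and linux/cache.h):

/* Prefetch up to 128 bytes of packet header: pull in a second
 * cache line on architectures whose L1 line is smaller than 128B.
 */
static inline void net_prefetch(void *p)
{
        prefetch(p);
#if L1_CACHE_BYTES < 128
        prefetch((u8 *)p + L1_CACHE_BYTES);
#endif
}

Centralizing this keeps the cache-line-size policy in one place instead of in per-driver copies.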
skb = *out_skb = napi_alloc_skb(&ring_data->napi, HNS_RX_HEAD_SIZE); @@@ -2279,8 -2282,10 +2279,10 @@@ static int hns_nic_dev_probe(struct pla priv->enet_ver = AE_VERSION_1; else if (acpi_dev_found(hns_enet_acpi_match[1].id)) priv->enet_ver = AE_VERSION_2; - else - return -ENXIO; + else { + ret = -ENXIO; + goto out_read_prop_fail; + }
/* try to find port-idx-in-ae first */ ret = acpi_node_get_property_reference(dev->fwnode, @@@ -2296,7 -2301,8 +2298,8 @@@ priv->fwnode = args.fwnode; } else { dev_err(dev, "cannot read cfg data from OF or acpi\n"); - return -ENXIO; + ret = -ENXIO; + goto out_read_prop_fail; }
ret = device_property_read_u32(dev, "port-idx-in-ae", &port_id); diff --combined drivers/net/ethernet/hisilicon/hns3/hns3_enet.c index 1a1ba6a41bfe,a4f1d515e5e0..47ab2a5c7391 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c @@@ -21,6 -21,7 +21,7 @@@ #include <net/pkt_cls.h> #include <net/tcp.h> #include <net/vxlan.h> + #include <net/geneve.h>
#include "hnae3.h" #include "hns3_enet.h" @@@ -780,7 -781,7 +781,7 @@@ static int hns3_get_l4_protocol(struct * and it is udp packet, which has a dest port as the IANA assigned. * the hardware is expected to do the checksum offload, but the * hardware will not do the checksum offload when udp dest port is - * 4789. + * 4789 or 6081. */ static bool hns3_tunnel_csum_bug(struct sk_buff *skb) { @@@ -789,7 -790,8 +790,8 @@@ l4.hdr = skb_transport_header(skb);
if (!(!skb->encapsulation && - l4.udp->dest == htons(IANA_VXLAN_UDP_PORT))) + (l4.udp->dest == htons(IANA_VXLAN_UDP_PORT) || + l4.udp->dest == htons(GENEVE_UDP_PORT)))) return false;
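The predicate above is easiest to read with the negation pushed inward: hns3_tunnel_csum_bug() now returns early unless the frame is non-encapsulated UDP aimed at the VXLAN or GENEVE well-known port. An equivalent form, for readability only (hypothetical helper name; IANA_VXLAN_UDP_PORT is 4789, GENEVE_UDP_PORT is 6081):

/* Sketch: when does the checksum-offload workaround apply? */
static bool csum_bug_applies(const struct sk_buff *skb, __be16 dport)
{
        if (skb->encapsulation)
                return false;
        return dport == htons(IANA_VXLAN_UDP_PORT) ||   /* 4789 */
               dport == htons(GENEVE_UDP_PORT);         /* 6081 */
}

When it applies, the driver falls through to the skb_checksum_help() call shown next instead of trusting the hardware offload.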
skb_checksum_help(skb); @@@ -2746,7 -2748,7 +2748,7 @@@ static void hns3_rx_checksum(struct hns case HNS3_OL4_TYPE_MAC_IN_UDP: case HNS3_OL4_TYPE_NVGRE: skb->csum_level = 1; - /* fall through */ + fallthrough; case HNS3_OL4_TYPE_NO_TUN: l3_type = hnae3_get_field(l234info, HNS3_RXD_L3ID_M, HNS3_RXD_L3ID_S); @@@ -3091,7 -3093,10 +3093,7 @@@ static int hns3_handle_rx_bd(struct hns * lines. In such a case, single fetch would suffice to cache in the * relevant part of the header. */ - prefetch(ring->va); -#if L1_CACHE_BYTES < 128 - prefetch(ring->va + L1_CACHE_BYTES); -#endif + net_prefetch(ring->va);
if (!skb) { ret = hns3_alloc_skb(ring, length, ring->va); diff --combined drivers/net/ethernet/ibm/ibmvnic.c index 994358689de9,d3a774331afc..6b619c190239 --- a/drivers/net/ethernet/ibm/ibmvnic.c +++ b/drivers/net/ethernet/ibm/ibmvnic.c @@@ -104,7 -104,8 +104,7 @@@ static int send_login(struct ibmvnic_ad static void send_cap_queries(struct ibmvnic_adapter *adapter); static int init_sub_crqs(struct ibmvnic_adapter *); static int init_sub_crq_irqs(struct ibmvnic_adapter *adapter); -static int ibmvnic_init(struct ibmvnic_adapter *); -static int ibmvnic_reset_init(struct ibmvnic_adapter *); +static int ibmvnic_reset_init(struct ibmvnic_adapter *, bool reset); static void release_crq_queue(struct ibmvnic_adapter *); static int __ibmvnic_set_mac(struct net_device *, u8 *); static int init_crq_queue(struct ibmvnic_adapter *adapter); @@@ -296,7 -297,8 +296,7 @@@ static void deactivate_rx_pools(struct { int i;
- for (i = 0; i < be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs); - i++) + for (i = 0; i < adapter->num_active_rx_pools; i++) adapter->rx_pool[i].active = 0; }
@@@ -304,7 -306,6 +304,7 @@@ static void replenish_rx_pool(struct ib struct ibmvnic_rx_pool *pool) { int count = pool->size - atomic_read(&pool->available); + u64 handle = adapter->rx_scrq[pool->index]->handle; struct device *dev = &adapter->vdev->dev; int buffers_added = 0; unsigned long lpar_rc; @@@ -313,6 -314,7 +313,6 @@@ unsigned int offset; dma_addr_t dma_addr; unsigned char *dst; - u64 *handle_array; int shift = 0; int index; int i; @@@ -320,6 -322,10 +320,6 @@@ if (!pool->active) return;
- handle_array = (u64 *)((u8 *)(adapter->login_rsp_buf) + - be32_to_cpu(adapter->login_rsp_buf-> - off_rxadd_subcrqs)); - for (i = 0; i < count; ++i) { skb = alloc_skb(pool->buff_size, GFP_ATOMIC); if (!skb) { @@@ -363,7 -369,8 +363,7 @@@ #endif sub_crq.rx_add.len = cpu_to_be32(pool->buff_size << shift);
- lpar_rc = send_subcrq(adapter, handle_array[pool->index], - &sub_crq); + lpar_rc = send_subcrq(adapter, handle, &sub_crq); if (lpar_rc != H_SUCCESS) goto failure;
@@@ -400,7 -407,8 +400,7 @@@ static void replenish_pools(struct ibmv int i;
adapter->replenish_task_cycles++; - for (i = 0; i < be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs); - i++) { + for (i = 0; i < adapter->num_active_rx_pools; i++) { if (adapter->rx_pool[i].active) replenish_rx_pool(adapter, &adapter->rx_pool[i]); } @@@ -467,20 -475,25 +467,23 @@@ static int init_stats_token(struct ibmv static int reset_rx_pools(struct ibmvnic_adapter *adapter) { struct ibmvnic_rx_pool *rx_pool; + u64 buff_size; int rx_scrqs; int i, j, rc; - u64 *size_array;
+ if (!adapter->rx_pool) + return -1; + - size_array = (u64 *)((u8 *)(adapter->login_rsp_buf) + - be32_to_cpu(adapter->login_rsp_buf->off_rxadd_buff_size)); - - rx_scrqs = be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs); + buff_size = adapter->cur_rx_buf_sz; + rx_scrqs = adapter->num_active_rx_pools; for (i = 0; i < rx_scrqs; i++) { rx_pool = &adapter->rx_pool[i];
netdev_dbg(adapter->netdev, "Re-setting rx_pool[%d]\n", i);
- if (rx_pool->buff_size != be64_to_cpu(size_array[i])) { + if (rx_pool->buff_size != buff_size) { free_long_term_buff(adapter, &rx_pool->long_term_buff); - rx_pool->buff_size = be64_to_cpu(size_array[i]); + rx_pool->buff_size = buff_size; rc = alloc_long_term_buff(adapter, &rx_pool->long_term_buff, rx_pool->size * @@@ -548,11 -561,13 +551,11 @@@ static int init_rx_pools(struct net_dev struct device *dev = &adapter->vdev->dev; struct ibmvnic_rx_pool *rx_pool; int rxadd_subcrqs; - u64 *size_array; + u64 buff_size; int i, j;
- rxadd_subcrqs = - be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs); - size_array = (u64 *)((u8 *)(adapter->login_rsp_buf) + - be32_to_cpu(adapter->login_rsp_buf->off_rxadd_buff_size)); + rxadd_subcrqs = adapter->num_active_rx_scrqs; + buff_size = adapter->cur_rx_buf_sz;
adapter->rx_pool = kcalloc(rxadd_subcrqs, sizeof(struct ibmvnic_rx_pool), @@@ -570,11 -585,11 +573,11 @@@ netdev_dbg(adapter->netdev, "Initializing rx_pool[%d], %lld buffs, %lld bytes each\n", i, adapter->req_rx_add_entries_per_subcrq, - be64_to_cpu(size_array[i])); + buff_size);
rx_pool->size = adapter->req_rx_add_entries_per_subcrq; rx_pool->index = i; - rx_pool->buff_size = be64_to_cpu(size_array[i]); + rx_pool->buff_size = buff_size; rx_pool->active = 1;
rx_pool->free_map = kcalloc(rx_pool->size, sizeof(int), @@@ -637,7 -652,10 +640,10 @@@ static int reset_tx_pools(struct ibmvni int tx_scrqs; int i, rc;
+ if (!adapter->tx_pool) + return -1; + - tx_scrqs = be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs); + tx_scrqs = adapter->num_active_tx_pools; for (i = 0; i < tx_scrqs; i++) { rc = reset_one_tx_pool(adapter, &adapter->tso_pool[i]); if (rc) @@@ -726,7 -744,7 +732,7 @@@ static int init_tx_pools(struct net_dev int tx_subcrqs; int i, rc;
- tx_subcrqs = be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs); + tx_subcrqs = adapter->num_active_tx_scrqs; adapter->tx_pool = kcalloc(tx_subcrqs, sizeof(struct ibmvnic_tx_pool), GFP_KERNEL); if (!adapter->tx_pool) @@@ -962,7 -980,7 +968,7 @@@ static int set_link_state(struct ibmvni return -1; }
- if (adapter->init_done_rc == 1) { + if (adapter->init_done_rc == PARTIALSUCCESS) { /* Partial success, delay and re-send */ mdelay(1000); resend = true; @@@ -1512,9 -1530,9 +1518,9 @@@ static netdev_tx_t ibmvnic_xmit(struct unsigned int offset; int num_entries = 1; unsigned char *dst; - u64 *handle_array; int index = 0; u8 proto = 0; + u64 handle; netdev_tx_t ret = NETDEV_TX_OK;
if (test_bit(0, &adapter->resetting)) { @@@ -1541,7 -1559,8 +1547,7 @@@
tx_scrq = adapter->tx_scrq[queue_num]; txq = netdev_get_tx_queue(netdev, skb_get_queue_mapping(skb)); - handle_array = (u64 *)((u8 *)(adapter->login_rsp_buf) + - be32_to_cpu(adapter->login_rsp_buf->off_txsubm_subcrqs)); + handle = tx_scrq->handle;
index = tx_pool->free_map[tx_pool->consumer_index];
@@@ -1653,14 -1672,14 +1659,14 @@@ ret = NETDEV_TX_OK; goto tx_err_out; } - lpar_rc = send_subcrq_indirect(adapter, handle_array[queue_num], + lpar_rc = send_subcrq_indirect(adapter, handle, (u64)tx_buff->indir_dma, (u64)num_entries); dma_unmap_single(dev, tx_buff->indir_dma, sizeof(tx_buff->indir_arr), DMA_TO_DEVICE); } else { tx_buff->num_entries = num_entries; - lpar_rc = send_subcrq(adapter, handle_array[queue_num], + lpar_rc = send_subcrq(adapter, handle, &tx_crq); } if (lpar_rc != H_SUCCESS) { @@@ -1855,7 -1874,7 +1861,7 @@@ static int do_change_param_reset(struc return rc; }
- rc = ibmvnic_reset_init(adapter); + rc = ibmvnic_reset_init(adapter, true); if (rc) return IBMVNIC_INIT_FAILED;
@@@ -1973,7 -1992,7 +1979,7 @@@ static int do_reset(struct ibmvnic_adap goto out; }
- rc = ibmvnic_reset_init(adapter); + rc = ibmvnic_reset_init(adapter, true); if (rc) { rc = IBMVNIC_INIT_FAILED; goto out; @@@ -1998,7 -2017,10 +2004,10 @@@ adapter->req_rx_add_entries_per_subcrq != old_num_rx_slots || adapter->req_tx_entries_per_subcrq != - old_num_tx_slots) { + old_num_tx_slots || + !adapter->rx_pool || + !adapter->tso_pool || + !adapter->tx_pool) { release_rx_pools(adapter); release_tx_pools(adapter); release_napi(adapter); @@@ -2011,10 -2033,14 +2020,14 @@@ } else { rc = reset_tx_pools(adapter); if (rc) + netdev_dbg(adapter->netdev, "reset tx pools failed (%d)\n", + rc); goto out;
rc = reset_rx_pools(adapter); if (rc) + netdev_dbg(adapter->netdev, "reset rx pools failed (%d)\n", + rc); goto out; } ibmvnic_disable_irqs(adapter); @@@ -2080,7 -2106,7 +2093,7 @@@ static int do_hard_reset(struct ibmvnic return rc; }
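One caveat in the two netdev_dbg() hunks just above: the prints are inserted between an unbraced if (rc) and the pre-existing goto out, so as merged the goto is no longer conditional. After reset_tx_pools() the code always jumps to out, and reset_rx_pools() is never reached on this path; braces around each print-plus-goto pair would be needed to restore the intended flow.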
- rc = ibmvnic_init(adapter); + rc = ibmvnic_reset_init(adapter, false); if (rc) return rc;
@@@ -3555,7 -3581,8 +3568,7 @@@ static int ibmvnic_send_crq(struct ibmv if (rc) { if (rc == H_CLOSED) { dev_warn(dev, "CRQ Queue closed\n"); - if (test_bit(0, &adapter->resetting)) - ibmvnic_reset(adapter, VNIC_RESET_FATAL); + /* do not reset, report the fail, wait for passive init from server */ }
dev_warn(dev, "Send error (rc=%d)\n", rc); @@@ -3566,31 -3593,14 +3579,31 @@@
static int ibmvnic_send_crq_init(struct ibmvnic_adapter *adapter) { + struct device *dev = &adapter->vdev->dev; union ibmvnic_crq crq; + int retries = 100; + int rc;
memset(&crq, 0, sizeof(crq)); crq.generic.first = IBMVNIC_CRQ_INIT_CMD; crq.generic.cmd = IBMVNIC_CRQ_INIT; netdev_dbg(adapter->netdev, "Sending CRQ init\n");
- return ibmvnic_send_crq(adapter, &crq); + do { + rc = ibmvnic_send_crq(adapter, &crq); + if (rc != H_CLOSED) + break; + retries--; + msleep(50); + + } while (retries > 0); + + if (rc) { + dev_err(dev, "Failed to send init request, rc = %d\n", rc); + return rc; + } + + return 0; }
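ibmvnic_send_crq_init() no longer fails fast: it retries while the hypervisor reports H_CLOSED, up to 100 attempts with a 50 ms sleep between them, giving the server side roughly five seconds to open its end of the CRQ. Any other return code, success included, breaks out immediately. The same bounded-retry shape in isolation (a sketch; op() and TRANSIENT are placeholders for ibmvnic_send_crq() and H_CLOSED):

extern int op(void);            /* placeholder for the real send */
#define TRANSIENT (-1)          /* placeholder for H_CLOSED */

static int send_with_retry(void)
{
        int retries = 100;      /* ~5 s total at 50 ms per attempt */
        int rc;

        do {
                rc = op();
                if (rc != TRANSIENT)
                        break;  /* success or a hard error */
                msleep(50);
        } while (--retries > 0);

        return rc;              /* may still be TRANSIENT */
}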
static int send_version_xchg(struct ibmvnic_adapter *adapter) @@@ -4295,11 -4305,6 +4308,11 @@@ static int handle_login_rsp(union ibmvn struct net_device *netdev = adapter->netdev; struct ibmvnic_login_rsp_buffer *login_rsp = adapter->login_rsp_buf; struct ibmvnic_login_buffer *login = adapter->login_buf; + u64 *tx_handle_array; + u64 *rx_handle_array; + int num_tx_pools; + int num_rx_pools; + u64 *size_array; int i;
dma_unmap_single(dev, adapter->login_buf_token, adapter->login_buf_sz, @@@ -4334,30 -4339,6 +4347,30 @@@ ibmvnic_remove(adapter->vdev); return -EIO; } + size_array = (u64 *)((u8 *)(adapter->login_rsp_buf) + + be32_to_cpu(adapter->login_rsp_buf->off_rxadd_buff_size)); + /* variable buffer sizes are not supported, so just read the + * first entry. + */ + adapter->cur_rx_buf_sz = be64_to_cpu(size_array[0]); + + num_tx_pools = be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs); + num_rx_pools = be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs); + + tx_handle_array = (u64 *)((u8 *)(adapter->login_rsp_buf) + + be32_to_cpu(adapter->login_rsp_buf->off_txsubm_subcrqs)); + rx_handle_array = (u64 *)((u8 *)(adapter->login_rsp_buf) + + be32_to_cpu(adapter->login_rsp_buf->off_rxadd_subcrqs)); + + for (i = 0; i < num_tx_pools; i++) + adapter->tx_scrq[i]->handle = tx_handle_array[i]; + + for (i = 0; i < num_rx_pools; i++) + adapter->rx_scrq[i]->handle = rx_handle_array[i]; + + adapter->num_active_tx_scrqs = num_tx_pools; + adapter->num_active_rx_scrqs = num_rx_pools; + release_login_rsp_buffer(adapter); release_login_buffer(adapter); complete(&adapter->init_done);
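This hunk is the core of the use-after-free fix mentioned in the merge message: the login response buffer describes its payload with big-endian byte offsets, and handle_login_rsp() now copies everything the data path later needs (per-queue handles, the first buffer size, active queue counts) into the adapter and sub-CRQ structures before release_login_rsp_buffer() frees the buffer. The parsing pattern in isolation (a sketch; off and num stand in for the real __be32 fields of the response):

/* Copy out a u64 array that a response buffer describes by byte
 * offset, so nothing dereferences the buffer after it is released.
 */
static void cache_u64_array(void *rsp, __be32 off, __be32 num, u64 *out)
{
        u64 *arr = (u64 *)((u8 *)rsp + be32_to_cpu(off));
        int i, n = be32_to_cpu(num);

        for (i = 0; i < n; i++)
                out[i] = arr[i];
}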
@@@ -5003,7 -4984,7 +5016,7 @@@ map_failed return retrc; }
-static int ibmvnic_reset_init(struct ibmvnic_adapter *adapter) +static int ibmvnic_reset_init(struct ibmvnic_adapter *adapter, bool reset) { struct device *dev = &adapter->vdev->dev; unsigned long timeout = msecs_to_jiffies(30000); @@@ -5012,19 -4993,12 +5025,19 @@@
adapter->from_passive_init = false;
- old_num_rx_queues = adapter->req_rx_queues; - old_num_tx_queues = adapter->req_tx_queues; + if (reset) { + old_num_rx_queues = adapter->req_rx_queues; + old_num_tx_queues = adapter->req_tx_queues; + reinit_completion(&adapter->init_done); + }
- reinit_completion(&adapter->init_done); adapter->init_done_rc = 0; - ibmvnic_send_crq_init(adapter); + rc = ibmvnic_send_crq_init(adapter); + if (rc) { + dev_err(dev, "Send crq init failed with error %d\n", rc); + return rc; + } + if (!wait_for_completion_timeout(&adapter->init_done, timeout)) { dev_err(dev, "Initialization sequence timed out\n"); return -1; @@@ -5035,8 -5009,13 +5048,8 @@@ return adapter->init_done_rc; }
- if (adapter->from_passive_init) { - adapter->state = VNIC_OPEN; - adapter->from_passive_init = false; - return -1; - } - - if (test_bit(0, &adapter->resetting) && !adapter->wait_for_reset && + if (reset && + test_bit(0, &adapter->resetting) && !adapter->wait_for_reset && adapter->reset_reason != VNIC_RESET_MOBILITY) { if (adapter->req_rx_queues != old_num_rx_queues || adapter->req_tx_queues != old_num_tx_queues) { @@@ -5064,6 -5043,48 +5077,6 @@@ return rc; }
-static int ibmvnic_init(struct ibmvnic_adapter *adapter) -{ - struct device *dev = &adapter->vdev->dev; - unsigned long timeout = msecs_to_jiffies(30000); - int rc; - - adapter->from_passive_init = false; - - adapter->init_done_rc = 0; - ibmvnic_send_crq_init(adapter); - if (!wait_for_completion_timeout(&adapter->init_done, timeout)) { - dev_err(dev, "Initialization sequence timed out\n"); - return -1; - } - - if (adapter->init_done_rc) { - release_crq_queue(adapter); - return adapter->init_done_rc; - } - - if (adapter->from_passive_init) { - adapter->state = VNIC_OPEN; - adapter->from_passive_init = false; - return -1; - } - - rc = init_sub_crqs(adapter); - if (rc) { - dev_err(dev, "Initialization of sub crqs failed\n"); - release_crq_queue(adapter); - return rc; - } - - rc = init_sub_crq_irqs(adapter); - if (rc) { - dev_err(dev, "Failed to initialize sub crq irqs\n"); - release_crq_queue(adapter); - } - - return rc; -} - static struct device_attribute dev_attr_failover;
static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id) @@@ -5126,7 -5147,7 +5139,7 @@@ goto ibmvnic_init_fail; }
- rc = ibmvnic_init(adapter); + rc = ibmvnic_reset_init(adapter, false); if (rc && rc != EAGAIN) goto ibmvnic_init_fail; } while (rc == EAGAIN); @@@ -5276,7 -5297,8 +5289,7 @@@ static unsigned long ibmvnic_get_desire for (i = 0; i < adapter->req_tx_queues + adapter->req_rx_queues; i++) ret += 4 * PAGE_SIZE; /* the scrq message queue */
- for (i = 0; i < be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs); - i++) + for (i = 0; i < adapter->num_active_rx_pools; i++) ret += adapter->rx_pool[i].size * IOMMU_PAGE_ALIGN(adapter->rx_pool[i].buff_size, tbl);
diff --combined drivers/net/ethernet/intel/igb/igb_main.c index 698bb6a4b088,d9c3a6b169f9..e1e37d0b7703 --- a/drivers/net/ethernet/intel/igb/igb_main.c +++ b/drivers/net/ethernet/intel/igb/igb_main.c @@@ -718,7 -718,6 +718,6 @@@ static void igb_cache_ring_register(str case e1000_i354: case e1000_i210: case e1000_i211: - fallthrough; default: for (; i < adapter->num_rx_queues; i++) adapter->rx_ring[i]->reg_idx = rbase_offset + i; @@@ -8047,7 -8046,10 +8046,7 @@@ static struct sk_buff *igb_construct_sk struct sk_buff *skb;
/* prefetch first cache line of first page */ - prefetch(va); -#if L1_CACHE_BYTES < 128 - prefetch(va + L1_CACHE_BYTES); -#endif + net_prefetch(va);
/* allocate a skb to store the frags */ skb = napi_alloc_skb(&rx_ring->q_vector->napi, IGB_RX_HDR_LEN); @@@ -8101,7 -8103,10 +8100,7 @@@ static struct sk_buff *igb_build_skb(st struct sk_buff *skb;
/* prefetch first cache line of first page */ - prefetch(va); -#if L1_CACHE_BYTES < 128 - prefetch(va + L1_CACHE_BYTES); -#endif + net_prefetch(va);
/* build an skb around the page buffer */ skb = build_skb(va - IGB_SKB_PAD, truesize); diff --combined drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c index d0bbe3a64b8d,6e140d1b8967..ee8b6a9037ce --- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c @@@ -57,7 -57,13 +57,7 @@@ static struct /* The prototype is added here to be used in start_dev when using ACPI. This * will be removed once phylink is used for all modes (dt+ACPI). */ -static void mvpp2_mac_config(struct phylink_config *config, unsigned int mode, - const struct phylink_link_state *state); -static void mvpp2_mac_link_up(struct phylink_config *config, - struct phy_device *phy, - unsigned int mode, phy_interface_t interface, - int speed, int duplex, - bool tx_pause, bool rx_pause); +static void mvpp2_acpi_start(struct mvpp2_port *port);
/* Queue modes */ #define MVPP2_QDIST_SINGLE_MODE 0 @@@ -1479,8 -1485,8 +1479,8 @@@ static void mvpp2_port_loopback_set(str else val &= ~MVPP2_GMAC_GMII_LB_EN_MASK;
- if (phy_interface_mode_is_8023z(port->phy_interface) || - port->phy_interface == PHY_INTERFACE_MODE_SGMII) + if (phy_interface_mode_is_8023z(state->interface) || + state->interface == PHY_INTERFACE_MODE_SGMII) val |= MVPP2_GMAC_PCS_LB_EN_MASK; else val &= ~MVPP2_GMAC_PCS_LB_EN_MASK; @@@ -4001,7 -4007,17 +4001,7 @@@ static void mvpp2_start_dev(struct mvpp if (port->phylink) { phylink_start(port->phylink); } else { - /* Phylink isn't used as of now for ACPI, so the MAC has to be - * configured manually when the interface is started. This will - * be removed as soon as the phylink ACPI support lands in. - */ - struct phylink_link_state state = { - .interface = port->phy_interface, - }; - mvpp2_mac_config(&port->phylink_config, MLO_AN_INBAND, &state); - mvpp2_mac_link_up(&port->phylink_config, NULL, - MLO_AN_INBAND, port->phy_interface, - SPEED_UNKNOWN, DUPLEX_UNKNOWN, false, false); + mvpp2_acpi_start(port); }
netif_tx_start_all_queues(port->dev); @@@ -5376,155 -5392,6 +5376,155 @@@ static struct mvpp2_port *mvpp2_phylink return container_of(config, struct mvpp2_port, phylink_config); }
+static struct mvpp2_port *mvpp2_pcs_to_port(struct phylink_pcs *pcs) +{ + return container_of(pcs, struct mvpp2_port, phylink_pcs); +} + +static void mvpp2_xlg_pcs_get_state(struct phylink_pcs *pcs, + struct phylink_link_state *state) +{ + struct mvpp2_port *port = mvpp2_pcs_to_port(pcs); + u32 val; + + state->speed = SPEED_10000; + state->duplex = 1; + state->an_complete = 1; + + val = readl(port->base + MVPP22_XLG_STATUS); + state->link = !!(val & MVPP22_XLG_STATUS_LINK_UP); + + state->pause = 0; + val = readl(port->base + MVPP22_XLG_CTRL0_REG); + if (val & MVPP22_XLG_CTRL0_TX_FLOW_CTRL_EN) + state->pause |= MLO_PAUSE_TX; + if (val & MVPP22_XLG_CTRL0_RX_FLOW_CTRL_EN) + state->pause |= MLO_PAUSE_RX; +} + +static int mvpp2_xlg_pcs_config(struct phylink_pcs *pcs, + unsigned int mode, + phy_interface_t interface, + const unsigned long *advertising, + bool permit_pause_to_mac) +{ + return 0; +} + +static const struct phylink_pcs_ops mvpp2_phylink_xlg_pcs_ops = { + .pcs_get_state = mvpp2_xlg_pcs_get_state, + .pcs_config = mvpp2_xlg_pcs_config, +}; + +static void mvpp2_gmac_pcs_get_state(struct phylink_pcs *pcs, + struct phylink_link_state *state) +{ + struct mvpp2_port *port = mvpp2_pcs_to_port(pcs); + u32 val; + + val = readl(port->base + MVPP2_GMAC_STATUS0); + + state->an_complete = !!(val & MVPP2_GMAC_STATUS0_AN_COMPLETE); + state->link = !!(val & MVPP2_GMAC_STATUS0_LINK_UP); + state->duplex = !!(val & MVPP2_GMAC_STATUS0_FULL_DUPLEX); + + switch (port->phy_interface) { + case PHY_INTERFACE_MODE_1000BASEX: + state->speed = SPEED_1000; + break; + case PHY_INTERFACE_MODE_2500BASEX: + state->speed = SPEED_2500; + break; + default: + if (val & MVPP2_GMAC_STATUS0_GMII_SPEED) + state->speed = SPEED_1000; + else if (val & MVPP2_GMAC_STATUS0_MII_SPEED) + state->speed = SPEED_100; + else + state->speed = SPEED_10; + } + + state->pause = 0; + if (val & MVPP2_GMAC_STATUS0_RX_PAUSE) + state->pause |= MLO_PAUSE_RX; + if (val & MVPP2_GMAC_STATUS0_TX_PAUSE) + state->pause |= MLO_PAUSE_TX; +} + +static int mvpp2_gmac_pcs_config(struct phylink_pcs *pcs, unsigned int mode, + phy_interface_t interface, + const unsigned long *advertising, + bool permit_pause_to_mac) +{ + struct mvpp2_port *port = mvpp2_pcs_to_port(pcs); + u32 mask, val, an, old_an, changed; + + mask = MVPP2_GMAC_IN_BAND_AUTONEG_BYPASS | + MVPP2_GMAC_IN_BAND_AUTONEG | + MVPP2_GMAC_AN_SPEED_EN | + MVPP2_GMAC_FLOW_CTRL_AUTONEG | + MVPP2_GMAC_AN_DUPLEX_EN; + + if (phylink_autoneg_inband(mode)) { + mask |= MVPP2_GMAC_CONFIG_MII_SPEED | + MVPP2_GMAC_CONFIG_GMII_SPEED | + MVPP2_GMAC_CONFIG_FULL_DUPLEX; + val = MVPP2_GMAC_IN_BAND_AUTONEG; + + if (interface == PHY_INTERFACE_MODE_SGMII) { + /* SGMII mode receives the speed and duplex from PHY */ + val |= MVPP2_GMAC_AN_SPEED_EN | + MVPP2_GMAC_AN_DUPLEX_EN; + } else { + /* 802.3z mode has fixed speed and duplex */ + val |= MVPP2_GMAC_CONFIG_GMII_SPEED | + MVPP2_GMAC_CONFIG_FULL_DUPLEX; + + /* The FLOW_CTRL_AUTONEG bit selects either the hardware + * automatically or the bits in MVPP22_GMAC_CTRL_4_REG + * manually controls the GMAC pause modes. 
+ */ + if (permit_pause_to_mac) + val |= MVPP2_GMAC_FLOW_CTRL_AUTONEG; + + /* Configure advertisement bits */ + mask |= MVPP2_GMAC_FC_ADV_EN | MVPP2_GMAC_FC_ADV_ASM_EN; + if (phylink_test(advertising, Pause)) + val |= MVPP2_GMAC_FC_ADV_EN; + if (phylink_test(advertising, Asym_Pause)) + val |= MVPP2_GMAC_FC_ADV_ASM_EN; + } + } else { + val = 0; + } + + old_an = an = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG); + an = (an & ~mask) | val; + changed = an ^ old_an; + if (changed) + writel(an, port->base + MVPP2_GMAC_AUTONEG_CONFIG); + + /* We are only interested in the advertisement bits changing */ + return changed & (MVPP2_GMAC_FC_ADV_EN | MVPP2_GMAC_FC_ADV_ASM_EN); +} + +static void mvpp2_gmac_pcs_an_restart(struct phylink_pcs *pcs) +{ + struct mvpp2_port *port = mvpp2_pcs_to_port(pcs); + u32 val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG); + + writel(val | MVPP2_GMAC_IN_BAND_RESTART_AN, + port->base + MVPP2_GMAC_AUTONEG_CONFIG); + writel(val & ~MVPP2_GMAC_IN_BAND_RESTART_AN, + port->base + MVPP2_GMAC_AUTONEG_CONFIG); +} + +static const struct phylink_pcs_ops mvpp2_phylink_gmac_pcs_ops = { + .pcs_get_state = mvpp2_gmac_pcs_get_state, + .pcs_config = mvpp2_gmac_pcs_config, + .pcs_an_restart = mvpp2_gmac_pcs_an_restart, +}; + static void mvpp2_phylink_validate(struct phylink_config *config, unsigned long *supported, struct phylink_link_state *state) @@@ -5570,7 -5437,7 +5570,7 @@@ } if (state->interface != PHY_INTERFACE_MODE_NA) break; - /* Fall-through */ + fallthrough; case PHY_INTERFACE_MODE_RGMII: case PHY_INTERFACE_MODE_RGMII_ID: case PHY_INTERFACE_MODE_RGMII_RXID: @@@ -5584,7 -5451,7 +5584,7 @@@ phylink_set(mask, 1000baseX_Full); if (state->interface != PHY_INTERFACE_MODE_NA) break; - /* Fall-through */ + fallthrough; case PHY_INTERFACE_MODE_1000BASEX: case PHY_INTERFACE_MODE_2500BASEX: if (port->comphy || @@@ -5613,6 -5480,89 +5613,6 @@@ empty_set bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS); }
-static void mvpp22_xlg_pcs_get_state(struct mvpp2_port *port, - struct phylink_link_state *state) -{ - u32 val; - - state->speed = SPEED_10000; - state->duplex = 1; - state->an_complete = 1; - - val = readl(port->base + MVPP22_XLG_STATUS); - state->link = !!(val & MVPP22_XLG_STATUS_LINK_UP); - - state->pause = 0; - val = readl(port->base + MVPP22_XLG_CTRL0_REG); - if (val & MVPP22_XLG_CTRL0_TX_FLOW_CTRL_EN) - state->pause |= MLO_PAUSE_TX; - if (val & MVPP22_XLG_CTRL0_RX_FLOW_CTRL_EN) - state->pause |= MLO_PAUSE_RX; -} - -static void mvpp2_gmac_pcs_get_state(struct mvpp2_port *port, - struct phylink_link_state *state) -{ - u32 val; - - val = readl(port->base + MVPP2_GMAC_STATUS0); - - state->an_complete = !!(val & MVPP2_GMAC_STATUS0_AN_COMPLETE); - state->link = !!(val & MVPP2_GMAC_STATUS0_LINK_UP); - state->duplex = !!(val & MVPP2_GMAC_STATUS0_FULL_DUPLEX); - - switch (port->phy_interface) { - case PHY_INTERFACE_MODE_1000BASEX: - state->speed = SPEED_1000; - break; - case PHY_INTERFACE_MODE_2500BASEX: - state->speed = SPEED_2500; - break; - default: - if (val & MVPP2_GMAC_STATUS0_GMII_SPEED) - state->speed = SPEED_1000; - else if (val & MVPP2_GMAC_STATUS0_MII_SPEED) - state->speed = SPEED_100; - else - state->speed = SPEED_10; - } - - state->pause = 0; - if (val & MVPP2_GMAC_STATUS0_RX_PAUSE) - state->pause |= MLO_PAUSE_RX; - if (val & MVPP2_GMAC_STATUS0_TX_PAUSE) - state->pause |= MLO_PAUSE_TX; -} - -static void mvpp2_phylink_mac_pcs_get_state(struct phylink_config *config, - struct phylink_link_state *state) -{ - struct mvpp2_port *port = mvpp2_phylink_to_port(config); - - if (port->priv->hw_version == MVPP22 && port->gop_id == 0) { - u32 mode = readl(port->base + MVPP22_XLG_CTRL3_REG); - mode &= MVPP22_XLG_CTRL3_MACMODESELECT_MASK; - - if (mode == MVPP22_XLG_CTRL3_MACMODESELECT_10G) { - mvpp22_xlg_pcs_get_state(port, state); - return; - } - } - - mvpp2_gmac_pcs_get_state(port, state); -} - -static void mvpp2_mac_an_restart(struct phylink_config *config) -{ - struct mvpp2_port *port = mvpp2_phylink_to_port(config); - u32 val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG); - - writel(val | MVPP2_GMAC_IN_BAND_RESTART_AN, - port->base + MVPP2_GMAC_AUTONEG_CONFIG); - writel(val & ~MVPP2_GMAC_IN_BAND_RESTART_AN, - port->base + MVPP2_GMAC_AUTONEG_CONFIG); -} - static void mvpp2_xlg_config(struct mvpp2_port *port, unsigned int mode, const struct phylink_link_state *state) { @@@ -5636,16 -5586,23 +5636,16 @@@ static void mvpp2_gmac_config(struct mvpp2_port *port, unsigned int mode, const struct phylink_link_state *state) { - u32 old_an, an; u32 old_ctrl0, ctrl0; u32 old_ctrl2, ctrl2; u32 old_ctrl4, ctrl4;
- old_an = an = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG); old_ctrl0 = ctrl0 = readl(port->base + MVPP2_GMAC_CTRL_0_REG); old_ctrl2 = ctrl2 = readl(port->base + MVPP2_GMAC_CTRL_2_REG); old_ctrl4 = ctrl4 = readl(port->base + MVPP22_GMAC_CTRL_4_REG);
- an &= ~(MVPP2_GMAC_AN_SPEED_EN | MVPP2_GMAC_FC_ADV_EN | - MVPP2_GMAC_FC_ADV_ASM_EN | MVPP2_GMAC_FLOW_CTRL_AUTONEG | - MVPP2_GMAC_AN_DUPLEX_EN | MVPP2_GMAC_IN_BAND_AUTONEG | - MVPP2_GMAC_IN_BAND_AUTONEG_BYPASS); ctrl0 &= ~MVPP2_GMAC_PORT_TYPE_MASK; - ctrl2 &= ~(MVPP2_GMAC_INBAND_AN_MASK | MVPP2_GMAC_PORT_RESET_MASK | - MVPP2_GMAC_PCS_ENABLE_MASK); + ctrl2 &= ~(MVPP2_GMAC_INBAND_AN_MASK | MVPP2_GMAC_PCS_ENABLE_MASK);
/* Configure port type */ if (phy_interface_mode_is_8023z(state->interface)) { @@@ -5667,6 -5624,12 +5667,6 @@@ MVPP22_CTRL4_QSGMII_BYPASS_ACTIVE; }
- /* Configure advertisement bits */ - if (phylink_test(state->advertising, Pause)) - an |= MVPP2_GMAC_FC_ADV_EN; - if (phylink_test(state->advertising, Asym_Pause)) - an |= MVPP2_GMAC_FC_ADV_ASM_EN; - /* Configure negotiation style */ if (!phylink_autoneg_inband(mode)) { /* Phy or fixed speed - no in-band AN, nothing to do, leave the @@@ -5675,6 -5638,14 +5675,6 @@@ } else if (state->interface == PHY_INTERFACE_MODE_SGMII) { /* SGMII in-band mode receives the speed and duplex from * the PHY. Flow control information is not received. */ - an &= ~(MVPP2_GMAC_FORCE_LINK_DOWN | - MVPP2_GMAC_FORCE_LINK_PASS | - MVPP2_GMAC_CONFIG_MII_SPEED | - MVPP2_GMAC_CONFIG_GMII_SPEED | - MVPP2_GMAC_CONFIG_FULL_DUPLEX); - an |= MVPP2_GMAC_IN_BAND_AUTONEG | - MVPP2_GMAC_AN_SPEED_EN | - MVPP2_GMAC_AN_DUPLEX_EN; } else if (phy_interface_mode_is_8023z(state->interface)) { /* 1000BaseX and 2500BaseX ports cannot negotiate speed nor can * they negotiate duplex: they are always operating with a fixed @@@ -5682,6 -5653,42 +5682,6 @@@ * speed and full duplex here. */ ctrl0 |= MVPP2_GMAC_PORT_TYPE_MASK; - an &= ~(MVPP2_GMAC_FORCE_LINK_DOWN | - MVPP2_GMAC_FORCE_LINK_PASS | - MVPP2_GMAC_CONFIG_MII_SPEED | - MVPP2_GMAC_CONFIG_GMII_SPEED | - MVPP2_GMAC_CONFIG_FULL_DUPLEX); - an |= MVPP2_GMAC_IN_BAND_AUTONEG | - MVPP2_GMAC_CONFIG_GMII_SPEED | - MVPP2_GMAC_CONFIG_FULL_DUPLEX; - - if (state->pause & MLO_PAUSE_AN && state->an_enabled) - an |= MVPP2_GMAC_FLOW_CTRL_AUTONEG; - } - -/* Some fields of the auto-negotiation register require the port to be down when - * their value is updated. - */ -#define MVPP2_GMAC_AN_PORT_DOWN_MASK \ - (MVPP2_GMAC_IN_BAND_AUTONEG | \ - MVPP2_GMAC_IN_BAND_AUTONEG_BYPASS | \ - MVPP2_GMAC_CONFIG_MII_SPEED | MVPP2_GMAC_CONFIG_GMII_SPEED | \ - MVPP2_GMAC_AN_SPEED_EN | MVPP2_GMAC_CONFIG_FULL_DUPLEX | \ - MVPP2_GMAC_AN_DUPLEX_EN) - - if ((old_ctrl0 ^ ctrl0) & MVPP2_GMAC_PORT_TYPE_MASK || - (old_ctrl2 ^ ctrl2) & MVPP2_GMAC_INBAND_AN_MASK || - (old_an ^ an) & MVPP2_GMAC_AN_PORT_DOWN_MASK) { - /* Force link down */ - old_an &= ~MVPP2_GMAC_FORCE_LINK_PASS; - old_an |= MVPP2_GMAC_FORCE_LINK_DOWN; - writel(old_an, port->base + MVPP2_GMAC_AUTONEG_CONFIG); - - /* Set the GMAC in a reset state - do this in a way that - * ensures we clear it below. - */ - old_ctrl2 |= MVPP2_GMAC_PORT_RESET_MASK; - writel(old_ctrl2, port->base + MVPP2_GMAC_CTRL_2_REG); }
if (old_ctrl0 != ctrl0) @@@ -5690,85 -5697,41 +5690,85 @@@ writel(ctrl2, port->base + MVPP2_GMAC_CTRL_2_REG); if (old_ctrl4 != ctrl4) writel(ctrl4, port->base + MVPP22_GMAC_CTRL_4_REG); - if (old_an != an) - writel(an, port->base + MVPP2_GMAC_AUTONEG_CONFIG); - - if (old_ctrl2 & MVPP2_GMAC_PORT_RESET_MASK) { - while (readl(port->base + MVPP2_GMAC_CTRL_2_REG) & - MVPP2_GMAC_PORT_RESET_MASK) - continue; - } }
-static void mvpp2_mac_config(struct phylink_config *config, unsigned int mode, - const struct phylink_link_state *state) +static int mvpp2__mac_prepare(struct phylink_config *config, unsigned int mode, + phy_interface_t interface) { struct mvpp2_port *port = mvpp2_phylink_to_port(config); - bool change_interface = port->phy_interface != state->interface;
/* Check for invalid configuration */ - if (mvpp2_is_xlg(state->interface) && port->gop_id != 0) { + if (mvpp2_is_xlg(interface) && port->gop_id != 0) { netdev_err(port->dev, "Invalid mode on %s\n", port->dev->name); - return; + return -EINVAL; + } + + if (port->phy_interface != interface || + phylink_autoneg_inband(mode)) { + /* Force the link down when changing the interface or if in + * in-band mode to ensure we do not change the configuration + * while the hardware is indicating link is up. We force both + * XLG and GMAC down to ensure that they're both in a known + * state. + */ + mvpp2_modify(port->base + MVPP2_GMAC_AUTONEG_CONFIG, + MVPP2_GMAC_FORCE_LINK_PASS | + MVPP2_GMAC_FORCE_LINK_DOWN, + MVPP2_GMAC_FORCE_LINK_DOWN); + + if (mvpp2_port_supports_xlg(port)) + mvpp2_modify(port->base + MVPP22_XLG_CTRL0_REG, + MVPP22_XLG_CTRL0_FORCE_LINK_PASS | + MVPP22_XLG_CTRL0_FORCE_LINK_DOWN, + MVPP22_XLG_CTRL0_FORCE_LINK_DOWN); }
/* Make sure the port is disabled when reconfiguring the mode */ mvpp2_port_disable(port);
- if (port->priv->hw_version == MVPP22 && change_interface) { - mvpp22_gop_mask_irq(port); + if (port->phy_interface != interface) { + /* Place GMAC into reset */ + mvpp2_modify(port->base + MVPP2_GMAC_CTRL_2_REG, + MVPP2_GMAC_PORT_RESET_MASK, + MVPP2_GMAC_PORT_RESET_MASK);
- port->phy_interface = state->interface; + if (port->priv->hw_version == MVPP22) { + mvpp22_gop_mask_irq(port);
- /* Reconfigure the serdes lanes */ - phy_power_off(port->comphy); - mvpp22_mode_reconfigure(port); + phy_power_off(port->comphy); + } }
+ /* Select the appropriate PCS operations depending on the + * configured interface mode. We will only switch to a mode + * that the validate() checks have already passed. + */ + if (mvpp2_is_xlg(interface)) + port->phylink_pcs.ops = &mvpp2_phylink_xlg_pcs_ops; + else + port->phylink_pcs.ops = &mvpp2_phylink_gmac_pcs_ops; + + return 0; +} + +static int mvpp2_mac_prepare(struct phylink_config *config, unsigned int mode, + phy_interface_t interface) +{ + struct mvpp2_port *port = mvpp2_phylink_to_port(config); + int ret; + + ret = mvpp2__mac_prepare(config, mode, interface); + if (ret == 0) + phylink_set_pcs(port->phylink, &port->phylink_pcs); + + return ret; +} + +static void mvpp2_mac_config(struct phylink_config *config, unsigned int mode, + const struct phylink_link_state *state) +{ + struct mvpp2_port *port = mvpp2_phylink_to_port(config); + /* mac (re)configuration */ if (mvpp2_is_xlg(state->interface)) mvpp2_xlg_config(port, mode, state); @@@ -5779,51 -5742,11 +5779,51 @@@
if (port->priv->hw_version == MVPP21 && port->flags & MVPP2_F_LOOPBACK) mvpp2_port_loopback_set(port, state); +} + +static int mvpp2_mac_finish(struct phylink_config *config, unsigned int mode, + phy_interface_t interface) +{ + struct mvpp2_port *port = mvpp2_phylink_to_port(config); + + if (port->priv->hw_version == MVPP22 && + port->phy_interface != interface) { + port->phy_interface = interface; + + /* Reconfigure the serdes lanes */ + mvpp22_mode_reconfigure(port);
- if (port->priv->hw_version == MVPP22 && change_interface) + /* Unmask interrupts */ mvpp22_gop_unmask_irq(port); + } + + if (!mvpp2_is_xlg(interface)) { + /* Release GMAC reset and wait */ + mvpp2_modify(port->base + MVPP2_GMAC_CTRL_2_REG, + MVPP2_GMAC_PORT_RESET_MASK, 0); + + while (readl(port->base + MVPP2_GMAC_CTRL_2_REG) & + MVPP2_GMAC_PORT_RESET_MASK) + continue; + }
mvpp2_port_enable(port); + + /* Allow the link to come up if in in-band mode, otherwise the + * link is forced via mac_link_down()/mac_link_up() + */ + if (phylink_autoneg_inband(mode)) { + if (mvpp2_is_xlg(interface)) + mvpp2_modify(port->base + MVPP22_XLG_CTRL0_REG, + MVPP22_XLG_CTRL0_FORCE_LINK_PASS | + MVPP22_XLG_CTRL0_FORCE_LINK_DOWN, 0); + else + mvpp2_modify(port->base + MVPP2_GMAC_AUTONEG_CONFIG, + MVPP2_GMAC_FORCE_LINK_PASS | + MVPP2_GMAC_FORCE_LINK_DOWN, 0); + } + + return 0; }
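The mvpp2 rework replaces the old monolithic mac_config() (plus mac_an_restart() and mac_pcs_get_state()) with split mac_prepare()/mac_config()/mac_finish() callbacks and a phylink_pcs whose ops are chosen per interface mode. The rough order phylink drives these in for a major reconfiguration, as relied on here (a sketch of the contract, not a normative statement of the phylink API):

/* mac_prepare(mode, interface)  - force link down, mask IRQs, pick PCS ops
 * mac_config(mode, state)       - program the MAC for the new mode
 * pcs_config(pcs, mode, ...)    - program in-band AN / advertisement
 * mac_finish(mode, interface)   - release resets, unmask IRQs, re-enable
 * mac_link_up(...)              - or in-band AN lets the link come up
 */

The ACPI workaround later in this file (mvpp2_acpi_start()) corroborates the ordering: it drives exactly this sequence by hand because phylink is not yet used for ACPI.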
static void mvpp2_mac_link_up(struct phylink_config *config, @@@ -5920,36 -5843,13 +5920,36 @@@ static void mvpp2_mac_link_down(struct
static const struct phylink_mac_ops mvpp2_phylink_ops = { .validate = mvpp2_phylink_validate, - .mac_pcs_get_state = mvpp2_phylink_mac_pcs_get_state, - .mac_an_restart = mvpp2_mac_an_restart, + .mac_prepare = mvpp2_mac_prepare, .mac_config = mvpp2_mac_config, + .mac_finish = mvpp2_mac_finish, .mac_link_up = mvpp2_mac_link_up, .mac_link_down = mvpp2_mac_link_down, };
+/* Work-around for ACPI */ +static void mvpp2_acpi_start(struct mvpp2_port *port) +{ + /* Phylink isn't used as of now for ACPI, so the MAC has to be + * configured manually when the interface is started. This will + * be removed as soon as the phylink ACPI support lands in. + */ + struct phylink_link_state state = { + .interface = port->phy_interface, + }; + mvpp2__mac_prepare(&port->phylink_config, MLO_AN_INBAND, + port->phy_interface); + mvpp2_mac_config(&port->phylink_config, MLO_AN_INBAND, &state); + port->phylink_pcs.ops->pcs_config(&port->phylink_pcs, MLO_AN_INBAND, + port->phy_interface, + state.advertising, false); + mvpp2_mac_finish(&port->phylink_config, MLO_AN_INBAND, + port->phy_interface); + mvpp2_mac_link_up(&port->phylink_config, NULL, + MLO_AN_INBAND, port->phy_interface, + SPEED_UNKNOWN, DUPLEX_UNKNOWN, false, false); +} + /* Ports initialization */ static int mvpp2_port_probe(struct platform_device *pdev, struct fwnode_handle *port_fwnode, diff --combined drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c index bb8c607cdcba,01a793105599..08181fc5f5d4 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c @@@ -737,7 -737,7 +737,7 @@@ static int rvu_nix_aq_enq_inst(struct r else if (req->ctype == NIX_AQ_CTYPE_MCE) memcpy(mask, &req->mce_mask, sizeof(struct nix_rx_mce_s)); - /* Fall through */ + fallthrough; case NIX_AQ_INSTOP_INIT: if (req->ctype == NIX_AQ_CTYPE_RQ) memcpy(ctx, &req->rq, sizeof(struct nix_rq_ctx_s)); @@@ -3319,49 -3319,6 +3319,49 @@@ void rvu_nix_lf_teardown(struct rvu *rv nix_ctx_free(rvu, pfvf); }
+#define NIX_AF_LFX_TX_CFG_PTP_EN BIT_ULL(32) + +static int rvu_nix_lf_ptp_tx_cfg(struct rvu *rvu, u16 pcifunc, bool enable) +{ + struct rvu_hwinfo *hw = rvu->hw; + struct rvu_block *block; + int blkaddr; + int nixlf; + u64 cfg; + + blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); + if (blkaddr < 0) + return NIX_AF_ERR_AF_LF_INVALID; + + block = &hw->block[blkaddr]; + nixlf = rvu_get_lf(rvu, block, pcifunc, 0); + if (nixlf < 0) + return NIX_AF_ERR_AF_LF_INVALID; + + cfg = rvu_read64(rvu, blkaddr, NIX_AF_LFX_TX_CFG(nixlf)); + + if (enable) + cfg |= NIX_AF_LFX_TX_CFG_PTP_EN; + else + cfg &= ~NIX_AF_LFX_TX_CFG_PTP_EN; + + rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_CFG(nixlf), cfg); + + return 0; +} + +int rvu_mbox_handler_nix_lf_ptp_tx_enable(struct rvu *rvu, struct msg_req *req, + struct msg_rsp *rsp) +{ + return rvu_nix_lf_ptp_tx_cfg(rvu, req->hdr.pcifunc, true); +} + +int rvu_mbox_handler_nix_lf_ptp_tx_disable(struct rvu *rvu, struct msg_req *req, + struct msg_rsp *rsp) +{ + return rvu_nix_lf_ptp_tx_cfg(rvu, req->hdr.pcifunc, false); +} + int rvu_mbox_handler_nix_lso_format_cfg(struct rvu *rvu, struct nix_lso_format_cfg *req, struct nix_lso_format_cfg_rsp *rsp) diff --combined drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c index 8232bc0f5c03,61719ec89808..252e91072c5a --- a/drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c +++ b/drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c @@@ -12,17 -12,8 +12,17 @@@ #include "core.h" #include "core_env.h"
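The two PTP mailbox handlers added above funnel into a single helper that read-modify-writes the PTP enable bit (bit 32) of the per-LF NIX_AF_LFX_TX_CFG register. The register-update shape in isolation (a sketch; pairs with rvu_read64()/rvu_write64() as in the hunk):

/* Set or clear one bit in a 64-bit config value. */
static u64 update_bit(u64 cfg, u64 bit, bool enable)
{
        return enable ? (cfg | bit) : (cfg & ~bit);
}

/* usage: rvu_write64(rvu, blkaddr, reg,
 *                    update_bit(rvu_read64(rvu, blkaddr, reg),
 *                               BIT_ULL(32), enable));
 */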
-#define MLXSW_HWMON_TEMP_SENSOR_MAX_COUNT 127 -#define MLXSW_HWMON_ATTR_COUNT (MLXSW_HWMON_TEMP_SENSOR_MAX_COUNT * 4 + \ +#define MLXSW_HWMON_SENSORS_MAX_COUNT 64 +#define MLXSW_HWMON_MODULES_MAX_COUNT 64 +#define MLXSW_HWMON_GEARBOXES_MAX_COUNT 32 + +#define MLXSW_HWMON_ATTR_PER_SENSOR 3 +#define MLXSW_HWMON_ATTR_PER_MODULE 7 +#define MLXSW_HWMON_ATTR_PER_GEARBOX 4 + +#define MLXSW_HWMON_ATTR_COUNT (MLXSW_HWMON_SENSORS_MAX_COUNT * MLXSW_HWMON_ATTR_PER_SENSOR + \ + MLXSW_HWMON_MODULES_MAX_COUNT * MLXSW_HWMON_ATTR_PER_MODULE + \ + MLXSW_HWMON_GEARBOXES_MAX_COUNT * MLXSW_HWMON_ATTR_PER_GEARBOX + \ MLXSW_MFCR_TACHOS_MAX + MLXSW_MFCR_PWMS_MAX)
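The attribute budget changes from a flat 127 sensors * 4 to per-class worst cases. Spelled out, the fixed part of the new expression is 64 * 3 + 64 * 7 + 32 * 4 = 192 + 448 + 128 = 768 attribute slots, plus MLXSW_MFCR_TACHOS_MAX and MLXSW_MFCR_PWMS_MAX as before; the seven per-module entries cover the two alarm files added later in this diff. A compile-time restatement of the arithmetic (a sketch, not part of the merged change):

/* Sanity-check the fixed part of the budget computed above. */
_Static_assert(64 * 3 + 64 * 7 + 32 * 4 == 768,
               "mlxsw hwmon attribute budget arithmetic");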
struct mlxsw_hwmon_attr { @@@ -214,39 -205,25 +214,39 @@@ static ssize_t mlxsw_hwmon_pwm_store(st return len; }
-static ssize_t mlxsw_hwmon_module_temp_show(struct device *dev, - struct device_attribute *attr, - char *buf) +static int mlxsw_hwmon_module_temp_get(struct device *dev, + struct device_attribute *attr, + int *p_temp) { struct mlxsw_hwmon_attr *mlwsw_hwmon_attr = container_of(attr, struct mlxsw_hwmon_attr, dev_attr); struct mlxsw_hwmon *mlxsw_hwmon = mlwsw_hwmon_attr->hwmon; char mtmp_pl[MLXSW_REG_MTMP_LEN]; u8 module; - int temp; int err;
module = mlwsw_hwmon_attr->type_index - mlxsw_hwmon->sensor_count; mlxsw_reg_mtmp_pack(mtmp_pl, MLXSW_REG_MTMP_MODULE_INDEX_MIN + module, false, false); err = mlxsw_reg_query(mlxsw_hwmon->core, MLXSW_REG(mtmp), mtmp_pl); + if (err) { + dev_err(dev, "Failed to query module temperature\n"); + return err; + } + mlxsw_reg_mtmp_unpack(mtmp_pl, p_temp, NULL, NULL); + + return 0; +} + +static ssize_t mlxsw_hwmon_module_temp_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + int err, temp; + + err = mlxsw_hwmon_module_temp_get(dev, attr, &temp); if (err) return err; - mlxsw_reg_mtmp_unpack(mtmp_pl, &temp, NULL, NULL);
return sprintf(buf, "%d\n", temp); } @@@ -282,8 -259,8 +282,8 @@@ static ssize_t mlxsw_hwmon_module_temp_ */ fault = 1; break; - case MLXSW_REG_MTBR_NO_CONN: /* fall-through */ - case MLXSW_REG_MTBR_NO_TEMP_SENS: /* fall-through */ + case MLXSW_REG_MTBR_NO_CONN: + case MLXSW_REG_MTBR_NO_TEMP_SENS: case MLXSW_REG_MTBR_INDEX_NA: default: fault = 0; @@@ -293,72 -270,48 +293,72 @@@ return sprintf(buf, "%u\n", fault); }
-static ssize_t -mlxsw_hwmon_module_temp_critical_show(struct device *dev, - struct device_attribute *attr, char *buf) +static int mlxsw_hwmon_module_temp_critical_get(struct device *dev, + struct device_attribute *attr, + int *p_temp) { struct mlxsw_hwmon_attr *mlwsw_hwmon_attr = container_of(attr, struct mlxsw_hwmon_attr, dev_attr); struct mlxsw_hwmon *mlxsw_hwmon = mlwsw_hwmon_attr->hwmon; - int temp; u8 module; int err;
module = mlwsw_hwmon_attr->type_index - mlxsw_hwmon->sensor_count; err = mlxsw_env_module_temp_thresholds_get(mlxsw_hwmon->core, module, - SFP_TEMP_HIGH_WARN, &temp); + SFP_TEMP_HIGH_WARN, p_temp); if (err) { dev_err(dev, "Failed to query module temperature thresholds\n"); return err; }
- return sprintf(buf, "%u\n", temp); + return 0; }
static ssize_t -mlxsw_hwmon_module_temp_emergency_show(struct device *dev, - struct device_attribute *attr, - char *buf) +mlxsw_hwmon_module_temp_critical_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + int err, temp; + + err = mlxsw_hwmon_module_temp_critical_get(dev, attr, &temp); + if (err) + return err; + + return sprintf(buf, "%u\n", temp); +} + +static int mlxsw_hwmon_module_temp_emergency_get(struct device *dev, + struct device_attribute *attr, + int *p_temp) { struct mlxsw_hwmon_attr *mlwsw_hwmon_attr = container_of(attr, struct mlxsw_hwmon_attr, dev_attr); struct mlxsw_hwmon *mlxsw_hwmon = mlwsw_hwmon_attr->hwmon; u8 module; - int temp; int err;
module = mlwsw_hwmon_attr->type_index - mlxsw_hwmon->sensor_count; err = mlxsw_env_module_temp_thresholds_get(mlxsw_hwmon->core, module, - SFP_TEMP_HIGH_ALARM, &temp); + SFP_TEMP_HIGH_ALARM, p_temp); if (err) { dev_err(dev, "Failed to query module temperature thresholds\n"); return err; }
+ return 0; +} + +static ssize_t +mlxsw_hwmon_module_temp_emergency_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + int err, temp; + + err = mlxsw_hwmon_module_temp_emergency_get(dev, attr, &temp); + if (err) + return err; + return sprintf(buf, "%u\n", temp); }
@@@ -388,53 -341,6 +388,53 @@@ mlxsw_hwmon_gbox_temp_label_show(struc return sprintf(buf, "gearbox %03u\n", index); }
+static ssize_t mlxsw_hwmon_temp_critical_alarm_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + int err, temp, emergency_temp, critic_temp; + + err = mlxsw_hwmon_module_temp_get(dev, attr, &temp); + if (err) + return err; + + if (temp <= 0) + return sprintf(buf, "%d\n", false); + + err = mlxsw_hwmon_module_temp_emergency_get(dev, attr, &emergency_temp); + if (err) + return err; + + if (temp >= emergency_temp) + return sprintf(buf, "%d\n", false); + + err = mlxsw_hwmon_module_temp_critical_get(dev, attr, &critic_temp); + if (err) + return err; + + return sprintf(buf, "%d\n", temp >= critic_temp); +} + +static ssize_t mlxsw_hwmon_temp_emergency_alarm_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + int err, temp, emergency_temp; + + err = mlxsw_hwmon_module_temp_get(dev, attr, &temp); + if (err) + return err; + + if (temp <= 0) + return sprintf(buf, "%d\n", false); + + err = mlxsw_hwmon_module_temp_emergency_get(dev, attr, &emergency_temp); + if (err) + return err; + + return sprintf(buf, "%d\n", temp >= emergency_temp); +} + enum mlxsw_hwmon_attr_type { MLXSW_HWMON_ATTR_TYPE_TEMP, MLXSW_HWMON_ATTR_TYPE_TEMP_MAX, @@@ -448,8 -354,6 +448,8 @@@ MLXSW_HWMON_ATTR_TYPE_TEMP_MODULE_EMERG, MLXSW_HWMON_ATTR_TYPE_TEMP_MODULE_LABEL, MLXSW_HWMON_ATTR_TYPE_TEMP_GBOX_LABEL, + MLXSW_HWMON_ATTR_TYPE_TEMP_CRIT_ALARM, + MLXSW_HWMON_ATTR_TYPE_TEMP_EMERGENCY_ALARM, };
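Taken together, the two new show handlers band the module temperature so that at most one alarm file reads 1 at a time: temp%u_crit_alarm asserts for crit <= temp < emergency, temp%u_emergency_alarm for temp >= emergency, and both read 0 when the sensor reports no positive temperature. A condensed restatement (illustrative names only):

/* Mirror of the banding implemented by the two show handlers. */
enum temp_alarm { TEMP_OK, TEMP_CRIT, TEMP_EMERG };

static enum temp_alarm classify_temp(int temp, int crit, int emerg)
{
        if (temp <= 0)                  /* no valid reading */
                return TEMP_OK;
        if (temp >= emerg)
                return TEMP_EMERG;      /* emergency_alarm = 1 */
        if (temp >= crit)
                return TEMP_CRIT;       /* crit_alarm = 1 */
        return TEMP_OK;
}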
static void mlxsw_hwmon_attr_add(struct mlxsw_hwmon *mlxsw_hwmon, @@@ -540,20 -444,6 +540,20 @@@ snprintf(mlxsw_hwmon_attr->name, sizeof(mlxsw_hwmon_attr->name), "temp%u_label", num + 1); break; + case MLXSW_HWMON_ATTR_TYPE_TEMP_CRIT_ALARM: + mlxsw_hwmon_attr->dev_attr.show = + mlxsw_hwmon_temp_critical_alarm_show; + mlxsw_hwmon_attr->dev_attr.attr.mode = 0444; + snprintf(mlxsw_hwmon_attr->name, sizeof(mlxsw_hwmon_attr->name), + "temp%u_crit_alarm", num + 1); + break; + case MLXSW_HWMON_ATTR_TYPE_TEMP_EMERGENCY_ALARM: + mlxsw_hwmon_attr->dev_attr.show = + mlxsw_hwmon_temp_emergency_alarm_show; + mlxsw_hwmon_attr->dev_attr.attr.mode = 0444; + snprintf(mlxsw_hwmon_attr->name, sizeof(mlxsw_hwmon_attr->name), + "temp%u_emergency_alarm", num + 1); + break; default: WARN_ON(1); } @@@ -676,12 -566,6 +676,12 @@@ static int mlxsw_hwmon_module_init(stru mlxsw_hwmon_attr_add(mlxsw_hwmon, MLXSW_HWMON_ATTR_TYPE_TEMP_MODULE_LABEL, i, i); + mlxsw_hwmon_attr_add(mlxsw_hwmon, + MLXSW_HWMON_ATTR_TYPE_TEMP_CRIT_ALARM, + i, i); + mlxsw_hwmon_attr_add(mlxsw_hwmon, + MLXSW_HWMON_ATTR_TYPE_TEMP_EMERGENCY_ALARM, + i, i); }
return 0; diff --combined drivers/net/ethernet/netronome/nfp/flower/offload.c index 44cf738636ef,36356f96661d..1c59aff2163c --- a/drivers/net/ethernet/netronome/nfp/flower/offload.c +++ b/drivers/net/ethernet/netronome/nfp/flower/offload.c @@@ -31,7 -31,6 +31,7 @@@ BIT(FLOW_DISSECTOR_KEY_PORTS) | \ BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) | \ BIT(FLOW_DISSECTOR_KEY_VLAN) | \ + BIT(FLOW_DISSECTOR_KEY_CVLAN) | \ BIT(FLOW_DISSECTOR_KEY_ENC_KEYID) | \ BIT(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) | \ BIT(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS) | \ @@@ -67,8 -66,7 +67,8 @@@ NFP_FLOWER_LAYER_IPV6)
#define NFP_FLOWER_PRE_TUN_RULE_FIELDS \ - (NFP_FLOWER_LAYER_PORT | \ + (NFP_FLOWER_LAYER_EXT_META | \ + NFP_FLOWER_LAYER_PORT | \ NFP_FLOWER_LAYER_MAC | \ NFP_FLOWER_LAYER_IPV4 | \ NFP_FLOWER_LAYER_IPV6) @@@ -287,30 -285,6 +287,30 @@@ nfp_flower_calculate_key_layers(struct NL_SET_ERR_MSG_MOD(extack, "unsupported offload: loaded firmware does not support VLAN PCP offload"); return -EOPNOTSUPP; } + if (priv->flower_ext_feats & NFP_FL_FEATS_VLAN_QINQ && + !(key_layer_two & NFP_FLOWER_LAYER2_QINQ)) { + key_layer |= NFP_FLOWER_LAYER_EXT_META; + key_size += sizeof(struct nfp_flower_ext_meta); + key_size += sizeof(struct nfp_flower_vlan); + key_layer_two |= NFP_FLOWER_LAYER2_QINQ; + } + } + + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CVLAN)) { + struct flow_match_vlan cvlan; + + if (!(priv->flower_ext_feats & NFP_FL_FEATS_VLAN_QINQ)) { + NL_SET_ERR_MSG_MOD(extack, "unsupported offload: loaded firmware does not support VLAN QinQ offload"); + return -EOPNOTSUPP; + } + + flow_rule_match_vlan(rule, &cvlan); + if (!(key_layer_two & NFP_FLOWER_LAYER2_QINQ)) { + key_layer |= NFP_FLOWER_LAYER_EXT_META; + key_size += sizeof(struct nfp_flower_ext_meta); + key_size += sizeof(struct nfp_flower_vlan); + key_layer_two |= NFP_FLOWER_LAYER2_QINQ; + } }
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_CONTROL)) { @@@ -810,7 -784,7 +810,7 @@@ nfp_flower_copy_pre_actions(char *act_d case NFP_FL_ACTION_OPCODE_PRE_TUNNEL: if (tunnel_act) *tunnel_act = true; - /* fall through */ + fallthrough; case NFP_FL_ACTION_OPCODE_PRE_LAG: memcpy(act_dst + act_off, act_src + act_off, act_len); break; @@@ -1092,7 -1066,6 +1092,7 @@@ err_destroy_merge_flow * nfp_flower_validate_pre_tun_rule() * @app: Pointer to the APP handle * @flow: Pointer to NFP flow representation of rule + * @key_ls: Pointer to NFP key layers structure * @extack: Netlink extended ACK report * * Verifies the flow as a pre-tunnel rule. @@@ -1102,13 -1075,10 +1102,13 @@@ static int nfp_flower_validate_pre_tun_rule(struct nfp_app *app, struct nfp_fl_payload *flow, + struct nfp_fl_key_ls *key_ls, struct netlink_ext_ack *extack) { + struct nfp_flower_priv *priv = app->priv; struct nfp_flower_meta_tci *meta_tci; struct nfp_flower_mac_mpls *mac; + u8 *ext = flow->unmasked_data; struct nfp_fl_act_head *act; u8 *mask = flow->mask_data; bool vlan = false; @@@ -1116,25 -1086,20 +1116,25 @@@ u8 key_layer;
meta_tci = (struct nfp_flower_meta_tci *)flow->unmasked_data; - if (meta_tci->tci & cpu_to_be16(NFP_FLOWER_MASK_VLAN_PRESENT)) { - u16 vlan_tci = be16_to_cpu(meta_tci->tci); - - vlan_tci &= ~NFP_FLOWER_MASK_VLAN_PRESENT; - flow->pre_tun_rule.vlan_tci = cpu_to_be16(vlan_tci); - vlan = true; - } else { - flow->pre_tun_rule.vlan_tci = cpu_to_be16(0xffff); + key_layer = key_ls->key_layer; + if (!(priv->flower_ext_feats & NFP_FL_FEATS_VLAN_QINQ)) { + if (meta_tci->tci & cpu_to_be16(NFP_FLOWER_MASK_VLAN_PRESENT)) { + u16 vlan_tci = be16_to_cpu(meta_tci->tci); + + vlan_tci &= ~NFP_FLOWER_MASK_VLAN_PRESENT; + flow->pre_tun_rule.vlan_tci = cpu_to_be16(vlan_tci); + vlan = true; + } else { + flow->pre_tun_rule.vlan_tci = cpu_to_be16(0xffff); + } }
- key_layer = meta_tci->nfp_flow_key_layer; if (key_layer & ~NFP_FLOWER_PRE_TUN_RULE_FIELDS) { NL_SET_ERR_MSG_MOD(extack, "unsupported pre-tunnel rule: too many match fields"); return -EOPNOTSUPP; + } else if (key_ls->key_layer_two & ~NFP_FLOWER_LAYER2_QINQ) { + NL_SET_ERR_MSG_MOD(extack, "unsupported pre-tunnel rule: non-vlan in extended match fields"); + return -EOPNOTSUPP; }
if (!(key_layer & NFP_FLOWER_LAYER_MAC)) { @@@ -1144,13 -1109,7 +1144,13 @@@
/* Skip fields known to exist. */ mask += sizeof(struct nfp_flower_meta_tci); + ext += sizeof(struct nfp_flower_meta_tci); + if (key_ls->key_layer_two) { + mask += sizeof(struct nfp_flower_ext_meta); + ext += sizeof(struct nfp_flower_ext_meta); + } mask += sizeof(struct nfp_flower_in_port); + ext += sizeof(struct nfp_flower_in_port);
/* Ensure destination MAC address is fully matched. */ mac = (struct nfp_flower_mac_mpls *)mask; @@@ -1159,8 -1118,6 +1159,8 @@@ return -EOPNOTSUPP; }
+ mask += sizeof(struct nfp_flower_mac_mpls); + ext += sizeof(struct nfp_flower_mac_mpls); if (key_layer & NFP_FLOWER_LAYER_IPV4 || key_layer & NFP_FLOWER_LAYER_IPV6) { /* Flags and proto fields have same offset in IPv4 and IPv6. */ @@@ -1173,6 -1130,7 +1173,6 @@@ sizeof(struct nfp_flower_ipv4) : sizeof(struct nfp_flower_ipv6);
- mask += sizeof(struct nfp_flower_mac_mpls);
/* Ensure proto and flags are the only IP layer fields. */ for (i = 0; i < size; i++) @@@ -1180,25 -1138,6 +1180,25 @@@ NL_SET_ERR_MSG_MOD(extack, "unsupported pre-tunnel rule: only flags and proto can be matched in ip header"); return -EOPNOTSUPP; } + ext += size; + mask += size; + } + + if ((priv->flower_ext_feats & NFP_FL_FEATS_VLAN_QINQ)) { + if (key_ls->key_layer_two & NFP_FLOWER_LAYER2_QINQ) { + struct nfp_flower_vlan *vlan_tags; + u16 vlan_tci; + + vlan_tags = (struct nfp_flower_vlan *)ext; + + vlan_tci = be16_to_cpu(vlan_tags->outer_tci); + + vlan_tci &= ~NFP_FLOWER_MASK_VLAN_PRESENT; + flow->pre_tun_rule.vlan_tci = cpu_to_be16(vlan_tci); + vlan = true; + } else { + flow->pre_tun_rule.vlan_tci = cpu_to_be16(0xffff); + } }
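The validation now advances two cursors, mask and ext, in lock-step across the packed match key: every layer known to be present moves both by that layer's struct size, so when NFP_FLOWER_LAYER_EXT_META is set the extra metadata word is skipped consistently before the QinQ TCI is read from the unmasked data. The cursor pattern in isolation (a sketch over the layer structs used in this file):

/* Step both views of a packed key past the fixed leading layers. */
static void skip_leading_layers(u8 **mask, u8 **ext, bool has_ext_meta)
{
        size_t adv = sizeof(struct nfp_flower_meta_tci);

        if (has_ext_meta)
                adv += sizeof(struct nfp_flower_ext_meta);
        adv += sizeof(struct nfp_flower_in_port);

        *mask += adv;
        *ext += adv;    /* both now point at nfp_flower_mac_mpls */
}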
/* Action must be a single egress or pop_vlan and egress. */ @@@ -1281,7 -1220,7 +1281,7 @@@ nfp_flower_add_offload(struct nfp_app * goto err_destroy_flow;
if (flow_pay->pre_tun_rule.dev) { - err = nfp_flower_validate_pre_tun_rule(app, flow_pay, extack); + err = nfp_flower_validate_pre_tun_rule(app, flow_pay, key_layer, extack); if (err) goto err_destroy_flow; } diff --combined drivers/net/ethernet/pensando/ionic/ionic_txrx.c index b5f8d8250aff,def65fee27b5..7225251c5563 --- a/drivers/net/ethernet/pensando/ionic/ionic_txrx.c +++ b/drivers/net/ethernet/pensando/ionic/ionic_txrx.c @@@ -22,7 -22,7 +22,7 @@@ static bool ionic_tx_service(struct ion static inline void ionic_txq_post(struct ionic_queue *q, bool ring_dbell, ionic_desc_cb cb_func, void *cb_arg) { - DEBUG_STATS_TXQ_POST(q_to_qcq(q), q->head->desc, ring_dbell); + DEBUG_STATS_TXQ_POST(q, ring_dbell);
ionic_q_post(q, ring_dbell, cb_func, cb_arg); } @@@ -32,7 -32,7 +32,7 @@@ static inline void ionic_rxq_post(struc { ionic_q_post(q, ring_dbell, cb_func, cb_arg);
- DEBUG_STATS_RX_BUFF_CNT(q_to_qcq(q)); + DEBUG_STATS_RX_BUFF_CNT(q); }
static inline struct netdev_queue *q_to_ndq(struct ionic_queue *q) @@@ -49,7 -49,7 +49,7 @@@ static struct sk_buff *ionic_rx_skb_all struct sk_buff *skb;
netdev = lif->netdev; - stats = q_to_rx_stats(q); + stats = &q->lif->rxqstats[q->index];
if (frags) skb = napi_get_frags(&q_to_qcq(q)->napi); @@@ -235,14 -235,14 +235,14 @@@ static bool ionic_rx_service(struct ion return false;
/* check for empty queue */ - if (q->tail->index == q->head->index) + if (q->tail_idx == q->head_idx) return false;
- desc_info = q->tail; - if (desc_info->index != le16_to_cpu(comp->comp_index)) + if (q->tail_idx != le16_to_cpu(comp->comp_index)) return false;
- q->tail = desc_info->next; + desc_info = &q->info[q->tail_idx]; + q->tail_idx = (q->tail_idx + 1) & (q->num_descs - 1);
/* clean the related q entry, only one per qc completion */ ionic_rx_clean(q, desc_info, cq_info, desc_info->cb_arg); @@@ -266,49 -266,40 +266,49 @@@ void ionic_rx_flush(struct ionic_cq *cq work_done, IONIC_INTR_CRED_RESET_COALESCE); }
-static struct page *ionic_rx_page_alloc(struct ionic_queue *q, - dma_addr_t *dma_addr) +static int ionic_rx_page_alloc(struct ionic_queue *q, + struct ionic_page_info *page_info) { struct ionic_lif *lif = q->lif; struct ionic_rx_stats *stats; struct net_device *netdev; struct device *dev; - struct page *page;
netdev = lif->netdev; dev = lif->ionic->dev; stats = q_to_rx_stats(q); - page = alloc_page(GFP_ATOMIC); - if (unlikely(!page)) { - net_err_ratelimited("%s: Page alloc failed on %s!\n", + + if (unlikely(!page_info)) { + net_err_ratelimited("%s: %s invalid page_info in alloc\n", + netdev->name, q->name); + return -EINVAL; + } + + page_info->page = dev_alloc_page(); + if (unlikely(!page_info->page)) { + net_err_ratelimited("%s: %s page alloc failed\n", netdev->name, q->name); stats->alloc_err++; - return NULL; + return -ENOMEM; }
- *dma_addr = dma_map_page(dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE); - if (unlikely(dma_mapping_error(dev, *dma_addr))) { - __free_page(page); - net_err_ratelimited("%s: DMA single map failed on %s!\n", + page_info->dma_addr = dma_map_page(dev, page_info->page, 0, PAGE_SIZE, + DMA_FROM_DEVICE); + if (unlikely(dma_mapping_error(dev, page_info->dma_addr))) { + put_page(page_info->page); + page_info->dma_addr = 0; + page_info->page = NULL; + net_err_ratelimited("%s: %s dma map failed\n", netdev->name, q->name); stats->dma_map_err++; - return NULL; + return -EIO; }
- return page; + return 0; }
-static void ionic_rx_page_free(struct ionic_queue *q, struct page *page, - dma_addr_t dma_addr) +static void ionic_rx_page_free(struct ionic_queue *q, + struct ionic_page_info *page_info) { struct ionic_lif *lif = q->lif; struct net_device *netdev; @@@ -317,23 -308,15 +317,23 @@@ netdev = lif->netdev; dev = lif->ionic->dev;
- if (unlikely(!page)) { - net_err_ratelimited("%s: Trying to free unallocated buffer on %s!\n", + if (unlikely(!page_info)) { + net_err_ratelimited("%s: %s invalid page_info in free\n", netdev->name, q->name); return; }
- dma_unmap_page(dev, dma_addr, PAGE_SIZE, DMA_FROM_DEVICE); + if (unlikely(!page_info->page)) { + net_err_ratelimited("%s: %s invalid page in free\n", + netdev->name, q->name); + return; + } + + dma_unmap_page(dev, page_info->dma_addr, PAGE_SIZE, DMA_FROM_DEVICE);
- __free_page(page); + put_page(page_info->page); + page_info->dma_addr = 0; + page_info->page = NULL; }
void ionic_rx_fill(struct ionic_queue *q) @@@ -355,7 -338,7 +355,7 @@@
for (i = ionic_q_space_avail(q); i; i--) { remain_len = len; - desc_info = q->head; + desc_info = &q->info[q->head_idx]; desc = desc_info->desc; sg_desc = desc_info->sg_desc; page_info = &desc_info->pages[0]; @@@ -369,7 -352,8 +369,7 @@@ desc->opcode = (nfrags > 1) ? IONIC_RXQ_DESC_OPCODE_SG : IONIC_RXQ_DESC_OPCODE_SIMPLE; desc_info->npages = nfrags; - page_info->page = ionic_rx_page_alloc(q, &page_info->dma_addr); - if (unlikely(!page_info->page)) { + if (unlikely(ionic_rx_page_alloc(q, page_info))) { desc->addr = 0; desc->len = 0; return; @@@ -386,7 -370,8 +386,7 @@@ continue;
sg_elem = &sg_desc->elems[j]; - page_info->page = ionic_rx_page_alloc(q, &page_info->dma_addr); - if (unlikely(!page_info->page)) { + if (unlikely(ionic_rx_page_alloc(q, page_info))) { sg_elem->addr = 0; sg_elem->len = 0; return; @@@ -402,7 -387,7 +402,7 @@@ }
ionic_dbell_ring(q->lif->kern_dbpage, q->hw_type, - q->dbval | q->head->index); + q->dbval | q->head_idx); }
static void ionic_rx_fill_cb(void *arg) @@@ -412,23 -397,25 +412,23 @@@
void ionic_rx_empty(struct ionic_queue *q) { - struct ionic_desc_info *cur; + struct ionic_desc_info *desc_info; struct ionic_rxq_desc *desc; unsigned int i; + u16 idx;
- for (cur = q->tail; cur != q->head; cur = cur->next) { - desc = cur->desc; + idx = q->tail_idx; + while (idx != q->head_idx) { + desc_info = &q->info[idx]; + desc = desc_info->desc; desc->addr = 0; desc->len = 0;
- for (i = 0; i < cur->npages; i++) { - if (likely(cur->pages[i].page)) { - ionic_rx_page_free(q, cur->pages[i].page, - cur->pages[i].dma_addr); - cur->pages[i].page = NULL; - cur->pages[i].dma_addr = 0; - } - } + for (i = 0; i < desc_info->npages; i++) + ionic_rx_page_free(q, &desc_info->pages[i]);
- cur->cb_arg = NULL; + desc_info->cb_arg = NULL; + idx = (idx + 1) & (q->num_descs - 1); } }
@@@ -509,13 -496,11 +509,11 @@@ int ionic_txrx_napi(struct napi_struct struct ionic_cq *txcq; u32 rx_work_done = 0; u32 tx_work_done = 0; - u32 work_done = 0; u32 flags = 0; - bool unmask;
lif = rxcq->bound_q->lif; idev = &lif->ionic->idev; - txcq = &lif->txqcqs[qi].qcq->cq; + txcq = &lif->txqcqs[qi]->cq;
tx_work_done = ionic_cq_service(txcq, lif->tx_budget, ionic_tx_service, NULL, NULL); @@@ -525,17 -510,12 +523,12 @@@ if (rx_work_done) ionic_rx_fill_cb(rxcq->bound_q);
- unmask = (rx_work_done < budget) && (tx_work_done < lif->tx_budget); - - if (unmask && napi_complete_done(napi, rx_work_done)) { + if (rx_work_done < budget && napi_complete_done(napi, rx_work_done)) { flags |= IONIC_INTR_CRED_UNMASK; DEBUG_STATS_INTR_REARM(rxcq->bound_intr); - work_done = rx_work_done; - } else { - work_done = budget; }
- if (work_done || flags) { + if (rx_work_done || flags) { flags |= IONIC_INTR_CRED_RESET_COALESCE; ionic_intr_credits(idev->intr_ctrl, rxcq->bound_intr->index, tx_work_done + rx_work_done, flags); @@@ -544,7 -524,7 +537,7 @@@ DEBUG_STATS_NAPI_POLL(qcq, rx_work_done); DEBUG_STATS_NAPI_POLL(qcq, tx_work_done);
- return work_done; + return rx_work_done; }
static dma_addr_t ionic_tx_map_single(struct ionic_queue *q, @@@ -635,7 -615,6 +628,7 @@@ static bool ionic_tx_service(struct ion struct ionic_txq_comp *comp = cq_info->cq_desc; struct ionic_queue *q = cq->bound_q; struct ionic_desc_info *desc_info; + u16 index;
if (!color_match(comp->color, cq->done_color)) return false; @@@ -644,13 -623,12 +637,13 @@@ * several q entries completed for each cq completion */ do { - desc_info = q->tail; - q->tail = desc_info->next; - ionic_tx_clean(q, desc_info, cq->tail, desc_info->cb_arg); + desc_info = &q->info[q->tail_idx]; + index = q->tail_idx; + q->tail_idx = (q->tail_idx + 1) & (q->num_descs - 1); + ionic_tx_clean(q, desc_info, cq_info, desc_info->cb_arg); desc_info->cb = NULL; desc_info->cb_arg = NULL; - } while (desc_info->index != le16_to_cpu(comp->comp_index)); + } while (index != le16_to_cpu(comp->comp_index));
return true; } @@@ -670,14 -648,16 +663,14 @@@ void ionic_tx_flush(struct ionic_cq *cq void ionic_tx_empty(struct ionic_queue *q) { struct ionic_desc_info *desc_info; - int done = 0;
/* walk the not completed tx entries, if any */ - while (q->head != q->tail) { - desc_info = q->tail; - q->tail = desc_info->next; + while (q->head_idx != q->tail_idx) { + desc_info = &q->info[q->tail_idx]; + q->tail_idx = (q->tail_idx + 1) & (q->num_descs - 1); ionic_tx_clean(q, desc_info, NULL, desc_info->cb_arg); desc_info->cb = NULL; desc_info->cb_arg = NULL; - done++; } }
@@@ -761,8 -741,8 +754,8 @@@ static void ionic_tx_tso_post(struct io static struct ionic_txq_desc *ionic_tx_tso_next(struct ionic_queue *q, struct ionic_txq_sg_elem **elem) { - struct ionic_txq_sg_desc *sg_desc = q->head->sg_desc; - struct ionic_txq_desc *desc = q->head->desc; + struct ionic_txq_sg_desc *sg_desc = q->info[q->head_idx].txq_sg_desc; + struct ionic_txq_desc *desc = q->info[q->head_idx].txq_desc;
*elem = sg_desc->elems; return desc; @@@ -771,13 -751,13 +764,13 @@@ static int ionic_tx_tso(struct ionic_queue *q, struct sk_buff *skb) { struct ionic_tx_stats *stats = q_to_tx_stats(q); - struct ionic_desc_info *abort = q->head; + struct ionic_desc_info *rewind_desc_info; struct device *dev = q->lif->ionic->dev; - struct ionic_desc_info *rewind = abort; struct ionic_txq_sg_elem *elem; struct ionic_txq_desc *desc; unsigned int frag_left = 0; unsigned int offset = 0; + u16 abort = q->head_idx; unsigned int len_left; dma_addr_t desc_addr; unsigned int hdrlen; @@@ -785,7 -765,6 +778,7 @@@ unsigned int seglen; u64 total_bytes = 0; u64 total_pkts = 0; + u16 rewind = abort; unsigned int left; unsigned int len; unsigned int mss; @@@ -930,20 -909,19 +923,20 @@@ return 0;
err_out_abort: - while (rewind->desc != q->head->desc) { - ionic_tx_clean(q, rewind, NULL, NULL); - rewind = rewind->next; + while (rewind != q->head_idx) { + rewind_desc_info = &q->info[rewind]; + ionic_tx_clean(q, rewind_desc_info, NULL, NULL); + rewind = (rewind + 1) & (q->num_descs - 1); } - q->head = abort; + q->head_idx = abort;
return -ENOMEM; }
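The error path above is a small transaction rewind: the pre-transmit head index is saved, and on failure every descriptor queued since then is cleaned while walking forward to the current head, which is then restored. Reusing the struct ring sketch from the RX hunks above:

/* Rewind-on-error idiom; clean() is a hypothetical per-descriptor hook. */
static void ring_rewind(struct ring *r, unsigned int saved_head,
			void (*clean)(struct ring *r, unsigned int idx))
{
	unsigned int idx = saved_head;

	while (idx != r->head_idx) {	/* undo everything queued so far */
		clean(r, idx);
		idx = (idx + 1) & (r->num_descs - 1);
	}
	r->head_idx = saved_head;	/* restore pre-transmit state */
}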
static int ionic_tx_calc_csum(struct ionic_queue *q, struct sk_buff *skb) { + struct ionic_txq_desc *desc = q->info[q->head_idx].txq_desc; struct ionic_tx_stats *stats = q_to_tx_stats(q); - struct ionic_txq_desc *desc = q->head->desc; struct device *dev = q->lif->ionic->dev; dma_addr_t dma_addr; bool has_vlan; @@@ -982,8 -960,8 +975,8 @@@
static int ionic_tx_calc_no_csum(struct ionic_queue *q, struct sk_buff *skb) { + struct ionic_txq_desc *desc = q->info[q->head_idx].txq_desc; struct ionic_tx_stats *stats = q_to_tx_stats(q); - struct ionic_txq_desc *desc = q->head->desc; struct device *dev = q->lif->ionic->dev; dma_addr_t dma_addr; bool has_vlan; @@@ -1017,7 -995,7 +1010,7 @@@
static int ionic_tx_skb_frags(struct ionic_queue *q, struct sk_buff *skb) { - struct ionic_txq_sg_desc *sg_desc = q->head->sg_desc; + struct ionic_txq_sg_desc *sg_desc = q->info[q->head_idx].txq_sg_desc; unsigned int len_left = skb->len - skb_headlen(skb); struct ionic_txq_sg_elem *elem = sg_desc->elems; struct ionic_tx_stats *stats = q_to_tx_stats(q); @@@ -1126,9 -1104,9 +1119,9 @@@ netdev_tx_t ionic_start_xmit(struct sk_ return NETDEV_TX_OK; }
- if (unlikely(!lif_to_txqcq(lif, queue_index))) + if (unlikely(queue_index >= lif->nxqs)) queue_index = 0; - q = lif_to_txq(lif, queue_index); + q = &lif->txqcqs[queue_index]->q;
ndescs = ionic_tx_descs_needed(q, skb); if (ndescs < 0) diff --combined drivers/net/ethernet/qlogic/qed/qed_dev.c index 00f2d7f13de6,b8f076e4e6b8..f7f08e6a3acf --- a/drivers/net/ethernet/qlogic/qed/qed_dev.c +++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c @@@ -3109,14 -3109,14 +3109,14 @@@ int qed_hw_init(struct qed_dev *cdev, s p_hwfn->hw_info.hw_mode); if (rc) break; - /* Fall through */ + fallthrough; case FW_MSG_CODE_DRV_LOAD_PORT: rc = qed_hw_init_port(p_hwfn, p_hwfn->p_main_ptt, p_hwfn->hw_info.hw_mode); if (rc) break;
- /* Fall through */ + fallthrough; case FW_MSG_CODE_DRV_LOAD_FUNCTION: rc = qed_hw_init_pf(p_hwfn, p_hwfn->p_main_ptt, p_params->p_tunn, @@@ -3973,7 -3973,6 +3973,7 @@@ static int qed_hw_get_nvm_info(struct q struct qed_mcp_link_speed_params *ext_speed; struct qed_mcp_link_capabilities *p_caps; struct qed_mcp_link_params *link; + int i;
/* Read global nvm_cfg address */ nvm_cfg_addr = qed_rd(p_hwfn, p_ptt, MISC_REG_GEN_PURP_CR0); @@@ -4291,14 -4290,6 +4291,14 @@@ __set_bit(QED_DEV_CAP_ROCE, &p_hwfn->hw_info.device_capabilities);
+ /* Read device serial number information from shmem */ + addr = MCP_REG_SCRATCH + nvm_cfg1_offset + + offsetof(struct nvm_cfg1, glob) + + offsetof(struct nvm_cfg1_glob, serial_number); + + for (i = 0; i < 4; i++) + p_hwfn->hw_info.part_num[i] = qed_rd(p_hwfn, p_ptt, addr + i * 4); + return qed_mcp_fill_shmem_func_info(p_hwfn, p_ptt); }
diff --combined drivers/net/ethernet/qlogic/qed/qed_main.c index db5d003770ba,f39f629242a1..5b149ceff6b6 --- a/drivers/net/ethernet/qlogic/qed/qed_main.c +++ b/drivers/net/ethernet/qlogic/qed/qed_main.c @@@ -39,7 -39,6 +39,7 @@@ #include "qed_hw.h" #include "qed_selftest.h" #include "qed_debug.h" +#include "qed_devlink.h"
#define QED_ROCE_QPS (8192) #define QED_ROCE_DPIS (8) @@@ -479,7 -478,6 +479,7 @@@ int qed_fill_dev_info(struct qed_dev *c }
dev_info->mtu = hw_info->mtu; + cdev->common_dev_info = *dev_info;
return 0; } @@@ -512,6 -510,107 +512,6 @@@ static int qed_set_power_state(struct q return 0; }
-struct qed_devlink { - struct qed_dev *cdev; -}; - -enum qed_devlink_param_id { - QED_DEVLINK_PARAM_ID_BASE = DEVLINK_PARAM_GENERIC_ID_MAX, - QED_DEVLINK_PARAM_ID_IWARP_CMT, -}; - -static int qed_dl_param_get(struct devlink *dl, u32 id, - struct devlink_param_gset_ctx *ctx) -{ - struct qed_devlink *qed_dl; - struct qed_dev *cdev; - - qed_dl = devlink_priv(dl); - cdev = qed_dl->cdev; - ctx->val.vbool = cdev->iwarp_cmt; - - return 0; -} - -static int qed_dl_param_set(struct devlink *dl, u32 id, - struct devlink_param_gset_ctx *ctx) -{ - struct qed_devlink *qed_dl; - struct qed_dev *cdev; - - qed_dl = devlink_priv(dl); - cdev = qed_dl->cdev; - cdev->iwarp_cmt = ctx->val.vbool; - - return 0; -} - -static const struct devlink_param qed_devlink_params[] = { - DEVLINK_PARAM_DRIVER(QED_DEVLINK_PARAM_ID_IWARP_CMT, - "iwarp_cmt", DEVLINK_PARAM_TYPE_BOOL, - BIT(DEVLINK_PARAM_CMODE_RUNTIME), - qed_dl_param_get, qed_dl_param_set, NULL), -}; - -static const struct devlink_ops qed_dl_ops; - -static int qed_devlink_register(struct qed_dev *cdev) -{ - union devlink_param_value value; - struct qed_devlink *qed_dl; - struct devlink *dl; - int rc; - - dl = devlink_alloc(&qed_dl_ops, sizeof(*qed_dl)); - if (!dl) - return -ENOMEM; - - qed_dl = devlink_priv(dl); - - cdev->dl = dl; - qed_dl->cdev = cdev; - - rc = devlink_register(dl, &cdev->pdev->dev); - if (rc) - goto err_free; - - rc = devlink_params_register(dl, qed_devlink_params, - ARRAY_SIZE(qed_devlink_params)); - if (rc) - goto err_unregister; - - value.vbool = false; - devlink_param_driverinit_value_set(dl, - QED_DEVLINK_PARAM_ID_IWARP_CMT, - value); - - devlink_params_publish(dl); - cdev->iwarp_cmt = false; - - return 0; - -err_unregister: - devlink_unregister(dl); - -err_free: - cdev->dl = NULL; - devlink_free(dl); - - return rc; -} - -static void qed_devlink_unregister(struct qed_dev *cdev) -{ - if (!cdev->dl) - return; - - devlink_params_unregister(cdev->dl, qed_devlink_params, - ARRAY_SIZE(qed_devlink_params)); - - devlink_unregister(cdev->dl); - devlink_free(cdev->dl); -} - /* probing */ static struct qed_dev *qed_probe(struct pci_dev *pdev, struct qed_probe_params *params) @@@ -540,6 -639,12 +540,6 @@@ } DP_INFO(cdev, "PCI init completed successfully\n");
- rc = qed_devlink_register(cdev); - if (rc) { - DP_INFO(cdev, "Failed to register devlink.\n"); - goto err2; - } - rc = qed_hw_prepare(cdev, QED_PCI_DEFAULT); if (rc) { DP_ERR(cdev, "hw prepare failed\n"); @@@ -569,6 -674,8 +569,6 @@@ static void qed_remove(struct qed_dev *
qed_set_power_state(cdev, PCI_D3hot);
- qed_devlink_unregister(cdev); - qed_free_cdev(cdev); }
@@@ -654,7 -761,7 +654,7 @@@ static int qed_set_int_mode(struct qed_ kfree(int_params->msix_table); if (force_mode) goto out; - /* Fallthrough */ + fallthrough;
case QED_INT_MODE_MSI: if (cdev->num_hwfns == 1) { @@@ -668,7 -775,7 +668,7 @@@ if (force_mode) goto out; } - /* Fallthrough */ + fallthrough;
case QED_INT_MODE_INTA: int_params->out.int_mode = QED_INT_MODE_INTA; @@@ -2817,7 -2924,7 +2817,7 @@@ static int qed_set_led(struct qed_dev * return status; }
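These hunks, like many below (r8169, sfc, sungem, dp83640, phylink, sfp, tun, veth, qeth), make the same mechanical substitution: the /* fall through */ comment becomes the fallthrough; statement, which expands to __attribute__((__fallthrough__)) where supported so the compiler can verify the intent. A minimal illustration with hypothetical names:

enum int_mode { MODE_MSIX, MODE_MSI, MODE_INTA };	/* hypothetical */

static enum int_mode pick_int_mode(enum int_mode wanted, bool msix_ok)
{
	switch (wanted) {
	case MODE_MSIX:
		if (msix_ok)
			return MODE_MSIX;
		fallthrough;	/* explicit, compiler-checked fall-through */
	case MODE_MSI:
		return MODE_MSI;
	default:
		return MODE_INTA;
	}
}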
-static int qed_recovery_process(struct qed_dev *cdev) +int qed_recovery_process(struct qed_dev *cdev) { struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev); struct qed_ptt *p_ptt; @@@ -3005,9 -3112,6 +3005,9 @@@ const struct qed_common_ops qed_common_ .get_link = &qed_get_current_link, .drain = &qed_drain, .update_msglvl = &qed_init_dp, + .devlink_register = qed_devlink_register, + .devlink_unregister = qed_devlink_unregister, + .report_fatal_error = qed_report_fatal_error, .dbg_all_data = &qed_dbg_all_data, .dbg_all_data_size = &qed_dbg_all_data_size, .chain_alloc = &qed_chain_alloc, diff --combined drivers/net/ethernet/realtek/r8169_main.c index c427865d51a4,fc9e6626db55..9e4e6a883877 --- a/drivers/net/ethernet/realtek/r8169_main.c +++ b/drivers/net/ethernet/realtek/r8169_main.c @@@ -617,6 -617,7 +617,6 @@@ struct rtl8169_private struct work_struct work; } wk;
- unsigned irq_enabled:1; unsigned supports_gmii:1; unsigned aspm_manageable:1; dma_addr_t counters_phys_addr; @@@ -1279,10 -1280,12 +1279,10 @@@ static void rtl_irq_disable(struct rtl8 RTL_W32(tp, IntrMask_8125, 0); else RTL_W16(tp, IntrMask, 0); - tp->irq_enabled = 0; }
static void rtl_irq_enable(struct rtl8169_private *tp) { - tp->irq_enabled = 1; if (rtl_is_8125(tp)) RTL_W32(tp, IntrMask_8125, tp->irq_mask); else @@@ -4538,7 -4541,8 +4538,7 @@@ static irqreturn_t rtl8169_interrupt(in struct rtl8169_private *tp = dev_instance; u32 status = rtl_get_events(tp);
- if (!tp->irq_enabled || (status & 0xffff) == 0xffff || - !(status & tp->irq_mask)) + if ((status & 0xffff) == 0xffff || !(status & tp->irq_mask)) return IRQ_NONE;
if (unlikely(status & SYSErr)) { @@@ -4592,8 -4596,10 +4592,8 @@@ static int rtl8169_poll(struct napi_str
rtl_tx(dev, tp, budget);
- if (work_done < budget) { - napi_complete_done(napi, work_done); + if (work_done < budget && napi_complete_done(napi, work_done)) rtl_irq_enable(tp); - }
return work_done; } @@@ -4988,7 -4994,7 +4988,7 @@@ static int rtl_alloc_irq(struct rtl8169 rtl_unlock_config_regs(tp); RTL_W8(tp, Config2, RTL_R8(tp, Config2) & ~MSIEnable); rtl_lock_config_regs(tp); - /* fall through */ + fallthrough; case RTL_GIGA_MAC_VER_07 ... RTL_GIGA_MAC_VER_17: flags = PCI_IRQ_LEGACY; break; @@@ -5131,7 -5137,7 +5131,7 @@@ static void rtl_hw_initialize(struct rt switch (tp->mac_version) { case RTL_GIGA_MAC_VER_49 ... RTL_GIGA_MAC_VER_52: rtl8168ep_stop_cmac(tp); - /* fall through */ + fallthrough; case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_48: rtl_hw_init_8168g(tp); break; diff --combined drivers/net/ethernet/renesas/ravb_main.c index adc8c8f3b5fc,df89d09b253e..f684296df871 --- a/drivers/net/ethernet/renesas/ravb_main.c +++ b/drivers/net/ethernet/renesas/ravb_main.c @@@ -162,7 -162,7 +162,7 @@@ static int ravb_get_mdio_data(struct md }
/* MDIO bus control struct */ -static struct mdiobb_ops bb_ops = { +static const struct mdiobb_ops bb_ops = { .owner = THIS_MODULE, .set_mdc = ravb_set_mdc, .set_mdio_dir = ravb_set_mdio_dir, @@@ -1342,6 -1342,51 +1342,51 @@@ static inline int ravb_hook_irq(unsigne return error; }
+ /* MDIO bus init function */ + static int ravb_mdio_init(struct ravb_private *priv) + { + struct platform_device *pdev = priv->pdev; + struct device *dev = &pdev->dev; + int error; + + /* Bitbang init */ + priv->mdiobb.ops = &bb_ops; + + /* MII controller setting */ + priv->mii_bus = alloc_mdio_bitbang(&priv->mdiobb); + if (!priv->mii_bus) + return -ENOMEM; + + /* Hook up MII support for ethtool */ + priv->mii_bus->name = "ravb_mii"; + priv->mii_bus->parent = dev; + snprintf(priv->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x", + pdev->name, pdev->id); + + /* Register MDIO bus */ + error = of_mdiobus_register(priv->mii_bus, dev->of_node); + if (error) + goto out_free_bus; + + return 0; + + out_free_bus: + free_mdio_bitbang(priv->mii_bus); + return error; + } + + /* MDIO bus release function */ + static int ravb_mdio_release(struct ravb_private *priv) + { + /* Unregister mdio bus */ + mdiobus_unregister(priv->mii_bus); + + /* Free bitbang info */ + free_mdio_bitbang(priv->mii_bus); + + return 0; + } + /* Network device open function for Ethernet AVB */ static int ravb_open(struct net_device *ndev) { @@@ -1350,6 -1395,13 +1395,13 @@@ struct device *dev = &pdev->dev; int error;
+ /* MDIO bus init */ + error = ravb_mdio_init(priv); + if (error) { + netdev_err(ndev, "failed to initialize MDIO\n"); + return error; + } + napi_enable(&priv->napi[RAVB_BE]); napi_enable(&priv->napi[RAVB_NC]);
@@@ -1427,6 -1479,7 +1479,7 @@@ out_free_irq out_napi_off: napi_disable(&priv->napi[RAVB_NC]); napi_disable(&priv->napi[RAVB_BE]); + ravb_mdio_release(priv); return error; }
@@@ -1736,6 -1789,8 +1789,8 @@@ static int ravb_close(struct net_devic ravb_ring_free(ndev, RAVB_BE); ravb_ring_free(ndev, RAVB_NC);
+ ravb_mdio_release(priv); + return 0; }
@@@ -1887,51 -1942,6 +1942,6 @@@ static const struct net_device_ops ravb .ndo_set_features = ravb_set_features, };
- /* MDIO bus init function */ - static int ravb_mdio_init(struct ravb_private *priv) - { - struct platform_device *pdev = priv->pdev; - struct device *dev = &pdev->dev; - int error; - - /* Bitbang init */ - priv->mdiobb.ops = &bb_ops; - - /* MII controller setting */ - priv->mii_bus = alloc_mdio_bitbang(&priv->mdiobb); - if (!priv->mii_bus) - return -ENOMEM; - - /* Hook up MII support for ethtool */ - priv->mii_bus->name = "ravb_mii"; - priv->mii_bus->parent = dev; - snprintf(priv->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x", - pdev->name, pdev->id); - - /* Register MDIO bus */ - error = of_mdiobus_register(priv->mii_bus, dev->of_node); - if (error) - goto out_free_bus; - - return 0; - - out_free_bus: - free_mdio_bitbang(priv->mii_bus); - return error; - } - - /* MDIO bus release function */ - static int ravb_mdio_release(struct ravb_private *priv) - { - /* Unregister mdio bus */ - mdiobus_unregister(priv->mii_bus); - - /* Free bitbang info */ - free_mdio_bitbang(priv->mii_bus); - - return 0; - } - static const struct of_device_id ravb_match_table[] = { { .compatible = "renesas,etheravb-r8a7790", .data = (void *)RCAR_GEN2 }, { .compatible = "renesas,etheravb-r8a7794", .data = (void *)RCAR_GEN2 }, @@@ -2174,13 -2184,6 +2184,6 @@@ static int ravb_probe(struct platform_d eth_hw_addr_random(ndev); }
- /* MDIO bus init */ - error = ravb_mdio_init(priv); - if (error) { - dev_err(&pdev->dev, "failed to initialize MDIO\n"); - goto out_dma_free; - } - netif_napi_add(ndev, &priv->napi[RAVB_BE], ravb_poll, 64); netif_napi_add(ndev, &priv->napi[RAVB_NC], ravb_poll, 64);
@@@ -2202,8 -2205,6 +2205,6 @@@ out_napi_del: netif_napi_del(&priv->napi[RAVB_NC]); netif_napi_del(&priv->napi[RAVB_BE]); - ravb_mdio_release(priv); - out_dma_free: dma_free_coherent(ndev->dev.parent, priv->desc_bat_size, priv->desc_bat, priv->desc_bat_dma);
@@@ -2235,7 -2236,6 +2236,6 @@@ static int ravb_remove(struct platform_ unregister_netdev(ndev); netif_napi_del(&priv->napi[RAVB_NC]); netif_napi_del(&priv->napi[RAVB_BE]); - ravb_mdio_release(priv); pm_runtime_disable(&pdev->dev); free_netdev(ndev); platform_set_drvdata(pdev, NULL); diff --combined drivers/net/ethernet/sfc/farch.c index 0d9795fb9356,4002f9a3ae90..a48a931ad0e8 --- a/drivers/net/ethernet/sfc/farch.c +++ b/drivers/net/ethernet/sfc/farch.c @@@ -863,8 -863,13 +863,8 @@@ static u16 efx_farch_handle_rx_not_ok(s bool rx_ev_tcp_udp_chksum_err, rx_ev_eth_crc_err; bool rx_ev_frm_trunc, rx_ev_tobe_disc; bool rx_ev_other_err, rx_ev_pause_frm; - bool rx_ev_hdr_type, rx_ev_mcast_pkt; - unsigned rx_ev_pkt_type;
- rx_ev_hdr_type = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_HDR_TYPE); - rx_ev_mcast_pkt = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_MCAST_PKT); rx_ev_tobe_disc = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_TOBE_DISC); - rx_ev_pkt_type = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_PKT_TYPE); rx_ev_buf_owner_id_err = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_BUF_OWNER_ID_ERR); rx_ev_ip_hdr_chksum_err = EFX_QWORD_FIELD(*event, @@@ -913,8 -918,6 +913,8 @@@ rx_ev_tobe_disc ? " [TOBE_DISC]" : "", rx_ev_pause_frm ? " [PAUSE]" : ""); } +#else + (void) rx_ev_other_err; #endif
if (efx->net_dev->features & NETIF_F_RXALL) @@@ -1035,10 -1038,10 +1035,10 @@@ efx_farch_handle_rx_event(struct efx_ch switch (rx_ev_hdr_type) { case FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_TCP: flags |= EFX_RX_PKT_TCP; - /* fall through */ + fallthrough; case FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_UDP: flags |= EFX_RX_PKT_CSUMMED; - /* fall through */ + fallthrough; case FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_OTHER: case FSE_AZ_RX_EV_HDR_TYPE_OTHER: break; @@@ -1313,7 -1316,7 +1313,7 @@@ int efx_farch_ev_process(struct efx_cha if (efx->type->handle_global_event && efx->type->handle_global_event(channel, &event)) break; - /* else fall through */ + fallthrough; default: netif_err(channel->efx, hw, channel->efx->net_dev, "channel %d unknown event type %d (data " @@@ -2040,7 -2043,7 +2040,7 @@@ efx_farch_filter_from_gen_spec(struct e EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_LOC_PORT | EFX_FILTER_MATCH_REM_HOST | EFX_FILTER_MATCH_REM_PORT): is_full = true; - /* fall through */ + fallthrough; case (EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_IP_PROTO | EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_LOC_PORT): { __be32 rhost, host1, host2; @@@ -2091,7 -2094,7 +2091,7 @@@
case EFX_FILTER_MATCH_LOC_MAC | EFX_FILTER_MATCH_OUTER_VID: is_full = true; - /* fall through */ + fallthrough; case EFX_FILTER_MATCH_LOC_MAC: spec->type = (is_full ? EFX_FARCH_FILTER_MAC_FULL : EFX_FARCH_FILTER_MAC_WILD); @@@ -2138,7 -2141,7 +2138,7 @@@ efx_farch_filter_to_gen_spec(struct efx case EFX_FARCH_FILTER_TCP_FULL: case EFX_FARCH_FILTER_UDP_FULL: is_full = true; - /* fall through */ + fallthrough; case EFX_FARCH_FILTER_TCP_WILD: case EFX_FARCH_FILTER_UDP_WILD: { __be32 host1, host2; @@@ -2182,7 -2185,7 +2182,7 @@@
case EFX_FARCH_FILTER_MAC_FULL: is_full = true; - /* fall through */ + fallthrough; case EFX_FARCH_FILTER_MAC_WILD: gen_spec->match_flags = EFX_FILTER_MATCH_LOC_MAC; if (is_full) @@@ -2589,6 -2592,7 +2589,6 @@@ int efx_farch_filter_remove_safe(struc enum efx_farch_filter_table_id table_id; struct efx_farch_filter_table *table; unsigned int filter_idx; - struct efx_farch_filter_spec *spec; int rc;
table_id = efx_farch_filter_id_table_id(filter_id); @@@ -2600,6 -2604,7 +2600,6 @@@ if (filter_idx >= table->size) return -ENOENT; down_write(&state->lock); - spec = &table->spec[filter_idx];
rc = efx_farch_filter_remove(efx, table, filter_idx, priority); up_write(&state->lock); diff --combined drivers/net/ethernet/sun/sungem.c index b7093975b14c,8deb943ca5de..58f142ee78a3 --- a/drivers/net/ethernet/sun/sungem.c +++ b/drivers/net/ethernet/sun/sungem.c @@@ -2712,7 -2712,7 +2712,7 @@@ static int gem_ioctl(struct net_device switch (cmd) { case SIOCGMIIPHY: /* Get address of MII PHY in use. */ data->phy_id = gp->mii_phy_addr; - /* Fallthrough... */ + fallthrough;
case SIOCGMIIREG: /* Read MII PHY register. */ data->val_out = __sungem_phy_read(gp, data->phy_id & 0x1f, @@@ -2965,8 -2965,9 +2965,8 @@@ static int gem_init_one(struct pci_dev /* It is guaranteed that the returned buffer will be at least * PAGE_SIZE aligned. */ - gp->init_block = (struct gem_init_block *) - dma_alloc_coherent(&pdev->dev, sizeof(struct gem_init_block), - &gp->gblock_dvma, GFP_KERNEL); + gp->init_block = dma_alloc_coherent(&pdev->dev, sizeof(struct gem_init_block), + &gp->gblock_dvma, GFP_KERNEL); if (!gp->init_block) { pr_err("Cannot allocate init block, aborting\n"); err = -ENOMEM; diff --combined drivers/net/gtp.c index 2ed1e82a8ad8,8e47d0112e5d..611722eafed8 --- a/drivers/net/gtp.c +++ b/drivers/net/gtp.c @@@ -928,8 -928,8 +928,8 @@@ static void ipv4_pdp_fill(struct pdp_ct } }
-static int gtp_pdp_add(struct gtp_dev *gtp, struct sock *sk, - struct genl_info *info) +static struct pdp_ctx *gtp_pdp_add(struct gtp_dev *gtp, struct sock *sk, + struct genl_info *info) { struct pdp_ctx *pctx, *pctx_tid = NULL; struct net_device *dev = gtp->dev; @@@ -956,12 -956,12 +956,12 @@@
if (found) { if (info->nlhdr->nlmsg_flags & NLM_F_EXCL) - return -EEXIST; + return ERR_PTR(-EEXIST); if (info->nlhdr->nlmsg_flags & NLM_F_REPLACE) - return -EOPNOTSUPP; + return ERR_PTR(-EOPNOTSUPP);
if (pctx && pctx_tid) - return -EEXIST; + return ERR_PTR(-EEXIST); if (!pctx) pctx = pctx_tid;
@@@ -974,13 -974,13 +974,13 @@@ netdev_dbg(dev, "GTPv1-U: update tunnel id = %x/%x (pdp %p)\n", pctx->u.v1.i_tei, pctx->u.v1.o_tei, pctx);
- return 0; + return pctx;
}
pctx = kmalloc(sizeof(*pctx), GFP_ATOMIC); if (pctx == NULL) - return -ENOMEM; + return ERR_PTR(-ENOMEM);
sock_hold(sk); pctx->sk = sk; @@@ -1018,7 -1018,7 +1018,7 @@@ break; }
- return 0; + return pctx; }
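Returning the context itself instead of an int lets callers distinguish the created or updated object from a failure, using the kernel's ERR_PTR encoding (a negative errno stored in the pointer); the gtp_genl_new_pdp() hunk below shows the matching IS_ERR()/PTR_ERR() side. Both halves of that contract, sketched with a hypothetical object type:

#include <linux/err.h>
#include <linux/slab.h>

struct obj { int id; };	/* hypothetical */

static struct obj *obj_create(int id)
{
	struct obj *o = kmalloc(sizeof(*o), GFP_KERNEL);

	if (!o)
		return ERR_PTR(-ENOMEM);	/* errno rides in the pointer */
	o->id = id;
	return o;
}

static int obj_create_and_report(int id)
{
	struct obj *o = obj_create(id);

	return IS_ERR(o) ? PTR_ERR(o) : 0;	/* unpack on the caller side */
}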
static void pdp_context_free(struct rcu_head *head) @@@ -1036,12 -1036,9 +1036,12 @@@ static void pdp_context_delete(struct p call_rcu(&pctx->rcu_head, pdp_context_free); }
+static int gtp_tunnel_notify(struct pdp_ctx *pctx, u8 cmd, gfp_t allocation); + static int gtp_genl_new_pdp(struct sk_buff *skb, struct genl_info *info) { unsigned int version; + struct pdp_ctx *pctx; struct gtp_dev *gtp; struct sock *sk; int err; @@@ -1071,6 -1068,7 +1071,6 @@@ }
rtnl_lock(); - rcu_read_lock();
gtp = gtp_find_dev(sock_net(skb->sk), info->attrs); if (!gtp) { @@@ -1090,15 -1088,10 +1090,15 @@@ goto out_unlock; }
- err = gtp_pdp_add(gtp, sk, info); + pctx = gtp_pdp_add(gtp, sk, info); + if (IS_ERR(pctx)) { + err = PTR_ERR(pctx); + } else { + gtp_tunnel_notify(pctx, GTP_CMD_NEWPDP, GFP_KERNEL); + err = 0; + }
out_unlock: - rcu_read_unlock(); rtnl_unlock(); return err; } @@@ -1166,7 -1159,6 +1166,7 @@@ static int gtp_genl_del_pdp(struct sk_b netdev_dbg(pctx->dev, "GTPv1-U: deleting tunnel id = %x/%x (pdp %p)\n", pctx->u.v1.i_tei, pctx->u.v1.o_tei, pctx);
+ gtp_tunnel_notify(pctx, GTP_CMD_DELPDP, GFP_ATOMIC); pdp_context_delete(pctx);
out_unlock: @@@ -1176,14 -1168,6 +1176,14 @@@
static struct genl_family gtp_genl_family;
+enum gtp_multicast_groups { + GTP_GENL_MCGRP, +}; + +static const struct genl_multicast_group gtp_genl_mcgrps[] = { + [GTP_GENL_MCGRP] = { .name = GTP_GENL_MCGRP_NAME }, +}; + static int gtp_genl_fill_info(struct sk_buff *skb, u32 snd_portid, u32 snd_seq, int flags, u32 type, struct pdp_ctx *pctx) { @@@ -1195,6 -1179,7 +1195,7 @@@ goto nlmsg_failure;
if (nla_put_u32(skb, GTPA_VERSION, pctx->gtp_version) || + nla_put_u32(skb, GTPA_LINK, pctx->dev->ifindex) || nla_put_be32(skb, GTPA_PEER_ADDRESS, pctx->peer_addr_ip4.s_addr) || nla_put_be32(skb, GTPA_MS_ADDRESS, pctx->ms_addr_ip4.s_addr)) goto nla_put_failure; @@@ -1220,26 -1205,6 +1221,26 @@@ nla_put_failure return -EMSGSIZE; }
+static int gtp_tunnel_notify(struct pdp_ctx *pctx, u8 cmd, gfp_t allocation) +{ + struct sk_buff *msg; + int ret; + + msg = nlmsg_new(NLMSG_DEFAULT_SIZE, allocation); + if (!msg) + return -ENOMEM; + + ret = gtp_genl_fill_info(msg, 0, 0, 0, cmd, pctx); + if (ret < 0) { + nlmsg_free(msg); + return ret; + } + + ret = genlmsg_multicast_netns(&gtp_genl_family, dev_net(pctx->dev), msg, + 0, GTP_GENL_MCGRP, GFP_ATOMIC); + return ret; +} + static int gtp_genl_get_pdp(struct sk_buff *skb, struct genl_info *info) { struct pdp_ctx *pctx = NULL; @@@ -1370,8 -1335,6 +1371,8 @@@ static struct genl_family gtp_genl_fami .module = THIS_MODULE, .ops = gtp_genl_ops, .n_ops = ARRAY_SIZE(gtp_genl_ops), + .mcgrps = gtp_genl_mcgrps, + .n_mcgrps = ARRAY_SIZE(gtp_genl_mcgrps), };
static int __net_init gtp_net_init(struct net *net) diff --combined drivers/net/phy/dp83640.c index fc3d747eba55,79e67f2fe00a..f2caccaf4408 --- a/drivers/net/phy/dp83640.c +++ b/drivers/net/phy/dp83640.c @@@ -766,13 -766,13 +766,13 @@@ static int decode_evnt(struct dp83640_p switch (words) { case 3: dp83640->edata.sec_hi = phy_txts->sec_hi; - /* fall through */ + fallthrough; case 2: dp83640->edata.sec_lo = phy_txts->sec_lo; - /* fall through */ + fallthrough; case 1: dp83640->edata.ns_hi = phy_txts->ns_hi; - /* fall through */ + fallthrough; case 0: dp83640->edata.ns_lo = phy_txts->ns_lo; } @@@ -798,32 -798,51 +798,32 @@@ return parsed; }
-#define DP83640_PACKET_HASH_OFFSET 20 #define DP83640_PACKET_HASH_LEN 10
static int match(struct sk_buff *skb, unsigned int type, struct rxts *rxts) { - unsigned int offset = 0; - u8 *msgtype, *data = skb_mac_header(skb); - __be16 *seqid; + struct ptp_header *hdr; + u8 msgtype; + u16 seqid; u16 hash;
/* check sequenceID, messageType, 12 bit hash of offset 20-29 */
- if (type & PTP_CLASS_VLAN) - offset += VLAN_HLEN; - - switch (type & PTP_CLASS_PMASK) { - case PTP_CLASS_IPV4: - offset += ETH_HLEN + IPV4_HLEN(data + offset) + UDP_HLEN; - break; - case PTP_CLASS_IPV6: - offset += ETH_HLEN + IP6_HLEN + UDP_HLEN; - break; - case PTP_CLASS_L2: - offset += ETH_HLEN; - break; - default: + hdr = ptp_parse_header(skb, type); + if (!hdr) return 0; - }
- if (skb->len + ETH_HLEN < offset + OFF_PTP_SEQUENCE_ID + sizeof(*seqid)) - return 0; + msgtype = ptp_get_msgtype(hdr, type);
- if (unlikely(type & PTP_CLASS_V1)) - msgtype = data + offset + OFF_PTP_CONTROL; - else - msgtype = data + offset; - if (rxts->msgtype != (*msgtype & 0xf)) + if (rxts->msgtype != (msgtype & 0xf)) return 0;
- seqid = (__be16 *)(data + offset + OFF_PTP_SEQUENCE_ID); - if (rxts->seqid != ntohs(*seqid)) + seqid = be16_to_cpu(hdr->sequence_id); + if (rxts->seqid != seqid) return 0;
hash = ether_crc(DP83640_PACKET_HASH_LEN, - data + offset + DP83640_PACKET_HASH_OFFSET) >> 20; + (unsigned char *)&hdr->source_port_identity) >> 20; if (rxts->hash != hash) return 0;
@@@ -963,16 -982,35 +963,16 @@@ static void decode_status_frame(struct
static int is_sync(struct sk_buff *skb, int type) { - u8 *data = skb->data, *msgtype; - unsigned int offset = 0; - - if (type & PTP_CLASS_VLAN) - offset += VLAN_HLEN; - - switch (type & PTP_CLASS_PMASK) { - case PTP_CLASS_IPV4: - offset += ETH_HLEN + IPV4_HLEN(data + offset) + UDP_HLEN; - break; - case PTP_CLASS_IPV6: - offset += ETH_HLEN + IP6_HLEN + UDP_HLEN; - break; - case PTP_CLASS_L2: - offset += ETH_HLEN; - break; - default: - return 0; - } - - if (type & PTP_CLASS_V1) - offset += OFF_PTP_CONTROL; + struct ptp_header *hdr; + u8 msgtype;
- if (skb->len < offset + 1) + hdr = ptp_parse_header(skb, type); + if (!hdr) return 0;
- msgtype = data + offset; + msgtype = ptp_get_msgtype(hdr, type);
- return (*msgtype & 0xf) == 0; + return (msgtype & 0xf) == 0; }
static void dp83640_free_clocks(void) @@@ -1371,7 -1409,7 +1371,7 @@@ static void dp83640_txtstamp(struct mii kfree_skb(skb); return; } - /* fall through */ + fallthrough; case HWTSTAMP_TX_ON: skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; skb_info->tmo = jiffies + SKB_TIMESTAMP_TIMEOUT; diff --combined drivers/net/phy/dp83867.c index ca26ccc6dfa4,cd7032628a28..69d3eacc2b96 --- a/drivers/net/phy/dp83867.c +++ b/drivers/net/phy/dp83867.c @@@ -1,5 -1,6 +1,5 @@@ // SPDX-License-Identifier: GPL-2.0 -/* - * Driver for the Texas Instruments DP83867 PHY +/* Driver for the Texas Instruments DP83867 PHY * * Copyright (C) 2015 Texas Instruments Inc. */ @@@ -112,6 -113,7 +112,6 @@@ #define DP83867_RGMII_RX_CLK_DELAY_SHIFT 0 #define DP83867_RGMII_RX_CLK_DELAY_INV (DP83867_RGMII_RX_CLK_DELAY_MAX + 1)
- /* IO_MUX_CFG bits */ #define DP83867_IO_MUX_CFG_IO_IMPEDANCE_MASK 0x1f #define DP83867_IO_MUX_CFG_IO_IMPEDANCE_MAX 0x0 @@@ -213,9 -215,9 +213,9 @@@ static int dp83867_set_wol(struct phy_d if (wol->wolopts & WAKE_MAGICSECURE) { phy_write_mmd(phydev, DP83867_DEVADDR, DP83867_RXFSOP1, (wol->sopass[1] << 8) | wol->sopass[0]); - phy_write_mmd(phydev, DP83867_DEVADDR, DP83867_RXFSOP1, + phy_write_mmd(phydev, DP83867_DEVADDR, DP83867_RXFSOP2, (wol->sopass[3] << 8) | wol->sopass[2]); - phy_write_mmd(phydev, DP83867_DEVADDR, DP83867_RXFSOP1, + phy_write_mmd(phydev, DP83867_DEVADDR, DP83867_RXFSOP3, (wol->sopass[5] << 8) | wol->sopass[4]);
val_rxcfg |= DP83867_WOL_SEC_EN; @@@ -382,22 -384,22 +382,22 @@@ static int dp83867_set_downshift(struc DP83867_DOWNSHIFT_EN);
switch (cnt) { - case DP83867_DOWNSHIFT_1_COUNT: - count = DP83867_DOWNSHIFT_1_COUNT_VAL; - break; - case DP83867_DOWNSHIFT_2_COUNT: - count = DP83867_DOWNSHIFT_2_COUNT_VAL; - break; - case DP83867_DOWNSHIFT_4_COUNT: - count = DP83867_DOWNSHIFT_4_COUNT_VAL; - break; - case DP83867_DOWNSHIFT_8_COUNT: - count = DP83867_DOWNSHIFT_8_COUNT_VAL; - break; - default: - phydev_err(phydev, - "Downshift count must be 1, 2, 4 or 8\n"); - return -EINVAL; + case DP83867_DOWNSHIFT_1_COUNT: + count = DP83867_DOWNSHIFT_1_COUNT_VAL; + break; + case DP83867_DOWNSHIFT_2_COUNT: + count = DP83867_DOWNSHIFT_2_COUNT_VAL; + break; + case DP83867_DOWNSHIFT_4_COUNT: + count = DP83867_DOWNSHIFT_4_COUNT_VAL; + break; + case DP83867_DOWNSHIFT_8_COUNT: + count = DP83867_DOWNSHIFT_8_COUNT_VAL; + break; + default: + phydev_err(phydev, + "Downshift count must be 1, 2, 4 or 8\n"); + return -EINVAL; }
val = DP83867_DOWNSHIFT_EN; @@@ -409,7 -411,7 +409,7 @@@ }
static int dp83867_get_tunable(struct phy_device *phydev, - struct ethtool_tunable *tuna, void *data) + struct ethtool_tunable *tuna, void *data) { switch (tuna->id) { case ETHTOOL_PHY_DOWNSHIFT: @@@ -420,7 -422,7 +420,7 @@@ }
static int dp83867_set_tunable(struct phy_device *phydev, - struct ethtool_tunable *tuna, const void *data) + struct ethtool_tunable *tuna, const void *data) { switch (tuna->id) { case ETHTOOL_PHY_DOWNSHIFT: @@@ -522,10 -524,11 +522,10 @@@ static int dp83867_of_init(struct phy_d dp83867->io_impedance = -1; /* leave at default */
dp83867->rxctrl_strap_quirk = of_property_read_bool(of_node, - "ti,dp83867-rxctrl-strap-quirk"); + "ti,dp83867-rxctrl-strap-quirk");
dp83867->sgmii_ref_clk_en = of_property_read_bool(of_node, - "ti,sgmii-ref-clock-output-enable"); - + "ti,sgmii-ref-clock-output-enable");
dp83867->rx_id_delay = DP83867_RGMII_RX_CLK_DELAY_INV; ret = of_property_read_u32(of_node, "ti,rx-internal-delay", diff --combined drivers/net/phy/phylink.c index d0738302a958,32f4e8ec96cf..fe2296fdda19 --- a/drivers/net/phy/phylink.c +++ b/drivers/net/phy/phylink.c @@@ -535,10 -535,8 +535,10 @@@ static void phylink_mac_pcs_get_state(s
if (pl->pcs_ops) pl->pcs_ops->pcs_get_state(pl->pcs, state); - else + else if (pl->mac_ops->mac_pcs_get_state) pl->mac_ops->mac_pcs_get_state(pl->config, state); + else + state->link = 0; }
/* The fixed state is... fixed except for the link state, @@@ -1907,7 -1905,7 +1907,7 @@@ int phylink_mii_ioctl(struct phylink *p switch (cmd) { case SIOCGMIIPHY: mii->phy_id = pl->phydev->mdio.addr; - /* fall through */ + fallthrough;
case SIOCGMIIREG: ret = phylink_phy_read(pl, mii->phy_id, mii->reg_num); @@@ -1930,7 -1928,7 +1930,7 @@@ switch (cmd) { case SIOCGMIIPHY: mii->phy_id = 0; - /* fall through */ + fallthrough;
case SIOCGMIIREG: ret = phylink_mii_read(pl, mii->phy_id, mii->reg_num); @@@ -2320,49 -2318,6 +2320,49 @@@ static void phylink_decode_sgmii_word(s state->duplex = DUPLEX_HALF; }
+/** + * phylink_decode_usxgmii_word() - decode the USXGMII word from a MAC PCS + * @state: a pointer to a struct phylink_link_state. + * @lpa: a 16 bit value which stores the USXGMII auto-negotiation word + * + * Helper for MAC PCS supporting the USXGMII protocol and the auto-negotiation + * code word. Decode the USXGMII code word and populate the corresponding fields + * (speed, duplex) into the phylink_link_state structure. + */ +void phylink_decode_usxgmii_word(struct phylink_link_state *state, + uint16_t lpa) +{ + switch (lpa & MDIO_USXGMII_SPD_MASK) { + case MDIO_USXGMII_10: + state->speed = SPEED_10; + break; + case MDIO_USXGMII_100: + state->speed = SPEED_100; + break; + case MDIO_USXGMII_1000: + state->speed = SPEED_1000; + break; + case MDIO_USXGMII_2500: + state->speed = SPEED_2500; + break; + case MDIO_USXGMII_5000: + state->speed = SPEED_5000; + break; + case MDIO_USXGMII_10G: + state->speed = SPEED_10000; + break; + default: + state->link = false; + return; + } + + if (lpa & MDIO_USXGMII_FULL_DUPLEX) + state->duplex = DUPLEX_FULL; + else + state->duplex = DUPLEX_HALF; +} +EXPORT_SYMBOL_GPL(phylink_decode_usxgmii_word); + /** * phylink_mii_c22_pcs_get_state() - read the MAC PCS state * @pcs: a pointer to a &struct mdio_device. @@@ -2406,7 -2361,6 +2406,7 @@@ void phylink_mii_c22_pcs_get_state(stru break;
case PHY_INTERFACE_MODE_SGMII: + case PHY_INTERFACE_MODE_QSGMII: phylink_decode_sgmii_word(state, lpa); break;
diff --combined drivers/net/phy/sfp.c index 5250dcdf46a4,cf83314c8591..1d18c10e8f82 --- a/drivers/net/phy/sfp.c +++ b/drivers/net/phy/sfp.c @@@ -7,7 -7,6 +7,7 @@@ #include <linux/i2c.h> #include <linux/interrupt.h> #include <linux/jiffies.h> +#include <linux/mdio/mdio-i2c.h> #include <linux/module.h> #include <linux/mutex.h> #include <linux/of.h> @@@ -17,6 -16,7 +17,6 @@@ #include <linux/slab.h> #include <linux/workqueue.h>
-#include "mdio-i2c.h" #include "sfp.h" #include "swphy.h"
@@@ -552,7 -552,7 +552,7 @@@ static umode_t sfp_hwmon_is_visible(con case hwmon_temp_crit: if (!(sfp->id.ext.enhopts & SFP_ENHOPTS_ALARMWARN)) return 0; - /* fall through */ + fallthrough; case hwmon_temp_input: case hwmon_temp_label: return 0444; @@@ -571,7 -571,7 +571,7 @@@ case hwmon_in_crit: if (!(sfp->id.ext.enhopts & SFP_ENHOPTS_ALARMWARN)) return 0; - /* fall through */ + fallthrough; case hwmon_in_input: case hwmon_in_label: return 0444; @@@ -590,7 -590,7 +590,7 @@@ case hwmon_curr_crit: if (!(sfp->id.ext.enhopts & SFP_ENHOPTS_ALARMWARN)) return 0; - /* fall through */ + fallthrough; case hwmon_curr_input: case hwmon_curr_label: return 0444; @@@ -618,7 -618,7 +618,7 @@@ case hwmon_power_crit: if (!(sfp->id.ext.enhopts & SFP_ENHOPTS_ALARMWARN)) return 0; - /* fall through */ + fallthrough; case hwmon_power_input: case hwmon_power_label: return 0444; @@@ -1872,7 -1872,7 +1872,7 @@@ static void sfp_sm_module(struct sfp *s dev_warn(sfp->dev, "hwmon probe failed: %d\n", err);
sfp_sm_mod_next(sfp, SFP_MOD_WAITDEV, 0); - /* fall through */ + fallthrough; case SFP_MOD_WAITDEV: /* Ensure that the device is attached before proceeding */ if (sfp->sm_dev_state < SFP_DEV_DOWN) @@@ -1890,7 -1890,7 +1890,7 @@@ goto insert;
sfp_sm_mod_next(sfp, SFP_MOD_HPOWER, 0); - /* fall through */ + fallthrough; case SFP_MOD_HPOWER: /* Enable high power mode */ err = sfp_sm_mod_hpower(sfp, true); diff --combined drivers/net/tun.c index efaef83b8897,7959b5c2d11f..be69d272052f --- a/drivers/net/tun.c +++ b/drivers/net/tun.c @@@ -219,6 -219,24 +219,6 @@@ struct veth __be16 h_vlan_TCI; };
-bool tun_is_xdp_frame(void *ptr) -{ - return (unsigned long)ptr & TUN_XDP_FLAG; -} -EXPORT_SYMBOL(tun_is_xdp_frame); - -void *tun_xdp_to_ptr(void *ptr) -{ - return (void *)((unsigned long)ptr | TUN_XDP_FLAG); -} -EXPORT_SYMBOL(tun_xdp_to_ptr); - -void *tun_ptr_to_xdp(void *ptr) -{ - return (void *)((unsigned long)ptr & ~TUN_XDP_FLAG); -} -EXPORT_SYMBOL(tun_ptr_to_xdp); - static int tun_napi_receive(struct napi_struct *napi, int budget) { struct tun_file *tfile = container_of(napi, struct tun_file, napi); @@@ -1572,10 -1590,10 +1572,10 @@@ static int tun_xdp_act(struct tun_struc break; default: bpf_warn_invalid_xdp_action(act); - /* fall through */ + fallthrough; case XDP_ABORTED: trace_xdp_exception(tun->dev, xdp_prog, act); - /* fall through */ + fallthrough; case XDP_DROP: this_cpu_inc(tun->pcpu_stats->rx_dropped); break; @@@ -2399,7 -2417,7 +2399,7 @@@ static int tun_xdp_one(struct tun_struc switch (err) { case XDP_REDIRECT: *flush = true; - /* fall through */ + fallthrough; case XDP_TX: return 0; case XDP_PASS: diff --combined drivers/net/usb/Kconfig index 0863f01937b3,c7bcfca7d70b..b46993d5f997 --- a/drivers/net/usb/Kconfig +++ b/drivers/net/usb/Kconfig @@@ -252,6 -252,7 +252,7 @@@ config USB_NET_CDC_EE config USB_NET_CDC_NCM tristate "CDC NCM support" depends on USB_USBNET + select USB_NET_CDCETHER default y help This driver provides support for CDC NCM (Network Control Model @@@ -345,8 -346,6 +346,8 @@@ config USB_NET_SMSC75X config USB_NET_SMSC95XX tristate "SMSC LAN95XX based USB 2.0 10/100 ethernet devices" depends on USB_USBNET + select PHYLIB + select SMSC_PHY select BITREVERSE select CRC16 select CRC32 diff --combined drivers/net/veth.c index b80cbffeb88e,a475f48d43c4..7de8f0ea3f6b --- a/drivers/net/veth.c +++ b/drivers/net/veth.c @@@ -234,14 -234,14 +234,14 @@@ static bool veth_is_xdp_frame(void *ptr return (unsigned long)ptr & VETH_XDP_FLAG; }
-static void *veth_ptr_to_xdp(void *ptr) +static struct xdp_frame *veth_ptr_to_xdp(void *ptr) { return (void *)((unsigned long)ptr & ~VETH_XDP_FLAG); }
-static void *veth_xdp_to_ptr(void *ptr) +static void *veth_xdp_to_ptr(struct xdp_frame *xdp) { - return (void *)((unsigned long)ptr | VETH_XDP_FLAG); + return (void *)((unsigned long)xdp | VETH_XDP_FLAG); }
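veth multiplexes sk_buffs and xdp_frames on one pointer ring by tagging the low pointer bit, which is safe because both allocations are at least word-aligned. The tag/test/untag trio in isolation (generic names, not the driver's):

#include <linux/types.h>

#define XDP_TAG 1UL	/* alignment guarantees bit 0 is free */

static void *tag_xdp(void *frame)
{
	return (void *)((unsigned long)frame | XDP_TAG);
}

static bool is_xdp(void *ptr)
{
	return (unsigned long)ptr & XDP_TAG;
}

static void *untag(void *ptr)
{
	return (void *)((unsigned long)ptr & ~XDP_TAG);
}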
static void veth_ptr_free(void *ptr) @@@ -610,10 -610,10 +610,10 @@@ static struct sk_buff *veth_xdp_rcv_one goto xdp_xmit; default: bpf_warn_invalid_xdp_action(act); - /* fall through */ + fallthrough; case XDP_ABORTED: trace_xdp_exception(rq->dev, xdp_prog, act); - /* fall through */ + fallthrough; case XDP_DROP: stats->xdp_drops++; goto err_xdp; @@@ -745,10 -745,10 +745,10 @@@ static struct sk_buff *veth_xdp_rcv_skb goto xdp_xmit; default: bpf_warn_invalid_xdp_action(act); - /* fall through */ + fallthrough; case XDP_ABORTED: trace_xdp_exception(rq->dev, xdp_prog, act); - /* fall through */ + fallthrough; case XDP_DROP: stats->xdp_drops++; goto xdp_drop; diff --combined drivers/s390/net/qeth_core_main.c index 26bc8c15ffb8,6a7398251423..e19640bc6daa --- a/drivers/s390/net/qeth_core_main.c +++ b/drivers/s390/net/qeth_core_main.c @@@ -1071,7 -1071,7 +1071,7 @@@ static void qeth_issue_next_read_cb(str break; case -EIO: qeth_schedule_recovery(card); - /* fall through */ + fallthrough; default: qeth_clear_ipacmd_list(card); goto err_idx; @@@ -2702,7 -2702,6 +2702,7 @@@ static int qeth_alloc_qdio_queues(struc card->qdio.out_qs[i] = queue; queue->card = card; queue->queue_no = i; + spin_lock_init(&queue->lock); timer_setup(&queue->timer, qeth_tx_completion_timer, 0); queue->coalesce_usecs = QETH_TX_COALESCE_USECS; queue->max_coalesced_frames = QETH_TX_MAX_COALESCED_FRAMES; @@@ -2887,7 -2886,7 +2887,7 @@@ void qeth_print_status_message(struct q card->info.mcl_level[3]); break; } - /* fallthrough */ + fallthrough; case QETH_CARD_TYPE_IQD: if (IS_VM_NIC(card) || (card->info.mcl_level[0] & 0x80)) { card->info.mcl_level[0] = (char) _ebcasc[(__u8) @@@ -3069,6 -3068,7 +3069,6 @@@ static int qeth_init_qdio_queues(struc queue->bulk_max = qeth_tx_select_bulk_max(card, queue); atomic_set(&queue->used_buffers, 0); atomic_set(&queue->set_pci_flags_count, 0); - atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED); netdev_tx_reset_queue(netdev_get_tx_queue(card->dev, i)); } return 0; @@@ -3549,9 -3549,8 +3549,9 @@@ static unsigned int qeth_rx_refill_queu
static void qeth_buffer_reclaim_work(struct work_struct *work) { - struct qeth_card *card = container_of(work, struct qeth_card, - buffer_reclaim_work.work); + struct qeth_card *card = container_of(to_delayed_work(work), + struct qeth_card, + buffer_reclaim_work);
local_bh_disable(); napi_schedule(&card->napi); @@@ -3741,31 -3740,37 +3741,31 @@@ static void qeth_flush_queue(struct qet
static void qeth_check_outbound_queue(struct qeth_qdio_out_q *queue) { - int index; - int flush_cnt = 0; - int q_was_packing = 0; - /* * check if we have to switch to non-packing mode or if * we have to get a pci flag out on the queue */ if ((atomic_read(&queue->used_buffers) <= QETH_LOW_WATERMARK_PACK) || !atomic_read(&queue->set_pci_flags_count)) { - if (atomic_xchg(&queue->state, QETH_OUT_Q_LOCKED_FLUSH) == - QETH_OUT_Q_UNLOCKED) { - /* - * If we get in here, there was no action in - * do_send_packet. So, we check if there is a - * packing buffer to be flushed here. - */ - index = queue->next_buf_to_fill; - q_was_packing = queue->do_pack; - /* queue->do_pack may change */ - barrier(); - flush_cnt += qeth_switch_to_nonpacking_if_needed(queue); - if (!flush_cnt && - !atomic_read(&queue->set_pci_flags_count)) - flush_cnt += qeth_prep_flush_pack_buffer(queue); + unsigned int index, flush_cnt; + bool q_was_packing; + + spin_lock(&queue->lock); + + index = queue->next_buf_to_fill; + q_was_packing = queue->do_pack; + + flush_cnt = qeth_switch_to_nonpacking_if_needed(queue); + if (!flush_cnt && !atomic_read(&queue->set_pci_flags_count)) + flush_cnt = qeth_prep_flush_pack_buffer(queue); + + if (flush_cnt) { + qeth_flush_buffers(queue, index, flush_cnt); if (q_was_packing) QETH_TXQ_STAT_ADD(queue, bufs_pack, flush_cnt); - if (flush_cnt) - qeth_flush_buffers(queue, index, flush_cnt); - atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED); } + + spin_unlock(&queue->lock); } }
@@@ -4277,22 -4282,29 +4277,22 @@@ int qeth_do_send_packet(struct qeth_car unsigned int offset, unsigned int hd_len, int elements_needed) { + unsigned int start_index = queue->next_buf_to_fill; struct qeth_qdio_out_buffer *buffer; unsigned int next_element; struct netdev_queue *txq; bool stopped = false; - int start_index; int flush_count = 0; int do_pack = 0; - int tmp; int rc = 0;
- /* spin until we get the queue ... */ - while (atomic_cmpxchg(&queue->state, QETH_OUT_Q_UNLOCKED, - QETH_OUT_Q_LOCKED) != QETH_OUT_Q_UNLOCKED); - start_index = queue->next_buf_to_fill; buffer = queue->bufs[queue->next_buf_to_fill];
/* Just a sanity check, the wake/stop logic should ensure that we always * get a free buffer. */ - if (atomic_read(&buffer->state) != QETH_QDIO_BUF_EMPTY) { - atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED); + if (atomic_read(&buffer->state) != QETH_QDIO_BUF_EMPTY) return -EBUSY; - }
txq = netdev_get_tx_queue(card->dev, skb_get_queue_mapping(skb));
@@@ -4315,6 -4327,8 +4315,6 @@@ QETH_QDIO_BUF_EMPTY) { qeth_flush_buffers(queue, start_index, flush_count); - atomic_set(&queue->state, - QETH_OUT_Q_UNLOCKED); rc = -EBUSY; goto out; } @@@ -4346,8 -4360,31 +4346,8 @@@
if (flush_count) qeth_flush_buffers(queue, start_index, flush_count); - else if (!atomic_read(&queue->set_pci_flags_count)) - atomic_xchg(&queue->state, QETH_OUT_Q_LOCKED_FLUSH); - /* - * queue->state will go from LOCKED -> UNLOCKED or from - * LOCKED_FLUSH -> LOCKED if output_handler wanted to 'notify' us - * (switch packing state or flush buffer to get another pci flag out). - * In that case we will enter this loop - */ - while (atomic_dec_return(&queue->state)) { - start_index = queue->next_buf_to_fill; - /* check if we can go back to non-packing state */ - tmp = qeth_switch_to_nonpacking_if_needed(queue); - /* - * check if we need to flush a packing buffer to get a pci - * flag out on the queue - */ - if (!tmp && !atomic_read(&queue->set_pci_flags_count)) - tmp = qeth_prep_flush_pack_buffer(queue); - if (tmp) { - qeth_flush_buffers(queue, start_index, tmp); - flush_count += tmp; - } - } + out: - /* at this point the queue is UNLOCKED again */ if (do_pack) QETH_TXQ_STAT_ADD(queue, bufs_pack, flush_count);
@@@ -4421,10 -4458,8 +4421,10 @@@ int qeth_xmit(struct qeth_card *card, s } else { /* TODO: drop skb_orphan() once TX completion is fast enough */ skb_orphan(skb); + spin_lock(&queue->lock); rc = qeth_do_send_packet(card, queue, skb, hdr, data_offset, hd_len, elements); + spin_unlock(&queue->lock); }
if (rc && !push_len) diff --combined drivers/s390/net/qeth_l2_main.c index b5bef5345dd6,3a94f6cad167..491578009f12 --- a/drivers/s390/net/qeth_l2_main.c +++ b/drivers/s390/net/qeth_l2_main.c @@@ -273,17 -273,6 +273,17 @@@ static int qeth_l2_vlan_rx_kill_vid(str return qeth_l2_send_setdelvlan(card, vid, IPA_CMD_DELVLAN); }
+static void qeth_l2_set_pnso_mode(struct qeth_card *card, + enum qeth_pnso_mode mode) +{ + spin_lock_irq(get_ccwdev_lock(CARD_RDEV(card))); + WRITE_ONCE(card->info.pnso_mode, mode); + spin_unlock_irq(get_ccwdev_lock(CARD_RDEV(card))); + + if (mode == QETH_PNSO_NONE) + drain_workqueue(card->event_wq); +} + static void qeth_l2_stop_card(struct qeth_card *card) { QETH_CARD_TEXT(card, 2, "stopcard"); @@@ -301,7 -290,7 +301,7 @@@
qeth_qdio_clear_card(card, 0); qeth_clear_working_pool_list(card); - flush_workqueue(card->event_wq); + qeth_l2_set_pnso_mode(card, QETH_PNSO_NONE); qeth_flush_local_addrs(card); card->info.promisc_mode = 0; } @@@ -499,7 -488,7 +499,7 @@@ static void qeth_l2_rx_mode_work(struc kfree(mac); break; } - /* fall through */ + fallthrough; default: /* for next call to set_rx_mode(): */ mac->disp_flag = QETH_DISP_ADDR_DELETE; @@@ -821,6 -810,8 +821,6 @@@ static void qeth_l2_setup_bridgeport_at if (card->options.sbp.hostnotification) { if (qeth_bridgeport_an_set(card, 1)) card->options.sbp.hostnotification = 0; - } else { - qeth_bridgeport_an_set(card, 0); } }
@@@ -1099,14 -1090,15 +1099,14 @@@ static void qeth_bridge_emit_host_event struct qeth_bridge_state_data { struct work_struct worker; struct qeth_card *card; - struct qeth_sbp_state_change qports; + u8 role; + u8 state; };
static void qeth_bridge_state_change_worker(struct work_struct *work) { struct qeth_bridge_state_data *data = container_of(work, struct qeth_bridge_state_data, worker); - /* We are only interested in the first entry - local port */ - struct qeth_sbp_port_entry *entry = &data->qports.entry[0]; char env_locrem[32]; char env_role[32]; char env_state[32]; @@@ -1117,16 -1109,22 +1117,16 @@@ NULL };
- /* Role should not change by itself, but if it did, */ - /* information from the hardware is authoritative. */ - mutex_lock(&data->card->sbp_lock); - data->card->options.sbp.role = entry->role; - mutex_unlock(&data->card->sbp_lock); - snprintf(env_locrem, sizeof(env_locrem), "BRIDGEPORT=statechange"); snprintf(env_role, sizeof(env_role), "ROLE=%s", - (entry->role == QETH_SBP_ROLE_NONE) ? "none" : - (entry->role == QETH_SBP_ROLE_PRIMARY) ? "primary" : - (entry->role == QETH_SBP_ROLE_SECONDARY) ? "secondary" : + (data->role == QETH_SBP_ROLE_NONE) ? "none" : + (data->role == QETH_SBP_ROLE_PRIMARY) ? "primary" : + (data->role == QETH_SBP_ROLE_SECONDARY) ? "secondary" : "<INVALID>"); snprintf(env_state, sizeof(env_state), "STATE=%s", - (entry->state == QETH_SBP_STATE_INACTIVE) ? "inactive" : - (entry->state == QETH_SBP_STATE_STANDBY) ? "standby" : - (entry->state == QETH_SBP_STATE_ACTIVE) ? "active" : + (data->state == QETH_SBP_STATE_INACTIVE) ? "inactive" : + (data->state == QETH_SBP_STATE_STANDBY) ? "standby" : + (data->state == QETH_SBP_STATE_ACTIVE) ? "active" : "<INVALID>"); kobject_uevent_env(&data->card->gdev->dev.kobj, KOBJ_CHANGE, env); @@@ -1136,8 -1134,10 +1136,8 @@@ static void qeth_bridge_state_change(struct qeth_card *card, struct qeth_ipa_cmd *cmd) { - struct qeth_sbp_state_change *qports = - &cmd->data.sbp.data.state_change; + struct qeth_sbp_port_data *qports = &cmd->data.sbp.data.port_data; struct qeth_bridge_state_data *data; - int extrasize;
QETH_CARD_TEXT(card, 2, "brstchng"); if (qports->num_entries == 0) { @@@ -1148,50 -1148,34 +1148,50 @@@ QETH_CARD_TEXT_(card, 2, "BPsz%04x", qports->entry_length); return; } - extrasize = sizeof(struct qeth_sbp_port_entry) * qports->num_entries; - data = kzalloc(sizeof(struct qeth_bridge_state_data) + extrasize, - GFP_ATOMIC); + + data = kzalloc(sizeof(*data), GFP_ATOMIC); if (!data) { QETH_CARD_TEXT(card, 2, "BPSalloc"); return; } INIT_WORK(&data->worker, qeth_bridge_state_change_worker); data->card = card; - memcpy(&data->qports, qports, - sizeof(struct qeth_sbp_state_change) + extrasize); + /* Information for the local port: */ + data->role = qports->entry[0].role; + data->state = qports->entry[0].state; + queue_work(card->event_wq, &data->worker); }
struct qeth_addr_change_data { - struct work_struct worker; + struct delayed_work dwork; struct qeth_card *card; struct qeth_ipacmd_addr_change ac_event; };
static void qeth_addr_change_event_worker(struct work_struct *work) { - struct qeth_addr_change_data *data = - container_of(work, struct qeth_addr_change_data, worker); + struct delayed_work *dwork = to_delayed_work(work); + struct qeth_addr_change_data *data; + struct qeth_card *card; int i;
+ data = container_of(dwork, struct qeth_addr_change_data, dwork); + card = data->card; + QETH_CARD_TEXT(data->card, 4, "adrchgew"); + + if (READ_ONCE(card->info.pnso_mode) == QETH_PNSO_NONE) + goto free; + if (data->ac_event.lost_event_mask) { + /* Potential re-config in progress, try again later: */ + if (!mutex_trylock(&card->sbp_lock)) { + queue_delayed_work(card->event_wq, dwork, + msecs_to_jiffies(100)); + return; + } + dev_info(&data->card->gdev->dev, "Address change notification stopped on %s (%s)\n", data->card->dev->name, @@@ -1200,9 -1184,8 +1200,9 @@@ : (data->ac_event.lost_event_mask == 0x02) ? "Bridge port state change" : "Unknown reason"); - mutex_lock(&data->card->sbp_lock); + data->card->options.sbp.hostnotification = 0; + card->info.pnso_mode = QETH_PNSO_NONE; mutex_unlock(&data->card->sbp_lock); qeth_bridge_emit_host_event(data->card, anev_abort, 0, NULL, NULL); @@@ -1216,8 -1199,6 +1216,8 @@@ &entry->token, &entry->addr_lnid); } + +free: kfree(data); }
@@@ -1229,9 -1210,6 +1229,9 @@@ static void qeth_addr_change_event(stru struct qeth_addr_change_data *data; int extrasize;
+ if (card->info.pnso_mode == QETH_PNSO_NONE) + return; + QETH_CARD_TEXT(card, 4, "adrchgev"); if (cmd->hdr.return_code != 0x0000) { if (cmd->hdr.return_code == 0x0010) { @@@ -1251,11 -1229,11 +1251,11 @@@ QETH_CARD_TEXT(card, 2, "ACNalloc"); return; } - INIT_WORK(&data->worker, qeth_addr_change_event_worker); + INIT_DELAYED_WORK(&data->dwork, qeth_addr_change_event_worker); data->card = card; memcpy(&data->ac_event, hostevs, sizeof(struct qeth_ipacmd_addr_change) + extrasize); - queue_work(card->event_wq, &data->worker); + queue_delayed_work(card->event_wq, &data->dwork, 0); }
/* SETBRIDGEPORT support; sending commands */ @@@ -1440,8 -1418,8 +1440,8 @@@ static int qeth_bridgeport_query_ports_ struct qeth_reply *reply, unsigned long data) { struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data; - struct qeth_sbp_query_ports *qports = &cmd->data.sbp.data.query_ports; struct _qeth_sbp_cbctl *cbctl = (struct _qeth_sbp_cbctl *)reply->param; + struct qeth_sbp_port_data *qports; int rc;
QETH_CARD_TEXT(card, 2, "brqprtcb"); @@@ -1449,7 -1427,6 +1449,7 @@@ if (rc) return rc;
+ qports = &cmd->data.sbp.data.port_data; if (qports->entry_length != sizeof(struct qeth_sbp_port_entry)) { QETH_CARD_TEXT_(card, 2, "SBPs%04x", qports->entry_length); return -EINVAL; @@@ -1577,14 -1554,9 +1577,14 @@@ int qeth_bridgeport_an_set(struct qeth_
if (enable) { qeth_bridge_emit_host_event(card, anev_reset, 0, NULL, NULL); + qeth_l2_set_pnso_mode(card, QETH_PNSO_BRIDGEPORT); rc = qeth_l2_pnso(card, 1, qeth_bridgeport_an_set_cb, card); - } else + if (rc) + qeth_l2_set_pnso_mode(card, QETH_PNSO_NONE); + } else { rc = qeth_l2_pnso(card, 0, NULL, NULL); + qeth_l2_set_pnso_mode(card, QETH_PNSO_NONE); + } return rc; }
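Note the ordering in the enable path: the PNSO mode is published before the PNSO command is issued, so an event racing with the command is already classified correctly, and the mode is rolled back if enabling fails. Reduced to its essentials (set_mode() and enable_hw_events() are illustrative stand-ins, not qeth functions):

	set_mode(card, MODE_ON);		/* visible to event handlers first */
	rc = enable_hw_events(card);
	if (rc)
		set_mode(card, MODE_OFF);	/* undo on failure */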
diff --combined drivers/s390/net/qeth_l3_main.c index 95df638de616,4d461960370d..767c5bb7c24c --- a/drivers/s390/net/qeth_l3_main.c +++ b/drivers/s390/net/qeth_l3_main.c @@@ -314,8 -314,7 +314,8 @@@ static int qeth_l3_setdelip_cb(struct q }
static int qeth_l3_send_setdelmc(struct qeth_card *card, - struct qeth_ipaddr *addr, int ipacmd) + struct qeth_ipaddr *addr, + enum qeth_ipa_cmds ipacmd) { struct qeth_cmd_buffer *iob; struct qeth_ipa_cmd *cmd; @@@ -1236,7 -1235,7 +1236,7 @@@ static void qeth_l3_rx_mode_work(struc break; } addr->ref_counter = 1; - /* fall through */ + fallthrough; default: /* for next call to set_rx_mode(): */ addr->disp_flag = QETH_DISP_ADDR_DELETE; diff --combined fs/io_uring.c index 1fd03a38400c,0d7be2e9d005..522b891dd187 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@@ -1150,7 -1150,7 +1150,7 @@@ static void io_prep_async_work(struct i io_req_init_async(req);
if (req->flags & REQ_F_ISREG) { - if (def->hash_reg_file) + if (def->hash_reg_file || (req->ctx->flags & IORING_SETUP_IOPOLL)) io_wq_hash_work(&req->work, file_inode(req->file)); } else { if (def->unbound_nonreg_file) @@@ -1746,7 -1746,8 +1746,8 @@@ static struct io_kiocb *io_req_find_nex return __io_req_find_next(req); }
- static int io_req_task_work_add(struct io_kiocb *req, struct callback_head *cb) + static int io_req_task_work_add(struct io_kiocb *req, struct callback_head *cb, + bool twa_signal_ok) { struct task_struct *tsk = req->task; struct io_ring_ctx *ctx = req->ctx; @@@ -1759,7 -1760,7 +1760,7 @@@ * will do the job. */ notify = 0; - if (!(ctx->flags & IORING_SETUP_SQPOLL)) + if (!(ctx->flags & IORING_SETUP_SQPOLL) && twa_signal_ok) notify = TWA_SIGNAL;
ret = task_work_add(tsk, cb, notify); @@@ -1819,7 -1820,7 +1820,7 @@@ static void io_req_task_queue(struct io init_task_work(&req->task_work, io_req_task_submit); percpu_ref_get(&req->ctx->refs);
- ret = io_req_task_work_add(req, &req->task_work); + ret = io_req_task_work_add(req, &req->task_work, true); if (unlikely(ret)) { struct task_struct *tsk;
@@@ -2048,6 -2049,7 +2049,7 @@@ static void io_iopoll_complete(struct i
req = list_first_entry(done, struct io_kiocb, inflight_entry); if (READ_ONCE(req->result) == -EAGAIN) { + req->result = 0; req->iopoll_completed = 0; list_move_tail(&req->inflight_entry, &again); continue; @@@ -2293,38 -2295,27 +2295,27 @@@ end_req io_req_complete(req, ret); return false; } - - static void io_rw_resubmit(struct callback_head *cb) - { - struct io_kiocb *req = container_of(cb, struct io_kiocb, task_work); - struct io_ring_ctx *ctx = req->ctx; - int err; - - err = io_sq_thread_acquire_mm(ctx, req); - - if (io_resubmit_prep(req, err)) { - refcount_inc(&req->refs); - io_queue_async_work(req); - } - - percpu_ref_put(&ctx->refs); - } #endif
static bool io_rw_reissue(struct io_kiocb *req, long res) { #ifdef CONFIG_BLOCK + umode_t mode = file_inode(req->file)->i_mode; int ret;
+ if (!S_ISBLK(mode) && !S_ISREG(mode)) + return false; if ((res != -EAGAIN && res != -EOPNOTSUPP) || io_wq_current_is_worker()) return false;
- init_task_work(&req->task_work, io_rw_resubmit); - percpu_ref_get(&req->ctx->refs); + ret = io_sq_thread_acquire_mm(req->ctx, req);
- ret = io_req_task_work_add(req, &req->task_work); - if (!ret) + if (io_resubmit_prep(req, ret)) { + refcount_inc(&req->refs); + io_queue_async_work(req); return true; + } + #endif return false; } @@@ -2563,7 -2554,7 +2554,7 @@@ static inline void io_rw_done(struct ki * IO with EINTR. */ ret = -EINTR; - /* fall through */ + fallthrough; default: kiocb->ki_complete(kiocb, ret, 0); } @@@ -2865,6 -2856,11 +2856,11 @@@ static ssize_t io_import_iovec(int rw, return iov_iter_count(&req->io->rw.iter); }
+ static inline loff_t *io_kiocb_ppos(struct kiocb *kiocb) + { + return kiocb->ki_filp->f_mode & FMODE_STREAM ? NULL : &kiocb->ki_pos; + } + /* * For files that don't have ->read_iter() and ->write_iter(), handle them * by looping over ->read() or ->write() manually. @@@ -2900,10 -2896,10 +2896,10 @@@ static ssize_t loop_rw_iter(int rw, str
if (rw == READ) { nr = file->f_op->read(file, iovec.iov_base, - iovec.iov_len, &kiocb->ki_pos); + iovec.iov_len, io_kiocb_ppos(kiocb)); } else { nr = file->f_op->write(file, iovec.iov_base, - iovec.iov_len, &kiocb->ki_pos); + iovec.iov_len, io_kiocb_ppos(kiocb)); }
if (iov_iter_is_bvec(iter)) @@@ -3044,7 -3040,7 +3040,7 @@@ static int io_async_buf_func(struct wai
/* submit ref gets dropped, acquire a new one */ refcount_inc(&req->refs); - ret = io_req_task_work_add(req, &req->task_work); + ret = io_req_task_work_add(req, &req->task_work, true); if (unlikely(ret)) { struct task_struct *tsk;
@@@ -3125,6 -3121,7 +3121,7 @@@ static int io_read(struct io_kiocb *req ret = io_import_iovec(READ, req, &iovec, iter, !force_nonblock); if (ret < 0) return ret; + iov_count = iov_iter_count(iter); io_size = ret; req->result = io_size; ret = 0; @@@ -3137,8 -3134,7 +3134,7 @@@ if (force_nonblock && !io_file_supports_async(req->file, READ)) goto copy_iov;
- iov_count = iov_iter_count(iter); - ret = rw_verify_area(READ, req->file, &kiocb->ki_pos, iov_count); + ret = rw_verify_area(READ, req->file, io_kiocb_ppos(kiocb), iov_count); if (unlikely(ret)) goto out_free;
@@@ -3150,14 -3146,21 +3146,21 @@@ ret = 0; goto out_free; } else if (ret == -EAGAIN) { - if (!force_nonblock) + /* IOPOLL retry should happen for io-wq threads */ + if (!force_nonblock && !(req->ctx->flags & IORING_SETUP_IOPOLL)) + goto done; + /* no retry on NONBLOCK marked file */ + if (req->file->f_flags & O_NONBLOCK) goto done; + /* some cases will consume bytes even on error returns */ + iov_iter_revert(iter, iov_count - iov_iter_count(iter)); ret = io_setup_async_rw(req, iovec, inline_vecs, iter, false); if (ret) goto out_free; return -EAGAIN; } else if (ret < 0) { - goto out_free; + /* make sure -ERESTARTSYS -> -EINTR is done */ + goto done; }
/* read it all, or we did blocking attempt. no retry. */ @@@ -3241,6 -3244,7 +3244,7 @@@ static int io_write(struct io_kiocb *re ret = io_import_iovec(WRITE, req, &iovec, iter, !force_nonblock); if (ret < 0) return ret; + iov_count = iov_iter_count(iter); io_size = ret; req->result = io_size;
@@@ -3257,8 -3261,7 +3261,7 @@@ (req->flags & REQ_F_ISREG)) goto copy_iov;
- iov_count = iov_iter_count(iter); - ret = rw_verify_area(WRITE, req->file, &kiocb->ki_pos, iov_count); + ret = rw_verify_area(WRITE, req->file, io_kiocb_ppos(kiocb), iov_count); if (unlikely(ret)) goto out_free;
@@@ -3290,10 -3293,19 +3293,19 @@@ */ if (ret2 == -EOPNOTSUPP && (kiocb->ki_flags & IOCB_NOWAIT)) ret2 = -EAGAIN; + /* no retry on NONBLOCK marked file */ + if (ret2 == -EAGAIN && (req->file->f_flags & O_NONBLOCK)) + goto done; if (!force_nonblock || ret2 != -EAGAIN) { + /* IOPOLL retry should happen for io-wq threads */ + if ((req->ctx->flags & IORING_SETUP_IOPOLL) && ret2 == -EAGAIN) + goto copy_iov; + done: kiocb_done(kiocb, ret2, cs); } else { copy_iov: + /* some cases will consume bytes even on error returns */ + iov_iter_revert(iter, iov_count - iov_iter_count(iter)); ret = io_setup_async_rw(req, iovec, inline_vecs, iter, false); if (!ret) return -EAGAIN; @@@ -4566,6 -4578,7 +4578,7 @@@ struct io_poll_table static int __io_async_wake(struct io_kiocb *req, struct io_poll_iocb *poll, __poll_t mask, task_work_func_t func) { + bool twa_signal_ok; int ret;
/* for instances that support it check for an event match first: */ @@@ -4580,13 -4593,21 +4593,21 @@@ init_task_work(&req->task_work, func); percpu_ref_get(&req->ctx->refs);
+ /* + * If we are using the signalfd wait_queue_head for this wakeup, then + * it's not safe to use TWA_SIGNAL as we could be recursing on the + * tsk->sighand->siglock on doing the wakeup. It should not be needed + * either, as the normal wakeup will suffice. + */ + twa_signal_ok = (poll->head != &req->task->sighand->signalfd_wqh); + /* * If this fails, then the task is exiting. When a task exits, the * work gets canceled, so just cancel this request as well instead * of executing it. We can't safely execute it anyway, as we may not * have the state needed for it anyway. */ - ret = io_req_task_work_add(req, &req->task_work); + ret = io_req_task_work_add(req, &req->task_work, twa_signal_ok); if (unlikely(ret)) { struct task_struct *tsk;
@@@ -4875,12 -4896,20 +4896,20 @@@ static bool io_arm_poll_handler(struct struct async_poll *apoll; struct io_poll_table ipt; __poll_t mask, ret; + int rw;
if (!req->file || !file_can_poll(req->file)) return false; if (req->flags & REQ_F_POLLED) return false; - if (!def->pollin && !def->pollout) + if (def->pollin) + rw = READ; + else if (def->pollout) + rw = WRITE; + else + return false; + /* if we can't nonblock try, then no point in arming a poll handler */ + if (!io_file_supports_async(req->file, rw)) return false;
apoll = kmalloc(sizeof(*apoll), GFP_ATOMIC); @@@ -4898,12 -4927,6 +4927,12 @@@ mask |= POLLIN | POLLRDNORM; if (def->pollout) mask |= POLLOUT | POLLWRNORM; + + /* If reading from MSG_ERRQUEUE using recvmsg, ignore POLLIN */ + if ((req->opcode == IORING_OP_RECVMSG) && + (req->sr_msg.msg_flags & MSG_ERRQUEUE)) + mask &= ~POLLIN; + mask |= POLLERR | POLLPRI;
ipt.pt._qproc = io_async_queue_proc; @@@ -7311,7 -7334,7 +7340,7 @@@ static int __io_sqe_files_update(struc table = &ctx->file_data->table[i >> IORING_FILE_TABLE_SHIFT]; index = i & IORING_FILE_TABLE_MASK; if (table->files[index]) { - file = io_file_from_index(ctx, index); + file = table->files[index]; err = io_queue_file_removal(data, file); if (err) break; @@@ -7340,6 -7363,7 +7369,7 @@@ table->files[index] = file; err = io_sqe_file_register(ctx, file, i); if (err) { + table->files[index] = NULL; fput(file); break; } @@@ -7439,9 -7463,6 +7469,6 @@@ static int io_sq_offload_start(struct i { int ret;
- mmgrab(current->mm); - ctx->sqo_mm = current->mm; - if (ctx->flags & IORING_SETUP_SQPOLL) { ret = -EPERM; if (!capable(CAP_SYS_ADMIN)) @@@ -7486,10 -7507,6 +7513,6 @@@ return 0; err: io_finish_async(ctx); - if (ctx->sqo_mm) { - mmdrop(ctx->sqo_mm); - ctx->sqo_mm = NULL; - } return ret; }
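The mm reference itself moves rather than disappears: it is now taken at ring creation (see the io_uring_create() hunk below) instead of in SQPOLL setup. The underlying idiom pairs mmgrab(), which pins the mm_struct itself without keeping the address space alive the way mmget() would, with an mmdrop() at teardown; a sketch against the fields shown in this diff:

	/* at context creation */
	mmgrab(current->mm);
	ctx->sqo_mm = current->mm;

	/* at context teardown */
	if (ctx->sqo_mm) {
		mmdrop(ctx->sqo_mm);
		ctx->sqo_mm = NULL;
	}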
@@@ -8539,6 -8556,9 +8562,9 @@@ static int io_uring_create(unsigned ent ctx->user = user; ctx->creds = get_current_cred();
+ mmgrab(current->mm); + ctx->sqo_mm = current->mm; + /* * Account memory _before_ installing the file descriptor. Once * the descriptor is installed, it can get closed at any time. Also diff --combined include/linux/filter.h index 995625950cc1,ebfb7cfb65f1..05b4052715b9 --- a/include/linux/filter.h +++ b/include/linux/filter.h @@@ -1200,7 -1200,7 +1200,7 @@@ static inline u16 bpf_anc_helper(const BPF_ANCILLARY(RANDOM); BPF_ANCILLARY(VLAN_TPID); } - /* Fallthrough. */ + fallthrough; default: return ftest->code; } @@@ -1236,17 -1236,13 +1236,17 @@@ struct bpf_sock_addr_kern
struct bpf_sock_ops_kern { struct sock *sk; - u32 op; union { u32 args[4]; u32 reply; u32 replylong[4]; }; - u32 is_fullsock; + struct sk_buff *syn_skb; + struct sk_buff *skb; + void *skb_data_end; + u8 op; + u8 is_fullsock; + u8 remaining_opt_len; u64 temp; /* temp and everything after is not * initialized to 0 before calling * the BPF program. New fields that diff --combined kernel/bpf/cpumap.c index cf548fc88780,6386b7bb98f2..7e1a8ad0c32a --- a/kernel/bpf/cpumap.c +++ b/kernel/bpf/cpumap.c @@@ -79,6 -79,8 +79,6 @@@ struct bpf_cpu_map
static DEFINE_PER_CPU(struct list_head, cpu_map_flush_list);
-static int bq_flush_to_queue(struct xdp_bulk_queue *bq); - static struct bpf_map *cpu_map_alloc(union bpf_attr *attr) { u32 value_size = attr->value_size; @@@ -277,7 -279,7 +277,7 @@@ static int cpu_map_bpf_prog_run_xdp(str break; default: bpf_warn_invalid_xdp_action(act); - /* fallthrough */ + fallthrough; case XDP_DROP: xdp_return_frame(xdpf); stats->drop++; @@@ -656,7 -658,6 +656,7 @@@ static int cpu_map_get_next_key(struct
static int cpu_map_btf_id; const struct bpf_map_ops cpu_map_ops = { + .map_meta_equal = bpf_map_meta_equal, .map_alloc = cpu_map_alloc, .map_free = cpu_map_free, .map_delete_elem = cpu_map_delete_elem, @@@ -668,7 -669,7 +668,7 @@@ .map_btf_id = &cpu_map_btf_id, };
-static int bq_flush_to_queue(struct xdp_bulk_queue *bq) +static void bq_flush_to_queue(struct xdp_bulk_queue *bq) { struct bpf_cpu_map_entry *rcpu = bq->obj; unsigned int processed = 0, drops = 0; @@@ -677,7 -678,7 +677,7 @@@ int i;
if (unlikely(!bq->count)) - return 0; + return;
q = rcpu->queue; spin_lock(&q->producer_lock); @@@ -700,12 -701,13 +700,12 @@@
/* Feedback loop via tracepoints */ trace_xdp_cpumap_enqueue(rcpu->map_id, processed, drops, to_cpu); - return 0; }
/* Runs under RCU-read-side, plus in softirq under NAPI protection. * Thus, safe percpu variable access. */ -static int bq_enqueue(struct bpf_cpu_map_entry *rcpu, struct xdp_frame *xdpf) +static void bq_enqueue(struct bpf_cpu_map_entry *rcpu, struct xdp_frame *xdpf) { struct list_head *flush_list = this_cpu_ptr(&cpu_map_flush_list); struct xdp_bulk_queue *bq = this_cpu_ptr(rcpu->bulkq); @@@ -726,6 -728,8 +726,6 @@@
if (!bq->flush_node.prev) list_add(&bq->flush_node, flush_list); - - return 0; }
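bq_enqueue() and bq_flush_to_queue() follow the per-CPU bulk-queue idiom: frames are staged in a small per-CPU array, pushed to the shared per-destination queue only in batches, and partially filled queues are remembered on a per-CPU flush list that the flush point drains. A reduced sketch of the enqueue side (types and constants as in cpumap):

	static void bq_enqueue_sketch(struct xdp_bulk_queue *bq,
				      struct list_head *flush_list,
				      struct xdp_frame *xdpf)
	{
		if (bq->count == CPU_MAP_BULK_SIZE)
			bq_flush_to_queue(bq);		/* drain a full batch */

		bq->q[bq->count++] = xdpf;		/* stage locally, no locking */

		/* first frame since the last flush: remember this queue so
		 * the flush point can find and drain it */
		if (!bq->flush_node.prev)
			list_add(&bq->flush_node, flush_list);
	}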
int cpu_map_enqueue(struct bpf_cpu_map_entry *rcpu, struct xdp_buff *xdp, diff --combined kernel/bpf/syscall.c index 4108ef3b828b,b999e7ff2583..178c147350f5 --- a/kernel/bpf/syscall.c +++ b/kernel/bpf/syscall.c @@@ -29,7 -29,6 +29,7 @@@ #include <linux/bpf_lsm.h> #include <linux/poll.h> #include <linux/bpf-netns.h> +#include <linux/rcupdate_trace.h>
#define IS_FD_ARRAY(map) ((map)->map_type == BPF_MAP_TYPE_PERF_EVENT_ARRAY || \ (map)->map_type == BPF_MAP_TYPE_CGROUP_ARRAY || \ @@@ -91,7 -90,6 +91,7 @@@ int bpf_check_uarg_tail_zero(void __use }
const struct bpf_map_ops bpf_map_offload_ops = { + .map_meta_equal = bpf_map_meta_equal, .map_alloc = bpf_map_offload_map_alloc, .map_free = bpf_map_offload_map_free, .map_check_btf = map_check_no_btf, @@@ -159,11 -157,10 +159,11 @@@ static int bpf_map_update_value(struct if (bpf_map_is_dev_bound(map)) { return bpf_map_offload_update_elem(map, key, value, flags); } else if (map->map_type == BPF_MAP_TYPE_CPUMAP || - map->map_type == BPF_MAP_TYPE_SOCKHASH || - map->map_type == BPF_MAP_TYPE_SOCKMAP || map->map_type == BPF_MAP_TYPE_STRUCT_OPS) { return map->ops->map_update_elem(map, key, value, flags); + } else if (map->map_type == BPF_MAP_TYPE_SOCKHASH || + map->map_type == BPF_MAP_TYPE_SOCKMAP) { + return sock_map_update_elem_sys(map, key, value, flags); } else if (IS_FD_PROG_ARRAY(map)) { return bpf_fd_array_map_update_elem(map, f.file, key, value, flags); @@@ -771,8 -768,7 +771,8 @@@ static int map_check_btf(struct bpf_ma if (map->map_type != BPF_MAP_TYPE_HASH && map->map_type != BPF_MAP_TYPE_ARRAY && map->map_type != BPF_MAP_TYPE_CGROUP_STORAGE && - map->map_type != BPF_MAP_TYPE_SK_STORAGE) + map->map_type != BPF_MAP_TYPE_SK_STORAGE && + map->map_type != BPF_MAP_TYPE_INODE_STORAGE) return -ENOTSUPP; if (map->spin_lock_off + sizeof(struct bpf_spin_lock) > map->value_size) { @@@ -1732,14 -1728,10 +1732,14 @@@ static void __bpf_prog_put_noref(struc btf_put(prog->aux->btf); bpf_prog_free_linfo(prog);
- if (deferred) - call_rcu(&prog->aux->rcu, __bpf_prog_put_rcu); - else + if (deferred) { + if (prog->aux->sleepable) + call_rcu_tasks_trace(&prog->aux->rcu, __bpf_prog_put_rcu); + else + call_rcu(&prog->aux->rcu, __bpf_prog_put_rcu); + } else { __bpf_prog_put_rcu(&prog->aux->rcu); + } }
static void __bpf_prog_put(struct bpf_prog *prog, bool do_idr_lock) @@@ -2037,7 -2029,7 +2037,7 @@@ bpf_prog_load_check_attach(enum bpf_pro case BPF_PROG_TYPE_EXT: if (expected_attach_type) return -EINVAL; - /* fallthrough */ + fallthrough; default: return 0; } @@@ -2109,7 -2101,6 +2109,7 @@@ static int bpf_prog_load(union bpf_att if (attr->prog_flags & ~(BPF_F_STRICT_ALIGNMENT | BPF_F_ANY_ALIGNMENT | BPF_F_TEST_STATE_FREQ | + BPF_F_SLEEPABLE | BPF_F_TEST_RND_HI32)) return -EINVAL;
@@@ -2165,7 -2156,6 +2165,7 @@@ }
prog->aux->offload_requested = !!attr->prog_ifindex; + prog->aux->sleepable = attr->prog_flags & BPF_F_SLEEPABLE;
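From userspace, sleepability is requested at load time via prog_flags. A hedged sketch using the raw bpf(2) syscall, assuming a uapi header that already defines BPF_F_SLEEPABLE (a real loader such as libbpf also fills attach_btf_id and friends, elided here):

	#include <string.h>
	#include <unistd.h>
	#include <sys/syscall.h>
	#include <linux/bpf.h>

	static int load_sleepable(const struct bpf_insn *insns, unsigned int insn_cnt)
	{
		union bpf_attr attr;

		memset(&attr, 0, sizeof(attr));
		attr.prog_type = BPF_PROG_TYPE_TRACING;
		attr.expected_attach_type = BPF_TRACE_FENTRY;
		attr.insns = (unsigned long)insns;
		attr.insn_cnt = insn_cnt;
		attr.license = (unsigned long)"GPL";
		attr.prog_flags = BPF_F_SLEEPABLE;	/* the flag added here */

		return syscall(SYS_bpf, BPF_PROG_LOAD, &attr, sizeof(attr));
	}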
err = security_bpf_prog_alloc(prog->aux); if (err) @@@ -2644,7 -2634,7 +2644,7 @@@ static int bpf_raw_tp_link_fill_link_in u32 ulen = info->raw_tracepoint.tp_name_len; size_t tp_len = strlen(tp_name);
- if (ulen && !ubuf) + if (!ulen ^ !ubuf) return -EINVAL;
info->raw_tracepoint.tp_name_len = tp_len + 1; @@@ -4024,50 -4014,40 +4024,50 @@@ static int link_detach(union bpf_attr * return ret; }
-static int bpf_link_inc_not_zero(struct bpf_link *link) +static struct bpf_link *bpf_link_inc_not_zero(struct bpf_link *link) { - return atomic64_fetch_add_unless(&link->refcnt, 1, 0) ? 0 : -ENOENT; + return atomic64_fetch_add_unless(&link->refcnt, 1, 0) ? link : ERR_PTR(-ENOENT); }
-#define BPF_LINK_GET_FD_BY_ID_LAST_FIELD link_id - -static int bpf_link_get_fd_by_id(const union bpf_attr *attr) +struct bpf_link *bpf_link_by_id(u32 id) { struct bpf_link *link; - u32 id = attr->link_id; - int fd, err;
- if (CHECK_ATTR(BPF_LINK_GET_FD_BY_ID)) - return -EINVAL; - - if (!capable(CAP_SYS_ADMIN)) - return -EPERM; + if (!id) + return ERR_PTR(-ENOENT);
spin_lock_bh(&link_idr_lock); - link = idr_find(&link_idr, id); /* before link is "settled", ID is 0, pretend it doesn't exist yet */ + link = idr_find(&link_idr, id); if (link) { if (link->id) - err = bpf_link_inc_not_zero(link); + link = bpf_link_inc_not_zero(link); else - err = -EAGAIN; + link = ERR_PTR(-EAGAIN); } else { - err = -ENOENT; + link = ERR_PTR(-ENOENT); } spin_unlock_bh(&link_idr_lock); + return link; +}
- if (err) - return err; +#define BPF_LINK_GET_FD_BY_ID_LAST_FIELD link_id + +static int bpf_link_get_fd_by_id(const union bpf_attr *attr) +{ + struct bpf_link *link; + u32 id = attr->link_id; + int fd; + + if (CHECK_ATTR(BPF_LINK_GET_FD_BY_ID)) + return -EINVAL; + + if (!capable(CAP_SYS_ADMIN)) + return -EPERM; + + link = bpf_link_by_id(id); + if (IS_ERR(link)) + return PTR_ERR(link);
fd = bpf_link_new_fd(link); if (fd < 0) diff --combined kernel/bpf/verifier.c index b4e9c56b8b32,47e74f09fa37..86fdebb5ffd8 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@@ -21,7 -21,6 +21,7 @@@ #include <linux/ctype.h> #include <linux/error-injection.h> #include <linux/bpf_lsm.h> +#include <linux/btf_ids.h>
#include "disasm.h"
@@@ -2626,19 -2625,11 +2626,19 @@@ static int check_map_access(struct bpf_
#define MAX_PACKET_OFF 0xffff
+static enum bpf_prog_type resolve_prog_type(struct bpf_prog *prog) +{ + return prog->aux->linked_prog ? prog->aux->linked_prog->type + : prog->type; +} + static bool may_access_direct_pkt_data(struct bpf_verifier_env *env, const struct bpf_call_arg_meta *meta, enum bpf_access_type t) { - switch (env->prog->type) { + enum bpf_prog_type prog_type = resolve_prog_type(env->prog); + + switch (prog_type) { /* Program types only with direct read access go here! */ case BPF_PROG_TYPE_LWT_IN: case BPF_PROG_TYPE_LWT_OUT: @@@ -3881,33 -3872,6 +3881,33 @@@ static int int_ptr_type_to_size(enum bp return -EINVAL; }
+static int resolve_map_arg_type(struct bpf_verifier_env *env, + const struct bpf_call_arg_meta *meta, + enum bpf_arg_type *arg_type) +{ + if (!meta->map_ptr) { + /* kernel subsystem misconfigured verifier */ + verbose(env, "invalid map_ptr to access map->type\n"); + return -EACCES; + } + + switch (meta->map_ptr->map_type) { + case BPF_MAP_TYPE_SOCKMAP: + case BPF_MAP_TYPE_SOCKHASH: + if (*arg_type == ARG_PTR_TO_MAP_VALUE) { + *arg_type = ARG_PTR_TO_SOCKET; + } else { + verbose(env, "invalid arg_type for sockmap/sockhash\n"); + return -EINVAL; + } + break; + + default: + break; + } + return 0; +} + static int check_func_arg(struct bpf_verifier_env *env, u32 arg, struct bpf_call_arg_meta *meta, const struct bpf_func_proto *fn) @@@ -3940,14 -3904,6 +3940,14 @@@ return -EACCES; }
+ if (arg_type == ARG_PTR_TO_MAP_VALUE || + arg_type == ARG_PTR_TO_UNINIT_MAP_VALUE || + arg_type == ARG_PTR_TO_MAP_VALUE_OR_NULL) { + err = resolve_map_arg_type(env, meta, &arg_type); + if (err) + return err; + } + if (arg_type == ARG_PTR_TO_MAP_KEY || arg_type == ARG_PTR_TO_MAP_VALUE || arg_type == ARG_PTR_TO_UNINIT_MAP_VALUE || @@@ -4004,21 -3960,16 +4004,21 @@@ goto err_type; } } else if (arg_type == ARG_PTR_TO_BTF_ID) { + bool ids_match = false; + expected_type = PTR_TO_BTF_ID; if (type != expected_type) goto err_type; if (!fn->check_btf_id) { if (reg->btf_id != meta->btf_id) { - verbose(env, "Helper has type %s got %s in R%d\n", - kernel_type_name(meta->btf_id), - kernel_type_name(reg->btf_id), regno); - - return -EACCES; + ids_match = btf_struct_ids_match(&env->log, reg->off, reg->btf_id, + meta->btf_id); + if (!ids_match) { + verbose(env, "Helper has type %s got %s in R%d\n", + kernel_type_name(meta->btf_id), + kernel_type_name(reg->btf_id), regno); + return -EACCES; + } } } else if (!fn->check_btf_id(reg->btf_id, arg)) { verbose(env, "Helper does not support %s in R%d\n", @@@ -4026,7 -3977,7 +4026,7 @@@
return -EACCES; } - if (!tnum_is_const(reg->var_off) || reg->var_off.value || reg->off) { + if ((reg->off && !ids_match) || !tnum_is_const(reg->var_off) || reg->var_off.value) { verbose(env, "R%d is a pointer to in-kernel struct with non-zero offset\n", regno); return -EACCES; @@@ -4192,38 -4143,6 +4192,38 @@@ err_type return -EACCES; }
+static bool may_update_sockmap(struct bpf_verifier_env *env, int func_id) +{ + enum bpf_attach_type eatype = env->prog->expected_attach_type; + enum bpf_prog_type type = resolve_prog_type(env->prog); + + if (func_id != BPF_FUNC_map_update_elem) + return false; + + /* It's not possible to get access to a locked struct sock in these + * contexts, so updating is safe. + */ + switch (type) { + case BPF_PROG_TYPE_TRACING: + if (eatype == BPF_TRACE_ITER) + return true; + break; + case BPF_PROG_TYPE_SOCKET_FILTER: + case BPF_PROG_TYPE_SCHED_CLS: + case BPF_PROG_TYPE_SCHED_ACT: + case BPF_PROG_TYPE_XDP: + case BPF_PROG_TYPE_SK_REUSEPORT: + case BPF_PROG_TYPE_FLOW_DISSECTOR: + case BPF_PROG_TYPE_SK_LOOKUP: + return true; + default: + break; + } + + verbose(env, "cannot update sockmap in this context\n"); + return false; +} + static int check_map_func_compatibility(struct bpf_verifier_env *env, struct bpf_map *map, int func_id) { @@@ -4295,8 -4214,7 +4295,8 @@@ func_id != BPF_FUNC_map_delete_elem && func_id != BPF_FUNC_msg_redirect_map && func_id != BPF_FUNC_sk_select_reuseport && - func_id != BPF_FUNC_map_lookup_elem) + func_id != BPF_FUNC_map_lookup_elem && + !may_update_sockmap(env, func_id)) goto error; break; case BPF_MAP_TYPE_SOCKHASH: @@@ -4305,8 -4223,7 +4305,8 @@@ func_id != BPF_FUNC_map_delete_elem && func_id != BPF_FUNC_msg_redirect_hash && func_id != BPF_FUNC_sk_select_reuseport && - func_id != BPF_FUNC_map_lookup_elem) + func_id != BPF_FUNC_map_lookup_elem && + !may_update_sockmap(env, func_id)) goto error; break; case BPF_MAP_TYPE_REUSEPORT_SOCKARRAY: @@@ -4325,11 -4242,6 +4325,11 @@@ func_id != BPF_FUNC_sk_storage_delete) goto error; break; + case BPF_MAP_TYPE_INODE_STORAGE: + if (func_id != BPF_FUNC_inode_storage_get && + func_id != BPF_FUNC_inode_storage_delete) + goto error; + break; default: break; } @@@ -4403,11 -4315,6 +4403,11 @@@ if (map->map_type != BPF_MAP_TYPE_SK_STORAGE) goto error; break; + case BPF_FUNC_inode_storage_get: + case BPF_FUNC_inode_storage_delete: + if (map->map_type != BPF_MAP_TYPE_INODE_STORAGE) + goto error; + break; default: break; } @@@ -4868,11 -4775,6 +4868,11 @@@ static int check_helper_call(struct bpf return -EINVAL; }
+ if (fn->allowed && !fn->allowed(env->prog)) { + verbose(env, "helper call is not allowed in probe\n"); + return -EINVAL; + } + /* With LD_ABS/IND some JITs save/restore skb from r1. */ changes_data = bpf_helper_changes_pkt_data(fn->func); if (changes_data && fn->arg1_type != ARG_PTR_TO_CTX) { @@@ -5334,7 -5236,7 +5334,7 @@@ static int adjust_ptr_min_max_vals(stru off_reg == dst_reg ? dst : src); return -EACCES; } - /* fall-through */ + fallthrough; default: break; } @@@ -5830,67 -5732,6 +5830,67 @@@ static void scalar_min_max_or(struct bp __update_reg_bounds(dst_reg); }
+static void scalar32_min_max_xor(struct bpf_reg_state *dst_reg, + struct bpf_reg_state *src_reg) +{ + bool src_known = tnum_subreg_is_const(src_reg->var_off); + bool dst_known = tnum_subreg_is_const(dst_reg->var_off); + struct tnum var32_off = tnum_subreg(dst_reg->var_off); + s32 smin_val = src_reg->s32_min_value; + + /* Assuming scalar64_min_max_xor will be called so it is safe + * to skip updating register for known case. + */ + if (src_known && dst_known) + return; + + /* We get both minimum and maximum from the var32_off. */ + dst_reg->u32_min_value = var32_off.value; + dst_reg->u32_max_value = var32_off.value | var32_off.mask; + + if (dst_reg->s32_min_value >= 0 && smin_val >= 0) { + /* XORing two positive sign numbers gives a positive, + * so safe to cast u32 result into s32. + */ + dst_reg->s32_min_value = dst_reg->u32_min_value; + dst_reg->s32_max_value = dst_reg->u32_max_value; + } else { + dst_reg->s32_min_value = S32_MIN; + dst_reg->s32_max_value = S32_MAX; + } +} + +static void scalar_min_max_xor(struct bpf_reg_state *dst_reg, + struct bpf_reg_state *src_reg) +{ + bool src_known = tnum_is_const(src_reg->var_off); + bool dst_known = tnum_is_const(dst_reg->var_off); + s64 smin_val = src_reg->smin_value; + + if (src_known && dst_known) { + /* dst_reg->var_off.value has been updated earlier */ + __mark_reg_known(dst_reg, dst_reg->var_off.value); + return; + } + + /* We get both minimum and maximum from the var_off. */ + dst_reg->umin_value = dst_reg->var_off.value; + dst_reg->umax_value = dst_reg->var_off.value | dst_reg->var_off.mask; + + if (dst_reg->smin_value >= 0 && smin_val >= 0) { + /* XORing two positive sign numbers gives a positive, + * so safe to cast u64 result into s64. + */ + dst_reg->smin_value = dst_reg->umin_value; + dst_reg->smax_value = dst_reg->umax_value; + } else { + dst_reg->smin_value = S64_MIN; + dst_reg->smax_value = S64_MAX; + } + + __update_reg_bounds(dst_reg); +} + static void __scalar32_min_max_lsh(struct bpf_reg_state *dst_reg, u64 umin_val, u64 umax_val) { @@@ -6199,11 -6040,6 +6199,11 @@@ static int adjust_scalar_min_max_vals(s scalar32_min_max_or(dst_reg, &src_reg); scalar_min_max_or(dst_reg, &src_reg); break; + case BPF_XOR: + dst_reg->var_off = tnum_xor(dst_reg->var_off, src_reg.var_off); + scalar32_min_max_xor(dst_reg, &src_reg); + scalar_min_max_xor(dst_reg, &src_reg); + break; case BPF_LSH: if (umax_val >= insn_bitness) { /* Shifts greater than 31 or 63 are undefined. @@@ -7451,7 -7287,7 +7451,7 @@@ static int check_ld_abs(struct bpf_veri u8 mode = BPF_MODE(insn->code); int i, err;
- if (!may_access_skb(env->prog->type)) { + if (!may_access_skb(resolve_prog_type(env->prog))) { verbose(env, "BPF_LD_[ABS|IND] instructions not allowed for this program type\n"); return -EINVAL; } @@@ -7539,12 -7375,11 +7539,12 @@@ static int check_return_code(struct bpf const struct bpf_prog *prog = env->prog; struct bpf_reg_state *reg; struct tnum range = tnum_range(0, 1); + enum bpf_prog_type prog_type = resolve_prog_type(env->prog); int err;
/* LSM and struct_ops func-ptr's return type could be "void" */ - if ((env->prog->type == BPF_PROG_TYPE_STRUCT_OPS || - env->prog->type == BPF_PROG_TYPE_LSM) && + if ((prog_type == BPF_PROG_TYPE_STRUCT_OPS || + prog_type == BPF_PROG_TYPE_LSM) && !prog->aux->attach_func_proto->type) return 0;
@@@ -7563,7 -7398,7 +7563,7 @@@ return -EACCES; }
- switch (env->prog->type) { + switch (prog_type) { case BPF_PROG_TYPE_CGROUP_SOCK_ADDR: if (env->prog->expected_attach_type == BPF_CGROUP_UDP4_RECVMSG || env->prog->expected_attach_type == BPF_CGROUP_UDP6_RECVMSG || @@@ -9319,7 -9154,6 +9319,7 @@@ static int check_map_prog_compatibility struct bpf_prog *prog)
{ + enum bpf_prog_type prog_type = resolve_prog_type(prog); /* * Validate that trace type programs use preallocated hash maps. * @@@ -9337,8 -9171,8 +9337,8 @@@ * now, but warnings are emitted so developers are made aware of * the unsafety and can fix their programs before this is enforced. */ - if (is_tracing_prog_type(prog->type) && !is_preallocated_map(map)) { - if (prog->type == BPF_PROG_TYPE_PERF_EVENT) { + if (is_tracing_prog_type(prog_type) && !is_preallocated_map(map)) { + if (prog_type == BPF_PROG_TYPE_PERF_EVENT) { verbose(env, "perf_event programs can only use preallocated hash map\n"); return -EINVAL; } @@@ -9350,8 -9184,8 +9350,8 @@@ verbose(env, "trace type programs with run-time allocated hash maps are unsafe. Switch to preallocated hash maps.\n"); }
- if ((is_tracing_prog_type(prog->type) || - prog->type == BPF_PROG_TYPE_SOCKET_FILTER) && + if ((is_tracing_prog_type(prog_type) || + prog_type == BPF_PROG_TYPE_SOCKET_FILTER) && map_value_has_spin_lock(map)) { verbose(env, "tracing progs cannot use bpf_spin_lock yet\n"); return -EINVAL; @@@ -9368,23 -9202,6 +9368,23 @@@ return -EINVAL; }
+ if (prog->aux->sleepable) + switch (map->map_type) { + case BPF_MAP_TYPE_HASH: + case BPF_MAP_TYPE_LRU_HASH: + case BPF_MAP_TYPE_ARRAY: + if (!is_preallocated_map(map)) { + verbose(env, + "Sleepable programs can only use preallocated hash maps\n"); + return -EINVAL; + } + break; + default: + verbose(env, + "Sleepable programs can only use array and hash maps\n"); + return -EINVAL; + } + return 0; }
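In practice this means a map used from a sleepable program must either be an array or a hash map created without BPF_F_NO_PREALLOC (hash maps preallocate by default). In libbpf's BTF-style map syntax that is simply:

	struct {
		__uint(type, BPF_MAP_TYPE_HASH);
		__uint(max_entries, 128);	/* preallocated: no BPF_F_NO_PREALLOC */
		__type(key, __u32);
		__type(value, __u64);
	} counters SEC(".maps");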
@@@ -10080,7 -9897,7 +10080,7 @@@ static int convert_ctx_accesses(struct insn->code = BPF_LDX | BPF_PROBE_MEM | BPF_SIZE((insn)->code); env->prog->aux->num_exentries++; - } else if (env->prog->type != BPF_PROG_TYPE_STRUCT_OPS) { + } else if (resolve_prog_type(env->prog) != BPF_PROG_TYPE_STRUCT_OPS) { verbose(env, "Writes through BTF pointers are not allowed\n"); return -EINVAL; } @@@ -11003,37 -10820,6 +11003,37 @@@ static int check_attach_modify_return(s return -EINVAL; }
+/* non exhaustive list of sleepable bpf_lsm_*() functions */ +BTF_SET_START(btf_sleepable_lsm_hooks) +#ifdef CONFIG_BPF_LSM +BTF_ID(func, bpf_lsm_bprm_committed_creds) +#else +BTF_ID_UNUSED +#endif +BTF_SET_END(btf_sleepable_lsm_hooks) + +static int check_sleepable_lsm_hook(u32 btf_id) +{ + return btf_id_set_contains(&btf_sleepable_lsm_hooks, btf_id); +} + +/* list of non-sleepable functions that are otherwise on + * ALLOW_ERROR_INJECTION list + */ +BTF_SET_START(btf_non_sleepable_error_inject) +/* Three functions below can be called from sleepable and non-sleepable context. + * Assume non-sleepable from bpf safety point of view. + */ +BTF_ID(func, __add_to_page_cache_locked) +BTF_ID(func, should_fail_alloc_page) +BTF_ID(func, should_failslab) +BTF_SET_END(btf_non_sleepable_error_inject) + +static int check_non_sleepable_error_inject(u32 btf_id) +{ + return btf_id_set_contains(&btf_non_sleepable_error_inject, btf_id); +} + static int check_attach_btf_id(struct bpf_verifier_env *env) { struct bpf_prog *prog = env->prog; @@@ -11051,12 -10837,6 +11051,12 @@@ long addr; u64 key;
+ if (prog->aux->sleepable && prog->type != BPF_PROG_TYPE_TRACING && + prog->type != BPF_PROG_TYPE_LSM) { + verbose(env, "Only fentry/fexit/fmod_ret and lsm programs can be sleepable\n"); + return -EINVAL; + } + if (prog->type == BPF_PROG_TYPE_STRUCT_OPS) return check_struct_ops_btf_id(env);
@@@ -11208,7 -10988,7 +11208,7 @@@ default: if (!prog_extension) return -EINVAL; - /* fallthrough */ + fallthrough; case BPF_MODIFY_RETURN: case BPF_LSM_MAC: case BPF_TRACE_FENTRY: @@@ -11265,36 -11045,13 +11265,36 @@@ } }
- if (prog->expected_attach_type == BPF_MODIFY_RETURN) { + if (prog->aux->sleepable) { + ret = -EINVAL; + switch (prog->type) { + case BPF_PROG_TYPE_TRACING: + /* fentry/fexit/fmod_ret progs can be sleepable only if they are + * attached to ALLOW_ERROR_INJECTION and are not in denylist. + */ + if (!check_non_sleepable_error_inject(btf_id) && + within_error_injection_list(addr)) + ret = 0; + break; + case BPF_PROG_TYPE_LSM: + /* LSM progs check that they are attached to bpf_lsm_*() funcs. + * Only some of them are sleepable. + */ + if (check_sleepable_lsm_hook(btf_id)) + ret = 0; + break; + default: + break; + } + if (ret) + verbose(env, "%s is not sleepable\n", + prog->aux->attach_func_name); + } else if (prog->expected_attach_type == BPF_MODIFY_RETURN) { ret = check_attach_modify_return(prog, addr); if (ret) verbose(env, "%s() is not modifiable\n", prog->aux->attach_func_name); } - if (ret) goto out; tr->func.addr = (void *)addr; diff --combined net/batman-adv/bat_v_ogm.c index 11c3f98ba938,717fe657561d..8c1148fc73d7 --- a/net/batman-adv/bat_v_ogm.c +++ b/net/batman-adv/bat_v_ogm.c @@@ -20,7 -20,6 +20,7 @@@ #include <linux/lockdep.h> #include <linux/mutex.h> #include <linux/netdevice.h> +#include <linux/prandom.h> #include <linux/random.h> #include <linux/rculist.h> #include <linux/rcupdate.h> @@@ -882,6 -881,12 +882,12 @@@ static void batadv_v_ogm_process(const ntohl(ogm_packet->seqno), ogm_throughput, ogm_packet->ttl, ogm_packet->version, ntohs(ogm_packet->tvlv_len));
+ if (batadv_is_my_mac(bat_priv, ogm_packet->orig)) { + batadv_dbg(BATADV_DBG_BATMAN, bat_priv, + "Drop packet: originator packet from ourself\n"); + return; + } + /* If the throughput metric is 0, immediately drop the packet. No need * to create orig_node / neigh_node for an unusable route. */ @@@ -1009,11 -1014,6 +1015,6 @@@ int batadv_v_ogm_packet_recv(struct sk_ if (batadv_is_my_mac(bat_priv, ethhdr->h_source)) goto free_skb;
- ogm_packet = (struct batadv_ogm2_packet *)skb->data; - - if (batadv_is_my_mac(bat_priv, ogm_packet->orig)) - goto free_skb; - batadv_inc_counter(bat_priv, BATADV_CNT_MGMT_RX); batadv_add_counter(bat_priv, BATADV_CNT_MGMT_RX_BYTES, skb->len + ETH_HLEN); diff --combined net/batman-adv/bridge_loop_avoidance.c index 5c41cc52bc53,8500f56cbd10..ab6cec3c7586 --- a/net/batman-adv/bridge_loop_avoidance.c +++ b/net/batman-adv/bridge_loop_avoidance.c @@@ -437,7 -437,10 +437,10 @@@ static void batadv_bla_send_claim(struc batadv_add_counter(bat_priv, BATADV_CNT_RX_BYTES, skb->len + ETH_HLEN);
- netif_rx(skb); + if (in_interrupt()) + netif_rx(skb); + else + netif_rx_ni(skb); out: if (primary_if) batadv_hardif_put(primary_if); @@@ -1795,7 -1798,7 +1798,7 @@@ batadv_bla_loopdetect_check(struct bata
ret = queue_work(batadv_event_workqueue, &backbone_gw->report_work);
- /* backbone_gw is unreferenced in the report work function function + /* backbone_gw is unreferenced in the report work function * if queue_work() call was successful */ if (!ret) diff --combined net/core/devlink.c index 58c8bb07fa19,80ec1cd81c64..49e911c19881 --- a/net/core/devlink.c +++ b/net/core/devlink.c @@@ -5895,7 -5895,6 +5895,7 @@@ devlink_nl_cmd_health_reporter_get_dump list_for_each_entry(devlink, &devlink_list, list) { if (!net_eq(devlink_net(devlink), sock_net(msg->sk))) continue; + mutex_lock(&devlink->lock); list_for_each_entry(port, &devlink->port_list, list) { mutex_lock(&port->reporters_lock); list_for_each_entry(reporter, &port->reporter_list, list) { @@@ -5910,14 -5909,12 +5910,14 @@@ NLM_F_MULTI); if (err) { mutex_unlock(&port->reporters_lock); + mutex_unlock(&devlink->lock); goto out; } idx++; } mutex_unlock(&port->reporters_lock); } + mutex_unlock(&devlink->lock); } out: mutex_unlock(&devlink_mutex); @@@ -6199,8 -6196,8 +6199,8 @@@ devlink_trap_action_get_from_info(struc
val = nla_get_u8(info->attrs[DEVLINK_ATTR_TRAP_ACTION]); switch (val) { - case DEVLINK_TRAP_ACTION_DROP: /* fall-through */ - case DEVLINK_TRAP_ACTION_TRAP: /* fall-through */ + case DEVLINK_TRAP_ACTION_DROP: + case DEVLINK_TRAP_ACTION_TRAP: case DEVLINK_TRAP_ACTION_MIRROR: *p_trap_action = val; break; @@@ -7558,11 -7555,11 +7558,11 @@@ int devlink_port_register(struct devlin devlink_port->index = port_index; devlink_port->registered = true; spin_lock_init(&devlink_port->type_lock); + INIT_LIST_HEAD(&devlink_port->reporter_list); + mutex_init(&devlink_port->reporters_lock); list_add_tail(&devlink_port->list, &devlink->port_list); INIT_LIST_HEAD(&devlink_port->param_list); mutex_unlock(&devlink->lock); - INIT_LIST_HEAD(&devlink_port->reporter_list); - mutex_init(&devlink_port->reporters_lock); INIT_DELAYED_WORK(&devlink_port->type_warn_dw, &devlink_port_type_warn); devlink_port_type_warn_schedule(devlink_port); devlink_port_notify(devlink_port, DEVLINK_CMD_PORT_NEW); @@@ -7579,13 -7576,13 +7579,13 @@@ void devlink_port_unregister(struct dev { struct devlink *devlink = devlink_port->devlink;
- WARN_ON(!list_empty(&devlink_port->reporter_list)); - mutex_destroy(&devlink_port->reporters_lock); devlink_port_type_warn_cancel(devlink_port); devlink_port_notify(devlink_port, DEVLINK_CMD_PORT_DEL); mutex_lock(&devlink->lock); list_del(&devlink_port->list); mutex_unlock(&devlink->lock); + WARN_ON(!list_empty(&devlink_port->reporter_list)); + mutex_destroy(&devlink_port->reporters_lock); } EXPORT_SYMBOL_GPL(devlink_port_unregister);
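Both hunks fix the same lifetime bug: the reporter list and its mutex were initialized only after the port was already visible on devlink->port_list, and were destroyed while the port was still reachable. The safe shape is initialize-then-publish on register, and unpublish-then-destroy on unregister:

	/* register */
	INIT_LIST_HEAD(&port->reporter_list);
	mutex_init(&port->reporters_lock);
	mutex_lock(&devlink->lock);
	list_add_tail(&port->list, &devlink->port_list);
	mutex_unlock(&devlink->lock);

	/* unregister */
	mutex_lock(&devlink->lock);
	list_del(&port->list);
	mutex_unlock(&devlink->lock);
	WARN_ON(!list_empty(&port->reporter_list));
	mutex_destroy(&port->reporters_lock);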
diff --combined net/core/filter.c index 47eef9a0be6a,1f647ab986b6..2ad9c0ef1946 --- a/net/core/filter.c +++ b/net/core/filter.c @@@ -4459,7 -4459,6 +4459,7 @@@ static int _bpf_setsockopt(struct sock } else { struct inet_connection_sock *icsk = inet_csk(sk); struct tcp_sock *tp = tcp_sk(sk); + unsigned long timeout;
if (optlen != sizeof(int)) return -EINVAL; @@@ -4481,20 -4480,6 +4481,20 @@@ tp->snd_ssthresh = val; } break; + case TCP_BPF_DELACK_MAX: + timeout = usecs_to_jiffies(val); + if (timeout > TCP_DELACK_MAX || + timeout < TCP_TIMEOUT_MIN) + return -EINVAL; + inet_csk(sk)->icsk_delack_max = timeout; + break; + case TCP_BPF_RTO_MIN: + timeout = usecs_to_jiffies(val); + if (timeout > TCP_RTO_MIN || + timeout < TCP_TIMEOUT_MIN) + return -EINVAL; + inet_csk(sk)->icsk_rto_min = timeout; + break; case TCP_SAVE_SYN: if (val < 0 || val > 1) ret = -EINVAL; @@@ -4565,9 -4550,9 +4565,9 @@@ static int _bpf_getsockopt(struct sock tp = tcp_sk(sk);
if (optlen <= 0 || !tp->saved_syn || - optlen > tp->saved_syn[0]) + optlen > tcp_saved_syn_len(tp->saved_syn)) goto err_clear; - memcpy(optval, tp->saved_syn + 1, optlen); + memcpy(optval, tp->saved_syn->data, optlen); break; default: goto err_clear; @@@ -4669,99 -4654,9 +4669,99 @@@ static const struct bpf_func_proto bpf_ .arg5_type = ARG_CONST_SIZE, };
+static int bpf_sock_ops_get_syn(struct bpf_sock_ops_kern *bpf_sock, + int optname, const u8 **start) +{ + struct sk_buff *syn_skb = bpf_sock->syn_skb; + const u8 *hdr_start; + int ret; + + if (syn_skb) { + /* sk is a request_sock here */ + + if (optname == TCP_BPF_SYN) { + hdr_start = syn_skb->data; + ret = tcp_hdrlen(syn_skb); + } else if (optname == TCP_BPF_SYN_IP) { + hdr_start = skb_network_header(syn_skb); + ret = skb_network_header_len(syn_skb) + + tcp_hdrlen(syn_skb); + } else { + /* optname == TCP_BPF_SYN_MAC */ + hdr_start = skb_mac_header(syn_skb); + ret = skb_mac_header_len(syn_skb) + + skb_network_header_len(syn_skb) + + tcp_hdrlen(syn_skb); + } + } else { + struct sock *sk = bpf_sock->sk; + struct saved_syn *saved_syn; + + if (sk->sk_state == TCP_NEW_SYN_RECV) + /* synack retransmit. bpf_sock->syn_skb will + * not be available. It has to resort to + * saved_syn (if it is saved). + */ + saved_syn = inet_reqsk(sk)->saved_syn; + else + saved_syn = tcp_sk(sk)->saved_syn; + + if (!saved_syn) + return -ENOENT; + + if (optname == TCP_BPF_SYN) { + hdr_start = saved_syn->data + + saved_syn->mac_hdrlen + + saved_syn->network_hdrlen; + ret = saved_syn->tcp_hdrlen; + } else if (optname == TCP_BPF_SYN_IP) { + hdr_start = saved_syn->data + + saved_syn->mac_hdrlen; + ret = saved_syn->network_hdrlen + + saved_syn->tcp_hdrlen; + } else { + /* optname == TCP_BPF_SYN_MAC */ + + /* TCP_SAVE_SYN may not have saved the mac hdr */ + if (!saved_syn->mac_hdrlen) + return -ENOENT; + + hdr_start = saved_syn->data; + ret = saved_syn->mac_hdrlen + + saved_syn->network_hdrlen + + saved_syn->tcp_hdrlen; + } + } + + *start = hdr_start; + return ret; +} + BPF_CALL_5(bpf_sock_ops_getsockopt, struct bpf_sock_ops_kern *, bpf_sock, int, level, int, optname, char *, optval, int, optlen) { + if (IS_ENABLED(CONFIG_INET) && level == SOL_TCP && + optname >= TCP_BPF_SYN && optname <= TCP_BPF_SYN_MAC) { + int ret, copy_len = 0; + const u8 *start; + + ret = bpf_sock_ops_get_syn(bpf_sock, optname, &start); + if (ret > 0) { + copy_len = ret; + if (optlen < copy_len) { + copy_len = optlen; + ret = -ENOSPC; + } + + memcpy(optval, start, copy_len); + } + + /* Zero out unused buffer at the end */ + memset(optval + copy_len, 0, optlen - copy_len); + + return ret; + } + return _bpf_getsockopt(bpf_sock->sk, level, optname, optval, optlen); }
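With this in place, a sockops program can read back the connection's SYN headers through bpf_getsockopt() and the new TCP_BPF_SYN* optnames. A hedged BPF-C sketch, assuming the SYN was actually retained (e.g. TCP_SAVE_SYN on the listener, or the callback runs while syn_skb is still around); on a short buffer the helper returns -ENOSPC and zero-pads the tail:

	#include <linux/bpf.h>
	#include <netinet/tcp.h>	/* SOL_TCP */
	#include <bpf/bpf_helpers.h>

	SEC("sockops")
	int dump_syn(struct bpf_sock_ops *skops)
	{
		__u8 syn[64] = {};	/* room for the peer's TCP header */
		int len;

		if (skops->op == BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB) {
			len = bpf_getsockopt(skops, SOL_TCP, TCP_BPF_SYN,
					     syn, sizeof(syn));
			if (len > 0)
				bpf_printk("SYN tcp hdrlen %d", len);
		}
		return 1;
	}

	char _license[] SEC("license") = "GPL";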
@@@ -6255,232 -6150,6 +6255,232 @@@ static const struct bpf_func_proto bpf_ .arg3_type = ARG_ANYTHING, };
+static const u8 *bpf_search_tcp_opt(const u8 *op, const u8 *opend, + u8 search_kind, const u8 *magic, + u8 magic_len, bool *eol) +{ + u8 kind, kind_len; + + *eol = false; + + while (op < opend) { + kind = op[0]; + + if (kind == TCPOPT_EOL) { + *eol = true; + return ERR_PTR(-ENOMSG); + } else if (kind == TCPOPT_NOP) { + op++; + continue; + } + + if (opend - op < 2 || opend - op < op[1] || op[1] < 2) + /* Something is wrong in the received header. + * Follow the TCP stack's tcp_parse_options() + * and just bail here. + */ + return ERR_PTR(-EFAULT); + + kind_len = op[1]; + if (search_kind == kind) { + if (!magic_len) + return op; + + if (magic_len > kind_len - 2) + return ERR_PTR(-ENOMSG); + + if (!memcmp(&op[2], magic, magic_len)) + return op; + } + + op += kind_len; + } + + return ERR_PTR(-ENOMSG); +} + +BPF_CALL_4(bpf_sock_ops_load_hdr_opt, struct bpf_sock_ops_kern *, bpf_sock, + void *, search_res, u32, len, u64, flags) +{ + bool eol, load_syn = flags & BPF_LOAD_HDR_OPT_TCP_SYN; + const u8 *op, *opend, *magic, *search = search_res; + u8 search_kind, search_len, copy_len, magic_len; + int ret; + + /* 2 bytes is the minimal option len except TCPOPT_NOP and + * TCPOPT_EOL which are useless for the bpf prog to learn + * and this helper disallows loading them also. + */ + if (len < 2 || flags & ~BPF_LOAD_HDR_OPT_TCP_SYN) + return -EINVAL; + + search_kind = search[0]; + search_len = search[1]; + + if (search_len > len || search_kind == TCPOPT_NOP || + search_kind == TCPOPT_EOL) + return -EINVAL; + + if (search_kind == TCPOPT_EXP || search_kind == 253) { + /* 16 or 32 bit magic. +2 for kind and kind length */ + if (search_len != 4 && search_len != 6) + return -EINVAL; + magic = &search[2]; + magic_len = search_len - 2; + } else { + if (search_len) + return -EINVAL; + magic = NULL; + magic_len = 0; + } + + if (load_syn) { + ret = bpf_sock_ops_get_syn(bpf_sock, TCP_BPF_SYN, &op); + if (ret < 0) + return ret; + + opend = op + ret; + op += sizeof(struct tcphdr); + } else { + if (!bpf_sock->skb || + bpf_sock->op == BPF_SOCK_OPS_HDR_OPT_LEN_CB) + /* This bpf_sock->op cannot call this helper */ + return -EPERM; + + opend = bpf_sock->skb_data_end; + op = bpf_sock->skb->data + sizeof(struct tcphdr); + } + + op = bpf_search_tcp_opt(op, opend, search_kind, magic, magic_len, + &eol); + if (IS_ERR(op)) + return PTR_ERR(op); + + copy_len = op[1]; + ret = copy_len; + if (copy_len > len) { + ret = -ENOSPC; + copy_len = len; + } + + memcpy(search_res, op, copy_len); + return ret; +} + +static const struct bpf_func_proto bpf_sock_ops_load_hdr_opt_proto = { + .func = bpf_sock_ops_load_hdr_opt, + .gpl_only = false, + .ret_type = RET_INTEGER, + .arg1_type = ARG_PTR_TO_CTX, + .arg2_type = ARG_PTR_TO_MEM, + .arg3_type = ARG_CONST_SIZE, + .arg4_type = ARG_ANYTHING, +}; + +BPF_CALL_4(bpf_sock_ops_store_hdr_opt, struct bpf_sock_ops_kern *, bpf_sock, + const void *, from, u32, len, u64, flags) +{ + u8 new_kind, new_kind_len, magic_len = 0, *opend; + const u8 *op, *new_op, *magic = NULL; + struct sk_buff *skb; + bool eol; + + if (bpf_sock->op != BPF_SOCK_OPS_WRITE_HDR_OPT_CB) + return -EPERM; + + if (len < 2 || flags) + return -EINVAL; + + new_op = from; + new_kind = new_op[0]; + new_kind_len = new_op[1]; + + if (new_kind_len > len || new_kind == TCPOPT_NOP || + new_kind == TCPOPT_EOL) + return -EINVAL; + + if (new_kind_len > bpf_sock->remaining_opt_len) + return -ENOSPC; + + /* 253 is another experimental kind */ + if (new_kind == TCPOPT_EXP || new_kind == 253) { + if (new_kind_len < 4) + return -EINVAL; + /* Match for the 2 byte magic also. + * RFC 6994: the magic could be 2 or 4 bytes. + * Hence, matching by 2 byte only is on the + * conservative side but it is the right + * thing to do for the 'search-for-duplication' + * purpose. + */ + magic = &new_op[2]; + magic_len = 2; + } + + /* Check for duplication */ + skb = bpf_sock->skb; + op = skb->data + sizeof(struct tcphdr); + opend = bpf_sock->skb_data_end; + + op = bpf_search_tcp_opt(op, opend, new_kind, magic, magic_len, + &eol); + if (!IS_ERR(op)) + return -EEXIST; + + if (PTR_ERR(op) != -ENOMSG) + return PTR_ERR(op); + + if (eol) + /* The option has been ended. Treat it as if no more + * header options can be written. + */ + return -ENOSPC; + + /* No duplication found. Store the header option. */ + memcpy(opend, from, new_kind_len); + + bpf_sock->remaining_opt_len -= new_kind_len; + bpf_sock->skb_data_end += new_kind_len; + + return 0; +} + +static const struct bpf_func_proto bpf_sock_ops_store_hdr_opt_proto = { + .func = bpf_sock_ops_store_hdr_opt, + .gpl_only = false, + .ret_type = RET_INTEGER, + .arg1_type = ARG_PTR_TO_CTX, + .arg2_type = ARG_PTR_TO_MEM, + .arg3_type = ARG_CONST_SIZE, + .arg4_type = ARG_ANYTHING, +}; + +BPF_CALL_3(bpf_sock_ops_reserve_hdr_opt, struct bpf_sock_ops_kern *, bpf_sock, + u32, len, u64, flags) +{ + if (bpf_sock->op != BPF_SOCK_OPS_HDR_OPT_LEN_CB) + return -EPERM; + + if (flags || len < 2) + return -EINVAL; + + if (len > bpf_sock->remaining_opt_len) + return -ENOSPC; + + bpf_sock->remaining_opt_len -= len; + + return 0; +} + +static const struct bpf_func_proto bpf_sock_ops_reserve_hdr_opt_proto = { + .func = bpf_sock_ops_reserve_hdr_opt, + .gpl_only = false, + .ret_type = RET_INTEGER, + .arg1_type = ARG_PTR_TO_CTX, + .arg2_type = ARG_ANYTHING, + .arg3_type = ARG_ANYTHING, +}; + #endif /* CONFIG_INET */
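Writing an option is a two-callback protocol: reserve the space in BPF_SOCK_OPS_HDR_OPT_LEN_CB, then write the bytes in BPF_SOCK_OPS_WRITE_HDR_OPT_CB, where the store helper itself rejects duplicates (-EEXIST) and overflow (-ENOSPC). A hedged sketch with a made-up experimental option (kind 254 = TCPOPT_EXP, 2-byte magic, 2-byte payload), using the same headers as the previous sketch; it assumes the program has already enabled the header-option callbacks via bpf_sock_ops_cb_flags_set():

	SEC("sockops")
	int write_opt(struct bpf_sock_ops *skops)
	{
		/* kind=254 (experimental), total len=6, magic 0xeb9f, payload */
		__u8 opt[6] = { 254, 6, 0xeb, 0x9f, 0x00, 0x01 };

		switch (skops->op) {
		case BPF_SOCK_OPS_HDR_OPT_LEN_CB:
			bpf_reserve_hdr_opt(skops, sizeof(opt), 0);
			break;
		case BPF_SOCK_OPS_WRITE_HDR_OPT_CB:
			bpf_store_hdr_opt(skops, opt, sizeof(opt), 0);
			break;
		}
		return 1;
	}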
bool bpf_helper_changes_pkt_data(void *func) @@@ -6509,9 -6178,6 +6509,9 @@@ func == bpf_lwt_seg6_store_bytes || func == bpf_lwt_seg6_adjust_srh || func == bpf_lwt_seg6_action || +#endif +#ifdef CONFIG_INET + func == bpf_sock_ops_store_hdr_opt || #endif func == bpf_lwt_in_push_encap || func == bpf_lwt_xmit_push_encap) @@@ -6884,12 -6550,6 +6884,12 @@@ sock_ops_func_proto(enum bpf_func_id fu case BPF_FUNC_sk_storage_delete: return &bpf_sk_storage_delete_proto; #ifdef CONFIG_INET + case BPF_FUNC_load_hdr_opt: + return &bpf_sock_ops_load_hdr_opt_proto; + case BPF_FUNC_store_hdr_opt: + return &bpf_sock_ops_store_hdr_opt_proto; + case BPF_FUNC_reserve_hdr_opt: + return &bpf_sock_ops_reserve_hdr_opt_proto; case BPF_FUNC_tcp_sock: return &bpf_tcp_sock_proto; #endif /* CONFIG_INET */ @@@ -7689,20 -7349,6 +7689,20 @@@ static bool sock_ops_is_valid_access(in return false; info->reg_type = PTR_TO_SOCKET_OR_NULL; break; + case offsetof(struct bpf_sock_ops, skb_data): + if (size != sizeof(__u64)) + return false; + info->reg_type = PTR_TO_PACKET; + break; + case offsetof(struct bpf_sock_ops, skb_data_end): + if (size != sizeof(__u64)) + return false; + info->reg_type = PTR_TO_PACKET_END; + break; + case offsetof(struct bpf_sock_ops, skb_tcp_flags): + bpf_ctx_record_field_size(info, size_default); + return bpf_ctx_narrow_access_ok(off, size, + size_default); default: if (size != size_default) return false; @@@ -8804,22 -8450,17 +8804,22 @@@ static u32 sock_ops_convert_ctx_access( return insn - insn_buf;
switch (si->off) { - case offsetof(struct bpf_sock_ops, op) ... + case offsetof(struct bpf_sock_ops, op): + *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_sock_ops_kern, + op), + si->dst_reg, si->src_reg, + offsetof(struct bpf_sock_ops_kern, op)); + break; + + case offsetof(struct bpf_sock_ops, replylong[0]) ... offsetof(struct bpf_sock_ops, replylong[3]): - BUILD_BUG_ON(sizeof_field(struct bpf_sock_ops, op) != - sizeof_field(struct bpf_sock_ops_kern, op)); BUILD_BUG_ON(sizeof_field(struct bpf_sock_ops, reply) != sizeof_field(struct bpf_sock_ops_kern, reply)); BUILD_BUG_ON(sizeof_field(struct bpf_sock_ops, replylong) != sizeof_field(struct bpf_sock_ops_kern, replylong)); off = si->off; - off -= offsetof(struct bpf_sock_ops, op); - off += offsetof(struct bpf_sock_ops_kern, op); + off -= offsetof(struct bpf_sock_ops, replylong[0]); + off += offsetof(struct bpf_sock_ops_kern, replylong[0]); if (type == BPF_WRITE) *insn++ = BPF_STX_MEM(BPF_W, si->dst_reg, si->src_reg, off); @@@ -9040,49 -8681,6 +9040,49 @@@ case offsetof(struct bpf_sock_ops, sk): SOCK_OPS_GET_SK(); break; + case offsetof(struct bpf_sock_ops, skb_data_end): + *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_sock_ops_kern, + skb_data_end), + si->dst_reg, si->src_reg, + offsetof(struct bpf_sock_ops_kern, + skb_data_end)); + break; + case offsetof(struct bpf_sock_ops, skb_data): + *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_sock_ops_kern, + skb), + si->dst_reg, si->src_reg, + offsetof(struct bpf_sock_ops_kern, + skb)); + *insn++ = BPF_JMP_IMM(BPF_JEQ, si->dst_reg, 0, 1); + *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, data), + si->dst_reg, si->dst_reg, + offsetof(struct sk_buff, data)); + break; + case offsetof(struct bpf_sock_ops, skb_len): + *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_sock_ops_kern, + skb), + si->dst_reg, si->src_reg, + offsetof(struct bpf_sock_ops_kern, + skb)); + *insn++ = BPF_JMP_IMM(BPF_JEQ, si->dst_reg, 0, 1); + *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, len), + si->dst_reg, si->dst_reg, + offsetof(struct sk_buff, len)); + break; + case offsetof(struct bpf_sock_ops, skb_tcp_flags): + off = offsetof(struct sk_buff, cb); + off += offsetof(struct tcp_skb_cb, tcp_flags); + *target_size = sizeof_field(struct tcp_skb_cb, tcp_flags); + *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_sock_ops_kern, + skb), + si->dst_reg, si->src_reg, + offsetof(struct bpf_sock_ops_kern, + skb)); + *insn++ = BPF_JMP_IMM(BPF_JEQ, si->dst_reg, 0, 1); + *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct tcp_skb_cb, + tcp_flags), + si->dst_reg, si->dst_reg, off); + break; } return insn - insn_buf; } @@@ -9625,7 -9223,7 +9625,7 @@@ sk_reuseport_is_valid_access(int off, i case bpf_ctx_range(struct sk_reuseport_md, eth_protocol): if (size < sizeof_field(struct sk_buff, protocol)) return false; - /* fall through */ + fallthrough; case bpf_ctx_range(struct sk_reuseport_md, ip_protocol): case bpf_ctx_range(struct sk_reuseport_md, bind_inany): case bpf_ctx_range(struct sk_reuseport_md, len): diff --combined net/core/skbuff.c index a5c11aae9c89,6faf73d6a0f7..bfd748346f20 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c @@@ -820,6 -820,7 +820,7 @@@ void skb_tx_error(struct sk_buff *skb } EXPORT_SYMBOL(skb_tx_error);
+ #ifdef CONFIG_TRACEPOINTS /** * consume_skb - free an skbuff * @skb: buffer to free @@@ -837,6 -838,7 +838,7 @@@ void consume_skb(struct sk_buff *skb __kfree_skb(skb); } EXPORT_SYMBOL(consume_skb); + #endif
/** * consume_stateless_skb - free an skbuff, assuming it is stateless @@@ -5953,7 -5955,8 +5955,7 @@@ static int pskb_carve_inside_nonlinear( size = SKB_WITH_OVERHEAD(ksize(data));
memcpy((struct skb_shared_info *)(data + size), - skb_shinfo(skb), offsetof(struct skb_shared_info, - frags[skb_shinfo(skb)->nr_frags])); + skb_shinfo(skb), offsetof(struct skb_shared_info, frags[0])); if (skb_orphan_frags(skb, gfp_mask)) { kfree(data); return -ENOMEM; diff --combined net/core/skmsg.c index 1c81caf9630f,649583158983..4b5f7c8fecd1 --- a/net/core/skmsg.c +++ b/net/core/skmsg.c @@@ -494,34 -494,14 +494,34 @@@ end
struct sk_psock *sk_psock_init(struct sock *sk, int node) { - struct sk_psock *psock = kzalloc_node(sizeof(*psock), - GFP_ATOMIC | __GFP_NOWARN, - node); - if (!psock) - return NULL; + struct sk_psock *psock; + struct proto *prot;
+ write_lock_bh(&sk->sk_callback_lock); + + if (inet_csk_has_ulp(sk)) { + psock = ERR_PTR(-EINVAL); + goto out; + } + + if (sk->sk_user_data) { + psock = ERR_PTR(-EBUSY); + goto out; + } + + psock = kzalloc_node(sizeof(*psock), GFP_ATOMIC | __GFP_NOWARN, node); + if (!psock) { + psock = ERR_PTR(-ENOMEM); + goto out; + } + + prot = READ_ONCE(sk->sk_prot); psock->sk = sk; - psock->eval = __SK_NONE; + psock->eval = __SK_NONE; + psock->sk_proto = prot; + psock->saved_unhash = prot->unhash; + psock->saved_close = prot->close; + psock->saved_write_space = sk->sk_write_space;
INIT_LIST_HEAD(&psock->link); spin_lock_init(&psock->link_lock); @@@ -536,8 -516,6 +536,8 @@@ rcu_assign_sk_user_data_nocopy(sk, psock); sock_hold(sk);
+out: + write_unlock_bh(&sk->sk_callback_lock); return psock; } EXPORT_SYMBOL_GPL(sk_psock_init); @@@ -794,7 -772,6 +794,6 @@@ static void sk_psock_verdict_apply(stru sk_psock_skb_redirect(skb); break; case __SK_DROP: - /* fall-through */ default: out_free: kfree_skb(skb); diff --combined net/core/sock.c index 64d2aec5ed45,6c5c6b18eff4..ba9e7d91e2ef --- a/net/core/sock.c +++ b/net/core/sock.c @@@ -413,6 -413,18 +413,6 @@@ static int sock_set_timeout(long *timeo return 0; }
-static void sock_warn_obsolete_bsdism(const char *name) -{ - static int warned; - static char warncomm[TASK_COMM_LEN]; - if (strcmp(warncomm, current->comm) && warned < 5) { - strcpy(warncomm, current->comm); - pr_warn("process `%s' is using obsolete %s SO_BSDCOMPAT\n", - warncomm, name); - warned++; - } -} - static bool sock_needs_netstamp(const struct sock *sk) { switch (sk->sk_family) { @@@ -972,6 -984,7 +972,6 @@@ set_sndbuf break;
case SO_BSDCOMPAT: - sock_warn_obsolete_bsdism("setsockopt"); break;
case SO_PASSCRED: @@@ -995,7 -1008,7 +995,7 @@@ break; case SO_TIMESTAMPING_NEW: sock_set_flag(sk, SOCK_TSTAMP_NEW); - /* fall through */ + fallthrough; case SO_TIMESTAMPING_OLD: if (val & ~SOF_TIMESTAMPING_MASK) { ret = -EINVAL; @@@ -1374,6 -1387,7 +1374,6 @@@ int sock_getsockopt(struct socket *sock break;
case SO_BSDCOMPAT: - sock_warn_obsolete_bsdism("getsockopt"); break;
case SO_TIMESTAMP_OLD: @@@ -3240,7 -3254,7 +3240,7 @@@ void sk_common_release(struct sock *sk sk->sk_prot->destroy(sk);
/* - * Observation: when sock_common_release is called, processes have + * Observation: when sk_common_release is called, processes have * no access to socket. But net still has. * Step one, detach it from networking: * diff --combined net/ipv4/raw.c index dfba39473b1d,407956be7deb..1170653a89cd --- a/net/ipv4/raw.c +++ b/net/ipv4/raw.c @@@ -260,12 -260,11 +260,12 @@@ static void raw_err(struct sock *sk, st err = EHOSTUNREACH; if (code > NR_ICMP_UNREACH) break; - err = icmp_err_convert[code].errno; - harderr = icmp_err_convert[code].fatal; if (code == ICMP_FRAG_NEEDED) { harderr = inet->pmtudisc != IP_PMTUDISC_DONT; err = EMSGSIZE; + } else { + err = icmp_err_convert[code].errno; + harderr = icmp_err_convert[code].fatal; } }
@@@ -611,8 -610,8 +611,8 @@@ static int raw_sendmsg(struct sock *sk } else if (!ipc.oif) { ipc.oif = inet->uc_index; } else if (ipv4_is_lbcast(daddr) && inet->uc_index) { - /* oif is set, packet is to local broadcast and - * uc_index is set. oif is most likely set + /* oif is set, packet is to local broadcast + * and uc_index is set. oif is most likely set * by sk_bound_dev_if. If uc_index != oif check if the * oif is an L3 master and uc_index is an L3 slave. * If so, we want to allow the send using the uc_index. diff --combined net/mptcp/protocol.c index e6216c4f308c,365ba96c84b0..683196225f91 --- a/net/mptcp/protocol.c +++ b/net/mptcp/protocol.c @@@ -24,6 -24,8 +24,6 @@@ #include "protocol.h" #include "mib.h"
-#define MPTCP_SAME_STATE TCP_MAX_STATES - #if IS_ENABLED(CONFIG_MPTCP_IPV6) struct mptcp6_sock { struct mptcp_sock msk; @@@ -191,7 -193,6 +191,6 @@@ static void mptcp_check_data_fin_ack(st sk->sk_state_change(sk); break; case TCP_CLOSING: - fallthrough; case TCP_LAST_ACK: inet_sk_state_store(sk, TCP_CLOSE); sk->sk_state_change(sk); @@@ -890,7 -891,6 +889,6 @@@ restart goto out; }
- wait_for_sndbuf: __mptcp_flush_join_list(msk); ssk = mptcp_subflow_get_send(msk); while (!sk_stream_memory_free(sk) || @@@ -980,7 -980,7 +978,7 @@@ */ mptcp_set_timeout(sk, ssk); release_sock(ssk); - goto wait_for_sndbuf; + goto restart; } } } @@@ -1539,7 -1539,7 +1537,7 @@@ static void mptcp_subflow_shutdown(stru case TCP_LISTEN: if (!(how & RCV_SHUTDOWN)) break; - /* fall through */ + fallthrough; case TCP_SYN_SENT: tcp_disconnect(ssk, O_NONBLOCK); break; diff --combined net/netlabel/netlabel_domainhash.c index 38aaeadec13d,f73a8382c275..dc8c39f51f7d --- a/net/netlabel/netlabel_domainhash.c +++ b/net/netlabel/netlabel_domainhash.c @@@ -85,6 -85,7 +85,7 @@@ static void netlbl_domhsh_free_entry(st kfree(netlbl_domhsh_addr6_entry(iter6)); } #endif /* IPv6 */ + kfree(ptr->def.addrsel); } kfree(ptr->domain); kfree(ptr); @@@ -537,6 -538,8 +538,8 @@@ int netlbl_domhsh_add(struct netlbl_dom goto add_return; } #endif /* IPv6 */ + /* cleanup the new entry since we've moved everything over */ + netlbl_domhsh_free_entry(&entry->rcu); } else ret_val = -EINVAL;
@@@ -580,6 -583,12 +583,12 @@@ int netlbl_domhsh_remove_entry(struct n { int ret_val = 0; struct audit_buffer *audit_buf; + struct netlbl_af4list *iter4; + struct netlbl_domaddr4_map *map4; + #if IS_ENABLED(CONFIG_IPV6) + struct netlbl_af6list *iter6; + struct netlbl_domaddr6_map *map6; + #endif /* IPv6 */
if (entry == NULL) return -ENOENT; @@@ -597,48 -606,41 +606,40 @@@ ret_val = -ENOENT; spin_unlock(&netlbl_domhsh_lock);
+ if (ret_val) + return ret_val; + audit_buf = netlbl_audit_start_common(AUDIT_MAC_MAP_DEL, audit_info); if (audit_buf != NULL) { audit_log_format(audit_buf, - " nlbl_domain=%s res=%u", - entry->domain ? entry->domain : "(default)", - ret_val == 0 ? 1 : 0); + " nlbl_domain=%s res=1", + entry->domain ? entry->domain : "(default)"); audit_log_end(audit_buf); }
- if (ret_val == 0) { - struct netlbl_af4list *iter4; - struct netlbl_domaddr4_map *map4; - #if IS_ENABLED(CONFIG_IPV6) - struct netlbl_af6list *iter6; - struct netlbl_domaddr6_map *map6; - #endif /* IPv6 */ - - switch (entry->def.type) { - case NETLBL_NLTYPE_ADDRSELECT: - netlbl_af4list_foreach_rcu(iter4, - &entry->def.addrsel->list4) { - map4 = netlbl_domhsh_addr4_entry(iter4); - cipso_v4_doi_putdef(map4->def.cipso); - } + switch (entry->def.type) { + case NETLBL_NLTYPE_ADDRSELECT: + netlbl_af4list_foreach_rcu(iter4, &entry->def.addrsel->list4) { + map4 = netlbl_domhsh_addr4_entry(iter4); + cipso_v4_doi_putdef(map4->def.cipso); + } #if IS_ENABLED(CONFIG_IPV6) - netlbl_af6list_foreach_rcu(iter6, - &entry->def.addrsel->list6) { - map6 = netlbl_domhsh_addr6_entry(iter6); - calipso_doi_putdef(map6->def.calipso); - } + netlbl_af6list_foreach_rcu(iter6, &entry->def.addrsel->list6) { + map6 = netlbl_domhsh_addr6_entry(iter6); + calipso_doi_putdef(map6->def.calipso); + } #endif /* IPv6 */ - break; - case NETLBL_NLTYPE_CIPSOV4: - cipso_v4_doi_putdef(entry->def.cipso); - break; + break; + case NETLBL_NLTYPE_CIPSOV4: + cipso_v4_doi_putdef(entry->def.cipso); + break; #if IS_ENABLED(CONFIG_IPV6) - case NETLBL_NLTYPE_CALIPSO: - calipso_doi_putdef(entry->def.calipso); - break; + case NETLBL_NLTYPE_CALIPSO: + calipso_doi_putdef(entry->def.calipso); + break; #endif /* IPv6 */ - } - call_rcu(&entry->rcu, netlbl_domhsh_free_entry); } + call_rcu(&entry->rcu, netlbl_domhsh_free_entry);
return ret_val; } diff --combined net/netlink/af_netlink.c index 5cee1d0eaebe,d2d1448274f5..f9efd2c1cb50 --- a/net/netlink/af_netlink.c +++ b/net/netlink/af_netlink.c @@@ -353,7 -353,7 +353,7 @@@ static void netlink_rcv_wake(struct soc { struct netlink_sock *nlk = nlk_sk(sk);
- if (skb_queue_empty(&sk->sk_receive_queue)) + if (skb_queue_empty_lockless(&sk->sk_receive_queue)) clear_bit(NETLINK_S_CONGESTED, &nlk->state); if (!test_bit(NETLINK_S_CONGESTED, &nlk->state)) wake_up_interruptible(&nlk->wait); @@@ -848,7 -848,7 +848,7 @@@ retry * * Test to see if the opener of the socket we received the message * from had when the netlink socket was created and the sender of the - * message has has the capability @cap in the user namespace @user_ns. + * message has the capability @cap in the user namespace @user_ns. */ bool __netlink_ns_capable(const struct netlink_skb_parms *nsp, struct user_namespace *user_ns, int cap) @@@ -867,7 -867,7 +867,7 @@@ EXPORT_SYMBOL(__netlink_ns_capable) * * Test to see if the opener of the socket we received the message * from had when the netlink socket was created and the sender of the - * message has has the capability @cap in the user namespace @user_ns. + * message has the capability @cap in the user namespace @user_ns. */ bool netlink_ns_capable(const struct sk_buff *skb, struct user_namespace *user_ns, int cap) @@@ -883,7 -883,7 +883,7 @@@ EXPORT_SYMBOL(netlink_ns_capable) * * Test to see if the opener of the socket we received the message * from had when the netlink socket was created and the sender of the - * message has has the capability @cap in all user namespaces. + * message has the capability @cap in all user namespaces. */ bool netlink_capable(const struct sk_buff *skb, int cap) { @@@ -898,7 -898,7 +898,7 @@@ EXPORT_SYMBOL(netlink_capable) * * Test to see if the opener of the socket we received the message * from had when the netlink socket was created and the sender of the - * message has has the capability @cap over the network namespace of + * message has the capability @cap over the network namespace of * the socket we received the message from.
*/ bool netlink_net_capable(const struct sk_buff *skb, int cap) diff --combined net/netlink/policy.c index 5c9e7530865f,641ffbdd977a..62f977fa645a --- a/net/netlink/policy.c +++ b/net/netlink/policy.c @@@ -188,7 -188,7 +188,7 @@@ send_attribute goto next; case NLA_NESTED: type = NL_ATTR_TYPE_NESTED; - /* fall through */ + fallthrough; case NLA_NESTED_ARRAY: if (pt->type == NLA_NESTED_ARRAY) type = NL_ATTR_TYPE_NESTED_ARRAY; @@@ -254,6 -254,12 +254,6 @@@ pt->bitfield32_valid)) goto nla_put_failure; break; - case NLA_EXACT_LEN: - type = NL_ATTR_TYPE_BINARY; - if (nla_put_u32(skb, NL_POLICY_TYPE_ATTR_MIN_LENGTH, pt->len) || - nla_put_u32(skb, NL_POLICY_TYPE_ATTR_MAX_LENGTH, pt->len)) - goto nla_put_failure; - break; case NLA_STRING: case NLA_NUL_STRING: case NLA_BINARY: @@@ -263,27 -269,14 +263,27 @@@ type = NL_ATTR_TYPE_NUL_STRING; else type = NL_ATTR_TYPE_BINARY; - if (pt->len && nla_put_u32(skb, NL_POLICY_TYPE_ATTR_MAX_LENGTH, - pt->len)) - goto nla_put_failure; - break; - case NLA_MIN_LEN: - type = NL_ATTR_TYPE_BINARY; - if (nla_put_u32(skb, NL_POLICY_TYPE_ATTR_MIN_LENGTH, pt->len)) + + if (pt->validation_type == NLA_VALIDATE_RANGE || + pt->validation_type == NLA_VALIDATE_RANGE_WARN_TOO_LONG) { + struct netlink_range_validation range; + + nla_get_range_unsigned(pt, &range); + + if (range.min && + nla_put_u32(skb, NL_POLICY_TYPE_ATTR_MIN_LENGTH, + range.min)) + goto nla_put_failure; + + if (range.max < U16_MAX && + nla_put_u32(skb, NL_POLICY_TYPE_ATTR_MAX_LENGTH, + range.max)) + goto nla_put_failure; + } else if (pt->len && + nla_put_u32(skb, NL_POLICY_TYPE_ATTR_MAX_LENGTH, + pt->len)) { goto nla_put_failure; + } break; case NLA_FLAG: type = NL_ATTR_TYPE_FLAG; diff --combined net/sctp/sm_make_chunk.c index 467bd77b6986,c11c24524652..9a56ae2f3651 --- a/net/sctp/sm_make_chunk.c +++ b/net/sctp/sm_make_chunk.c @@@ -1235,7 -1235,7 +1235,7 @@@ nodata
/* Create an Operation Error chunk of a fixed size, specifically, * min(asoc->pathmtu, SCTP_DEFAULT_MAXSEGMENT) - overheads. - * This is a helper function to allocate an error chunk for for those + * This is a helper function to allocate an error chunk for those * invalid parameter codes in which we may not want to report all the * errors, if the incoming chunk is large. If it can't fit in a single * packet, we ignore it. @@@ -1780,7 -1780,7 +1780,7 @@@ no_hmac * for init collision case of lost COOKIE ACK. * If skb has been timestamped, then use the stamp, otherwise * use current time. This introduces a small possibility that - * that a cookie may be considered expired, but his would only slow + * a cookie may be considered expired, but this would only slow * down the new association establishment instead of every packet. */ if (sock_flag(ep->base.sk, SOCK_TIMESTAMP)) @@@ -2077,7 -2077,7 +2077,7 @@@ static enum sctp_ierror sctp_process_un break; case SCTP_PARAM_ACTION_DISCARD_ERR: retval = SCTP_IERROR_ERROR; - /* Fall through */ + fallthrough; case SCTP_PARAM_ACTION_SKIP_ERR: /* Make an ERROR chunk, preparing enough room for * returning multiple unknown parameters. @@@ -2319,7 -2319,7 +2319,7 @@@ int sctp_process_init(struct sctp_assoc
/* This implementation defaults to making the first transport * added as the primary transport. The source address seems to - * be a a better choice than any of the embedded addresses. + * be a better choice than any of the embedded addresses. */ if (!sctp_assoc_add_peer(asoc, peer_addr, gfp, SCTP_ACTIVE)) goto nomem; diff --combined net/socket.c index e84a8e281b4c,0c0144604f81..82262e1922f9 --- a/net/socket.c +++ b/net/socket.c @@@ -2628,11 -2628,9 +2628,11 @@@ long __sys_recvmsg_sock(struct socket * struct user_msghdr __user *umsg, struct sockaddr __user *uaddr, unsigned int flags) { - /* disallow ancillary data requests from this path */ - if (msg->msg_control || msg->msg_controllen) - return -EINVAL; + if (msg->msg_control || msg->msg_controllen) { + /* disallow ancillary data reqs unless cmsg is plain data */ + if (!(sock->ops->flags & PROTO_CMSG_DATA_ONLY)) + return -EINVAL; + }
return ____sys_recvmsg(sock, msg, umsg, uaddr, flags, 0); } @@@ -3612,7 -3610,7 +3612,7 @@@ int kernel_getsockname(struct socket *s EXPORT_SYMBOL(kernel_getsockname);
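The recvmsg change above stops rejecting ancillary data outright on this path: protocols whose control messages carry plain data can opt in through the PROTO_CMSG_DATA_ONLY flag tested on sock->ops->flags. A sketch of such an opt-in, assuming the flags field behaves as that test implies (illustrative only, not taken from this patch):

	/* Hypothetical proto_ops advertising data-only cmsg payloads,
	 * so __sys_recvmsg_sock() no longer fails with -EINVAL when
	 * msg_control is set.
	 */
	static const struct proto_ops example_dgram_ops = {
		.family  = PF_INET,
		.flags   = PROTO_CMSG_DATA_ONLY,
		.recvmsg = example_recvmsg,
		/* ... remaining handlers elided ... */
	};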
/** - * kernel_peername - get the address which the socket is connected (kernel space) + * kernel_getpeername - get the address which the socket is connected (kernel space) * @sock: socket * @addr: address holder * @@@ -3673,7 -3671,7 +3673,7 @@@ int kernel_sendpage_locked(struct sock EXPORT_SYMBOL(kernel_sendpage_locked);
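Both kernel-doc fixups in this file (kernel_getpeername above, kernel_sock_shutdown below) repair the same class of bug: scripts/kernel-doc keys the comment to the identifier named on its first line, so a stale name documents a symbol that does not exist. For reference, the expected shape of such a header (schematic example, not real code):

	/**
	 * function_name - one-line summary naming the actual function
	 * @arg: what the argument means
	 *
	 * Return: what the caller gets back.
	 */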
/** - * kernel_shutdown - shut down part of a full-duplex connection (kernel space) + * kernel_sock_shutdown - shut down part of a full-duplex connection (kernel space) * @sock: socket * @how: connection part * diff --combined net/tipc/socket.c index fd5bfaab8661,ebd280e767bd..dd93e8ecb2f4 --- a/net/tipc/socket.c +++ b/net/tipc/socket.c @@@ -52,6 -52,7 +52,6 @@@ #define NAGLE_START_MAX 1024 #define CONN_TIMEOUT_DEFAULT 8000 /* default connect timeout = 8s */ #define CONN_PROBING_INTV msecs_to_jiffies(3600000) /* [ms] => 1 h */ -#define TIPC_FWD_MSG 1 #define TIPC_MAX_PORT 0xffffffff #define TIPC_MIN_PORT 1 #define TIPC_ACK_RATE 4 /* ACK at 1/4 of rcv window size */ @@@ -2770,18 -2771,21 +2770,21 @@@ static int tipc_shutdown(struct socket
trace_tipc_sk_shutdown(sk, NULL, TIPC_DUMP_ALL, " "); __tipc_shutdown(sock, TIPC_CONN_SHUTDOWN); - sk->sk_shutdown = SEND_SHUTDOWN; + if (tipc_sk_type_connectionless(sk)) + sk->sk_shutdown = SHUTDOWN_MASK; + else + sk->sk_shutdown = SEND_SHUTDOWN;
if (sk->sk_state == TIPC_DISCONNECTING) { /* Discard any unreceived messages */ __skb_queue_purge(&sk->sk_receive_queue);
- /* Wake up anyone sleeping in poll */ - sk->sk_state_change(sk); res = 0; } else { res = -ENOTCONN; } + /* Wake up anyone sleeping in poll. */ + sk->sk_state_change(sk);
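The wake-up is hoisted out of the TIPC_DISCONNECTING branch on purpose: shutdown() must wake anyone sleeping in poll() even when the socket never reached a connected state. A hypothetical two-thread reproducer of the old behavior (illustrative only):

	/* Thread A: parks in poll() on a connectionless TIPC socket */
	struct pollfd pfd = { .fd = sd, .events = POLLIN };
	poll(&pfd, 1, -1);

	/* Thread B: previously this returned -ENOTCONN without calling
	 * sk_state_change(), so thread A kept sleeping; now the wake-up
	 * runs unconditionally.
	 */
	shutdown(sd, SHUT_RDWR);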
release_sock(sk); return res; diff --combined net/wireless/nl80211.c index 201d029687cc,2c9e9a2d1688..52a35e788547 --- a/net/wireless/nl80211.c +++ b/net/wireless/nl80211.c @@@ -336,13 -336,6 +336,13 @@@ static const struct nla_policy nl80211_ .len = NL80211_MAX_SUPP_HT_RATES }, [NL80211_TXRATE_VHT] = NLA_POLICY_EXACT_LEN_WARN(sizeof(struct nl80211_txrate_vht)), [NL80211_TXRATE_GI] = { .type = NLA_U8 }, + [NL80211_TXRATE_HE] = NLA_POLICY_EXACT_LEN(sizeof(struct nl80211_txrate_he)), + [NL80211_TXRATE_HE_GI] = NLA_POLICY_RANGE(NLA_U8, + NL80211_RATE_INFO_HE_GI_0_8, + NL80211_RATE_INFO_HE_GI_3_2), + [NL80211_TXRATE_HE_LTF] = NLA_POLICY_RANGE(NLA_U8, + NL80211_RATE_INFO_HE_1XLTF, + NL80211_RATE_INFO_HE_4XLTF), };
static const struct nla_policy @@@ -546,10 -539,7 +546,10 @@@ static const struct nla_policy nl80211_ [NL80211_ATTR_BG_SCAN_PERIOD] = { .type = NLA_U16 }, [NL80211_ATTR_WDEV] = { .type = NLA_U64 }, [NL80211_ATTR_USER_REG_HINT_TYPE] = { .type = NLA_U32 }, - [NL80211_ATTR_AUTH_DATA] = { .type = NLA_BINARY, }, + + /* need to include at least Auth Transaction and Status Code */ + [NL80211_ATTR_AUTH_DATA] = NLA_POLICY_MIN_LEN(4), + [NL80211_ATTR_VHT_CAPABILITY] = NLA_POLICY_EXACT_LEN_WARN(NL80211_VHT_CAPABILITY_LEN), [NL80211_ATTR_SCAN_FLAGS] = { .type = NLA_U32 }, [NL80211_ATTR_P2P_CTWINDOW] = NLA_POLICY_MAX(NLA_U8, 127), @@@ -571,30 -561,23 +571,30 @@@ [NL80211_ATTR_IE_RIC] = { .type = NLA_BINARY, .len = IEEE80211_MAX_DATA_LEN }, [NL80211_ATTR_CRIT_PROT_ID] = { .type = NLA_U16 }, - [NL80211_ATTR_MAX_CRIT_PROT_DURATION] = { .type = NLA_U16 }, + [NL80211_ATTR_MAX_CRIT_PROT_DURATION] = + NLA_POLICY_MAX(NLA_U16, NL80211_CRIT_PROTO_MAX_DURATION), [NL80211_ATTR_PEER_AID] = NLA_POLICY_RANGE(NLA_U16, 1, IEEE80211_MAX_AID), [NL80211_ATTR_CH_SWITCH_COUNT] = { .type = NLA_U32 }, [NL80211_ATTR_CH_SWITCH_BLOCK_TX] = { .type = NLA_FLAG }, [NL80211_ATTR_CSA_IES] = { .type = NLA_NESTED }, - [NL80211_ATTR_CSA_C_OFF_BEACON] = { .type = NLA_BINARY }, - [NL80211_ATTR_CSA_C_OFF_PRESP] = { .type = NLA_BINARY }, - [NL80211_ATTR_STA_SUPPORTED_CHANNELS] = { .type = NLA_BINARY }, - [NL80211_ATTR_STA_SUPPORTED_OPER_CLASSES] = { .type = NLA_BINARY }, + [NL80211_ATTR_CNTDWN_OFFS_BEACON] = { .type = NLA_BINARY }, + [NL80211_ATTR_CNTDWN_OFFS_PRESP] = { .type = NLA_BINARY }, + [NL80211_ATTR_STA_SUPPORTED_CHANNELS] = NLA_POLICY_MIN_LEN(2), + /* + * The value of the Length field of the Supported Operating + * Classes element is between 2 and 253. + */ + [NL80211_ATTR_STA_SUPPORTED_OPER_CLASSES] = + NLA_POLICY_RANGE(NLA_BINARY, 2, 253), [NL80211_ATTR_HANDLE_DFS] = { .type = NLA_FLAG }, [NL80211_ATTR_OPMODE_NOTIF] = { .type = NLA_U8 }, [NL80211_ATTR_VENDOR_ID] = { .type = NLA_U32 }, [NL80211_ATTR_VENDOR_SUBCMD] = { .type = NLA_U32 }, [NL80211_ATTR_VENDOR_DATA] = { .type = NLA_BINARY }, - [NL80211_ATTR_QOS_MAP] = { .type = NLA_BINARY, - .len = IEEE80211_QOS_MAP_LEN_MAX }, + [NL80211_ATTR_QOS_MAP] = NLA_POLICY_RANGE(NLA_BINARY, + IEEE80211_QOS_MAP_LEN_MIN, + IEEE80211_QOS_MAP_LEN_MAX), [NL80211_ATTR_MAC_HINT] = NLA_POLICY_EXACT_LEN_WARN(ETH_ALEN), [NL80211_ATTR_WIPHY_FREQ_HINT] = { .type = NLA_U32 }, [NL80211_ATTR_TDLS_PEER_CAPABILITY] = { .type = NLA_U32 }, @@@ -642,17 -625,15 +642,17 @@@ .len = FILS_ERP_MAX_RRK_LEN }, [NL80211_ATTR_FILS_CACHE_ID] = NLA_POLICY_EXACT_LEN_WARN(2), [NL80211_ATTR_PMK] = { .type = NLA_BINARY, .len = PMK_MAX_LEN }, + [NL80211_ATTR_PMKR0_NAME] = NLA_POLICY_EXACT_LEN(WLAN_PMK_NAME_LEN), [NL80211_ATTR_SCHED_SCAN_MULTI] = { .type = NLA_FLAG }, [NL80211_ATTR_EXTERNAL_AUTH_SUPPORT] = { .type = NLA_FLAG },
[NL80211_ATTR_TXQ_LIMIT] = { .type = NLA_U32 }, [NL80211_ATTR_TXQ_MEMORY_LIMIT] = { .type = NLA_U32 }, [NL80211_ATTR_TXQ_QUANTUM] = { .type = NLA_U32 }, - [NL80211_ATTR_HE_CAPABILITY] = { .type = NLA_BINARY, - .len = NL80211_HE_MAX_CAPABILITY_LEN }, - + [NL80211_ATTR_HE_CAPABILITY] = + NLA_POLICY_RANGE(NLA_BINARY, + NL80211_HE_MIN_CAPABILITY_LEN, + NL80211_HE_MAX_CAPABILITY_LEN), [NL80211_ATTR_FTM_RESPONDER] = NLA_POLICY_NESTED(nl80211_ftm_responder_policy), [NL80211_ATTR_TIMEOUT] = NLA_POLICY_MIN(NLA_U32, 1), @@@ -673,8 -654,10 +673,8 @@@ [NL80211_ATTR_RECEIVE_MULTICAST] = { .type = NLA_FLAG }, [NL80211_ATTR_WIPHY_FREQ_OFFSET] = NLA_POLICY_RANGE(NLA_U32, 0, 999), [NL80211_ATTR_SCAN_FREQ_KHZ] = { .type = NLA_NESTED }, - [NL80211_ATTR_HE_6GHZ_CAPABILITY] = { - .type = NLA_EXACT_LEN, - .len = sizeof(struct ieee80211_he_6ghz_capa), - }, + [NL80211_ATTR_HE_6GHZ_CAPABILITY] = + NLA_POLICY_EXACT_LEN(sizeof(struct ieee80211_he_6ghz_capa)), };
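A pattern worth calling out across this file: open-coded length and range checks in the command handlers are replaced by declarative nla_policy entries, so malformed attributes are rejected by the netlink core before any handler runs (the removed checks appear further down as deleted lines). A condensed sketch of the macros involved, with made-up attribute names:

	/* Hypothetical policy illustrating the validation macros used
	 * above; the EXAMPLE_ATTR_* names are invented for this sketch.
	 */
	static const struct nla_policy example_policy[EXAMPLE_ATTR_MAX + 1] = {
		/* at least 4 bytes (e.g. auth transaction + status code) */
		[EXAMPLE_ATTR_AUTH_DATA] = NLA_POLICY_MIN_LEN(4),
		/* binary blob of 2..253 bytes, bounds checked by the core */
		[EXAMPLE_ATTR_OPER_CLASSES] = NLA_POLICY_RANGE(NLA_BINARY, 2, 253),
		/* exactly ETH_ALEN bytes, anything else is rejected */
		[EXAMPLE_ATTR_MAC] = NLA_POLICY_EXACT_LEN(ETH_ALEN),
		/* u8 constrained to a valid enum range */
		[EXAMPLE_ATTR_HE_GI] = NLA_POLICY_RANGE(NLA_U8,
							NL80211_RATE_INFO_HE_GI_0_8,
							NL80211_RATE_INFO_HE_GI_3_2),
	};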
/* policy for the key attributes */ @@@ -720,7 -703,7 +720,7 @@@ nl80211_wowlan_tcp_policy[NUM_NL80211_W [NL80211_WOWLAN_TCP_DST_MAC] = NLA_POLICY_EXACT_LEN_WARN(ETH_ALEN), [NL80211_WOWLAN_TCP_SRC_PORT] = { .type = NLA_U16 }, [NL80211_WOWLAN_TCP_DST_PORT] = { .type = NLA_U16 }, - [NL80211_WOWLAN_TCP_DATA_PAYLOAD] = { .type = NLA_MIN_LEN, .len = 1 }, + [NL80211_WOWLAN_TCP_DATA_PAYLOAD] = NLA_POLICY_MIN_LEN(1), [NL80211_WOWLAN_TCP_DATA_PAYLOAD_SEQ] = { .len = sizeof(struct nl80211_wowlan_tcp_data_seq) }, @@@ -728,8 -711,8 +728,8 @@@ .len = sizeof(struct nl80211_wowlan_tcp_data_token) }, [NL80211_WOWLAN_TCP_DATA_INTERVAL] = { .type = NLA_U32 }, - [NL80211_WOWLAN_TCP_WAKE_PAYLOAD] = { .type = NLA_MIN_LEN, .len = 1 }, - [NL80211_WOWLAN_TCP_WAKE_MASK] = { .type = NLA_MIN_LEN, .len = 1 }, + [NL80211_WOWLAN_TCP_WAKE_PAYLOAD] = NLA_POLICY_MIN_LEN(1), + [NL80211_WOWLAN_TCP_WAKE_MASK] = NLA_POLICY_MIN_LEN(1), }; #endif /* CONFIG_PM */
@@@ -755,7 -738,7 +755,7 @@@ nl80211_rekey_policy[NUM_NL80211_REKEY_ .type = NLA_BINARY, .len = NL80211_KCK_EXT_LEN }, - [NL80211_REKEY_DATA_REPLAY_CTR] = NLA_POLICY_EXACT_LEN_WARN(NL80211_REPLAY_CTR_LEN), + [NL80211_REKEY_DATA_REPLAY_CTR] = NLA_POLICY_EXACT_LEN(NL80211_REPLAY_CTR_LEN), [NL80211_REKEY_DATA_AKM] = { .type = NLA_U32 }, };
@@@ -795,8 -778,7 +795,8 @@@ nl80211_bss_select_policy[NL80211_BSS_S /* policy for NAN function attributes */ static const struct nla_policy nl80211_nan_func_policy[NL80211_NAN_FUNC_ATTR_MAX + 1] = { - [NL80211_NAN_FUNC_TYPE] = { .type = NLA_U8 }, + [NL80211_NAN_FUNC_TYPE] = + NLA_POLICY_MAX(NLA_U8, NL80211_NAN_FUNC_MAX_TYPE), [NL80211_NAN_FUNC_SERVICE_ID] = { .len = NL80211_NAN_FUNC_SERVICE_ID_LEN }, [NL80211_NAN_FUNC_PUBLISH_TYPE] = { .type = NLA_U8 }, @@@ -4437,106 -4419,21 +4437,106 @@@ static bool vht_set_mcs_mask(struct iee return true; }
+static u16 he_mcs_map_to_mcs_mask(u8 he_mcs_map) +{ + switch (he_mcs_map) { + case IEEE80211_HE_MCS_NOT_SUPPORTED: + return 0; + case IEEE80211_HE_MCS_SUPPORT_0_7: + return 0x00FF; + case IEEE80211_HE_MCS_SUPPORT_0_9: + return 0x03FF; + case IEEE80211_HE_MCS_SUPPORT_0_11: + return 0xFFF; + default: + break; + } + return 0; +} + +static void he_build_mcs_mask(u16 he_mcs_map, + u16 he_mcs_mask[NL80211_HE_NSS_MAX]) +{ + u8 nss; + + for (nss = 0; nss < NL80211_HE_NSS_MAX; nss++) { + he_mcs_mask[nss] = he_mcs_map_to_mcs_mask(he_mcs_map & 0x03); + he_mcs_map >>= 2; + } +} + +static u16 he_get_txmcsmap(struct genl_info *info, + const struct ieee80211_sta_he_cap *he_cap) +{ + struct net_device *dev = info->user_ptr[1]; + struct wireless_dev *wdev = dev->ieee80211_ptr; + __le16 tx_mcs; + + switch (wdev->chandef.width) { + case NL80211_CHAN_WIDTH_80P80: + tx_mcs = he_cap->he_mcs_nss_supp.tx_mcs_80p80; + break; + case NL80211_CHAN_WIDTH_160: + tx_mcs = he_cap->he_mcs_nss_supp.tx_mcs_160; + break; + default: + tx_mcs = he_cap->he_mcs_nss_supp.tx_mcs_80; + break; + } + return le16_to_cpu(tx_mcs); +} + +static bool he_set_mcs_mask(struct genl_info *info, + struct wireless_dev *wdev, + struct ieee80211_supported_band *sband, + struct nl80211_txrate_he *txrate, + u16 mcs[NL80211_HE_NSS_MAX]) +{ + const struct ieee80211_sta_he_cap *he_cap; + u16 tx_mcs_mask[NL80211_HE_NSS_MAX] = {}; + u16 tx_mcs_map = 0; + u8 i; + + he_cap = ieee80211_get_he_iftype_cap(sband, wdev->iftype); + if (!he_cap) + return false; + + memset(mcs, 0, sizeof(u16) * NL80211_HE_NSS_MAX); + + tx_mcs_map = he_get_txmcsmap(info, he_cap); + + /* Build he_mcs_mask from HE capabilities */ + he_build_mcs_mask(tx_mcs_map, tx_mcs_mask); + + for (i = 0; i < NL80211_HE_NSS_MAX; i++) { + if ((tx_mcs_mask[i] & txrate->mcs[i]) == txrate->mcs[i]) + mcs[i] = txrate->mcs[i]; + else + return false; + } + + return true; +} + static int nl80211_parse_tx_bitrate_mask(struct genl_info *info, struct nlattr *attrs[], enum nl80211_attrs attr, - struct cfg80211_bitrate_mask *mask) + struct cfg80211_bitrate_mask *mask, + struct net_device *dev) { struct nlattr *tb[NL80211_TXRATE_MAX + 1]; struct cfg80211_registered_device *rdev = info->user_ptr[0]; + struct wireless_dev *wdev = dev->ieee80211_ptr; int rem, i; struct nlattr *tx_rates; struct ieee80211_supported_band *sband; - u16 vht_tx_mcs_map; + u16 vht_tx_mcs_map, he_tx_mcs_map;
memset(mask, 0, sizeof(*mask)); /* Default to all rates enabled */ for (i = 0; i < NUM_NL80211_BANDS; i++) { + const struct ieee80211_sta_he_cap *he_cap; + sband = rdev->wiphy.bands[i];
if (!sband) @@@ -4552,16 -4449,6 +4552,16 @@@
vht_tx_mcs_map = le16_to_cpu(sband->vht_cap.vht_mcs.tx_mcs_map); vht_build_mcs_mask(vht_tx_mcs_map, mask->control[i].vht_mcs); + + he_cap = ieee80211_get_he_iftype_cap(sband, wdev->iftype); + if (!he_cap) + continue; + + he_tx_mcs_map = he_get_txmcsmap(info, he_cap); + he_build_mcs_mask(he_tx_mcs_map, mask->control[i].he_mcs); + + mask->control[i].he_gi = 0xFF; + mask->control[i].he_ltf = 0xFF; }
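The HE tx MCS map used above packs two bits per spatial stream (0 = MCS 0-7, 1 = MCS 0-9, 2 = MCS 0-11, 3 = not supported), and he_build_mcs_mask() expands each pair into a 16-bit per-stream rate mask. A worked example under those definitions: a map of 0xfffa advertises MCS 0-11 on the first two streams and nothing on the remaining six:

	u16 mcs_mask[NL80211_HE_NSS_MAX];

	he_build_mcs_mask(0xfffa, mcs_mask);
	/* 0xfffa = ...11 11 10 10 in 2-bit groups, lowest group first:
	 *   mcs_mask[0] == 0xfff   (NSS 1: MCS 0-11)
	 *   mcs_mask[1] == 0xfff   (NSS 2: MCS 0-11)
	 *   mcs_mask[2..7] == 0    (IEEE80211_HE_MCS_NOT_SUPPORTED)
	 */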
/* if no rates are given set it back to the defaults */ @@@ -4617,25 -4504,13 +4617,25 @@@ if (mask->control[band].gi > NL80211_TXRATE_FORCE_LGI) return -EINVAL; } + if (tb[NL80211_TXRATE_HE] && + !he_set_mcs_mask(info, wdev, sband, + nla_data(tb[NL80211_TXRATE_HE]), + mask->control[band].he_mcs)) + return -EINVAL; + if (tb[NL80211_TXRATE_HE_GI]) + mask->control[band].he_gi = + nla_get_u8(tb[NL80211_TXRATE_HE_GI]); + if (tb[NL80211_TXRATE_HE_LTF]) + mask->control[band].he_ltf = + nla_get_u8(tb[NL80211_TXRATE_HE_LTF]);
if (mask->control[band].legacy == 0) { - /* don't allow empty legacy rates if HT or VHT + /* don't allow empty legacy rates if HT, VHT or HE * are not even supported. */ if (!(rdev->wiphy.bands[band]->ht_cap.ht_supported || - rdev->wiphy.bands[band]->vht_cap.vht_supported)) + rdev->wiphy.bands[band]->vht_cap.vht_supported || + ieee80211_get_he_iftype_cap(sband, wdev->iftype))) return -EINVAL;
for (i = 0; i < IEEE80211_HT_MCS_MASK_LEN; i++) @@@ -4646,10 -4521,6 +4646,10 @@@ if (mask->control[band].vht_mcs[i]) goto out;
+ for (i = 0; i < NL80211_HE_NSS_MAX; i++) + if (mask->control[band].he_mcs[i]) + goto out; + /* legacy and mcs rates may not be both empty */ return -EINVAL; } @@@ -4960,9 -4831,8 +4960,9 @@@ static bool nl80211_valid_auth_type(str return false; return true; case NL80211_CMD_START_AP: - /* SAE not supported yet */ - if (auth_type == NL80211_AUTHTYPE_SAE) + if (!wiphy_ext_feature_isset(&rdev->wiphy, + NL80211_EXT_FEATURE_SAE_OFFLOAD_AP) && + auth_type == NL80211_AUTHTYPE_SAE) return false; /* FILS not supported yet */ if (auth_type == NL80211_AUTHTYPE_FILS_SK || @@@ -5026,7 -4896,8 +5026,7 @@@ static int nl80211_start_ap(struct sk_b params.ssid = nla_data(info->attrs[NL80211_ATTR_SSID]); params.ssid_len = nla_len(info->attrs[NL80211_ATTR_SSID]); - if (params.ssid_len == 0 || - params.ssid_len > IEEE80211_MAX_SSID_LEN) + if (params.ssid_len == 0) return -EINVAL; }
@@@ -5095,8 -4966,7 +5095,8 @@@ if (info->attrs[NL80211_ATTR_TX_RATES]) { err = nl80211_parse_tx_bitrate_mask(info, info->attrs, NL80211_ATTR_TX_RATES, - ¶ms.beacon_rate); + ¶ms.beacon_rate, + dev); if (err) return err;
@@@ -5967,9 -5837,11 +5967,9 @@@ static int nl80211_parse_sta_channel_in nla_len(info->attrs[NL80211_ATTR_STA_SUPPORTED_CHANNELS]); /* * Need to include at least one (first channel, number of - * channels) tuple for each subband, and must have proper - * tuples for the rest of the data as well. + * channels) tuple for each subband (checked in policy), + * and must have proper tuples for the rest of the data as well. */ - if (params->supported_channels_len < 2) - return -EINVAL; if (params->supported_channels_len % 2) return -EINVAL; } @@@ -5979,6 -5851,13 +5979,6 @@@ nla_data(info->attrs[NL80211_ATTR_STA_SUPPORTED_OPER_CLASSES]); params->supported_oper_classes_len = nla_len(info->attrs[NL80211_ATTR_STA_SUPPORTED_OPER_CLASSES]); - /* - * The value of the Length field of the Supported Operating - * Classes element is between 2 and 253. - */ - if (params->supported_oper_classes_len < 2 || - params->supported_oper_classes_len > 253) - return -EINVAL; } return 0; } @@@ -6001,6 -5880,9 +6001,6 @@@ static int nl80211_set_station_tdls(str nla_data(info->attrs[NL80211_ATTR_HE_CAPABILITY]); params->he_capa_len = nla_len(info->attrs[NL80211_ATTR_HE_CAPABILITY]); - - if (params->he_capa_len < NL80211_HE_MIN_CAPABILITY_LEN) - return -EINVAL; }
err = nl80211_parse_sta_channel_info(info, params); @@@ -6129,7 -6011,7 +6129,7 @@@ static int nl80211_set_station(struct s
if (info->attrs[NL80211_ATTR_HE_6GHZ_CAPABILITY]) params.he_6ghz_capa = - nla_data(info->attrs[NL80211_ATTR_HE_CAPABILITY]); + nla_data(info->attrs[NL80211_ATTR_HE_6GHZ_CAPABILITY]);
if (info->attrs[NL80211_ATTR_AIRTIME_WEIGHT]) params.airtime_weight = @@@ -6259,6 -6141,10 +6259,6 @@@ static int nl80211_new_station(struct s nla_data(info->attrs[NL80211_ATTR_HE_CAPABILITY]); params.he_capa_len = nla_len(info->attrs[NL80211_ATTR_HE_CAPABILITY]); - - /* max len is validated in nla policy */ - if (params.he_capa_len < NL80211_HE_MIN_CAPABILITY_LEN) - return -EINVAL; }
if (info->attrs[NL80211_ATTR_HE_6GHZ_CAPABILITY]) @@@ -8530,14 -8416,23 +8530,14 @@@ nl80211_parse_sched_scan(struct wiphy * }
if (ssid) { - if (nla_len(ssid) > IEEE80211_MAX_SSID_LEN) { - err = -EINVAL; - goto out_free; - } memcpy(request->match_sets[i].ssid.ssid, nla_data(ssid), nla_len(ssid)); request->match_sets[i].ssid.ssid_len = nla_len(ssid); } - if (bssid) { - if (nla_len(bssid) != ETH_ALEN) { - err = -EINVAL; - goto out_free; - } + if (bssid) memcpy(request->match_sets[i].bssid, nla_data(bssid), ETH_ALEN); - }
/* special attribute - old implementation w/a */ request->match_sets[i].rssi_thold = default_match_rssi; @@@ -8892,10 -8787,10 +8892,10 @@@ static int nl80211_channel_switch(struc if (err) return err;
- if (!csa_attrs[NL80211_ATTR_CSA_C_OFF_BEACON]) + if (!csa_attrs[NL80211_ATTR_CNTDWN_OFFS_BEACON]) return -EINVAL;
- len = nla_len(csa_attrs[NL80211_ATTR_CSA_C_OFF_BEACON]); + len = nla_len(csa_attrs[NL80211_ATTR_CNTDWN_OFFS_BEACON]); if (!len || (len % sizeof(u16))) return -EINVAL;
@@@ -8906,7 -8801,7 +8906,7 @@@ return -EINVAL;
params.counter_offsets_beacon = - nla_data(csa_attrs[NL80211_ATTR_CSA_C_OFF_BEACON]); + nla_data(csa_attrs[NL80211_ATTR_CNTDWN_OFFS_BEACON]);
/* sanity checks - counters should fit and be the same */ for (i = 0; i < params.n_counter_offsets_beacon; i++) { @@@ -8919,8 -8814,8 +8919,8 @@@ return -EINVAL; }
- if (csa_attrs[NL80211_ATTR_CSA_C_OFF_PRESP]) { - len = nla_len(csa_attrs[NL80211_ATTR_CSA_C_OFF_PRESP]); + if (csa_attrs[NL80211_ATTR_CNTDWN_OFFS_PRESP]) { + len = nla_len(csa_attrs[NL80211_ATTR_CNTDWN_OFFS_PRESP]); if (!len || (len % sizeof(u16))) return -EINVAL;
@@@ -8931,7 -8826,7 +8931,7 @@@ return -EINVAL;
params.counter_offsets_presp = - nla_data(csa_attrs[NL80211_ATTR_CSA_C_OFF_PRESP]); + nla_data(csa_attrs[NL80211_ATTR_CNTDWN_OFFS_PRESP]);
/* sanity checks - counters should fit and be the same */ for (i = 0; i < params.n_counter_offsets_presp; i++) { @@@ -9414,6 -9309,9 +9414,6 @@@ static int nl80211_authenticate(struct return -EINVAL; auth_data = nla_data(info->attrs[NL80211_ATTR_AUTH_DATA]); auth_data_len = nla_len(info->attrs[NL80211_ATTR_AUTH_DATA]); - /* need to include at least Auth Transaction and Status Code */ - if (auth_data_len < 4) - return -EINVAL; }
local_state_change = !!info->attrs[NL80211_ATTR_LOCAL_STATE_CHANGE]; @@@ -9553,9 -9451,7 +9553,9 @@@ static int nl80211_crypto_settings(stru
if (info->attrs[NL80211_ATTR_SAE_PASSWORD]) { if (!wiphy_ext_feature_isset(&rdev->wiphy, - NL80211_EXT_FEATURE_SAE_OFFLOAD)) + NL80211_EXT_FEATURE_SAE_OFFLOAD) && + !wiphy_ext_feature_isset(&rdev->wiphy, + NL80211_EXT_FEATURE_SAE_OFFLOAD_AP)) return -EINVAL; settings->sae_pwd = nla_data(info->attrs[NL80211_ATTR_SAE_PASSWORD]); @@@ -10902,8 -10798,7 +10902,8 @@@ static int nl80211_set_tx_bitrate_mask( return -EOPNOTSUPP;
err = nl80211_parse_tx_bitrate_mask(info, info->attrs, - NL80211_ATTR_TX_RATES, &mask); + NL80211_ATTR_TX_RATES, &mask, + dev); if (err) return err;
@@@ -11511,8 -11406,7 +11511,8 @@@ static int nl80211_join_mesh(struct sk_ if (info->attrs[NL80211_ATTR_TX_RATES]) { err = nl80211_parse_tx_bitrate_mask(info, info->attrs, NL80211_ATTR_TX_RATES, - &setup.beacon_rate); + &setup.beacon_rate, + dev); if (err) return err;
@@@ -12464,6 -12358,8 +12464,6 @@@ static int nl80211_set_rekey_data(struc if (!tb[NL80211_REKEY_DATA_REPLAY_CTR] || !tb[NL80211_REKEY_DATA_KEK] || !tb[NL80211_REKEY_DATA_KCK]) return -EINVAL; - if (nla_len(tb[NL80211_REKEY_DATA_REPLAY_CTR]) != NL80211_REPLAY_CTR_LEN) - return -ERANGE; if (nla_len(tb[NL80211_REKEY_DATA_KEK]) != NL80211_KEK_LEN && !(rdev->wiphy.flags & WIPHY_FLAG_SUPPORTS_EXT_KEK_KCK && nla_len(tb[NL80211_REKEY_DATA_KEK]) == NL80211_KEK_EXT_LEN)) @@@ -12788,7 -12684,8 +12788,7 @@@ static int nl80211_nan_add_func(struct
func->cookie = cfg80211_assign_cookie(rdev);
- if (!tb[NL80211_NAN_FUNC_TYPE] || - nla_get_u8(tb[NL80211_NAN_FUNC_TYPE]) > NL80211_NAN_FUNC_MAX_TYPE) { + if (!tb[NL80211_NAN_FUNC_TYPE]) { err = -EINVAL; goto out; } @@@ -13278,6 -13175,9 +13278,6 @@@ static int nl80211_crit_protocol_start( duration = nla_get_u16(info->attrs[NL80211_ATTR_MAX_CRIT_PROT_DURATION]);
- if (duration > NL80211_CRIT_PROTO_MAX_DURATION) - return -ERANGE; - ret = rdev_crit_proto_start(rdev, wdev, proto, duration); if (!ret) rdev->crit_proto_nlportid = info->snd_portid; @@@ -13662,7 -13562,8 +13662,7 @@@ static int nl80211_set_qos_map(struct s pos = nla_data(info->attrs[NL80211_ATTR_QOS_MAP]); len = nla_len(info->attrs[NL80211_ATTR_QOS_MAP]);
- if (len % 2 || len < IEEE80211_QOS_MAP_LEN_MIN || - len > IEEE80211_QOS_MAP_LEN_MAX) + if (len % 2) return -EINVAL;
qos_map = kzalloc(sizeof(struct cfg80211_qos_map), GFP_KERNEL); @@@ -13930,9 -13831,17 +13930,9 @@@ static int nl80211_set_pmk(struct sk_bu goto out; }
- if (info->attrs[NL80211_ATTR_PMKR0_NAME]) { - int r0_name_len = nla_len(info->attrs[NL80211_ATTR_PMKR0_NAME]); - - if (r0_name_len != WLAN_PMK_NAME_LEN) { - ret = -EINVAL; - goto out; - } - + if (info->attrs[NL80211_ATTR_PMKR0_NAME]) pmk_conf.pmk_r0_name = nla_data(info->attrs[NL80211_ATTR_PMKR0_NAME]); - }
ret = rdev_set_pmk(rdev, dev, &pmk_conf); out: @@@ -13991,7 -13900,8 +13991,7 @@@ static int nl80211_external_auth(struc
if (info->attrs[NL80211_ATTR_SSID]) { params.ssid.ssid_len = nla_len(info->attrs[NL80211_ATTR_SSID]); - if (params.ssid.ssid_len == 0 || - params.ssid.ssid_len > IEEE80211_MAX_SSID_LEN) + if (params.ssid.ssid_len == 0) return -EINVAL; memcpy(params.ssid.ssid, nla_data(info->attrs[NL80211_ATTR_SSID]), @@@ -14292,7 -14202,7 +14292,7 @@@ static int parse_tid_conf(struct cfg802 if (tid_conf->txrate_type != NL80211_TX_RATE_AUTOMATIC) { attr = NL80211_TID_CONFIG_ATTR_TX_RATE; err = nl80211_parse_tx_bitrate_mask(info, attrs, attr, - &tid_conf->txrate_mask); + &tid_conf->txrate_mask, dev); if (err) return err;
diff --combined net/wireless/reg.c index dcd3d39a5372,d8a90d397423..0ab7808fcec8 --- a/net/wireless/reg.c +++ b/net/wireless/reg.c @@@ -1594,7 -1594,7 +1594,7 @@@ freq_reg_info_regd(u32 center_freq
/* * We only need to know if one frequency rule was - * was in center_freq's band, that's enough, so lets + * in center_freq's band, that's enough, so let's * not overwrite it once found */ if (!band_rule_found) @@@ -1691,18 -1691,57 +1691,18 @@@ static uint32_t reg_rule_to_chan_bw_fla return bw_flags; }
-/* - * Note that right now we assume the desired channel bandwidth - * is always 20 MHz for each individual channel (HT40 uses 20 MHz - * per channel, the primary and the extension channel). - */ -static void handle_channel(struct wiphy *wiphy, - enum nl80211_reg_initiator initiator, - struct ieee80211_channel *chan) +static void handle_channel_single_rule(struct wiphy *wiphy, + enum nl80211_reg_initiator initiator, + struct ieee80211_channel *chan, + u32 flags, + struct regulatory_request *lr, + struct wiphy *request_wiphy, + const struct ieee80211_reg_rule *reg_rule) { - u32 flags, bw_flags = 0; - const struct ieee80211_reg_rule *reg_rule = NULL; + u32 bw_flags = 0; const struct ieee80211_power_rule *power_rule = NULL; - struct wiphy *request_wiphy = NULL; - struct regulatory_request *lr = get_last_request(); const struct ieee80211_regdomain *regd;
- request_wiphy = wiphy_idx_to_wiphy(lr->wiphy_idx); - - flags = chan->orig_flags; - - reg_rule = freq_reg_info(wiphy, ieee80211_channel_to_khz(chan)); - if (IS_ERR(reg_rule)) { - /* - * We will disable all channels that do not match our - * received regulatory rule unless the hint is coming - * from a Country IE and the Country IE had no information - * about a band. The IEEE 802.11 spec allows for an AP - * to send only a subset of the regulatory rules allowed, - * so an AP in the US that only supports 2.4 GHz may only send - * a country IE with information for the 2.4 GHz band - * while 5 GHz is still supported. - */ - if (initiator == NL80211_REGDOM_SET_BY_COUNTRY_IE && - PTR_ERR(reg_rule) == -ERANGE) - return; - - if (lr->initiator == NL80211_REGDOM_SET_BY_DRIVER && - request_wiphy && request_wiphy == wiphy && - request_wiphy->regulatory_flags & REGULATORY_STRICT_REG) { - pr_debug("Disabling freq %d.%03d MHz for good\n", - chan->center_freq, chan->freq_offset); - chan->orig_flags |= IEEE80211_CHAN_DISABLED; - chan->flags = chan->orig_flags; - } else { - pr_debug("Disabling freq %d.%03d MHz\n", - chan->center_freq, chan->freq_offset); - chan->flags |= IEEE80211_CHAN_DISABLED; - } - return; - } - regd = reg_get_regdomain(wiphy);
power_rule = ®_rule->power_rule; @@@ -1764,204 -1803,6 +1764,204 @@@ chan->max_power = chan->max_reg_power; }
+static void handle_channel_adjacent_rules(struct wiphy *wiphy, + enum nl80211_reg_initiator initiator, + struct ieee80211_channel *chan, + u32 flags, + struct regulatory_request *lr, + struct wiphy *request_wiphy, + const struct ieee80211_reg_rule *rrule1, + const struct ieee80211_reg_rule *rrule2, + struct ieee80211_freq_range *comb_range) +{ + u32 bw_flags1 = 0; + u32 bw_flags2 = 0; + const struct ieee80211_power_rule *power_rule1 = NULL; + const struct ieee80211_power_rule *power_rule2 = NULL; + const struct ieee80211_regdomain *regd; + + regd = reg_get_regdomain(wiphy); + + power_rule1 = &rrule1->power_rule; + power_rule2 = &rrule2->power_rule; + bw_flags1 = reg_rule_to_chan_bw_flags(regd, rrule1, chan); + bw_flags2 = reg_rule_to_chan_bw_flags(regd, rrule2, chan); + + if (lr->initiator == NL80211_REGDOM_SET_BY_DRIVER && + request_wiphy && request_wiphy == wiphy && + request_wiphy->regulatory_flags & REGULATORY_STRICT_REG) { + /* This guarantees the driver's requested regulatory domain + * will always be used as a base for further regulatory + * settings + */ + chan->flags = + map_regdom_flags(rrule1->flags) | + map_regdom_flags(rrule2->flags) | + bw_flags1 | + bw_flags2; + chan->orig_flags = chan->flags; + chan->max_antenna_gain = + min_t(int, MBI_TO_DBI(power_rule1->max_antenna_gain), + MBI_TO_DBI(power_rule2->max_antenna_gain)); + chan->orig_mag = chan->max_antenna_gain; + chan->max_reg_power = + min_t(int, MBM_TO_DBM(power_rule1->max_eirp), + MBM_TO_DBM(power_rule2->max_eirp)); + chan->max_power = chan->max_reg_power; + chan->orig_mpwr = chan->max_reg_power; + + if (chan->flags & IEEE80211_CHAN_RADAR) { + chan->dfs_cac_ms = IEEE80211_DFS_MIN_CAC_TIME_MS; + if (rrule1->dfs_cac_ms || rrule2->dfs_cac_ms) + chan->dfs_cac_ms = max_t(unsigned int, + rrule1->dfs_cac_ms, + rrule2->dfs_cac_ms); + } + + return; + } + + chan->dfs_state = NL80211_DFS_USABLE; + chan->dfs_state_entered = jiffies; + + chan->beacon_found = false; + chan->flags = flags | bw_flags1 | bw_flags2 | + map_regdom_flags(rrule1->flags) | + map_regdom_flags(rrule2->flags); + + /* reg_rule_to_chan_bw_flags may forbid 10 and 20 MHz + * (otherwise there would be no adjacent-rule case), so recheck here + */ + if (cfg80211_does_bw_fit_range(comb_range, + ieee80211_channel_to_khz(chan), + MHZ_TO_KHZ(10))) + chan->flags &= ~IEEE80211_CHAN_NO_10MHZ; + if (cfg80211_does_bw_fit_range(comb_range, + ieee80211_channel_to_khz(chan), + MHZ_TO_KHZ(20))) + chan->flags &= ~IEEE80211_CHAN_NO_20MHZ; + + chan->max_antenna_gain = + min_t(int, chan->orig_mag, + min_t(int, + MBI_TO_DBI(power_rule1->max_antenna_gain), + MBI_TO_DBI(power_rule2->max_antenna_gain))); + chan->max_reg_power = min_t(int, + MBM_TO_DBM(power_rule1->max_eirp), + MBM_TO_DBM(power_rule2->max_eirp)); + + if (chan->flags & IEEE80211_CHAN_RADAR) { + if (rrule1->dfs_cac_ms || rrule2->dfs_cac_ms) + chan->dfs_cac_ms = max_t(unsigned int, + rrule1->dfs_cac_ms, + rrule2->dfs_cac_ms); + else + chan->dfs_cac_ms = IEEE80211_DFS_MIN_CAC_TIME_MS; + } + + if (chan->orig_mpwr) { + /* Devices that use REGULATORY_COUNTRY_IE_FOLLOW_POWER + * will always follow the passed country IE power settings.
+ */ + if (initiator == NL80211_REGDOM_SET_BY_COUNTRY_IE && + wiphy->regulatory_flags & REGULATORY_COUNTRY_IE_FOLLOW_POWER) + chan->max_power = chan->max_reg_power; + else + chan->max_power = min(chan->orig_mpwr, + chan->max_reg_power); + } else { + chan->max_power = chan->max_reg_power; + } +} + +/* Note that right now we assume the desired channel bandwidth + * is always 20 MHz for each individual channel (HT40 uses 20 MHz + * per channel, the primary and the extension channel). + */ +static void handle_channel(struct wiphy *wiphy, + enum nl80211_reg_initiator initiator, + struct ieee80211_channel *chan) +{ + const u32 orig_chan_freq = ieee80211_channel_to_khz(chan); + struct regulatory_request *lr = get_last_request(); + struct wiphy *request_wiphy = wiphy_idx_to_wiphy(lr->wiphy_idx); + const struct ieee80211_reg_rule *rrule = NULL; + const struct ieee80211_reg_rule *rrule1 = NULL; + const struct ieee80211_reg_rule *rrule2 = NULL; + + u32 flags = chan->orig_flags; + + rrule = freq_reg_info(wiphy, orig_chan_freq); + if (IS_ERR(rrule)) { + /* check for adjacent match, therefore get rules for + * chan - 20 MHz and chan + 20 MHz and test + * if reg rules are adjacent + */ + rrule1 = freq_reg_info(wiphy, + orig_chan_freq - MHZ_TO_KHZ(20)); + rrule2 = freq_reg_info(wiphy, + orig_chan_freq + MHZ_TO_KHZ(20)); + if (!IS_ERR(rrule1) && !IS_ERR(rrule2)) { + struct ieee80211_freq_range comb_range; + + if (rrule1->freq_range.end_freq_khz != + rrule2->freq_range.start_freq_khz) + goto disable_chan; + + comb_range.start_freq_khz = + rrule1->freq_range.start_freq_khz; + comb_range.end_freq_khz = + rrule2->freq_range.end_freq_khz; + comb_range.max_bandwidth_khz = + min_t(u32, + rrule1->freq_range.max_bandwidth_khz, + rrule2->freq_range.max_bandwidth_khz); + + if (!cfg80211_does_bw_fit_range(&comb_range, + orig_chan_freq, + MHZ_TO_KHZ(20))) + goto disable_chan; + + handle_channel_adjacent_rules(wiphy, initiator, chan, + flags, lr, request_wiphy, + rrule1, rrule2, + &comb_range); + return; + } + +disable_chan: + /* We will disable all channels that do not match our + * received regulatory rule unless the hint is coming + * from a Country IE and the Country IE had no information + * about a band. The IEEE 802.11 spec allows for an AP + * to send only a subset of the regulatory rules allowed, + * so an AP in the US that only supports 2.4 GHz may only send + * a country IE with information for the 2.4 GHz band + * while 5 GHz is still supported. + */ + if (initiator == NL80211_REGDOM_SET_BY_COUNTRY_IE && + PTR_ERR(rrule) == -ERANGE) + return; + + if (lr->initiator == NL80211_REGDOM_SET_BY_DRIVER && + request_wiphy && request_wiphy == wiphy && + request_wiphy->regulatory_flags & REGULATORY_STRICT_REG) { + pr_debug("Disabling freq %d.%03d MHz for good\n", + chan->center_freq, chan->freq_offset); + chan->orig_flags |= IEEE80211_CHAN_DISABLED; + chan->flags = chan->orig_flags; + } else { + pr_debug("Disabling freq %d.%03d MHz\n", + chan->center_freq, chan->freq_offset); + chan->flags |= IEEE80211_CHAN_DISABLED; + } + return; + } + + handle_channel_single_rule(wiphy, initiator, chan, flags, lr, + request_wiphy, rrule); +} + static void handle_band(struct wiphy *wiphy, enum nl80211_reg_initiator initiator, struct ieee80211_supported_band *sband) @@@ -3105,6 -2946,9 +3105,9 @@@ int regulatory_hint_user(const char *al if (WARN_ON(!alpha2)) return -EINVAL;
+ if (!is_world_regdom(alpha2) && !is_an_alpha2(alpha2)) + return -EINVAL; + request = kzalloc(sizeof(struct regulatory_request), GFP_KERNEL); if (!request) return -ENOMEM; @@@ -3326,7 -3170,7 +3329,7 @@@ static void restore_custom_reg_settings * - send a user regulatory hint if applicable * * Device drivers that send a regulatory hint for a specific country - * keep their own regulatory domain on wiphy->regd so that does does + * keep their own regulatory domain on wiphy->regd so that does * not need to be remembered. */ static void restore_regulatory_settings(bool reset_user, bool cached)