The following commit has been merged in the master branch:

commit cf244463a286ea57ea7e63c33614d302f776e62e
Merge: cacfd6bfc381ce0e71dfb4ab902ca0fb0e1abe0f 41b9fb381a486360b2daaec0c7480f8e3ff72bc7
Author: Jakub Kicinski <kuba@kernel.org>
Date:   Thu Feb 1 14:33:26 2024 -0800
Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net
Cross-merge networking fixes after downstream PR.
No conflicts or adjacent changes.
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
diff --combined MAINTAINERS
index 05899589d74e,722b894f305e..f70c15292707
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@@ -3168,10 -3168,10 +3168,10 @@@ F:	drivers/hwmon/asus-ec-sensors.
  
  ASUS NOTEBOOKS AND EEEPC ACPI/WMI EXTRAS DRIVERS
  M:	Corentin Chary <corentin.chary@gmail.com>
- L:	acpi4asus-user@lists.sourceforge.net
+ M:	Luke D. Jones <luke@ljones.dev>
  L:	platform-driver-x86@vger.kernel.org
  S:	Maintained
- W:	http://acpi4asus.sf.net
+ W:	https://asus-linux.org/
  F:	drivers/platform/x86/asus*.c
  F:	drivers/platform/x86/eeepc*.c
  
@@@ -3799,7 -3799,6 +3799,7 @@@ M:	Alexei Starovoitov <ast@kernel.org
  M:	Daniel Borkmann <daniel@iogearbox.net>
  M:	Andrii Nakryiko <andrii@kernel.org>
  R:	Martin KaFai Lau <martin.lau@linux.dev>
 +R:	Eduard Zingerman <eddyz87@gmail.com>
  R:	Song Liu <song@kernel.org>
  R:	Yonghong Song <yonghong.song@linux.dev>
  R:	John Fastabend <john.fastabend@gmail.com>
@@@ -3860,7 -3859,6 +3860,7 @@@ F:	net/unix/unix_bpf.
  
  BPF [LIBRARY] (libbpf)
  M:	Andrii Nakryiko <andrii@kernel.org>
 +M:	Eduard Zingerman <eddyz87@gmail.com>
  L:	bpf@vger.kernel.org
  S:	Maintained
  F:	tools/lib/bpf/
@@@ -3918,7 -3916,6 +3918,7 @@@ F:	security/bpf
  
  BPF [SELFTESTS] (Test Runners & Infrastructure)
  M:	Andrii Nakryiko <andrii@kernel.org>
 +M:	Eduard Zingerman <eddyz87@gmail.com>
  R:	Mykola Lysenko <mykolal@fb.com>
  L:	bpf@vger.kernel.org
  S:	Maintained
@@@ -5961,7 -5958,6 +5961,6 @@@ S:	Maintaine
  F:	drivers/platform/x86/dell/dell-wmi-descriptor.c
  
  DELL WMI HARDWARE PRIVACY SUPPORT
- M:	Perry Yuan <Perry.Yuan@dell.com>
  L:	Dell.Client.Kernel@dell.com
  L:	platform-driver-x86@vger.kernel.org
  S:	Maintained
@@@ -10287,7 -10283,7 +10286,7 @@@ F:	drivers/scsi/ibmvscsi/ibmvscsi
  F:	include/scsi/viosrp.h
  
  IBM Power Virtual SCSI Device Target Driver
- M:	Michael Cyr <mikecyr@linux.ibm.com>
+ M:	Tyrel Datwyler <tyreld@linux.ibm.com>
  L:	linux-scsi@vger.kernel.org
  L:	target-devel@vger.kernel.org
  S:	Supported
@@@ -11729,6 -11725,7 +11728,7 @@@ F:	fs/smb/server
  
  KERNEL UNIT TESTING FRAMEWORK (KUnit)
  M:	Brendan Higgins <brendanhiggins@google.com>
  M:	David Gow <davidgow@google.com>
+ R:	Rae Moar <rmoar@google.com>
  L:	linux-kselftest@vger.kernel.org
  L:	kunit-dev@googlegroups.com
  S:	Maintained
@@@ -12907,6 -12904,8 +12907,8 @@@ M:	Alejandro Colomar <alx@kernel.org
  L:	linux-man@vger.kernel.org
  S:	Maintained
  W:	http://www.kernel.org/doc/man-pages
+ T:	git git://git.kernel.org/pub/scm/docs/man-pages/man-pages.git
+ T:	git git://www.alejandro-colomar.es/src/alx/linux/man-pages/man-pages.git
  
  MANAGEMENT COMPONENT TRANSPORT PROTOCOL (MCTP)
  M:	Jeremy Kerr <jk@codeconstruct.com.au>
@@@ -15182,6 -15181,7 +15184,7 @@@ F:	Documentation/networking/net_cacheli
  F:	drivers/connector/
  F:	drivers/net/
  F:	include/dt-bindings/net/
+ F:	include/linux/cn_proc.h
  F:	include/linux/etherdevice.h
  F:	include/linux/fcdevice.h
  F:	include/linux/fddidevice.h
@@@ -15189,6 -15189,7 +15192,7 @@@ F:	include/linux/hippidevice.
  F:	include/linux/if_*
  F:	include/linux/inetdevice.h
  F:	include/linux/netdevice.h
+ F:	include/uapi/linux/cn_proc.h
  F:	include/uapi/linux/if_*
  F:	include/uapi/linux/netdevice.h
  X:	drivers/net/wireless/
@@@ -18014,13 -18015,6 +18018,13 @@@ T:	git git://git.kernel.org/pub/scm/lin
  F:	Documentation/devicetree/bindings/net/wireless/qca,ath9k.yaml
  F:	drivers/net/wireless/ath/ath9k/
  
 +QUALCOMM ATHEROS QCA7K ETHERNET DRIVER
 +M:	Stefan Wahren <wahrenst@gmx.net>
 +L:	netdev@vger.kernel.org
 +S:	Maintained
 +F:	Documentation/devicetree/bindings/net/qca,qca7000.txt
 +F:	drivers/net/ethernet/qualcomm/qca*
 +
  QUALCOMM BAM-DMUX WWAN NETWORK DRIVER
  M:	Stephan Gerhold <stephan@gerhold.net>
  L:	netdev@vger.kernel.org
@@@ -18093,7 -18087,6 +18097,6 @@@ F:	drivers/net/ethernet/qualcomm/emac
  
  QUALCOMM ETHQOS ETHERNET DRIVER
  M:	Vinod Koul <vkoul@kernel.org>
- R:	Bhupesh Sharma <bhupesh.sharma@linaro.org>
  L:	netdev@vger.kernel.org
  L:	linux-arm-msm@vger.kernel.org
  S:	Maintained
@@@ -20561,6 -20554,7 +20564,7 @@@ F:	Documentation/translations/sp_SP
  
  SPARC + UltraSPARC (sparc/sparc64)
  M:	"David S. Miller" <davem@davemloft.net>
+ M:	Andreas Larsson <andreas@gaisler.com>
  L:	sparclinux@vger.kernel.org
  S:	Maintained
  Q:	http://patchwork.ozlabs.org/project/sparclinux/list/
@@@ -24351,13 -24345,6 +24355,6 @@@ T:	git git://git.kernel.org/pub/scm/lin
  F:	Documentation/filesystems/zonefs.rst
  F:	fs/zonefs/
  
- ZPOOL COMPRESSED PAGE STORAGE API
- M:	Dan Streetman <ddstreet@ieee.org>
- L:	linux-mm@kvack.org
- S:	Maintained
- F:	include/linux/zpool.h
- F:	mm/zpool.c
- 
  ZR36067 VIDEO FOR LINUX DRIVER
  M:	Corentin Labbe <clabbe@baylibre.com>
  L:	mjpeg-users@lists.sourceforge.net
@@@ -24409,7 -24396,9 +24406,9 @@@ M:	Nhat Pham <nphamcs@gmail.com
  L:	linux-mm@kvack.org
  S:	Maintained
  F:	Documentation/admin-guide/mm/zswap.rst
+ F:	include/linux/zpool.h
  F:	include/linux/zswap.h
+ F:	mm/zpool.c
  F:	mm/zswap.c
  
  THE REST

diff --combined drivers/net/dsa/mt7530.c
index 98a73a62f2ee,3c1f657593a8..216596b86de8
--- a/drivers/net/dsa/mt7530.c
+++ b/drivers/net/dsa/mt7530.c
@@@ -487,6 -487,15 +487,6 @@@ mt7530_pad_clk_setup(struct dsa_switch
  	return 0;
  }
-static bool mt7531_dual_sgmii_supported(struct mt7530_priv *priv) -{ - u32 val; - - val = mt7530_read(priv, MT7531_TOP_SIG_SR); - - return (val & PAD_DUAL_SGMII_EN) != 0; -} - static int mt7531_pad_setup(struct dsa_switch *ds, phy_interface_t interface) { @@@ -501,6 -510,9 +501,6 @@@ mt7531_pll_setup(struct mt7530_priv *pr u32 xtal; u32 val;
- if (mt7531_dual_sgmii_supported(priv)) - return; - val = mt7530_read(priv, MT7531_CREV); top_sig = mt7530_read(priv, MT7531_TOP_SIG_SR); hwstrap = mt7530_read(priv, MT7531_HWTRAP); @@@ -908,6 -920,8 +908,6 @@@ static const char *p5_intf_modes(unsign return "PHY P4"; case P5_INTF_SEL_GMAC5: return "GMAC5"; - case P5_INTF_SEL_GMAC5_SGMII: - return "GMAC5_SGMII"; default: return "unknown"; } @@@ -942,6 -956,9 +942,6 @@@ static void mt7530_setup_port5(struct d /* MT7530_P5_MODE_GMAC: P5 -> External phy or 2nd GMAC */ val &= ~MHWTRAP_P5_DIS; break; - case P5_DISABLED: - interface = PHY_INTERFACE_MODE_NA; - break; default: dev_err(ds->dev, "Unsupported p5_intf_sel %d\n", priv->p5_intf_sel); @@@ -975,6 -992,8 +975,6 @@@ dev_dbg(ds->dev, "Setup P5, HWTRAP=0x%x, intf_sel=%s, phy-mode=%s\n", val, p5_intf_modes(priv->p5_intf_sel), phy_modes(interface));
- priv->p5_interface = interface; - unlock_exit: mutex_unlock(&priv->reg_mutex); } @@@ -1016,6 -1035,10 +1016,6 @@@ mt753x_cpu_port_enable(struct dsa_switc mt7530_set(priv, MT7530_MFC, BC_FFP(BIT(port)) | UNM_FFP(BIT(port)) | UNU_FFP(BIT(port)));
- /* Set CPU port number */ - if (priv->id == ID_MT7530 || priv->id == ID_MT7621) - mt7530_rmw(priv, MT7530_MFC, CPU_MASK, CPU_EN | CPU_PORT(port)); - /* Add the CPU port to the CPU port bitmap for MT7531 and the switch on * the MT7988 SoC. Trapped frames will be forwarded to the CPU port that * is affine to the inbound user port. @@@ -2123,40 -2146,24 +2123,40 @@@ mt7530_free_irq_common(struct mt7530_pr static void mt7530_free_irq(struct mt7530_priv *priv) { - mt7530_free_mdio_irq(priv); + struct device_node *mnp, *np = priv->dev->of_node; + + mnp = of_get_child_by_name(np, "mdio"); + if (!mnp) + mt7530_free_mdio_irq(priv); + of_node_put(mnp); + mt7530_free_irq_common(priv); }
static int mt7530_setup_mdio(struct mt7530_priv *priv) { + struct device_node *mnp, *np = priv->dev->of_node; struct dsa_switch *ds = priv->ds; struct device *dev = priv->dev; struct mii_bus *bus; static int idx; - int ret; + int ret = 0; + + mnp = of_get_child_by_name(np, "mdio"); + + if (mnp && !of_device_is_available(mnp)) + goto out;
bus = devm_mdiobus_alloc(dev); - if (!bus) - return -ENOMEM; + if (!bus) { + ret = -ENOMEM; + goto out; + } + + if (!mnp) + ds->user_mii_bus = bus;
- ds->user_mii_bus = bus; bus->priv = priv; bus->name = KBUILD_MODNAME "-mii"; snprintf(bus->id, MII_BUS_ID_SIZE, KBUILD_MODNAME "-%d", idx++); @@@ -2167,18 -2174,16 +2167,18 @@@ bus->parent = dev; bus->phy_mask = ~ds->phys_mii_mask;
- if (priv->irq) + if (priv->irq && !mnp) mt7530_setup_mdio_irq(priv);
- ret = devm_mdiobus_register(dev, bus); + ret = devm_of_mdiobus_register(dev, bus, mnp); if (ret) { dev_err(dev, "failed to register MDIO bus: %d\n", ret); - if (priv->irq) + if (priv->irq && !mnp) mt7530_free_mdio_irq(priv); }
+out: + of_node_put(mnp); return ret; }
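Aside (not part of the patch): the reworked mt7530_setup_mdio() above now distinguishes three probe-time cases depending on the optional "mdio" child node. A stand-alone C restatement of that decision, with the helper and enum names invented purely for illustration:

#include <assert.h>
#include <stdbool.h>

enum mdio_action {
	MDIO_LEGACY_BUS,   /* no "mdio" child: keep ds->user_mii_bus and IRQ mapping */
	MDIO_BUS_FROM_DT,  /* enabled "mdio" child: register the bus on that node */
	MDIO_SKIP,         /* "mdio" child present but status-disabled: register nothing */
};

/* Hypothetical condensation of the branches in mt7530_setup_mdio() */
enum mdio_action classify_mdio_node(bool has_child, bool child_enabled)
{
	if (!has_child)
		return MDIO_LEGACY_BUS;
	return child_enabled ? MDIO_BUS_FROM_DT : MDIO_SKIP;
}

int main(void)
{
	assert(classify_mdio_node(false, false) == MDIO_LEGACY_BUS);
	assert(classify_mdio_node(true, true) == MDIO_BUS_FROM_DT);
	assert(classify_mdio_node(true, false) == MDIO_SKIP);
	return 0;
}

The behavioural point is the middle case: with a usable "mdio" child, the bus is registered against that node via devm_of_mdiobus_register(), so PHYs and their interrupts can be described in the device tree instead of being wired up by the driver.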
@@@ -2321,13 -2326,16 +2321,13 @@@ mt7530_setup(struct dsa_switch *ds return ret;
/* Setup port 5 */ - priv->p5_intf_sel = P5_DISABLED; - interface = PHY_INTERFACE_MODE_NA; - if (!dsa_is_unused_port(ds, 5)) { priv->p5_intf_sel = P5_INTF_SEL_GMAC5; - ret = of_get_phy_mode(dsa_to_port(ds, 5)->dn, &interface); - if (ret && ret != -ENODEV) - return ret; } else { - /* Scan the ethernet nodes. look for GMAC1, lookup used phy */ + /* Scan the ethernet nodes. Look for GMAC1, lookup the used PHY. + * Set priv->p5_intf_sel to the appropriate value if PHY muxing + * is detected. + */ for_each_child_of_node(dn, mac_np) { if (!of_device_is_compatible(mac_np, "mediatek,eth-mac")) @@@ -2358,10 -2366,6 +2358,10 @@@ of_node_put(phy_node); break; } + + if (priv->p5_intf_sel == P5_INTF_SEL_PHY_P0 || + priv->p5_intf_sel == P5_INTF_SEL_PHY_P4) + mt7530_setup_port5(ds, interface); }
#ifdef CONFIG_GPIOLIB @@@ -2372,6 -2376,8 +2372,6 @@@ } #endif /* CONFIG_GPIOLIB */
- mt7530_setup_port5(ds, interface); - /* Flush the FDB table */ ret = mt7530_fdb_cmd(priv, MT7530_FDB_FLUSH, NULL); if (ret < 0) @@@ -2468,12 -2474,6 +2468,12 @@@ mt7531_setup(struct dsa_switch *ds return -ENODEV; }
 +	/* MT7531AE has got two SGMII units. One for port 5, one for port 6.
 +	 * MT7531BE has got only one SGMII unit which is for port 6.
 +	 */
 +	val = mt7530_read(priv, MT7531_TOP_SIG_SR);
 +	priv->p5_sgmii = !!(val & PAD_DUAL_SGMII_EN);
 +
  	/* all MACs must be forced link-down before sw reset */
  	for (i = 0; i < MT7530_NUM_PORTS; i++)
  		mt7530_write(priv, MT7530_PMCR_P(i), MT7531_FORCE_LNK);
@@@ -2483,18 -2483,21 +2483,18 @@@
  		   SYS_CTRL_PHY_RST | SYS_CTRL_SW_RST |
  		   SYS_CTRL_REG_RST);
- mt7531_pll_setup(priv); - - if (mt7531_dual_sgmii_supported(priv)) { - priv->p5_intf_sel = P5_INTF_SEL_GMAC5_SGMII; - + if (!priv->p5_sgmii) { + mt7531_pll_setup(priv); + } else { /* Let ds->user_mii_bus be able to access external phy. */ mt7530_rmw(priv, MT7531_GPIO_MODE1, MT7531_GPIO11_RG_RXD2_MASK, MT7531_EXT_P_MDC_11); mt7530_rmw(priv, MT7531_GPIO_MODE1, MT7531_GPIO12_RG_RXD3_MASK, MT7531_EXT_P_MDIO_12); - } else { - priv->p5_intf_sel = P5_INTF_SEL_GMAC5; } - dev_dbg(ds->dev, "P5 support %s interface\n", - p5_intf_modes(priv->p5_intf_sel)); + + if (!dsa_is_unused_port(ds, 5)) + priv->p5_intf_sel = P5_INTF_SEL_GMAC5;
mt7530_rmw(priv, MT7531_GPIO_MODE0, MT7531_GPIO0_MASK, MT7531_GPIO0_INTERRUPT); @@@ -2532,14 -2535,12 +2532,14 @@@ static void mt7530_mac_port_get_caps(st struct phylink_config *config) { switch (port) { - case 0 ... 4: /* Internal phy */ + /* Ports which are connected to switch PHYs. There is no MII pinout. */ + case 0 ... 4: __set_bit(PHY_INTERFACE_MODE_GMII, config->supported_interfaces); break;
- case 5: /* 2nd cpu port with phy of port 0 or 4 / external phy */ + /* Port 5 supports rgmii with delays, mii, and gmii. */ + case 5: phy_interface_set_rgmii(config->supported_interfaces); __set_bit(PHY_INTERFACE_MODE_MII, config->supported_interfaces); @@@ -2547,8 -2548,7 +2547,8 @@@ config->supported_interfaces); break;
- case 6: /* 1st cpu port */ + /* Port 6 supports rgmii and trgmii. */ + case 6: __set_bit(PHY_INTERFACE_MODE_RGMII, config->supported_interfaces); __set_bit(PHY_INTERFACE_MODE_TRGMII, @@@ -2557,30 -2557,30 +2557,30 @@@ } }
-static bool mt7531_is_rgmii_port(struct mt7530_priv *priv, u32 port) -{ - return (port == 5) && (priv->p5_intf_sel != P5_INTF_SEL_GMAC5_SGMII); -} - static void mt7531_mac_port_get_caps(struct dsa_switch *ds, int port, struct phylink_config *config) { struct mt7530_priv *priv = ds->priv;
switch (port) { - case 0 ... 4: /* Internal phy */ + /* Ports which are connected to switch PHYs. There is no MII pinout. */ + case 0 ... 4: __set_bit(PHY_INTERFACE_MODE_GMII, config->supported_interfaces); break;
- case 5: /* 2nd cpu port supports either rgmii or sgmii/8023z */ - if (mt7531_is_rgmii_port(priv, port)) { + /* Port 5 supports rgmii with delays on MT7531BE, sgmii/802.3z on + * MT7531AE. + */ + case 5: + if (!priv->p5_sgmii) { phy_interface_set_rgmii(config->supported_interfaces); break; } fallthrough;
- case 6: /* 1st cpu port supports sgmii/8023z only */ + /* Port 6 supports sgmii/802.3z. */ + case 6: __set_bit(PHY_INTERFACE_MODE_SGMII, config->supported_interfaces); __set_bit(PHY_INTERFACE_MODE_1000BASEX, @@@ -2599,13 -2599,11 +2599,13 @@@ static void mt7988_mac_port_get_caps(st phy_interface_zero(config->supported_interfaces);
switch (port) { - case 0 ... 4: /* Internal phy */ + /* Ports which are connected to switch PHYs. There is no MII pinout. */ + case 0 ... 4: __set_bit(PHY_INTERFACE_MODE_INTERNAL, config->supported_interfaces); break;
+ /* Port 6 is connected to SoC's XGMII MAC. There is no MII pinout. */ case 6: __set_bit(PHY_INTERFACE_MODE_INTERNAL, config->supported_interfaces); @@@ -2643,7 -2641,7 +2643,7 @@@ static int mt7531_rgmii_setup(struct mt { u32 val;
- if (!mt7531_is_rgmii_port(priv, port)) { + if (priv->p5_sgmii) { dev_err(priv->dev, "RGMII mode is not available for port %d\n", port); return -EINVAL; @@@ -2769,12 -2767,12 +2769,12 @@@ mt753x_phylink_mac_config(struct dsa_sw u32 mcr_cur, mcr_new;
switch (port) { - case 0 ... 4: /* Internal phy */ + case 0 ... 4: if (state->interface != PHY_INTERFACE_MODE_GMII && state->interface != PHY_INTERFACE_MODE_INTERNAL) goto unsupported; break; - case 5: /* 2nd cpu port with phy of port 0 or 4 / external phy */ + case 5: if (priv->p5_interface == state->interface) break;
@@@ -2784,7 -2782,7 +2784,7 @@@ if (priv->p5_intf_sel != P5_DISABLED) priv->p5_interface = state->interface; break; - case 6: /* 1st cpu port */ + case 6: if (priv->p6_interface == state->interface) break;
@@@ -2840,8 -2838,7 +2840,7 @@@ static void mt753x_phylink_mac_link_up( /* MT753x MAC works in 1G full duplex mode for all up-clocked * variants. */ - if (interface == PHY_INTERFACE_MODE_INTERNAL || - interface == PHY_INTERFACE_MODE_TRGMII || + if (interface == PHY_INTERFACE_MODE_TRGMII || (phy_interface_mode_is_8023z(interface))) { speed = SPEED_1000; duplex = DUPLEX_FULL; @@@ -2887,7 -2884,7 +2886,7 @@@ mt7531_cpu_port_config(struct dsa_switc
switch (port) { case 5: - if (mt7531_is_rgmii_port(priv, port)) + if (!priv->p5_sgmii) interface = PHY_INTERFACE_MODE_RGMII; else interface = PHY_INTERFACE_MODE_2500BASEX; @@@ -3039,7 -3036,7 +3038,7 @@@ mt753x_setup(struct dsa_switch *ds mt7530_free_irq_common(priv);
if (priv->create_sgmii) { - ret = priv->create_sgmii(priv, mt7531_dual_sgmii_supported(priv)); + ret = priv->create_sgmii(priv); if (ret && priv->irq) mt7530_free_irq(priv); } @@@ -3048,7 -3045,7 +3047,7 @@@ }
static int mt753x_get_mac_eee(struct dsa_switch *ds, int port, - struct ethtool_eee *e) + struct ethtool_keee *e) { struct mt7530_priv *priv = ds->priv; u32 eeecr = mt7530_read(priv, MT7530_PMEEECR_P(port)); @@@ -3060,7 -3057,7 +3059,7 @@@ }
static int mt753x_set_mac_eee(struct dsa_switch *ds, int port, - struct ethtool_eee *e) + struct ethtool_keee *e) { struct mt7530_priv *priv = ds->priv; u32 set, mask = LPI_THRESH_MASK | LPI_MODE_EN; @@@ -3077,36 -3074,6 +3076,36 @@@ return 0; }
 +static void
 +mt753x_conduit_state_change(struct dsa_switch *ds,
 +			    const struct net_device *conduit,
 +			    bool operational)
 +{
 +	struct dsa_port *cpu_dp = conduit->dsa_ptr;
 +	struct mt7530_priv *priv = ds->priv;
 +	int val = 0;
 +	u8 mask;
 +
 +	/* Set the CPU port to trap frames to for MT7530. Trapped frames will be
 +	 * forwarded to the numerically smallest CPU port whose conduit
 +	 * interface is up.
 +	 */
 +	if (priv->id != ID_MT7530 && priv->id != ID_MT7621)
 +		return;
 +
 +	mask = BIT(cpu_dp->index);
 +
 +	if (operational)
 +		priv->active_cpu_ports |= mask;
 +	else
 +		priv->active_cpu_ports &= ~mask;
 +
 +	if (priv->active_cpu_ports)
 +		val = CPU_EN | CPU_PORT(__ffs(priv->active_cpu_ports));
 +
 +	mt7530_rmw(priv, MT7530_MFC, CPU_EN | CPU_PORT_MASK, val);
 +}
 +
  static int mt7988_pad_setup(struct dsa_switch *ds, phy_interface_t interface)
  {
  	return 0;
@@@ -3162,7 -3129,6 +3161,7 @@@ const struct dsa_switch_ops mt7530_swit
  	.phylink_mac_link_up	= mt753x_phylink_mac_link_up,
  	.get_mac_eee		= mt753x_get_mac_eee,
  	.set_mac_eee		= mt753x_set_mac_eee,
 +	.conduit_state_change	= mt753x_conduit_state_change,
  };
  EXPORT_SYMBOL_GPL(mt7530_switch_ops);
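Aside: the trap-port selection above leans on __ffs(), so the numerically smallest active CPU port wins. A minimal userspace sketch of the same bitmask logic, using __builtin_ctzl() as a stand-in for the kernel's __ffs():

#include <assert.h>

/* __builtin_ctzl() stands in for the kernel's __ffs(); both are undefined
 * for a zero argument, which is why the driver only computes the trap
 * target when active_cpu_ports is non-zero. */
unsigned int pick_trap_port(unsigned long active_cpu_ports)
{
	return (unsigned int)__builtin_ctzl(active_cpu_ports);
}

int main(void)
{
	/* ports 5 and 6 up: trap to port 5; only port 6 up: trap to port 6 */
	assert(pick_trap_port((1UL << 5) | (1UL << 6)) == 5);
	assert(pick_trap_port(1UL << 6) == 6);
	return 0;
}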
diff --combined drivers/net/dsa/mv88e6xxx/chip.c
index 8b0079b8e0e0,614cabb5c1b0..6eec2e4aa031
--- a/drivers/net/dsa/mv88e6xxx/chip.c
+++ b/drivers/net/dsa/mv88e6xxx/chip.c
@@@ -1451,14 -1451,14 +1451,14 @@@ static void mv88e6xxx_get_regs(struct d
  }
  
  static int mv88e6xxx_get_mac_eee(struct dsa_switch *ds, int port,
 -				 struct ethtool_eee *e)
 +				 struct ethtool_keee *e)
  {
  	/* Nothing to do on the port's MAC */
  	return 0;
  }
  
  static int mv88e6xxx_set_mac_eee(struct dsa_switch *ds, int port,
 -				 struct ethtool_eee *e)
 +				 struct ethtool_keee *e)
  {
  	/* Nothing to do on the port's MAC */
  	return 0;
@@@ -3659,7 -3659,7 +3659,7 @@@ static int mv88e6xxx_mdio_read_c45(stru
  	int err;
  
  	if (!chip->info->ops->phy_read_c45)
- 		return -EOPNOTSUPP;
+ 		return 0xffff;
  
  	mv88e6xxx_reg_lock(chip);
  	err = chip->info->ops->phy_read_c45(chip, bus, phy, devad, reg, &val);

diff --combined drivers/net/ethernet/google/gve/gve_rx.c
index c32b3d40beb0,76615d47e055..20f5a9e7fae9
--- a/drivers/net/ethernet/google/gve/gve_rx.c
+++ b/drivers/net/ethernet/google/gve/gve_rx.c
@@@ -23,9 -23,7 +23,9 @@@ static void gve_rx_free_buffer(struct d
  	gve_free_page(dev, page_info->page, dma, DMA_FROM_DEVICE);
  }
-static void gve_rx_unfill_pages(struct gve_priv *priv, struct gve_rx_ring *rx) +static void gve_rx_unfill_pages(struct gve_priv *priv, + struct gve_rx_ring *rx, + struct gve_rx_alloc_rings_cfg *cfg) { u32 slots = rx->mask + 1; int i; @@@ -38,7 -36,7 +38,7 @@@ for (i = 0; i < slots; i++) page_ref_sub(rx->data.page_info[i].page, rx->data.page_info[i].pagecnt_bias - 1); - gve_unassign_qpl(priv, rx->data.qpl->id); + gve_unassign_qpl(cfg->qpl_cfg, rx->data.qpl->id); rx->data.qpl = NULL;
for (i = 0; i < rx->qpl_copy_pool_mask + 1; i++) { @@@ -51,26 -49,16 +51,26 @@@ rx->data.page_info = NULL; }
-static void gve_rx_free_ring(struct gve_priv *priv, int idx) +void gve_rx_stop_ring_gqi(struct gve_priv *priv, int idx) +{ + int ntfy_idx = gve_rx_idx_to_ntfy(priv, idx); + + if (!gve_rx_was_added_to_block(priv, idx)) + return; + + gve_remove_napi(priv, ntfy_idx); + gve_rx_remove_from_block(priv, idx); +} + +static void gve_rx_free_ring_gqi(struct gve_priv *priv, struct gve_rx_ring *rx, + struct gve_rx_alloc_rings_cfg *cfg) { - struct gve_rx_ring *rx = &priv->rx[idx]; struct device *dev = &priv->pdev->dev; u32 slots = rx->mask + 1; + int idx = rx->q_num; size_t bytes;
- gve_rx_remove_from_block(priv, idx); - - bytes = sizeof(struct gve_rx_desc) * priv->rx_desc_cnt; + bytes = sizeof(struct gve_rx_desc) * cfg->ring_size; dma_free_coherent(dev, bytes, rx->desc.desc_ring, rx->desc.bus); rx->desc.desc_ring = NULL;
@@@ -78,7 -66,7 +78,7 @@@ rx->q_resources, rx->q_resources_bus); rx->q_resources = NULL;
- gve_rx_unfill_pages(priv, rx); + gve_rx_unfill_pages(priv, rx, cfg);
bytes = sizeof(*rx->data.data_ring) * slots; dma_free_coherent(dev, bytes, rx->data.data_ring, @@@ -105,8 -93,7 +105,8 @@@ static void gve_setup_rx_buffer(struct
static int gve_rx_alloc_buffer(struct gve_priv *priv, struct device *dev, struct gve_rx_slot_page_info *page_info, - union gve_rx_data_slot *data_slot) + union gve_rx_data_slot *data_slot, + struct gve_rx_ring *rx) { struct page *page; dma_addr_t dma; @@@ -114,19 -101,14 +114,19 @@@
err = gve_alloc_page(priv, dev, &page, &dma, DMA_FROM_DEVICE, GFP_ATOMIC); - if (err) + if (err) { + u64_stats_update_begin(&rx->statss); + rx->rx_buf_alloc_fail++; + u64_stats_update_end(&rx->statss); return err; + }
gve_setup_rx_buffer(page_info, dma, page, &data_slot->addr); return 0; }
-static int gve_prefill_rx_pages(struct gve_rx_ring *rx) +static int gve_rx_prefill_pages(struct gve_rx_ring *rx, + struct gve_rx_alloc_rings_cfg *cfg) { struct gve_priv *priv = rx->gve; u32 slots; @@@ -145,7 -127,7 +145,7 @@@ return -ENOMEM;
if (!rx->data.raw_addressing) { - rx->data.qpl = gve_assign_rx_qpl(priv, rx->q_num); + rx->data.qpl = gve_assign_rx_qpl(cfg, rx->q_num); if (!rx->data.qpl) { kvfree(rx->data.page_info); rx->data.page_info = NULL; @@@ -161,9 -143,8 +161,9 @@@ &rx->data.data_ring[i].qpl_offset); continue; } - err = gve_rx_alloc_buffer(priv, &priv->pdev->dev, &rx->data.page_info[i], - &rx->data.data_ring[i]); + err = gve_rx_alloc_buffer(priv, &priv->pdev->dev, + &rx->data.page_info[i], + &rx->data.data_ring[i], rx); if (err) goto alloc_err_rda; } @@@ -204,7 -185,7 +204,7 @@@ alloc_err_qpl page_ref_sub(rx->data.page_info[i].page, rx->data.page_info[i].pagecnt_bias - 1);
- gve_unassign_qpl(priv, rx->data.qpl->id); + gve_unassign_qpl(cfg->qpl_cfg, rx->data.qpl->id); rx->data.qpl = NULL;
return err; @@@ -226,23 -207,13 +226,23 @@@ static void gve_rx_ctx_clear(struct gve ctx->drop_pkt = false; }
-static int gve_rx_alloc_ring(struct gve_priv *priv, int idx) +void gve_rx_start_ring_gqi(struct gve_priv *priv, int idx) +{ + int ntfy_idx = gve_rx_idx_to_ntfy(priv, idx); + + gve_rx_add_to_block(priv, idx); + gve_add_napi(priv, ntfy_idx, gve_napi_poll); +} + +static int gve_rx_alloc_ring_gqi(struct gve_priv *priv, + struct gve_rx_alloc_rings_cfg *cfg, + struct gve_rx_ring *rx, + int idx) { - struct gve_rx_ring *rx = &priv->rx[idx]; struct device *hdev = &priv->pdev->dev; + u32 slots = priv->rx_data_slot_cnt; int filled_pages; size_t bytes; - u32 slots; int err;
netif_dbg(priv, drv, priv->dev, "allocating rx ring\n"); @@@ -252,8 -223,9 +252,8 @@@ rx->gve = priv; rx->q_num = idx;
- slots = priv->rx_data_slot_cnt; rx->mask = slots - 1; - rx->data.raw_addressing = priv->queue_format == GVE_GQI_RDA_FORMAT; + rx->data.raw_addressing = cfg->raw_addressing;
/* alloc rx data ring */ bytes = sizeof(*rx->data.data_ring) * slots; @@@ -274,7 -246,7 +274,7 @@@ goto abort_with_slots; }
- filled_pages = gve_prefill_rx_pages(rx); + filled_pages = gve_rx_prefill_pages(rx, cfg); if (filled_pages < 0) { err = -ENOMEM; goto abort_with_copy_pool; @@@ -297,7 -269,7 +297,7 @@@ (unsigned long)rx->data.data_bus);
/* alloc rx desc ring */ - bytes = sizeof(struct gve_rx_desc) * priv->rx_desc_cnt; + bytes = sizeof(struct gve_rx_desc) * cfg->ring_size; rx->desc.desc_ring = dma_alloc_coherent(hdev, bytes, &rx->desc.bus, GFP_KERNEL); if (!rx->desc.desc_ring) { @@@ -305,11 -277,15 +305,11 @@@ goto abort_with_q_resources; } rx->cnt = 0; - rx->db_threshold = priv->rx_desc_cnt / 2; + rx->db_threshold = slots / 2; rx->desc.seqno = 1;
- /* Allocating half-page buffers allows page-flipping which is faster - * than copying or allocating new pages. - */ rx->packet_buffer_size = GVE_DEFAULT_RX_BUFFER_SIZE; gve_rx_ctx_clear(&rx->ctx); - gve_rx_add_to_block(priv, idx);
return 0;
@@@ -318,7 -294,7 +318,7 @@@ abort_with_q_resources rx->q_resources, rx->q_resources_bus); rx->q_resources = NULL; abort_filled: - gve_rx_unfill_pages(priv, rx); + gve_rx_unfill_pages(priv, rx, cfg); abort_with_copy_pool: kvfree(rx->qpl_copy_pool); rx->qpl_copy_pool = NULL; @@@ -330,58 -306,36 +330,58 @@@ abort_with_slots return err; }
-int gve_rx_alloc_rings(struct gve_priv *priv) +int gve_rx_alloc_rings_gqi(struct gve_priv *priv, + struct gve_rx_alloc_rings_cfg *cfg) { + struct gve_rx_ring *rx; int err = 0; - int i; + int i, j; + + if (!cfg->raw_addressing && !cfg->qpls) { + netif_err(priv, drv, priv->dev, + "Cannot alloc QPL ring before allocing QPLs\n"); + return -EINVAL; + }
- for (i = 0; i < priv->rx_cfg.num_queues; i++) { - err = gve_rx_alloc_ring(priv, i); + rx = kvcalloc(cfg->qcfg->max_queues, sizeof(struct gve_rx_ring), + GFP_KERNEL); + if (!rx) + return -ENOMEM; + + for (i = 0; i < cfg->qcfg->num_queues; i++) { + err = gve_rx_alloc_ring_gqi(priv, cfg, &rx[i], i); if (err) { netif_err(priv, drv, priv->dev, "Failed to alloc rx ring=%d: err=%d\n", i, err); - break; + goto cleanup; } } - /* Unallocate if there was an error */ - if (err) { - int j;
- for (j = 0; j < i; j++) - gve_rx_free_ring(priv, j); - } + cfg->rx = rx; + return 0; + +cleanup: + for (j = 0; j < i; j++) + gve_rx_free_ring_gqi(priv, &rx[j], cfg); + kvfree(rx); return err; }
-void gve_rx_free_rings_gqi(struct gve_priv *priv) +void gve_rx_free_rings_gqi(struct gve_priv *priv, + struct gve_rx_alloc_rings_cfg *cfg) { + struct gve_rx_ring *rx = cfg->rx; int i;
- for (i = 0; i < priv->rx_cfg.num_queues; i++) - gve_rx_free_ring(priv, i); + if (!rx) + return; + + for (i = 0; i < cfg->qcfg->num_queues; i++) + gve_rx_free_ring_gqi(priv, &rx[i], cfg); + + kvfree(rx); + cfg->rx = NULL; }
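Aside: throughout this gve refactor, ring allocation and freeing take a gve_rx_alloc_rings_cfg rather than reading sizes out of gve_priv, so a new ring set can be built, and abandoned on failure, without disturbing the live one. A condensed sketch of that alloc-into-cfg shape, with reduced, hypothetical types:

#include <stdlib.h>

struct ring { int q_num; };
struct rings_cfg { int num_queues; struct ring *rx; };

/* Allocate into the cfg rather than the live device state, so a failure
 * leaves the running rings untouched (shape of gve_rx_alloc_rings_gqi()). */
int rings_alloc(struct rings_cfg *cfg)
{
	struct ring *rx = calloc((size_t)cfg->num_queues, sizeof(*rx));

	if (!rx)
		return -1;
	for (int i = 0; i < cfg->num_queues; i++)
		rx[i].q_num = i;
	cfg->rx = rx;
	return 0;
}

/* Symmetric teardown, as in gve_rx_free_rings_gqi() */
void rings_free(struct rings_cfg *cfg)
{
	free(cfg->rx);
	cfg->rx = NULL;
}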
void gve_rx_write_doorbell(struct gve_priv *priv, struct gve_rx_ring *rx) @@@ -402,7 -356,7 +402,7 @@@ static enum pkt_hash_types gve_rss_type
static struct sk_buff *gve_rx_add_frags(struct napi_struct *napi, struct gve_rx_slot_page_info *page_info, - u16 packet_buffer_size, u16 len, + unsigned int truesize, u16 len, struct gve_rx_ctx *ctx) { u32 offset = page_info->page_offset + page_info->pad; @@@ -435,10 -389,10 +435,10 @@@ if (skb != ctx->skb_head) { ctx->skb_head->len += len; ctx->skb_head->data_len += len; - ctx->skb_head->truesize += packet_buffer_size; + ctx->skb_head->truesize += truesize; } skb_add_rx_frag(skb, num_frags, page_info->page, - offset, len, packet_buffer_size); + offset, len, truesize);
return ctx->skb_head; } @@@ -532,7 -486,7 +532,7 @@@ static struct sk_buff *gve_rx_copy_to_p
memcpy(alloc_page_info.page_address, src, page_info->pad + len); skb = gve_rx_add_frags(napi, &alloc_page_info, - rx->packet_buffer_size, + PAGE_SIZE, len, ctx);
  	u64_stats_update_begin(&rx->statss);
@@@ -942,7 -896,10 +942,7 @@@ static bool gve_rx_refill_buffers(struc
  			gve_rx_free_buffer(dev, page_info, data_slot);
  			page_info->page = NULL;
  			if (gve_rx_alloc_buffer(priv, dev, page_info,
 -						data_slot)) {
 -				u64_stats_update_begin(&rx->statss);
 -				rx->rx_buf_alloc_fail++;
 -				u64_stats_update_end(&rx->statss);
 +						data_slot, rx)) {
  				break;
  			}
  		}

diff --combined drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 33509237fe60,25519952f754..04d817dc5899
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@@ -2507,13 -2507,6 +2507,13 @@@ static bool stmmac_xdp_xmit_zc(struct s
  		if (!xsk_tx_peek_desc(pool, &xdp_desc))
  			break;
 +		if (priv->plat->est && priv->plat->est->enable &&
 +		    priv->plat->est->max_sdu[queue] &&
 +		    xdp_desc.len > priv->plat->est->max_sdu[queue]) {
 +			priv->xstats.max_sdu_txq_drop[queue]++;
 +			continue;
 +		}
 +
  		if (likely(priv->extend_desc))
  			tx_desc = (struct dma_desc *)(tx_q->dma_etx + entry);
  		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
@@@ -3939,6 -3932,9 +3939,9 @@@ static int __stmmac_open(struct net_dev
  	priv->rx_copybreak = STMMAC_RX_COPYBREAK;
  	buf_sz = dma_conf->dma_buf_sz;
+ 	for (int i = 0; i < MTL_MAX_TX_QUEUES; i++)
+ 		if (priv->dma_conf.tx_queue[i].tbs & STMMAC_TBS_EN)
+ 			dma_conf->tx_queue[i].tbs = priv->dma_conf.tx_queue[i].tbs;
  	memcpy(&priv->dma_conf, dma_conf, sizeof(*dma_conf));
  
  	stmmac_reset_queues_param(priv);
@@@ -4505,13 -4501,6 +4508,13 @@@ static netdev_tx_t stmmac_xmit(struct s
  		return stmmac_tso_xmit(skb, dev);
  	}
 +	if (priv->plat->est && priv->plat->est->enable &&
 +	    priv->plat->est->max_sdu[queue] &&
 +	    skb->len > priv->plat->est->max_sdu[queue]) {
 +		priv->xstats.max_sdu_txq_drop[queue]++;
 +		goto max_sdu_err;
 +	}
 +
  	if (unlikely(stmmac_tx_avail(priv, queue) < nfrags + 1)) {
  		if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
  			netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
@@@ -4729,7 -4718,6 +4732,7 @@@
  
  dma_map_err:
  	netdev_err(priv->dev, "Tx DMA map failed\n");
 +max_sdu_err:
  	dev_kfree_skb(skb);
  	priv->xstats.tx_dropped++;
  	return NETDEV_TX_OK;
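Aside: the same queueMaxSDU gate is added to all three stmmac transmit paths in this series (XSK zero-copy, regular xmit, XDP). A stand-alone restatement of the check, with field names taken from the diff but the surrounding types reduced to a hypothetical minimum:

#include <stdbool.h>
#include <stddef.h>

struct est_cfg {
	bool enable;
	size_t max_sdu[8];	/* per-queue limit; 0 means "no limit" */
};

/* One place for the gate duplicated across the three TX paths; the caller
 * must pass a queue index within bounds. */
bool est_frame_too_big(const struct est_cfg *est, unsigned int queue,
		       size_t frame_len)
{
	return est && est->enable && est->max_sdu[queue] &&
	       frame_len > est->max_sdu[queue];
}

Oversized frames are counted per queue in max_sdu_txq_drop and dropped rather than queued, matching IEEE 802.1Q queueMaxSDU semantics.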
 +	if (priv->plat->est && priv->plat->est->enable &&
 +	    priv->plat->est->max_sdu[queue] &&
 +	    xdpf->len > priv->plat->est->max_sdu[queue]) {
 +		priv->xstats.max_sdu_txq_drop[queue]++;
 +		return STMMAC_XDP_CONSUMED;
 +	}
 +
  	if (likely(priv->extend_desc))
  		tx_desc = (struct dma_desc *)(tx_q->dma_etx + entry);
  	else if (tx_q->tbs & STMMAC_TBS_AVAIL)

diff --combined include/linux/lsm_hook_defs.h
index cd6fbc7af3f8,76458b6d53da..642272576582
--- a/include/linux/lsm_hook_defs.h
+++ b/include/linux/lsm_hook_defs.h
@@@ -315,9 -315,9 +315,9 @@@ LSM_HOOK(int, 0, socket_getsockopt, str
  LSM_HOOK(int, 0, socket_setsockopt, struct socket *sock, int level, int optname)
  LSM_HOOK(int, 0, socket_shutdown, struct socket *sock, int how)
  LSM_HOOK(int, 0, socket_sock_rcv_skb, struct sock *sk, struct sk_buff *skb)
- LSM_HOOK(int, 0, socket_getpeersec_stream, struct socket *sock,
+ LSM_HOOK(int, -ENOPROTOOPT, socket_getpeersec_stream, struct socket *sock,
  	 sockptr_t optval, sockptr_t optlen, unsigned int len)
- LSM_HOOK(int, 0, socket_getpeersec_dgram, struct socket *sock,
+ LSM_HOOK(int, -ENOPROTOOPT, socket_getpeersec_dgram, struct socket *sock,
  	 struct sk_buff *skb, u32 *secid)
  LSM_HOOK(int, 0, sk_alloc_security, struct sock *sk, int family, gfp_t priority)
  LSM_HOOK(void, LSM_RET_VOID, sk_free_security, struct sock *sk)
@@@ -404,17 -404,10 +404,17 @@@ LSM_HOOK(void, LSM_RET_VOID, audit_rule
  LSM_HOOK(int, 0, bpf, int cmd, union bpf_attr *attr, unsigned int size)
  LSM_HOOK(int, 0, bpf_map, struct bpf_map *map, fmode_t fmode)
  LSM_HOOK(int, 0, bpf_prog, struct bpf_prog *prog)
 -LSM_HOOK(int, 0, bpf_map_alloc_security, struct bpf_map *map)
 -LSM_HOOK(void, LSM_RET_VOID, bpf_map_free_security, struct bpf_map *map)
 -LSM_HOOK(int, 0, bpf_prog_alloc_security, struct bpf_prog_aux *aux)
 -LSM_HOOK(void, LSM_RET_VOID, bpf_prog_free_security, struct bpf_prog_aux *aux)
 +LSM_HOOK(int, 0, bpf_map_create, struct bpf_map *map, union bpf_attr *attr,
 +	 struct bpf_token *token)
 +LSM_HOOK(void, LSM_RET_VOID, bpf_map_free, struct bpf_map *map)
 +LSM_HOOK(int, 0, bpf_prog_load, struct bpf_prog *prog, union bpf_attr *attr,
 +	 struct bpf_token *token)
 +LSM_HOOK(void, LSM_RET_VOID, bpf_prog_free, struct bpf_prog *prog)
 +LSM_HOOK(int, 0, bpf_token_create, struct bpf_token *token, union bpf_attr *attr,
 +	 struct path *path)
 +LSM_HOOK(void, LSM_RET_VOID, bpf_token_free, struct bpf_token *token)
 +LSM_HOOK(int, 0, bpf_token_cmd, const struct bpf_token *token, enum bpf_cmd cmd)
 +LSM_HOOK(int, 0, bpf_token_capable, const struct bpf_token *token, int cap)
  #endif /* CONFIG_BPF_SYSCALL */
  
  LSM_HOOK(int, 0, locked_down, enum lockdown_reason what)

diff --combined include/net/af_unix.h
index 54e346152eb1,afd40dce40f3..627ea8e2d915
--- a/include/net/af_unix.h
+++ b/include/net/af_unix.h
@@@ -8,29 -8,21 +8,29 @@@
  #include <linux/refcount.h>
  #include <net/sock.h>
+#if IS_ENABLED(CONFIG_UNIX) +struct unix_sock *unix_get_socket(struct file *filp); +#else +static inline struct unix_sock *unix_get_socket(struct file *filp) +{ + return NULL; +} +#endif + +extern spinlock_t unix_gc_lock; +extern unsigned int unix_tot_inflight; + void unix_inflight(struct user_struct *user, struct file *fp); void unix_notinflight(struct user_struct *user, struct file *fp); -void unix_destruct_scm(struct sk_buff *skb); -void io_uring_destruct_scm(struct sk_buff *skb); void unix_gc(void); -void wait_for_unix_gc(void); -struct sock *unix_get_socket(struct file *filp); +void wait_for_unix_gc(struct scm_fp_list *fpl); + struct sock *unix_peer_get(struct sock *sk);
#define UNIX_HASH_MOD (256 - 1) #define UNIX_HASH_SIZE (256 * 2) #define UNIX_HASH_BITS 8
-extern unsigned int unix_tot_inflight; - struct unix_address { refcount_t refcnt; int len; @@@ -54,12 -46,6 +54,6 @@@ struct scm_stat
#define UNIXCB(skb) (*(struct unix_skb_parms *)&((skb)->cb))
- #define unix_state_lock(s)	spin_lock(&unix_sk(s)->lock)
- #define unix_state_unlock(s)	spin_unlock(&unix_sk(s)->lock)
- #define unix_state_lock_nested(s) \
- 				spin_lock_nested(&unix_sk(s)->lock, \
- 				SINGLE_DEPTH_NESTING)
- 
  /* The AF_UNIX socket */
  struct unix_sock {
  	/* WARNING: sk has to be the first member */
@@@ -69,7 -55,7 +63,7 @@@
  	struct mutex		iolock, bindlock;
  	struct sock		*peer;
  	struct list_head	link;
 -	atomic_long_t		inflight;
 +	unsigned long		inflight;
  	spinlock_t		lock;
  	unsigned long		gc_flags;
  #define UNIX_GC_CANDIDATE	0
@@@ -85,6 -71,20 +79,20 @@@
  #define unix_sk(ptr) container_of_const(ptr, struct unix_sock, sk)
  #define unix_peer(sk) (unix_sk(sk)->peer)
  
+ #define unix_state_lock(s)	spin_lock(&unix_sk(s)->lock)
+ #define unix_state_unlock(s)	spin_unlock(&unix_sk(s)->lock)
+ enum unix_socket_lock_class {
+ 	U_LOCK_NORMAL,
+ 	U_LOCK_SECOND,	/* for double locking, see unix_state_double_lock(). */
+ 	U_LOCK_DIAG,	/* used while dumping icons, see sk_diag_dump_icons(). */
+ };
+ 
+ static inline void unix_state_lock_nested(struct sock *sk,
+ 					  enum unix_socket_lock_class subclass)
+ {
+ 	spin_lock_nested(&unix_sk(sk)->lock, subclass);
+ }
+ 
  #define peer_wait peer_wq.wait
  
  long unix_inq_len(struct sock *sk);

diff --combined include/net/netfilter/nf_tables.h
index ac7c94d3648e,001226c34621..f3a7c4b1dd63
--- a/include/net/netfilter/nf_tables.h
+++ b/include/net/netfilter/nf_tables.h
@@@ -1271,12 -1271,6 +1271,12 @@@ static inline bool nft_table_has_owner(
  	return table->flags & NFT_TABLE_F_OWNER;
  }
 +static inline bool nft_table_is_orphan(const struct nft_table *table)
 +{
 +	return (table->flags & (NFT_TABLE_F_OWNER | NFT_TABLE_F_PERSIST)) ==
 +			NFT_TABLE_F_PERSIST;
 +}
 +
  static inline bool nft_base_chain_netdev(int family, u32 hooknum)
  {
  	return family == NFPROTO_NETDEV ||
@@@ -1357,6 -1351,7 +1357,7 @@@ void nft_obj_notify(struct net *net, co
   *	@type: stateful object numeric type
   *	@owner: module owner
   *	@maxattr: maximum netlink attribute
+  *	@family: address family for AF-specific object types
   *	@policy: netlink attribute policy
   */
  struct nft_object_type {
@@@ -1366,6 -1361,7 +1367,7 @@@
  	struct list_head		list;
  	u32				type;
  	unsigned int			maxattr;
+ 	u8				family;
  	struct module			*owner;
  	const struct nla_policy		*policy;
  };

diff --combined net/netfilter/nf_tables_api.c
index 7f25a04e4b81,fc016befb46f..b68d1e59c786
--- a/net/netfilter/nf_tables_api.c
+++ b/net/netfilter/nf_tables_api.c
@@@ -1194,10 -1194,8 +1194,10 @@@ static void nf_tables_table_disable(str
  
  #define __NFT_TABLE_F_INTERNAL		(NFT_TABLE_F_MASK + 1)
  #define __NFT_TABLE_F_WAS_DORMANT	(__NFT_TABLE_F_INTERNAL << 0)
  #define __NFT_TABLE_F_WAS_AWAKEN	(__NFT_TABLE_F_INTERNAL << 1)
 +#define __NFT_TABLE_F_WAS_ORPHAN	(__NFT_TABLE_F_INTERNAL << 2)
  #define __NFT_TABLE_F_UPDATE		(__NFT_TABLE_F_WAS_DORMANT | \
 -					 __NFT_TABLE_F_WAS_AWAKEN)
 +					 __NFT_TABLE_F_WAS_AWAKEN | \
 +					 __NFT_TABLE_F_WAS_ORPHAN)
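Aside: nft_table_is_orphan() above encodes "persisted past its owner" as PERSIST set with OWNER clear. A tiny self-contained model of the predicate (flag values invented for the sketch; the kernel's NFT_TABLE_F_* constants differ):

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

#define TBL_F_OWNER   UINT32_C(0x1)	/* illustrative values only */
#define TBL_F_PERSIST UINT32_C(0x2)

bool table_is_orphan(uint32_t flags)
{
	return (flags & (TBL_F_OWNER | TBL_F_PERSIST)) == TBL_F_PERSIST;
}

int main(void)
{
	assert(!table_is_orphan(TBL_F_OWNER | TBL_F_PERSIST)); /* still owned */
	assert(table_is_orphan(TBL_F_PERSIST)); /* owner exited; may be re-claimed */
	assert(!table_is_orphan(0)); /* plain unowned table, not orphaned */
	return 0;
}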
static int nf_tables_updtable(struct nft_ctx *ctx) { @@@ -1217,11 -1215,8 +1217,11 @@@
if ((nft_table_has_owner(ctx->table) && !(flags & NFT_TABLE_F_OWNER)) || - (!nft_table_has_owner(ctx->table) && - flags & NFT_TABLE_F_OWNER)) + (flags & NFT_TABLE_F_OWNER && + !nft_table_is_orphan(ctx->table))) + return -EOPNOTSUPP; + + if ((flags ^ ctx->table->flags) & NFT_TABLE_F_PERSIST) return -EOPNOTSUPP;
/* No dormant off/on/off/on games in single transaction */ @@@ -1250,13 -1245,6 +1250,13 @@@ } }
+ if ((flags & NFT_TABLE_F_OWNER) && + !nft_table_has_owner(ctx->table)) { + ctx->table->nlpid = ctx->portid; + ctx->table->flags |= NFT_TABLE_F_OWNER | + __NFT_TABLE_F_WAS_ORPHAN; + } + nft_trans_table_update(trans) = true; nft_trans_commit_list_add_tail(ctx->net, trans);
@@@ -4247,18 -4235,23 +4247,18 @@@ static bool nft_set_ops_candidate(cons * given, in that case the amount of memory per element is used. */ static const struct nft_set_ops * -nft_select_set_ops(const struct nft_ctx *ctx, - const struct nlattr * const nla[], +nft_select_set_ops(const struct nft_ctx *ctx, u32 flags, const struct nft_set_desc *desc) { struct nftables_pernet *nft_net = nft_pernet(ctx->net); const struct nft_set_ops *ops, *bops; struct nft_set_estimate est, best; const struct nft_set_type *type; - u32 flags = 0; int i;
lockdep_assert_held(&nft_net->commit_mutex); lockdep_nfnl_nft_mutex_not_held();
- if (nla[NFTA_SET_FLAGS] != NULL) - flags = ntohl(nla_get_be32(nla[NFTA_SET_FLAGS])); - bops = NULL; best.size = ~0; best.lookup = ~0; @@@ -5144,7 -5137,7 +5144,7 @@@ static int nf_tables_newset(struct sk_b if (!(info->nlh->nlmsg_flags & NLM_F_CREATE)) return -ENOENT;
- ops = nft_select_set_ops(&ctx, nla, &desc); + ops = nft_select_set_ops(&ctx, flags, &desc); if (IS_ERR(ops)) return PTR_ERR(ops);
@@@ -7558,11 -7551,15 +7558,15 @@@ nla_put_failure return -1; }
- static const struct nft_object_type *__nft_obj_type_get(u32 objtype) + static const struct nft_object_type *__nft_obj_type_get(u32 objtype, u8 family) { const struct nft_object_type *type;
list_for_each_entry(type, &nf_tables_objects, list) { + if (type->family != NFPROTO_UNSPEC && + type->family != family) + continue; + if (objtype == type->type) return type; } @@@ -7570,11 -7567,11 +7574,11 @@@ }
static const struct nft_object_type * - nft_obj_type_get(struct net *net, u32 objtype) + nft_obj_type_get(struct net *net, u32 objtype, u8 family) { const struct nft_object_type *type;
- type = __nft_obj_type_get(objtype); + type = __nft_obj_type_get(objtype, family); if (type != NULL && try_module_get(type->owner)) return type;
@@@ -7667,7 -7664,7 +7671,7 @@@ static int nf_tables_newobj(struct sk_b if (info->nlh->nlmsg_flags & NLM_F_REPLACE) return -EOPNOTSUPP;
- type = __nft_obj_type_get(objtype); + type = __nft_obj_type_get(objtype, family); if (WARN_ON_ONCE(!type)) return -ENOENT;
@@@ -7681,7 -7678,7 +7685,7 @@@ if (!nft_use_inc(&table->use)) return -EMFILE;
- 	type = nft_obj_type_get(net, objtype);
+ 	type = nft_obj_type_get(net, objtype, family);
  	if (IS_ERR(type)) {
  		err = PTR_ERR(type);
  		goto err_type;
@@@ -10427,10 -10424,6 +10431,10 @@@ static int __nf_tables_abort(struct ne
  				} else if (trans->ctx.table->flags & __NFT_TABLE_F_WAS_AWAKEN) {
  					trans->ctx.table->flags &= ~NFT_TABLE_F_DORMANT;
  				}
 +				if (trans->ctx.table->flags & __NFT_TABLE_F_WAS_ORPHAN) {
 +					trans->ctx.table->flags &= ~NFT_TABLE_F_OWNER;
 +					trans->ctx.table->nlpid = 0;
 +				}
  				trans->ctx.table->flags &= ~__NFT_TABLE_F_UPDATE;
  				nft_trans_destroy(trans);
  			} else {
@@@ -11356,10 -11349,6 +11360,10 @@@ again
  	list_for_each_entry(table, &nft_net->tables, list) {
  		if (nft_table_has_owner(table) &&
  		    n->portid == table->nlpid) {
 +			if (table->flags & NFT_TABLE_F_PERSIST) {
 +				table->flags &= ~NFT_TABLE_F_OWNER;
 +				continue;
 +			}
  			__nft_release_hook(net, table);
  			list_del_rcu(&table->list);
  			to_delete[deleted++] = table;

diff --combined net/unix/af_unix.c
index 1cfbc586adb4,30b178ebba60..4892e9428c9f
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@@ -118,6 -118,8 +118,6 @@@
  #include <linux/btf_ids.h>
  #include <linux/bpf-cgroup.h>
-#include "scm.h" - static atomic_long_t unix_nr_socks; static struct hlist_head bsd_socket_buckets[UNIX_HASH_SIZE / 2]; static spinlock_t bsd_socket_locks[UNIX_HASH_SIZE / 2]; @@@ -991,11 -993,11 +991,11 @@@ static struct sock *unix_create1(struc sk->sk_write_space = unix_write_space; sk->sk_max_ack_backlog = net->unx.sysctl_max_dgram_qlen; sk->sk_destruct = unix_sock_destructor; - u = unix_sk(sk); + u = unix_sk(sk); + u->inflight = 0; u->path.dentry = NULL; u->path.mnt = NULL; spin_lock_init(&u->lock); - atomic_long_set(&u->inflight, 0); INIT_LIST_HEAD(&u->link); mutex_init(&u->iolock); /* single task reading lock */ mutex_init(&u->bindlock); /* single task binding lock */ @@@ -1342,13 -1344,11 +1342,11 @@@ static void unix_state_double_lock(stru unix_state_lock(sk1); return; } - if (sk1 < sk2) { - unix_state_lock(sk1); - unix_state_lock_nested(sk2); - } else { - unix_state_lock(sk2); - unix_state_lock_nested(sk1); - } + if (sk1 > sk2) + swap(sk1, sk2); + + unix_state_lock(sk1); + unix_state_lock_nested(sk2, U_LOCK_SECOND); }
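Aside: unix_state_double_lock() now orders the two locks by address and takes the second with an explicit U_LOCK_SECOND subclass, which keeps lockdep happy while still guaranteeing a single global acquisition order. A userspace analogue of the ordering half, using pthreads in place of spinlocks (lockdep subclasses have no direct equivalent here):

#include <pthread.h>

struct usock { pthread_mutex_t lock; };

/* Order the pair by address so every caller acquires in the same global
 * order; the kernel additionally tags the second acquisition with the
 * U_LOCK_SECOND subclass so lockdep can tell the two apart. */
void usock_double_lock(struct usock *a, struct usock *b)
{
	if (a == b) {
		pthread_mutex_lock(&a->lock);
		return;
	}
	if (a > b) {
		struct usock *tmp = a;

		a = b;
		b = tmp;
	}
	pthread_mutex_lock(&a->lock);
	pthread_mutex_lock(&b->lock);
}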
static void unix_state_double_unlock(struct sock *sk1, struct sock *sk2) @@@ -1589,7 -1589,7 +1587,7 @@@ restart goto out_unlock; }
- unix_state_lock_nested(sk); + unix_state_lock_nested(sk, U_LOCK_SECOND);
if (sk->sk_state != st) { unix_state_unlock(sk); @@@ -1788,52 -1788,6 +1786,52 @@@ out return err; }
+/* The "user->unix_inflight" variable is protected by the garbage + * collection lock, and we just read it locklessly here. If you go + * over the limit, there might be a tiny race in actually noticing + * it across threads. Tough. + */ +static inline bool too_many_unix_fds(struct task_struct *p) +{ + struct user_struct *user = current_user(); + + if (unlikely(READ_ONCE(user->unix_inflight) > task_rlimit(p, RLIMIT_NOFILE))) + return !capable(CAP_SYS_RESOURCE) && !capable(CAP_SYS_ADMIN); + return false; +} + +static int unix_attach_fds(struct scm_cookie *scm, struct sk_buff *skb) +{ + int i; + + if (too_many_unix_fds(current)) + return -ETOOMANYREFS; + + /* Need to duplicate file references for the sake of garbage + * collection. Otherwise a socket in the fps might become a + * candidate for GC while the skb is not yet queued. + */ + UNIXCB(skb).fp = scm_fp_dup(scm->fp); + if (!UNIXCB(skb).fp) + return -ENOMEM; + + for (i = scm->fp->count - 1; i >= 0; i--) + unix_inflight(scm->fp->user, scm->fp->fp[i]); + + return 0; +} + +static void unix_detach_fds(struct scm_cookie *scm, struct sk_buff *skb) +{ + int i; + + scm->fp = UNIXCB(skb).fp; + UNIXCB(skb).fp = NULL; + + for (i = scm->fp->count - 1; i >= 0; i--) + unix_notinflight(scm->fp->user, scm->fp->fp[i]); +} + static void unix_peek_fds(struct scm_cookie *scm, struct sk_buff *skb) { scm->fp = scm_fp_dup(UNIXCB(skb).fp); @@@ -1881,21 -1835,6 +1879,21 @@@ spin_unlock(&unix_gc_lock); }
+static void unix_destruct_scm(struct sk_buff *skb) +{ + struct scm_cookie scm; + + memset(&scm, 0, sizeof(scm)); + scm.pid = UNIXCB(skb).pid; + if (UNIXCB(skb).fp) + unix_detach_fds(&scm, skb); + + /* Alas, it calls VFS */ + /* So fscking what? fput() had been SMP-safe since the last Summer */ + scm_destroy(&scm); + sock_wfree(skb); +} + static int unix_scm_to_skb(struct scm_cookie *scm, struct sk_buff *skb, bool send_fds) { int err = 0; @@@ -1982,12 -1921,11 +1980,12 @@@ static int unix_dgram_sendmsg(struct so long timeo; int err;
- wait_for_unix_gc(); err = scm_send(sock, msg, &scm, false); if (err < 0) return err;
+ wait_for_unix_gc(scm.fp); + err = -EOPNOTSUPP; if (msg->msg_flags&MSG_OOB) goto out; @@@ -2259,12 -2197,11 +2257,12 @@@ static int unix_stream_sendmsg(struct s bool fds_sent = false; int data_len;
- wait_for_unix_gc(); err = scm_send(sock, msg, &scm, false); if (err < 0) return err;
 +	wait_for_unix_gc(scm.fp);
 +
  	err = -EOPNOTSUPP;
  	if (msg->msg_flags & MSG_OOB) {
  #if IS_ENABLED(CONFIG_AF_UNIX_OOB)

diff --combined net/unix/diag.c
index c3648b706509,be19827eca36..ae39538c5042
--- a/net/unix/diag.c
+++ b/net/unix/diag.c
@@@ -84,7 -84,7 +84,7 @@@ static int sk_diag_dump_icons(struct so
  			 * queue lock. With the other's queue locked it's
  			 * OK to lock the state.
  			 */
- 			unix_state_lock_nested(req);
+ 			unix_state_lock_nested(req, U_LOCK_DIAG);
  			peer = unix_sk(req)->peer;
  			buf[i++] = (peer ? sock_i_ino(peer) : 0);
  			unix_state_unlock(req);
@@@ -322,7 -322,6 +322,7 @@@ static int unix_diag_handler_dump(struc
  }
  static const struct sock_diag_handler unix_diag_handler = {
 +	.owner = THIS_MODULE,
  	.family = AF_UNIX,
  	.dump = unix_diag_handler_dump,
  };

diff --combined security/security.c
index 73e009e3d937,3aaad75c9ce8..c41ad69a7f19
--- a/security/security.c
+++ b/security/security.c
@@@ -4255,7 -4255,19 +4255,19 @@@ EXPORT_SYMBOL(security_inode_setsecctx)
   */
  int security_inode_getsecctx(struct inode *inode, void **ctx, u32 *ctxlen)
  {
- 	return call_int_hook(inode_getsecctx, -EOPNOTSUPP, inode, ctx, ctxlen);
+ 	struct security_hook_list *hp;
+ 	int rc;
+ 
+ 	/*
+ 	 * Only one module will provide a security context.
+ 	 */
+ 	hlist_for_each_entry(hp, &security_hook_heads.inode_getsecctx, list) {
+ 		rc = hp->hook.inode_getsecctx(inode, ctx, ctxlen);
+ 		if (rc != LSM_RET_DEFAULT(inode_getsecctx))
+ 			return rc;
+ 	}
+ 
+ 	return LSM_RET_DEFAULT(inode_getsecctx);
  }
  EXPORT_SYMBOL(security_inode_getsecctx);
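Aside: the three security.c changes in this merge share one dispatch pattern: walk the registered hook implementations and return the first result that differs from the hook's default (now -ENOPROTOOPT for the getpeersec hooks). A reduced model of that pattern, with invented types rather than the kernel's security_hook_list:

#include <stddef.h>

#define HOOK_DEFAULT (-92)	/* stand-in for LSM_RET_DEFAULT(), e.g. -ENOPROTOOPT */

typedef int (*hook_fn)(void *arg);

/* Walk the registered implementations; the first one that answers with
 * anything other than the default wins, otherwise report the default. */
int call_int_hook_model(const hook_fn *hooks, size_t n, void *arg)
{
	for (size_t i = 0; i < n; i++) {
		int rc = hooks[i](arg);

		if (rc != HOOK_DEFAULT)
			return rc;
	}
	return HOOK_DEFAULT;
}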
@@@ -4612,8 -4624,20 +4624,20 @@@ EXPORT_SYMBOL(security_sock_rcv_skb) int security_socket_getpeersec_stream(struct socket *sock, sockptr_t optval, sockptr_t optlen, unsigned int len) { - return call_int_hook(socket_getpeersec_stream, -ENOPROTOOPT, sock, - optval, optlen, len); + struct security_hook_list *hp; + int rc; + + /* + * Only one module will provide a security context. + */ + hlist_for_each_entry(hp, &security_hook_heads.socket_getpeersec_stream, + list) { + rc = hp->hook.socket_getpeersec_stream(sock, optval, optlen, + len); + if (rc != LSM_RET_DEFAULT(socket_getpeersec_stream)) + return rc; + } + return LSM_RET_DEFAULT(socket_getpeersec_stream); }
/** @@@ -4633,8 -4657,19 +4657,19 @@@ int security_socket_getpeersec_dgram(struct socket *sock, struct sk_buff *skb, u32 *secid) { - return call_int_hook(socket_getpeersec_dgram, -ENOPROTOOPT, sock, - skb, secid); + struct security_hook_list *hp; + int rc; + + /* + * Only one module will provide a security context. + */ + hlist_for_each_entry(hp, &security_hook_heads.socket_getpeersec_dgram, + list) { + rc = hp->hook.socket_getpeersec_dgram(sock, skb, secid); + if (rc != LSM_RET_DEFAULT(socket_getpeersec_dgram)) + return rc; + } + return LSM_RET_DEFAULT(socket_getpeersec_dgram); } EXPORT_SYMBOL(security_socket_getpeersec_dgram);
@@@ -5410,87 -5445,29 +5445,87 @@@ int security_bpf_prog(struct bpf_prog * }
/** - * security_bpf_map_alloc() - Allocate a bpf map LSM blob - * @map: bpf map + * security_bpf_map_create() - Check if BPF map creation is allowed + * @map: BPF map object + * @attr: BPF syscall attributes used to create BPF map + * @token: BPF token used to grant user access + * + * Do a check when the kernel creates a new BPF map. This is also the + * point where LSM blob is allocated for LSMs that need them. + * + * Return: Returns 0 on success, error on failure. + */ +int security_bpf_map_create(struct bpf_map *map, union bpf_attr *attr, + struct bpf_token *token) +{ + return call_int_hook(bpf_map_create, 0, map, attr, token); +} + +/** + * security_bpf_prog_load() - Check if loading of BPF program is allowed + * @prog: BPF program object + * @attr: BPF syscall attributes used to create BPF program + * @token: BPF token used to grant user access to BPF subsystem * - * Initialize the security field inside bpf map. + * Perform an access control check when the kernel loads a BPF program and + * allocates associated BPF program object. This hook is also responsible for + * allocating any required LSM state for the BPF program. * * Return: Returns 0 on success, error on failure. */ -int security_bpf_map_alloc(struct bpf_map *map) +int security_bpf_prog_load(struct bpf_prog *prog, union bpf_attr *attr, + struct bpf_token *token) { - return call_int_hook(bpf_map_alloc_security, 0, map); + return call_int_hook(bpf_prog_load, 0, prog, attr, token); }
/** - * security_bpf_prog_alloc() - Allocate a bpf program LSM blob - * @aux: bpf program aux info struct + * security_bpf_token_create() - Check if creating of BPF token is allowed + * @token: BPF token object + * @attr: BPF syscall attributes used to create BPF token + * @path: path pointing to BPF FS mount point from which BPF token is created * - * Initialize the security field inside bpf program. + * Do a check when the kernel instantiates a new BPF token object from BPF FS + * instance. This is also the point where LSM blob can be allocated for LSMs. * * Return: Returns 0 on success, error on failure. */ -int security_bpf_prog_alloc(struct bpf_prog_aux *aux) +int security_bpf_token_create(struct bpf_token *token, union bpf_attr *attr, + struct path *path) { - return call_int_hook(bpf_prog_alloc_security, 0, aux); + return call_int_hook(bpf_token_create, 0, token, attr, path); +} + +/** + * security_bpf_token_cmd() - Check if BPF token is allowed to delegate + * requested BPF syscall command + * @token: BPF token object + * @cmd: BPF syscall command requested to be delegated by BPF token + * + * Do a check when the kernel decides whether provided BPF token should allow + * delegation of requested BPF syscall command. + * + * Return: Returns 0 on success, error on failure. + */ +int security_bpf_token_cmd(const struct bpf_token *token, enum bpf_cmd cmd) +{ + return call_int_hook(bpf_token_cmd, 0, token, cmd); +} + +/** + * security_bpf_token_capable() - Check if BPF token is allowed to delegate + * requested BPF-related capability + * @token: BPF token object + * @cap: capabilities requested to be delegated by BPF token + * + * Do a check when the kernel decides whether provided BPF token should allow + * delegation of requested BPF-related capabilities. + * + * Return: Returns 0 on success, error on failure. + */ +int security_bpf_token_capable(const struct bpf_token *token, int cap) +{ + return call_int_hook(bpf_token_capable, 0, token, cap); }
/** @@@ -5501,29 -5478,18 +5536,29 @@@ */ void security_bpf_map_free(struct bpf_map *map) { - call_void_hook(bpf_map_free_security, map); + call_void_hook(bpf_map_free, map); +} + +/** + * security_bpf_prog_free() - Free a BPF program's LSM blob + * @prog: BPF program struct + * + * Clean up the security information stored inside BPF program. + */ +void security_bpf_prog_free(struct bpf_prog *prog) +{ + call_void_hook(bpf_prog_free, prog); }
/** - * security_bpf_prog_free() - Free a bpf program's LSM blob - * @aux: bpf program aux info struct + * security_bpf_token_free() - Free a BPF token's LSM blob + * @token: BPF token struct * - * Clean up the security information stored inside bpf prog. + * Clean up the security information stored inside BPF token. */ -void security_bpf_prog_free(struct bpf_prog_aux *aux) +void security_bpf_token_free(struct bpf_token *token) { - call_void_hook(bpf_prog_free_security, aux); + call_void_hook(bpf_token_free, token); } #endif /* CONFIG_BPF_SYSCALL */
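Aside: the new bpf_token_cmd/bpf_token_capable hooks sit on top of a token that whitelists delegated commands and capabilities via bitmasks. A toy model of the yes/no question those hooks arbitrate (all names and values invented for the sketch; the real kernel check also involves user-namespace scoping and an LSM veto):

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

struct toy_token {
	uint64_t allowed_cmds;	/* bit N set: syscall command N is delegated */
	uint64_t allowed_caps;	/* bit N set: capability N is delegated */
};

bool toy_token_allows_cmd(const struct toy_token *t, unsigned int cmd)
{
	return t && cmd < 64 && (t->allowed_cmds & (UINT64_C(1) << cmd));
}

bool toy_token_allows_cap(const struct toy_token *t, unsigned int cap)
{
	return t && cap < 64 && (t->allowed_caps & (UINT64_C(1) << cap));
}

int main(void)
{
	struct toy_token tok = { .allowed_cmds = UINT64_C(1) << 5 };

	assert(toy_token_allows_cmd(&tok, 5));
	assert(!toy_token_allows_cap(&tok, 39));
	return 0;
}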
diff --combined tools/testing/selftests/net/forwarding/Makefile
index 1fba2717738d,4de92632f483..cdefc9a5ec34
--- a/tools/testing/selftests/net/forwarding/Makefile
+++ b/tools/testing/selftests/net/forwarding/Makefile
@@@ -112,7 -112,7 +112,7 @@@ TEST_PROGS = bridge_fdb_learning_limit.
  	vxlan_symmetric_ipv6.sh \
  	vxlan_symmetric.sh
  
- TEST_PROGS_EXTENDED := devlink_lib.sh \
+ TEST_FILES := devlink_lib.sh \
  	ethtool_lib.sh \
  	fib_offload_lib.sh \
  	forwarding.config.sample \
@@@ -129,7 -129,4 +129,7 @@@
  	sch_tbf_etsprio.sh \
  	tc_common.sh
  
 +TEST_INCLUDES := \
 +	../lib.sh
 +
  include ../../lib.mk