The following commit has been merged in the master branch:
commit 1d343579312311aa9875b34d5a921f5e2ec69f0a
Merge: a8eceea84a3a3504e42f6495cf462027c5d19cb0 0d81a3f29c0afb18ba2b1275dcccf21e0dd4da38
Author: David S. Miller <davem@davemloft.net>
Date:   Thu Mar 12 21:29:30 2020 -0700
Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net
Minor overlapping changes, nothing serious.
Signed-off-by: David S. Miller <davem@davemloft.net>
diff --combined MAINTAINERS
index 6918c3f0ff1c,cc1d18cb5d18..439151d589a4
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@@ -693,7 -693,7 +693,7 @@@ ALLWINNER CPUFREQ DRIVE
  M:	Yangtao Li <tiny.windzz@gmail.com>
  L:	linux-pm@vger.kernel.org
  S:	Maintained
- F:	Documentation/devicetree/bindings/opp/sun50i-nvmem-cpufreq.txt
+ F:	Documentation/devicetree/bindings/opp/allwinner,sun50i-h6-operating-points.yaml
  F:	drivers/cpufreq/sun50i-cpufreq-nvmem.c
  ALLWINNER CRYPTO DRIVERS
@@@ -4017,7 -4017,7 +4017,7 @@@ M: Cheng-Yi Chiang <cychiang@chromium.o
  S:	Maintained
  R:	Enric Balletbo i Serra <enric.balletbo@collabora.com>
  R:	Guenter Roeck <groeck@chromium.org>
- F:	Documentation/devicetree/bindings/sound/google,cros-ec-codec.txt
+ F:	Documentation/devicetree/bindings/sound/google,cros-ec-codec.yaml
  F:	sound/soc/codecs/cros_ec_codec.*
  CIRRUS LOGIC AUDIO CODEC DRIVERS
@@@ -4073,7 -4073,6 +4073,6 @@@ F: drivers/scsi/snic
  CISCO VIC ETHERNET NIC DRIVER
  M:	Christian Benvenuti <benve@cisco.com>
  M:	Govindarajulu Varadarajan <_govind@gmx.com>
- M:	Parvi Kaustubhi <pkaustub@cisco.com>
  S:	Supported
  F:	drivers/net/ethernet/cisco/enic/
@@@ -4475,7 -4474,7 +4474,7 @@@ L: linux-media@vger.kernel.or
  T:	git git://linuxtv.org/media_tree.git
  S:	Maintained
  F:	drivers/media/platform/sunxi/sun6i-csi/
- F:	Documentation/devicetree/bindings/media/sun6i-csi.txt
+ F:	Documentation/devicetree/bindings/media/allwinner,sun6i-a31-csi.yaml
  CW1200 WLAN driver
  M:	Solomon Peachy <pizza@shaftnet.org>
@@@ -4572,7 -4571,7 +4571,7 @@@ F: drivers/infiniband/hw/cxgb4
  F:	include/uapi/rdma/cxgb4-abi.h
  CXGB4VF ETHERNET DRIVER (CXGB4VF)
- M:	Casey Leedom <leedom@chelsio.com>
+ M:	Vishal Kulkarni <vishal@gmail.com>
  L:	netdev@vger.kernel.org
  W:	http://www.chelsio.com
  S:	Supported
@@@ -5668,7 -5667,7 +5667,7 @@@ L: dri-devel@lists.freedesktop.or
  T:	git git://anongit.freedesktop.org/drm/drm-misc
  S:	Maintained
  F:	drivers/gpu/drm/stm
- F:	Documentation/devicetree/bindings/display/st,stm32-ltdc.txt
+ F:	Documentation/devicetree/bindings/display/st,stm32-ltdc.yaml
  DRM DRIVERS FOR TI LCDC
  M:	Jyri Sarha <jsarha@ti.com>
@@@ -6198,7 -6197,6 +6197,6 @@@ S: Supporte
  F:	drivers/scsi/be2iscsi/
  Emulex 10Gbps NIC BE2, BE3-R, Lancer, Skyhawk-R DRIVER (be2net)
- M:	Sathya Perla <sathya.perla@broadcom.com>
  M:	Ajit Khaparde <ajit.khaparde@broadcom.com>
  M:	Sriharsha Basavapatna <sriharsha.basavapatna@broadcom.com>
  M:	Somnath Kotur <somnath.kotur@broadcom.com>
@@@ -7738,7 -7736,7 +7736,7 @@@ Hyper-V CORE AND DRIVER
  M:	"K. Y. Srinivasan" <kys@microsoft.com>
  M:	Haiyang Zhang <haiyangz@microsoft.com>
  M:	Stephen Hemminger <sthemmin@microsoft.com>
- M:	Sasha Levin <sashal@kernel.org>
+ M:	Wei Liu <wei.liu@kernel.org>
  T:	git git://git.kernel.org/pub/scm/linux/kernel/git/hyperv/linux.git
  L:	linux-hyperv@vger.kernel.org
  S:	Supported
@@@ -10164,7 -10162,7 +10162,7 @@@ MAXBOTIX ULTRASONIC RANGER IIO DRIVE
  M:	Andreas Klinger <ak@it-klinger.de>
  L:	linux-iio@vger.kernel.org
  S:	Maintained
- F:	Documentation/devicetree/bindings/iio/proximity/maxbotix,mb1232.txt
+ F:	Documentation/devicetree/bindings/iio/proximity/maxbotix,mb1232.yaml
  F:	drivers/iio/proximity/mb1232.c
  MAXIM MAX77650 PMIC MFD DRIVER
@@@ -10467,7 -10465,7 +10465,7 @@@ M: Hugues Fruchet <hugues.fruchet@st.co
  L:	linux-media@vger.kernel.org
  T:	git git://linuxtv.org/media_tree.git
  S:	Supported
- F:	Documentation/devicetree/bindings/media/st,stm32-dcmi.txt
+ F:	Documentation/devicetree/bindings/media/st,stm32-dcmi.yaml
  F:	drivers/media/platform/stm32/stm32-dcmi.c
  MEDIA DRIVERS FOR NVIDIA TEGRA - VDE
@@@ -11119,7 -11117,7 +11117,7 @@@ M: Thomas Bogendoerfer <tsbogend@alpha.
  L:	linux-mips@vger.kernel.org
  W:	http://www.linux-mips.org/
  T:	git git://git.kernel.org/pub/scm/linux/kernel/git/mips/linux.git
- Q:	http://patchwork.linux-mips.org/project/linux-mips/list/
+ Q:	https://patchwork.kernel.org/project/linux-mips/list/
  S:	Maintained
  F:	Documentation/devicetree/bindings/mips/
  F:	Documentation/mips/
@@@ -12739,7 -12737,7 +12737,7 @@@ M: Tom Joseph <tjoseph@cadence.com
  L:	linux-pci@vger.kernel.org
  S:	Maintained
  F:	Documentation/devicetree/bindings/pci/cdns,*.txt
- F:	drivers/pci/controller/pcie-cadence*
+ F:	drivers/pci/controller/cadence/
  PCI DRIVER FOR FREESCALE LAYERSCAPE
  M:	Minghuan Lian <minghuan.Lian@nxp.com>
@@@ -12952,7 -12950,6 +12950,6 @@@ M: Robert Richter <rrichter@marvell.com
  L:	linux-pci@vger.kernel.org
  L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
  S:	Supported
- F:	Documentation/devicetree/bindings/pci/pci-thunder-*
  F:	drivers/pci/controller/pci-thunder-*
  PCIE DRIVER FOR HISILICON
@@@ -13662,12 -13659,6 +13659,12 @@@ L: alsa-devel@alsa-project.org (moderat
  S:	Supported
  F:	sound/soc/qcom/
 +QCOM IPA DRIVER
 +M:	Alex Elder <elder@kernel.org>
 +L:	netdev@vger.kernel.org
 +S:	Supported
 +F:	drivers/net/ipa/
 +
  QEMU MACHINE EMULATOR AND VIRTUALIZER SUPPORT
  M:	Gabriel Somlo <somlo@cmu.edu>
  M:	"Michael S. Tsirkin" <mst@redhat.com>
@@@ -14233,7 -14224,7 +14230,7 @@@ F: include/dt-bindings/reset
  F:	include/linux/reset.h
  F:	include/linux/reset/
  F:	include/linux/reset-controller.h
- K:	\b(?:devm_|of_)?reset_control(?:ler_[a-z]+|_[a-z_]+)?\b
+ K:	\b(?:devm_|of_)?reset_control(?:ler_[a-z]+|_[a-z_]+)?\b
  RESTARTABLE SEQUENCES SUPPORT
  M:	Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
@@@ -15928,7 -15919,7 +15925,7 @@@ F: drivers/*/stm32-*timer
  F:	drivers/pwm/pwm-stm32*
  F:	include/linux/*/stm32-*tim*
  F:	Documentation/ABI/testing/*timer-stm32
- F:	Documentation/devicetree/bindings/*/stm32-*timer*
+ F:	Documentation/devicetree/bindings/*/*stm32-*timer*
  F:	Documentation/devicetree/bindings/pwm/pwm-stm32*
  STMMAC ETHERNET DRIVER
@@@ -16087,6 -16078,8 +16084,8 @@@ SYNOPSYS DESIGNWARE 8250 UART DRIVE
  R:	Andy Shevchenko <andriy.shevchenko@linux.intel.com>
  S:	Maintained
  F:	drivers/tty/serial/8250/8250_dw.c
+ F:	drivers/tty/serial/8250/8250_dwlib.*
+ F:	drivers/tty/serial/8250/8250_lpss.c
  SYNOPSYS DESIGNWARE APB GPIO DRIVER
  M:	Hoan Tran <hoan@os.amperecomputing.com>
@@@ -16117,13 -16110,6 +16116,13 @@@ L: netdev@vger.kernel.or
  S:	Supported
  F:	drivers/net/ethernet/synopsys/
 +SYNOPSYS DESIGNWARE ETHERNET XPCS DRIVER
 +M:	Jose Abreu <Jose.Abreu@synopsys.com>
 +L:	netdev@vger.kernel.org
 +S:	Supported
 +F:	drivers/net/phy/mdio-xpcs.c
 +F:	include/linux/mdio-xpcs.h
 +
  SYNOPSYS DESIGNWARE I2C DRIVER
  M:	Jarkko Nikula <jarkko.nikula@linux.intel.com>
  R:	Andy Shevchenko <andriy.shevchenko@linux.intel.com>
@@@ -17865,13 -17851,6 +17864,13 @@@ S: Supporte
  F:	arch/x86/kernel/cpu/vmware.c
  F:	arch/x86/include/asm/vmware.h
 +VMWARE VIRTUAL PTP CLOCK DRIVER
 +M:	Vivek Thampi <vithampi@vmware.com>
 +M:	"VMware, Inc." <pv-drivers@vmware.com>
 +L:	netdev@vger.kernel.org
 +S:	Supported
 +F:	drivers/ptp/ptp_vmw.c
 +
  VMWARE PVRDMA DRIVER
  M:	Adit Ranadive <aditr@vmware.com>
  M:	VMware PV-Drivers <pv-drivers@vmware.com>
diff --combined drivers/base/core.c
index fb8b7990f6fd,dbb0f9130f42..befc2722dbfc
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@@ -718,6 -718,8 +718,8 @@@ static void __device_links_queue_sync_s
  {
  	struct device_link *link;
 
+ 	if (!dev_has_sync_state(dev))
+ 		return;
+ 
  	if (dev->state_synced)
  		return;
@@@ -745,25 -747,31 +747,31 @@@
  /**
   * device_links_flush_sync_list - Call sync_state() on a list of devices
   * @list: List of devices to call sync_state() on
+  * @dont_lock_dev: Device for which lock is already held by the caller
   *
   * Calls sync_state() on all the devices that have been queued for it. This
-  * function is used in conjunction with __device_links_queue_sync_state().
+  * function is used in conjunction with __device_links_queue_sync_state(). The
+  * @dont_lock_dev parameter is useful when this function is called from a
+  * context where a device lock is already held.
   */
- static void device_links_flush_sync_list(struct list_head *list)
+ static void device_links_flush_sync_list(struct list_head *list,
+ 					 struct device *dont_lock_dev)
  {
  	struct device *dev, *tmp;
 
  	list_for_each_entry_safe(dev, tmp, list, links.defer_sync) {
  		list_del_init(&dev->links.defer_sync);
 
- 		device_lock(dev);
+ 		if (dev != dont_lock_dev)
+ 			device_lock(dev);
 
  		if (dev->bus->sync_state)
  			dev->bus->sync_state(dev);
  		else if (dev->driver && dev->driver->sync_state)
  			dev->driver->sync_state(dev);
 
- 		device_unlock(dev);
+ 		if (dev != dont_lock_dev)
+ 			device_unlock(dev);
 
  		put_device(dev);
  	}
@@@ -801,7 -809,7 +809,7 @@@ void device_links_supplier_sync_state_r
  out:
  	device_links_write_unlock();
 
- 	device_links_flush_sync_list(&sync_list);
+ 	device_links_flush_sync_list(&sync_list, NULL);
  }
 
  static int sync_state_resume_initcall(void)
@@@ -813,7 -821,7 +821,7 @@@ late_initcall(sync_state_resume_initcal
 
  static void __device_links_supplier_defer_sync(struct device *sup)
  {
- 	if (list_empty(&sup->links.defer_sync))
+ 	if (list_empty(&sup->links.defer_sync) && dev_has_sync_state(sup))
  		list_add_tail(&sup->links.defer_sync, &deferred_sync);
  }
 
@@@ -865,6 -873,11 +873,11 @@@ void device_links_driver_bound(struct d
  			driver_deferred_probe_add(link->consumer);
  	}
 
+ 	if (defer_sync_state_count)
+ 		__device_links_supplier_defer_sync(dev);
+ 	else
+ 		__device_links_queue_sync_state(dev, &sync_list);
+ 
  	list_for_each_entry(link, &dev->links.suppliers, c_node) {
  		if (!(link->flags & DL_FLAG_MANAGED))
  			continue;
@@@ -883,7 -896,7 +896,7 @@@
  	device_links_write_unlock();
 
- 	device_links_flush_sync_list(&sync_list);
+ 	device_links_flush_sync_list(&sync_list, dev);
  }
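[Note on the @dont_lock_dev change above: device_links_driver_bound() runs with @dev's device lock already held, so flushing the sync list from that path must not try to take that lock again, while the resume path holds no device lock and passes NULL. A minimal sketch of the conditional-locking idea, not the exact kernel code:

	static void flush_one(struct device *dev, struct device *dont_lock_dev)
	{
		if (dev != dont_lock_dev)
			device_lock(dev);	/* re-taking it would deadlock */
		if (dev->bus->sync_state)
			dev->bus->sync_state(dev);
		if (dev != dont_lock_dev)
			device_unlock(dev);
	}
]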
 
  static void device_link_drop_managed(struct device_link *link)
@@@ -3458,126 -3471,6 +3471,126 @@@ out
  }
  EXPORT_SYMBOL_GPL(device_move);
 
+static int device_attrs_change_owner(struct device *dev, kuid_t kuid,
+				     kgid_t kgid)
+{
+	struct kobject *kobj = &dev->kobj;
+	struct class *class = dev->class;
+	const struct device_type *type = dev->type;
+	int error;
+
+	if (class) {
+		/*
+		 * Change the device groups of the device class for @dev to
+		 * @kuid/@kgid.
+		 */
+		error = sysfs_groups_change_owner(kobj, class->dev_groups, kuid,
+						  kgid);
+		if (error)
+			return error;
+	}
+
+	if (type) {
+		/*
+		 * Change the device groups of the device type for @dev to
+		 * @kuid/@kgid.
+		 */
+		error = sysfs_groups_change_owner(kobj, type->groups, kuid,
+						  kgid);
+		if (error)
+			return error;
+	}
+
+	/* Change the device groups of @dev to @kuid/@kgid. */
+	error = sysfs_groups_change_owner(kobj, dev->groups, kuid, kgid);
+	if (error)
+		return error;
+
+	if (device_supports_offline(dev) && !dev->offline_disabled) {
+		/* Change online device attributes of @dev to @kuid/@kgid. */
+		error = sysfs_file_change_owner(kobj, dev_attr_online.attr.name,
+						kuid, kgid);
+		if (error)
+			return error;
+	}
+
+	return 0;
+}
+
+/**
+ * device_change_owner - change the owner of an existing device.
+ * @dev: device.
+ * @kuid: new owner's kuid
+ * @kgid: new owner's kgid
+ *
+ * This changes the owner of @dev and its corresponding sysfs entries to
+ * @kuid/@kgid. This function closely mirrors how @dev was added via driver
+ * core.
+ *
+ * Returns 0 on success or error code on failure.
+ */
+int device_change_owner(struct device *dev, kuid_t kuid, kgid_t kgid)
+{
+	int error;
+	struct kobject *kobj = &dev->kobj;
+
+	dev = get_device(dev);
+	if (!dev)
+		return -EINVAL;
+
+	/*
+	 * Change the kobject and the default attributes and groups of the
+	 * ktype associated with it to @kuid/@kgid.
+	 */
+	error = sysfs_change_owner(kobj, kuid, kgid);
+	if (error)
+		goto out;
+
+	/*
+	 * Change the uevent file for @dev to the new owner. The uevent file
+	 * was created in a separate step when @dev got added and we mirror
+	 * that step here.
+	 */
+	error = sysfs_file_change_owner(kobj, dev_attr_uevent.attr.name, kuid,
+					kgid);
+	if (error)
+		goto out;
+
+	/*
+	 * Change the device groups, the device groups associated with the
+	 * device class, and the groups associated with the device type of @dev
+	 * to @kuid/@kgid.
+	 */
+	error = device_attrs_change_owner(dev, kuid, kgid);
+	if (error)
+		goto out;
+
+	error = dpm_sysfs_change_owner(dev, kuid, kgid);
+	if (error)
+		goto out;
+
+#ifdef CONFIG_BLOCK
+	if (sysfs_deprecated && dev->class == &block_class)
+		goto out;
+#endif
+
+	/*
+	 * Change the owner of the symlink located in the class directory of
+	 * the device class associated with @dev which points to the actual
+	 * directory entry for @dev to @kuid/@kgid. This ensures that the
+	 * symlink shows the same permissions as its target.
+	 */
+	error = sysfs_link_change_owner(&dev->class->p->subsys.kobj, &dev->kobj,
+					dev_name(dev), kuid, kgid);
+	if (error)
+		goto out;
+
+out:
+	put_device(dev);
+	return error;
+}
+EXPORT_SYMBOL_GPL(device_change_owner);
+
  /**
   * device_shutdown - call ->shutdown() on each device to shutdown.
   */
diff --combined drivers/net/dsa/mv88e6xxx/chip.c
index 483db9d133c3,2f993e673ec7..fb4c97a58bd4
--- a/drivers/net/dsa/mv88e6xxx/chip.c
+++ b/drivers/net/dsa/mv88e6xxx/chip.c
@@@ -632,78 -632,33 +632,78 @@@ static void mv88e6xxx_mac_config(struc
  		dev_err(ds->dev, "p%d: failed to configure MAC\n", port);
  }
-static void mv88e6xxx_mac_link_force(struct dsa_switch *ds, int port, int link) +static void mv88e6xxx_mac_link_down(struct dsa_switch *ds, int port, + unsigned int mode, + phy_interface_t interface) { struct mv88e6xxx_chip *chip = ds->priv; - int err; + const struct mv88e6xxx_ops *ops; + int err = 0;
- mv88e6xxx_reg_lock(chip); - err = chip->info->ops->port_set_link(chip, port, link); - mv88e6xxx_reg_unlock(chip); + ops = chip->info->ops;
- if (err) - dev_err(chip->dev, "p%d: failed to force MAC link\n", port); -} + /* Internal PHYs propagate their configuration directly to the MAC. + * External PHYs depend on whether the PPU is enabled for this port. + * FIXME: we should be using the PPU enable state here. What about + * an automedia port? + */ + if (!mv88e6xxx_phy_is_internal(ds, port) && ops->port_set_link) { + mv88e6xxx_reg_lock(chip); + err = ops->port_set_link(chip, port, LINK_FORCED_DOWN); + mv88e6xxx_reg_unlock(chip);
-static void mv88e6xxx_mac_link_down(struct dsa_switch *ds, int port, - unsigned int mode, - phy_interface_t interface) -{ - if (mode == MLO_AN_FIXED) - mv88e6xxx_mac_link_force(ds, port, LINK_FORCED_DOWN); + if (err) + dev_err(chip->dev, + "p%d: failed to force MAC link down\n", port); + } }
static void mv88e6xxx_mac_link_up(struct dsa_switch *ds, int port, unsigned int mode, phy_interface_t interface, - struct phy_device *phydev) + struct phy_device *phydev, + int speed, int duplex, + bool tx_pause, bool rx_pause) { - if (mode == MLO_AN_FIXED) - mv88e6xxx_mac_link_force(ds, port, LINK_FORCED_UP); + struct mv88e6xxx_chip *chip = ds->priv; + const struct mv88e6xxx_ops *ops; + int err = 0; + + ops = chip->info->ops; + + /* Internal PHYs propagate their configuration directly to the MAC. + * External PHYs depend on whether the PPU is enabled for this port. + * FIXME: we should be using the PPU enable state here. What about + * an automedia port? + */ + if (!mv88e6xxx_phy_is_internal(ds, port)) { + mv88e6xxx_reg_lock(chip); + /* FIXME: for an automedia port, should we force the link + * down here - what if the link comes up due to "other" media + * while we're bringing the port up, how is the exclusivity + * handled in the Marvell hardware? E.g. port 4 on 88E6532 + * shared between internal PHY and Serdes. + */ + if (ops->port_set_speed) { + err = ops->port_set_speed(chip, port, speed); + if (err && err != -EOPNOTSUPP) + goto error; + } + + if (ops->port_set_duplex) { + err = ops->port_set_duplex(chip, port, duplex); + if (err && err != -EOPNOTSUPP) + goto error; + } + + if (ops->port_set_link) + err = ops->port_set_link(chip, port, LINK_FORCED_UP); +error: + mv88e6xxx_reg_unlock(chip); + + if (err && err != -EOPNOTSUPP) + dev_err(ds->dev, + "p%d: failed to configure MAC link up\n", port); + } }
static int mv88e6xxx_stats_snapshot(struct mv88e6xxx_chip *chip, int port) @@@ -1063,14 -1018,7 +1063,14 @@@ static void mv88e6xxx_get_ethtool_stats
  static int mv88e6xxx_get_regs_len(struct dsa_switch *ds, int port)
  {
- 	return 32 * sizeof(u16);
+ 	struct mv88e6xxx_chip *chip = ds->priv;
+ 	int len;
+ 
+ 	len = 32 * sizeof(u16);
+ 	if (chip->info->ops->serdes_get_regs_len)
+ 		len += chip->info->ops->serdes_get_regs_len(chip, port);
+ 
+ 	return len;
  }
static void mv88e6xxx_get_regs(struct dsa_switch *ds, int port, @@@ -1095,9 -1043,6 +1095,9 @@@ p[i] = reg; }
+ if (chip->info->ops->serdes_get_regs) + chip->info->ops->serdes_get_regs(chip, port, &p[i]); + mv88e6xxx_reg_unlock(chip); }
@@@ -1840,7 -1785,7 +1840,7 @@@ static int mv88e6xxx_broadcast_setup(st }
static int mv88e6xxx_port_vlan_join(struct mv88e6xxx_chip *chip, int port, - u16 vid, u8 member) + u16 vid, u8 member, bool warn) { const u8 non_member = MV88E6XXX_G1_VTU_DATA_MEMBER_TAG_NON_MEMBER; struct mv88e6xxx_vtu_entry vlan; @@@ -1885,7 -1830,7 +1885,7 @@@ err = mv88e6xxx_vtu_loadpurge(chip, &vlan); if (err) return err; - } else { + } else if (warn) { dev_info(chip->dev, "p%d: already a member of VLAN %d\n", port, vid); } @@@ -1899,7 -1844,6 +1899,7 @@@ static void mv88e6xxx_port_vlan_add(str struct mv88e6xxx_chip *chip = ds->priv; bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED; bool pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID; + bool warn; u8 member; u16 vid;
@@@ -1913,15 -1857,10 +1913,15 @@@ else member = MV88E6XXX_G1_VTU_DATA_MEMBER_TAG_TAGGED;
+ /* net/dsa/slave.c will call dsa_port_vlan_add() for the affected port + * and then the CPU port. Do not warn for duplicates for the CPU port. + */ + warn = !dsa_is_cpu_port(ds, port) && !dsa_is_dsa_port(ds, port); + mv88e6xxx_reg_lock(chip);
for (vid = vlan->vid_begin; vid <= vlan->vid_end; ++vid) - if (mv88e6xxx_port_vlan_join(chip, port, vid, member)) + if (mv88e6xxx_port_vlan_join(chip, port, vid, member, warn)) dev_err(ds->dev, "p%d: failed to add VLAN %d%c\n", port, vid, untagged ? 'u' : 't');
@@@ -2830,6 -2769,8 +2830,8 @@@ static u64 mv88e6xxx_devlink_atu_bin_ge
  		goto unlock;
  	}
 
+ 	occupancy &= MV88E6XXX_G2_ATU_STATS_MASK;
+ 
  unlock:
  	mv88e6xxx_reg_unlock(chip);
@@@ -3725,8 -3666,6 +3727,8 @@@ static const struct mv88e6xxx_ops mv88e .vtu_loadpurge = mv88e6352_g1_vtu_loadpurge, .serdes_get_lane = mv88e6352_serdes_get_lane, .serdes_power = mv88e6352_serdes_power, + .serdes_get_regs_len = mv88e6352_serdes_get_regs_len, + .serdes_get_regs = mv88e6352_serdes_get_regs, .gpio_ops = &mv88e6352_gpio_ops, .phylink_validate = mv88e6352_phylink_validate, }; @@@ -3821,8 -3760,6 +3823,8 @@@ static const struct mv88e6xxx_ops mv88e .serdes_irq_mapping = mv88e6352_serdes_irq_mapping, .serdes_irq_enable = mv88e6352_serdes_irq_enable, .serdes_irq_status = mv88e6352_serdes_irq_status, + .serdes_get_regs_len = mv88e6352_serdes_get_regs_len, + .serdes_get_regs = mv88e6352_serdes_get_regs, .gpio_ops = &mv88e6352_gpio_ops, .phylink_validate = mv88e6352_phylink_validate, }; @@@ -3912,8 -3849,6 +3914,8 @@@ static const struct mv88e6xxx_ops mv88e .serdes_irq_status = mv88e6390_serdes_irq_status, .serdes_get_strings = mv88e6390_serdes_get_strings, .serdes_get_stats = mv88e6390_serdes_get_stats, + .serdes_get_regs_len = mv88e6390_serdes_get_regs_len, + .serdes_get_regs = mv88e6390_serdes_get_regs, .phylink_validate = mv88e6390_phylink_validate, .gpio_ops = &mv88e6352_gpio_ops, .phylink_validate = mv88e6390_phylink_validate, @@@ -3968,8 -3903,6 +3970,8 @@@ static const struct mv88e6xxx_ops mv88e .serdes_irq_status = mv88e6390_serdes_irq_status, .serdes_get_strings = mv88e6390_serdes_get_strings, .serdes_get_stats = mv88e6390_serdes_get_stats, + .serdes_get_regs_len = mv88e6390_serdes_get_regs_len, + .serdes_get_regs = mv88e6390_serdes_get_regs, .phylink_validate = mv88e6390_phylink_validate, .gpio_ops = &mv88e6352_gpio_ops, .phylink_validate = mv88e6390x_phylink_validate, @@@ -4023,8 -3956,6 +4025,8 @@@ static const struct mv88e6xxx_ops mv88e .serdes_irq_status = mv88e6390_serdes_irq_status, .serdes_get_strings = mv88e6390_serdes_get_strings, .serdes_get_stats = mv88e6390_serdes_get_stats, + .serdes_get_regs_len = mv88e6390_serdes_get_regs_len, + .serdes_get_regs = mv88e6390_serdes_get_regs, .phylink_validate = mv88e6390_phylink_validate, .avb_ops = &mv88e6390_avb_ops, .ptp_ops = &mv88e6352_ptp_ops, @@@ -4079,8 -4010,6 +4081,8 @@@ static const struct mv88e6xxx_ops mv88e .serdes_irq_mapping = mv88e6352_serdes_irq_mapping, .serdes_irq_enable = mv88e6352_serdes_irq_enable, .serdes_irq_status = mv88e6352_serdes_irq_status, + .serdes_get_regs_len = mv88e6352_serdes_get_regs_len, + .serdes_get_regs = mv88e6352_serdes_get_regs, .gpio_ops = &mv88e6352_gpio_ops, .avb_ops = &mv88e6352_avb_ops, .ptp_ops = &mv88e6352_ptp_ops, @@@ -4176,8 -4105,6 +4178,8 @@@ static const struct mv88e6xxx_ops mv88e .serdes_irq_status = mv88e6390_serdes_irq_status, .serdes_get_strings = mv88e6390_serdes_get_strings, .serdes_get_stats = mv88e6390_serdes_get_stats, + .serdes_get_regs_len = mv88e6390_serdes_get_regs_len, + .serdes_get_regs = mv88e6390_serdes_get_regs, .phylink_validate = mv88e6390_phylink_validate, .gpio_ops = &mv88e6352_gpio_ops, .avb_ops = &mv88e6390_avb_ops, @@@ -4463,8 -4390,6 +4465,8 @@@ static const struct mv88e6xxx_ops mv88e .serdes_get_sset_count = mv88e6352_serdes_get_sset_count, .serdes_get_strings = mv88e6352_serdes_get_strings, .serdes_get_stats = mv88e6352_serdes_get_stats, + .serdes_get_regs_len = mv88e6352_serdes_get_regs_len, + .serdes_get_regs = mv88e6352_serdes_get_regs, .phylink_validate = mv88e6352_phylink_validate, };
@@@ -4523,8 -4448,6 +4525,8 @@@ static const struct mv88e6xxx_ops mv88e .serdes_get_sset_count = mv88e6390_serdes_get_sset_count, .serdes_get_strings = mv88e6390_serdes_get_strings, .serdes_get_stats = mv88e6390_serdes_get_stats, + .serdes_get_regs_len = mv88e6390_serdes_get_regs_len, + .serdes_get_regs = mv88e6390_serdes_get_regs, .phylink_validate = mv88e6390_phylink_validate, };
@@@ -4580,8 -4503,6 +4582,8 @@@ static const struct mv88e6xxx_ops mv88e .serdes_get_sset_count = mv88e6390_serdes_get_sset_count, .serdes_get_strings = mv88e6390_serdes_get_strings, .serdes_get_stats = mv88e6390_serdes_get_stats, + .serdes_get_regs_len = mv88e6390_serdes_get_regs_len, + .serdes_get_regs = mv88e6390_serdes_get_regs, .gpio_ops = &mv88e6352_gpio_ops, .avb_ops = &mv88e6390_avb_ops, .ptp_ops = &mv88e6352_ptp_ops, diff --combined drivers/net/dsa/sja1105/sja1105_main.c index 6fe679143216,7edea5741a5f..d42f085d4272 --- a/drivers/net/dsa/sja1105/sja1105_main.c +++ b/drivers/net/dsa/sja1105/sja1105_main.c @@@ -786,9 -786,7 +786,9 @@@ static void sja1105_mac_link_down(struc static void sja1105_mac_link_up(struct dsa_switch *ds, int port, unsigned int mode, phy_interface_t interface, - struct phy_device *phydev) + struct phy_device *phydev, + int speed, int duplex, + bool tx_pause, bool rx_pause) { sja1105_inhibit_tx(ds->priv, BIT(port), false); } @@@ -824,7 -822,6 +824,7 @@@ static void sja1105_phylink_validate(st phylink_set(mask, MII); phylink_set(mask, 10baseT_Full); phylink_set(mask, 100baseT_Full); + phylink_set(mask, 100baseT1_Full); if (mii->xmii_mode[port] == XMII_MODE_RGMII) phylink_set(mask, 1000baseT_Full);
@@@ -1744,7 -1741,8 +1744,8 @@@ static void sja1105_teardown(struct dsa
  		if (!dsa_is_user_port(ds, port))
  			continue;
 
- 		kthread_destroy_worker(sp->xmit_worker);
+ 		if (sp->xmit_worker)
+ 			kthread_destroy_worker(sp->xmit_worker);
  	}
sja1105_tas_teardown(ds); diff --combined drivers/net/ethernet/broadcom/bcmsysport.c index bea2dbc0e469,15b31cddc054..af7ce5c5488c --- a/drivers/net/ethernet/broadcom/bcmsysport.c +++ b/drivers/net/ethernet/broadcom/bcmsysport.c @@@ -287,6 -287,7 +287,6 @@@ static void bcm_sysport_get_drvinfo(str struct ethtool_drvinfo *info) { strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver)); - strlcpy(info->version, "0.1", sizeof(info->version)); strlcpy(info->bus_info, "platform", sizeof(info->bus_info)); }
@@@ -623,7 -624,8 +623,7 @@@ static int bcm_sysport_set_coalesce(str return -EINVAL;
  	if ((ec->tx_coalesce_usecs == 0 && ec->tx_max_coalesced_frames == 0) ||
- 	    (ec->rx_coalesce_usecs == 0 && ec->rx_max_coalesced_frames == 0) ||
- 	    ec->use_adaptive_tx_coalesce)
+ 	    (ec->rx_coalesce_usecs == 0 && ec->rx_max_coalesced_frames == 0))
  		return -EINVAL;
for (i = 0; i < dev->num_tx_queues; i++) @@@ -2133,7 -2135,7 +2133,7 @@@ static int bcm_sysport_rule_set(struct return -ENOSPC;
  	index = find_first_zero_bit(priv->filters, RXCHK_BRCM_TAG_MAX);
- 	if (index > RXCHK_BRCM_TAG_MAX)
+ 	if (index >= RXCHK_BRCM_TAG_MAX)
  		return -ENOSPC;
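[Note on the one-character fix above: find_first_zero_bit() returns its size argument (here RXCHK_BRCM_TAG_MAX) when no zero bit exists, so the old '>' comparison let that sentinel value through and an out-of-range index could be treated as a free filter slot. The general pattern, for reference:

	index = find_first_zero_bit(bitmap, MAX);
	if (index >= MAX)	/* == MAX means "bitmap full", not a valid slot */
		return -ENOSPC;
]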
/* Location is the classification ID, and index is the position @@@ -2208,9 -2210,6 +2208,9 @@@ static int bcm_sysport_set_rxnfc(struc }
static const struct ethtool_ops bcm_sysport_ethtool_ops = { + .supported_coalesce_params = ETHTOOL_COALESCE_USECS | + ETHTOOL_COALESCE_MAX_FRAMES | + ETHTOOL_COALESCE_USE_ADAPTIVE_RX, .get_drvinfo = bcm_sysport_get_drvinfo, .get_msglevel = bcm_sysport_get_msglvl, .set_msglevel = bcm_sysport_set_msglvl, diff --combined drivers/net/ethernet/broadcom/bnxt/bnxt.c index 4c9696a3978a,c5c8effc0139..663dcf614004 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@@ -70,8 -70,12 +70,8 @@@
#define BNXT_TX_TIMEOUT (5 * HZ)
-static const char version[] = - "Broadcom NetXtreme-C/E driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION "\n"; - MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("Broadcom BCM573xx network driver"); -MODULE_VERSION(DRV_MODULE_VERSION);
#define BNXT_RX_OFFSET (NET_SKB_PAD + NET_IP_ALIGN) #define BNXT_RX_DMA_OFFSET NET_SKB_PAD @@@ -2162,7 -2166,6 +2162,7 @@@ static int __bnxt_poll_work(struct bnx struct tx_cmp *txcmp;
cpr->has_more_work = 0; + cpr->had_work_done = 1; while (1) { int rc;
@@@ -2176,6 -2179,7 +2176,6 @@@ * reading any further. */ dma_rmb(); - cpr->had_work_done = 1; if (TX_CMP_TYPE(txcmp) == CMP_TYPE_TX_L2_CMP) { tx_pkts++; /* return full budget so NAPI will complete. */ @@@ -2392,7 -2396,7 +2392,7 @@@ static int __bnxt_poll_cqs(struct bnxt }
static void __bnxt_poll_cqs_done(struct bnxt *bp, struct bnxt_napi *bnapi, - u64 dbr_type, bool all) + u64 dbr_type) { struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; int i; @@@ -2401,7 -2405,7 +2401,7 @@@ struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[i]; struct bnxt_db_info *db;
- if (cpr2 && (all || cpr2->had_work_done)) { + if (cpr2 && cpr2->had_work_done) { db = &cpr2->cp_db; writeq(db->db_key64 | dbr_type | RING_CMP(cpr2->cp_raw_cons), db->doorbell); @@@ -2424,16 -2428,22 +2424,16 @@@ static int bnxt_poll_p5(struct napi_str if (cpr->has_more_work) { cpr->has_more_work = 0; work_done = __bnxt_poll_cqs(bp, bnapi, budget); - if (cpr->has_more_work) { - __bnxt_poll_cqs_done(bp, bnapi, DBR_TYPE_CQ, false); - return work_done; - } - __bnxt_poll_cqs_done(bp, bnapi, DBR_TYPE_CQ_ARMALL, true); - if (napi_complete_done(napi, work_done)) - BNXT_DB_NQ_ARM_P5(&cpr->cp_db, cpr->cp_raw_cons); - return work_done; } while (1) { cons = RING_CMP(raw_cons); nqcmp = &cpr->nq_desc_ring[CP_RING(cons)][CP_IDX(cons)];
if (!NQ_CMP_VALID(nqcmp, raw_cons)) { - __bnxt_poll_cqs_done(bp, bnapi, DBR_TYPE_CQ_ARMALL, - false); + if (cpr->has_more_work) + break; + + __bnxt_poll_cqs_done(bp, bnapi, DBR_TYPE_CQ_ARMALL); cpr->cp_raw_cons = raw_cons; if (napi_complete_done(napi, work_done)) BNXT_DB_NQ_ARM_P5(&cpr->cp_db, @@@ -2453,17 -2463,16 +2453,17 @@@ cpr2 = cpr->cp_ring_arr[idx]; work_done += __bnxt_poll_work(bp, cpr2, budget - work_done); - cpr->has_more_work = cpr2->has_more_work; + cpr->has_more_work |= cpr2->has_more_work; } else { bnxt_hwrm_handler(bp, (struct tx_cmp *)nqcmp); } raw_cons = NEXT_RAW_CMP(raw_cons); - if (cpr->has_more_work) - break; } - __bnxt_poll_cqs_done(bp, bnapi, DBR_TYPE_CQ, true); - cpr->cp_raw_cons = raw_cons; + __bnxt_poll_cqs_done(bp, bnapi, DBR_TYPE_CQ); + if (raw_cons != cpr->cp_raw_cons) { + cpr->cp_raw_cons = raw_cons; + BNXT_DB_NQ_P5(&cpr->cp_db, raw_cons); + } return work_done; }
@@@ -4161,7 -4170,6 +4161,7 @@@ static int bnxt_hwrm_to_stderr(u32 hwrm case HWRM_ERR_CODE_NO_BUFFER: return -ENOMEM; case HWRM_ERR_CODE_HOT_RESET_PROGRESS: + case HWRM_ERR_CODE_BUSY: return -EAGAIN; case HWRM_ERR_CODE_CMD_NOT_SUPPORTED: return -EOPNOTSUPP; @@@ -5061,8 -5069,10 +5061,8 @@@ vnic_mru return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); }
-static int bnxt_hwrm_vnic_free_one(struct bnxt *bp, u16 vnic_id) +static void bnxt_hwrm_vnic_free_one(struct bnxt *bp, u16 vnic_id) { - u32 rc = 0; - if (bp->vnic_info[vnic_id].fw_vnic_id != INVALID_HW_RING_ID) { struct hwrm_vnic_free_input req = {0};
@@@ -5070,9 -5080,10 +5070,9 @@@ req.vnic_id = cpu_to_le32(bp->vnic_info[vnic_id].fw_vnic_id);
- rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); bp->vnic_info[vnic_id].fw_vnic_id = INVALID_HW_RING_ID; } - return rc; }
static void bnxt_hwrm_vnic_free(struct bnxt *bp) @@@ -5189,13 -5200,14 +5189,13 @@@ static int bnxt_hwrm_ring_grp_alloc(str return rc; }
-static int bnxt_hwrm_ring_grp_free(struct bnxt *bp) +static void bnxt_hwrm_ring_grp_free(struct bnxt *bp) { u16 i; - u32 rc = 0; struct hwrm_ring_grp_free_input req = {0};
if (!bp->grp_info || (bp->flags & BNXT_FLAG_CHIP_P5)) - return 0; + return;
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_GRP_FREE, -1, -1);
@@@ -5206,10 -5218,12 +5206,10 @@@ req.ring_group_id = cpu_to_le32(bp->grp_info[i].fw_grp_id);
- rc = _hwrm_send_message(bp, &req, sizeof(req), - HWRM_CMD_TIMEOUT); + _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); bp->grp_info[i].fw_grp_id = INVALID_HW_RING_ID; } mutex_unlock(&bp->hwrm_cmd_lock); - return rc; }
static int hwrm_ring_alloc_send_msg(struct bnxt *bp, @@@ -5833,7 -5847,8 +5833,7 @@@ bnxt_hwrm_reserve_pf_rings(struct bnxt if (bp->hwrm_spec_code < 0x10601) bp->hw_resc.resv_tx_rings = tx_rings;
- rc = bnxt_hwrm_get_rings(bp); - return rc; + return bnxt_hwrm_get_rings(bp); }
static int @@@ -5854,7 -5869,8 +5854,7 @@@ bnxt_hwrm_reserve_vf_rings(struct bnxt if (rc) return rc;
- rc = bnxt_hwrm_get_rings(bp); - return rc; + return bnxt_hwrm_get_rings(bp); }
static int bnxt_hwrm_reserve_rings(struct bnxt *bp, int tx, int rx, int grp, @@@ -6014,6 -6030,7 +6014,6 @@@ static int bnxt_hwrm_check_vf_rings(str { struct hwrm_func_vf_cfg_input req = {0}; u32 flags; - int rc;
if (!BNXT_NEW_RM(bp)) return 0; @@@ -6030,8 -6047,8 +6030,8 @@@ flags |= FUNC_VF_CFG_REQ_FLAGS_RING_GRP_ASSETS_TEST;
req.flags = cpu_to_le32(flags); - rc = hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); - return rc; + return hwrm_send_message_silent(bp, &req, sizeof(req), + HWRM_CMD_TIMEOUT); }
static int bnxt_hwrm_check_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings, @@@ -6040,6 -6057,7 +6040,6 @@@ { struct hwrm_func_cfg_input req = {0}; u32 flags; - int rc;
__bnxt_hwrm_reserve_pf_rings(bp, &req, tx_rings, rx_rings, ring_grps, cp_rings, stats, vnics); @@@ -6057,8 -6075,8 +6057,8 @@@ }
req.flags = cpu_to_le32(flags); - rc = hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); - return rc; + return hwrm_send_message_silent(bp, &req, sizeof(req), + HWRM_CMD_TIMEOUT); }
static int bnxt_hwrm_check_rings(struct bnxt *bp, int tx_rings, int rx_rings, @@@ -6297,16 -6315,16 +6297,16 @@@ int bnxt_hwrm_set_coal(struct bnxt *bp return rc; }
-static int bnxt_hwrm_stat_ctx_free(struct bnxt *bp) +static void bnxt_hwrm_stat_ctx_free(struct bnxt *bp) { - int rc = 0, i; struct hwrm_stat_ctx_free_input req = {0}; + int i;
if (!bp->bnapi) - return 0; + return;
if (BNXT_CHIP_TYPE_NITRO_A0(bp)) - return 0; + return;
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_STAT_CTX_FREE, -1, -1);
@@@ -6318,13 -6336,14 +6318,13 @@@ if (cpr->hw_stats_ctx_id != INVALID_STATS_CTX_ID) { req.stat_ctx_id = cpu_to_le32(cpr->hw_stats_ctx_id);
- rc = _hwrm_send_message(bp, &req, sizeof(req), - HWRM_CMD_TIMEOUT); + _hwrm_send_message(bp, &req, sizeof(req), + HWRM_CMD_TIMEOUT);
cpr->hw_stats_ctx_id = INVALID_STATS_CTX_ID; } } mutex_unlock(&bp->hwrm_cmd_lock); - return rc; }
static int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp) @@@ -6529,8 -6548,8 +6529,8 @@@ static int bnxt_hwrm_func_backing_store __le64 *pg_dir; u32 flags = 0; u8 *pg_attr; - int i, rc; u32 ena; + int i;
if (!ctx) return 0; @@@ -6617,7 -6636,8 +6617,7 @@@ bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, pg_attr, pg_dir); } req.flags = cpu_to_le32(flags); - rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); - return rc; + return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); }
static int bnxt_alloc_ctx_mem_blk(struct bnxt *bp, @@@ -7321,6 -7341,7 +7321,6 @@@ int bnxt_hwrm_fw_set_time(struct bnxt *
static int bnxt_hwrm_port_qstats(struct bnxt *bp) { - int rc; struct bnxt_pf_info *pf = &bp->pf; struct hwrm_port_qstats_input req = {0};
@@@ -7331,7 -7352,8 +7331,7 @@@ req.port_id = cpu_to_le16(pf->port_id); req.tx_stat_host_addr = cpu_to_le64(bp->hw_tx_port_stats_map); req.rx_stat_host_addr = cpu_to_le64(bp->hw_rx_port_stats_map); - rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); - return rc; + return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); }
static int bnxt_hwrm_port_qstats_ext(struct bnxt *bp) @@@ -7485,6 -7507,7 +7485,6 @@@ static void bnxt_hwrm_resource_free(str static int bnxt_hwrm_set_br_mode(struct bnxt *bp, u16 br_mode) { struct hwrm_func_cfg_input req = {0}; - int rc;
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1); req.fid = cpu_to_le16(0xffff); @@@ -7495,12 -7518,14 +7495,12 @@@ req.evb_mode = FUNC_CFG_REQ_EVB_MODE_VEPA; else return -EINVAL; - rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); - return rc; + return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); }
static int bnxt_hwrm_set_cache_line_size(struct bnxt *bp, int size) { struct hwrm_func_cfg_input req = {0}; - int rc;
if (BNXT_VF(bp) || bp->hwrm_spec_code < 0x10803) return 0; @@@ -7512,7 -7537,8 +7512,7 @@@ if (size == 128) req.options = FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SIZE_128;
- rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); - return rc; + return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); }
static int __bnxt_setup_vnic(struct bnxt *bp, u16 vnic_id) @@@ -8770,7 -8796,6 +8770,7 @@@ static int bnxt_hwrm_if_change(struct b bnxt_free_ctx_mem(bp); kfree(bp->ctx); bp->ctx = NULL; + bnxt_dcb_free(bp); rc = bnxt_fw_init_one(bp); if (rc) { set_bit(BNXT_STATE_ABORT_ERR, &bp->state); @@@ -8866,12 -8891,14 +8866,12 @@@ int bnxt_hwrm_alloc_wol_fltr(struct bnx int bnxt_hwrm_free_wol_fltr(struct bnxt *bp) { struct hwrm_wol_filter_free_input req = {0}; - int rc;
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_WOL_FILTER_FREE, -1, -1); req.port_id = cpu_to_le16(bp->pf.port_id); req.enables = cpu_to_le32(WOL_FILTER_FREE_REQ_ENABLES_WOL_FILTER_ID); req.wol_filter_id = bp->wol_filter_id; - rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); - return rc; + return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); }
static u16 bnxt_hwrm_get_wol_fltrs(struct bnxt *bp, u16 handle) @@@ -10955,13 -10982,13 +10955,13 @@@ static int bnxt_change_mtu(struct net_d struct bnxt *bp = netdev_priv(dev);
  	if (netif_running(dev))
- 		bnxt_close_nic(bp, false, false);
+ 		bnxt_close_nic(bp, true, false);
 
  	dev->mtu = new_mtu;
  	bnxt_set_ring_params(bp);
 
  	if (netif_running(dev))
- 		return bnxt_open_nic(bp, false, false);
+ 		return bnxt_open_nic(bp, true, false);
return 0; } @@@ -11429,8 -11456,6 +11429,8 @@@ static void bnxt_remove_one(struct pci_ bnxt_sriov_disable(bp);
  	bnxt_dl_fw_reporters_destroy(bp, true);
+ 	if (BNXT_PF(bp))
+ 		devlink_port_type_clear(&bp->dl_port);
  	pci_disable_pcie_error_reporting(pdev);
  	unregister_netdev(dev);
  	bnxt_dl_unregister(bp);
@@@ -11730,22 -11755,27 +11730,22 @@@ static int bnxt_init_mac_addr(struct bn
  static int bnxt_pcie_dsn_get(struct bnxt *bp, u8 dsn[])
  {
  	struct pci_dev *pdev = bp->pdev;
- 	int pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_DSN);
- 	u32 dw;
+ 	u64 qword;
 
- 	if (!pos) {
- 		netdev_info(bp->dev, "Unable do read adapter's DSN\n");
+ 	qword = pci_get_dsn(pdev);
+ 	if (!qword) {
+ 		netdev_info(bp->dev, "Unable to read adapter's DSN\n");
  		return -EOPNOTSUPP;
  	}
 
- 	/* DSN (two dw) is at an offset of 4 from the cap pos */
- 	pos += 4;
- 	pci_read_config_dword(pdev, pos, &dw);
- 	put_unaligned_le32(dw, &dsn[0]);
- 	pci_read_config_dword(pdev, pos + 4, &dw);
- 	put_unaligned_le32(dw, &dsn[4]);
+ 	put_unaligned_le64(qword, dsn);
+ 	bp->flags |= BNXT_FLAG_DSN_VALID;
  	return 0;
  }
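[Note on the conversion above: pci_get_dsn(), added to the PCI core in this same development cycle, reads the two little-endian config dwords of the Device Serial Number capability and returns them as one u64 (second dword in the high 32 bits), or 0 if the capability is absent. Writing that value out with put_unaligned_le64() therefore yields the same 8-byte layout the removed dword-by-dword code produced. A minimal sketch of the equivalence, assuming a valid pdev:

	u8 dsn[8];
	u64 qword = pci_get_dsn(pdev);	/* 0 when there is no DSN capability */

	put_unaligned_le64(qword, dsn);	/* same bytes as two LE dword reads */
]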
static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) { - static int version_printed; struct net_device *dev; struct bnxt *bp; int rc, max_irqs; @@@ -11753,6 -11783,9 +11753,6 @@@ if (pci_is_bridge(pdev)) return -ENODEV;
- if (version_printed++ == 0) - pr_info("%s", version); - /* Clear any pending DMA transactions from crash kernel * while loading driver in capture kernel. */ diff --combined drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c index cc807ba6f163,1f67e6729a2c..677bab95b937 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c @@@ -1236,6 -1236,7 +1236,6 @@@ static void bnxt_get_drvinfo(struct net struct bnxt *bp = netdev_priv(dev);
strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver)); - strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version)); strlcpy(info->fw_version, bp->fw_ver_str, sizeof(info->fw_version)); strlcpy(info->bus_info, pci_name(bp->pdev), sizeof(info->bus_info)); info->n_stats = bnxt_get_num_stats(bp); @@@ -2006,8 -2007,8 +2006,8 @@@ int bnxt_flash_package_from_file(struc struct hwrm_nvm_install_update_output *resp = bp->hwrm_cmd_resp_addr; struct hwrm_nvm_install_update_input install = {0}; const struct firmware *fw; - int rc, hwrm_err = 0; u32 item_len; + int rc = 0; u16 index;
bnxt_hwrm_fw_set_time(bp); @@@ -2051,15 -2052,14 +2051,14 @@@ memcpy(kmem, fw->data, fw->size); modify.host_src_addr = cpu_to_le64(dma_handle);
- hwrm_err = hwrm_send_message(bp, &modify, - sizeof(modify), - FLASH_PACKAGE_TIMEOUT); + rc = hwrm_send_message(bp, &modify, sizeof(modify), + FLASH_PACKAGE_TIMEOUT); dma_free_coherent(&bp->pdev->dev, fw->size, kmem, dma_handle); } } release_firmware(fw); - if (rc || hwrm_err) + if (rc) goto err_exit;
if ((install_type & 0xffff) == 0) @@@ -2068,20 -2068,19 +2067,19 @@@ install.install_type = cpu_to_le32(install_type);
mutex_lock(&bp->hwrm_cmd_lock); - hwrm_err = _hwrm_send_message(bp, &install, sizeof(install), - INSTALL_PACKAGE_TIMEOUT); - if (hwrm_err) { + rc = _hwrm_send_message(bp, &install, sizeof(install), + INSTALL_PACKAGE_TIMEOUT); + if (rc) { u8 error_code = ((struct hwrm_err_output *)resp)->cmd_err;
if (resp->error_code && error_code == NVM_INSTALL_UPDATE_CMD_ERR_CODE_FRAG_ERR) { install.flags |= cpu_to_le16( NVM_INSTALL_UPDATE_REQ_FLAGS_ALLOWED_TO_DEFRAG); - hwrm_err = _hwrm_send_message(bp, &install, - sizeof(install), - INSTALL_PACKAGE_TIMEOUT); + rc = _hwrm_send_message(bp, &install, sizeof(install), + INSTALL_PACKAGE_TIMEOUT); } - if (hwrm_err) + if (rc) goto flash_pkg_exit; }
@@@ -2093,7 -2092,7 +2091,7 @@@ flash_pkg_exit: mutex_unlock(&bp->hwrm_cmd_lock); err_exit: - if (hwrm_err == -EACCES) + if (rc == -EACCES) bnxt_print_admin_err(bp); return rc; } @@@ -2606,7 -2605,7 +2604,7 @@@ static int bnxt_set_phys_id(struct net_ struct bnxt_led_cfg *led_cfg; u8 led_state; __le16 duration; - int i, rc; + int i;
if (!bp->num_leds || BNXT_VF(bp)) return -EOPNOTSUPP; @@@ -2632,7 -2631,8 +2630,7 @@@ led_cfg->led_blink_off = duration; led_cfg->led_group_id = bp->leds[i].led_group_id; } - rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); - return rc; + return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); }
static int bnxt_hwrm_selftest_irq(struct bnxt *bp, u16 cmpl_ring) @@@ -3471,12 -3471,6 +3469,12 @@@ void bnxt_ethtool_free(struct bnxt *bp }
const struct ethtool_ops bnxt_ethtool_ops = { + .supported_coalesce_params = ETHTOOL_COALESCE_USECS | + ETHTOOL_COALESCE_MAX_FRAMES | + ETHTOOL_COALESCE_USECS_IRQ | + ETHTOOL_COALESCE_MAX_FRAMES_IRQ | + ETHTOOL_COALESCE_STATS_BLOCK_USECS | + ETHTOOL_COALESCE_USE_ADAPTIVE_RX, .get_link_ksettings = bnxt_get_link_ksettings, .set_link_ksettings = bnxt_set_link_ksettings, .get_pauseparam = bnxt_get_pauseparam, diff --combined drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c index 3da25a2b5cc7,97f90edbc068..75fde0d4d493 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c @@@ -90,6 -90,11 +90,6 @@@
char cxgb4_driver_name[] = KBUILD_MODNAME;
-#ifdef DRV_VERSION -#undef DRV_VERSION -#endif -#define DRV_VERSION "2.0.0-ko" -const char cxgb4_driver_version[] = DRV_VERSION; #define DRV_DESC "Chelsio T4/T5/T6 Network Driver"
#define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \ @@@ -132,6 -137,7 +132,6 @@@ MODULE_DESCRIPTION(DRV_DESC); MODULE_AUTHOR("Chelsio Communications"); MODULE_LICENSE("Dual BSD/GPL"); -MODULE_VERSION(DRV_VERSION); MODULE_DEVICE_TABLE(pci, cxgb4_pci_tbl); MODULE_FIRMWARE(FW4_FNAME); MODULE_FIRMWARE(FW5_FNAME); @@@ -3620,6 -3626,8 +3620,6 @@@ static void cxgb4_mgmt_get_drvinfo(stru struct adapter *adapter = netdev2adap(dev);
strlcpy(info->driver, cxgb4_driver_name, sizeof(info->driver)); - strlcpy(info->version, cxgb4_driver_version, - sizeof(info->version)); strlcpy(info->bus_info, pci_name(adapter->pdev), sizeof(info->bus_info)); } @@@ -5373,12 -5381,11 +5373,11 @@@ static inline bool is_x_10g_port(const static int cfg_queues(struct adapter *adap) { u32 avail_qsets, avail_eth_qsets, avail_uld_qsets; + u32 i, n10g = 0, qidx = 0, n1g = 0; + u32 ncpus = num_online_cpus(); u32 niqflint, neq, num_ulds; struct sge *s = &adap->sge; - u32 i, n10g = 0, qidx = 0; - #ifndef CONFIG_CHELSIO_T4_DCB - int q10g = 0; - #endif + u32 q10g = 0, q1g;
/* Reduce memory usage in kdump environment, disable all offload. */ if (is_kdump_kernel() || (is_uld(adap) && t4_uld_mem_alloc(adap))) { @@@ -5416,44 -5423,50 +5415,50 @@@ n10g += is_x_10g_port(&adap2pinfo(adap, i)->link_cfg);
avail_eth_qsets = min_t(u32, avail_qsets, MAX_ETH_QSETS); + + /* We default to 1 queue per non-10G port and up to # of cores queues + * per 10G port. + */ + if (n10g) + q10g = (avail_eth_qsets - (adap->params.nports - n10g)) / n10g; + + n1g = adap->params.nports - n10g; #ifdef CONFIG_CHELSIO_T4_DCB /* For Data Center Bridging support we need to be able to support up * to 8 Traffic Priorities; each of which will be assigned to its * own TX Queue in order to prevent Head-Of-Line Blocking. */ + q1g = 8; if (adap->params.nports * 8 > avail_eth_qsets) { dev_err(adap->pdev_dev, "DCB avail_eth_qsets=%d < %d!\n", avail_eth_qsets, adap->params.nports * 8); return -ENOMEM; }
- for_each_port(adap, i) { - struct port_info *pi = adap2pinfo(adap, i); + if (adap->params.nports * ncpus < avail_eth_qsets) + q10g = max(8U, ncpus); + else + q10g = max(8U, q10g);
- pi->first_qset = qidx; - pi->nqsets = is_kdump_kernel() ? 1 : 8; - qidx += pi->nqsets; - } - #else /* !CONFIG_CHELSIO_T4_DCB */ - /* We default to 1 queue per non-10G port and up to # of cores queues - * per 10G port. - */ - if (n10g) - q10g = (avail_eth_qsets - (adap->params.nports - n10g)) / n10g; - if (q10g > netif_get_num_default_rss_queues()) - q10g = netif_get_num_default_rss_queues(); + while ((q10g * n10g) > (avail_eth_qsets - n1g * q1g)) + q10g--;
- if (is_kdump_kernel()) + #else /* !CONFIG_CHELSIO_T4_DCB */ + q1g = 1; + q10g = min(q10g, ncpus); + #endif /* !CONFIG_CHELSIO_T4_DCB */ + if (is_kdump_kernel()) { q10g = 1; + q1g = 1; + }
for_each_port(adap, i) { struct port_info *pi = adap2pinfo(adap, i);
pi->first_qset = qidx; - pi->nqsets = is_x_10g_port(&pi->link_cfg) ? q10g : 1; + pi->nqsets = is_x_10g_port(&pi->link_cfg) ? q10g : q1g; qidx += pi->nqsets; } - #endif /* !CONFIG_CHELSIO_T4_DCB */
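[As a worked example of the new queue math above (hypothetical numbers, not taken from the patch): on a non-DCB build with avail_eth_qsets = 32, nports = 4, two 10G ports (n10g = 2, so n1g = 2) and ncpus = 16:

	q10g = (32 - (4 - 2)) / 2 = 15;	/* even split across 10G ports */
	q1g  = 1;			/* one queue set per 1G port */
	q10g = min(q10g, ncpus);	/* = 15, capped at the core count */
	/* qidx after the loop: 2*15 + 2*1 = 32 = avail_eth_qsets */
]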
s->ethqsets = qidx; s->max_ethqsets = qidx; /* MSI-X may lower it later */ @@@ -5465,7 -5478,7 +5470,7 @@@ * capped by the number of available cores. */ num_ulds = adap->num_uld + adap->num_ofld_uld; - i = min_t(u32, MAX_OFLD_QSETS, num_online_cpus()); + i = min_t(u32, MAX_OFLD_QSETS, ncpus); avail_uld_qsets = roundup(i, adap->params.nports); if (avail_qsets < num_ulds * adap->params.nports) { adap->params.offload = 0; @@@ -6073,6 -6086,8 +6078,6 @@@ static int init_one(struct pci_dev *pde int i, err; u32 whoami;
- printk_once(KERN_INFO "%s - version %s\n", DRV_DESC, DRV_VERSION); - err = pci_request_regions(pdev, KBUILD_MODNAME); if (err) { /* Just info, some other driver may have claimed the device. */ diff --combined drivers/net/ethernet/freescale/dpaa/dpaa_eth.c index c0fe305b9d16,ca74a684a904..46039d80bb43 --- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c +++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c @@@ -1,4 -1,5 +1,5 @@@ /* Copyright 2008 - 2016 Freescale Semiconductor Inc. + * Copyright 2020 NXP * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: @@@ -123,7 -124,22 +124,22 @@@ MODULE_PARM_DESC(tx_timeout, "The Tx ti #define FSL_QMAN_MAX_OAL 127
/* Default alignment for start of data in an Rx FD */ + #ifdef CONFIG_DPAA_ERRATUM_A050385 + /* aligning data start to 64 avoids DMA transaction splits, unless the buffer + * is crossing a 4k page boundary + */ + #define DPAA_FD_DATA_ALIGNMENT (fman_has_errata_a050385() ? 64 : 16) + /* aligning to 256 avoids DMA transaction splits caused by 4k page boundary + * crossings; also, all SG fragments except the last must have a size multiple + * of 256 to avoid DMA transaction splits + */ + #define DPAA_A050385_ALIGN 256 + #define DPAA_FD_RX_DATA_ALIGNMENT (fman_has_errata_a050385() ? \ + DPAA_A050385_ALIGN : 16) + #else #define DPAA_FD_DATA_ALIGNMENT 16 + #define DPAA_FD_RX_DATA_ALIGNMENT DPAA_FD_DATA_ALIGNMENT + #endif
/* The DPAA requires 256 bytes reserved and mapped for the SGT */ #define DPAA_SGT_SIZE 256 @@@ -158,8 -174,13 +174,13 @@@ #define DPAA_PARSE_RESULTS_SIZE sizeof(struct fman_prs_result) #define DPAA_TIME_STAMP_SIZE 8 #define DPAA_HASH_RESULTS_SIZE 8 + #ifdef CONFIG_DPAA_ERRATUM_A050385 + #define DPAA_RX_PRIV_DATA_SIZE (DPAA_A050385_ALIGN - (DPAA_PARSE_RESULTS_SIZE\ + + DPAA_TIME_STAMP_SIZE + DPAA_HASH_RESULTS_SIZE)) + #else #define DPAA_RX_PRIV_DATA_SIZE (u16)(DPAA_TX_PRIV_DATA_SIZE + \ dpaa_rx_extra_headroom) + #endif
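[The arithmetic behind the erratum-case DPAA_RX_PRIV_DATA_SIZE above, assuming the parse results structure is its usual 32 bytes (sizeof(struct fman_prs_result)): the private data is sized so that private data + parse results + timestamp + hash fill exactly one 256-byte block, leaving the frame data itself 256-byte aligned:

	DPAA_RX_PRIV_DATA_SIZE = 256 - (32 + 8 + 8) = 208
]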
#define DPAA_ETH_PCD_RXQ_NUM 128
@@@ -180,7 -201,12 +201,12 @@@ static struct dpaa_bp *dpaa_bp_array[BM
#define DPAA_BP_RAW_SIZE 4096
+ #ifdef CONFIG_DPAA_ERRATUM_A050385 + #define dpaa_bp_size(raw_size) (SKB_WITH_OVERHEAD(raw_size) & \ + ~(DPAA_A050385_ALIGN - 1)) + #else #define dpaa_bp_size(raw_size) SKB_WITH_OVERHEAD(raw_size) + #endif
static int dpaa_max_frm;
@@@ -233,20 -259,8 +259,20 @@@ static int dpaa_netdev_init(struct net_ net_dev->features |= net_dev->hw_features; net_dev->vlan_features = net_dev->features;
- memcpy(net_dev->perm_addr, mac_addr, net_dev->addr_len); - memcpy(net_dev->dev_addr, mac_addr, net_dev->addr_len); + if (is_valid_ether_addr(mac_addr)) { + memcpy(net_dev->perm_addr, mac_addr, net_dev->addr_len); + memcpy(net_dev->dev_addr, mac_addr, net_dev->addr_len); + } else { + eth_hw_addr_random(net_dev); + err = priv->mac_dev->change_addr(priv->mac_dev->fman_mac, + (enet_addr_t *)net_dev->dev_addr); + if (err) { + dev_err(dev, "Failed to set random MAC address\n"); + return -EINVAL; + } + dev_info(dev, "Using random MAC address: %pM\n", + net_dev->dev_addr); + }
net_dev->ethtool_ops = &dpaa_ethtool_ops;
@@@ -1204,7 -1218,7 +1230,7 @@@ static int dpaa_eth_init_rx_port(struc buf_prefix_content.pass_prs_result = true; buf_prefix_content.pass_hash_result = true; buf_prefix_content.pass_time_stamp = true; - buf_prefix_content.data_align = DPAA_FD_DATA_ALIGNMENT; + buf_prefix_content.data_align = DPAA_FD_RX_DATA_ALIGNMENT;
rx_p = ¶ms.specific_params.rx_params; rx_p->err_fqid = errq->fqid; @@@ -1674,6 -1688,8 +1700,8 @@@ static u8 rx_csum_offload(const struct return CHECKSUM_NONE; }
+ #define PTR_IS_ALIGNED(x, a) (IS_ALIGNED((unsigned long)(x), (a))) + /* Build a linear skb around the received buffer. * We are guaranteed there is enough room at the end of the data buffer to * accommodate the shared info area of the skb. @@@ -1745,8 -1761,7 +1773,7 @@@ static struct sk_buff *sg_fd_to_skb(con
sg_addr = qm_sg_addr(&sgt[i]); sg_vaddr = phys_to_virt(sg_addr); - WARN_ON(!IS_ALIGNED((unsigned long)sg_vaddr, - SMP_CACHE_BYTES)); + WARN_ON(!PTR_IS_ALIGNED(sg_vaddr, SMP_CACHE_BYTES));
dma_unmap_page(priv->rx_dma_dev, sg_addr, DPAA_BP_RAW_SIZE, DMA_FROM_DEVICE); @@@ -2034,6 -2049,75 +2061,75 @@@ static inline int dpaa_xmit(struct dpaa return 0; }
+ #ifdef CONFIG_DPAA_ERRATUM_A050385 + int dpaa_a050385_wa(struct net_device *net_dev, struct sk_buff **s) + { + struct dpaa_priv *priv = netdev_priv(net_dev); + struct sk_buff *new_skb, *skb = *s; + unsigned char *start, i; + + /* check linear buffer alignment */ + if (!PTR_IS_ALIGNED(skb->data, DPAA_A050385_ALIGN)) + goto workaround; + + /* linear buffers just need to have an aligned start */ + if (!skb_is_nonlinear(skb)) + return 0; + + /* linear data size for nonlinear skbs needs to be aligned */ + if (!IS_ALIGNED(skb_headlen(skb), DPAA_A050385_ALIGN)) + goto workaround; + + for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { + skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; + + /* all fragments need to have aligned start addresses */ + if (!IS_ALIGNED(skb_frag_off(frag), DPAA_A050385_ALIGN)) + goto workaround; + + /* all but last fragment need to have aligned sizes */ + if (!IS_ALIGNED(skb_frag_size(frag), DPAA_A050385_ALIGN) && + (i < skb_shinfo(skb)->nr_frags - 1)) + goto workaround; + } + + return 0; + + workaround: + /* copy all the skb content into a new linear buffer */ + new_skb = netdev_alloc_skb(net_dev, skb->len + DPAA_A050385_ALIGN - 1 + + priv->tx_headroom); + if (!new_skb) + return -ENOMEM; + + /* NET_SKB_PAD bytes already reserved, adding up to tx_headroom */ + skb_reserve(new_skb, priv->tx_headroom - NET_SKB_PAD); + + /* Workaround for DPAA_A050385 requires data start to be aligned */ + start = PTR_ALIGN(new_skb->data, DPAA_A050385_ALIGN); + if (start - new_skb->data != 0) + skb_reserve(new_skb, start - new_skb->data); + + skb_put(new_skb, skb->len); + skb_copy_bits(skb, 0, new_skb->data, skb->len); + skb_copy_header(new_skb, skb); + new_skb->dev = skb->dev; + + /* We move the headroom when we align it so we have to reset the + * network and transport header offsets relative to the new data + * pointer. The checksum offload relies on these offsets. + */ + skb_set_network_header(new_skb, skb_network_offset(skb)); + skb_set_transport_header(new_skb, skb_transport_offset(skb)); + + /* TODO: does timestamping need the result in the old skb? */ + dev_kfree_skb(skb); + *s = new_skb; + + return 0; + } + #endif + static netdev_tx_t dpaa_start_xmit(struct sk_buff *skb, struct net_device *net_dev) { @@@ -2080,6 -2164,14 +2176,14 @@@ nonlinear = skb_is_nonlinear(skb); }
+ #ifdef CONFIG_DPAA_ERRATUM_A050385 + if (unlikely(fman_has_errata_a050385())) { + if (dpaa_a050385_wa(net_dev, &skb)) + goto enomem; + nonlinear = skb_is_nonlinear(skb); + } + #endif + if (nonlinear) { /* Just create a S/G fd based on the skb */ err = skb_to_sg_fd(priv, skb, &fd); @@@ -2753,9 -2845,7 +2857,7 @@@ static inline u16 dpaa_get_headroom(str headroom = (u16)(bl->priv_data_size + DPAA_PARSE_RESULTS_SIZE + DPAA_TIME_STAMP_SIZE + DPAA_HASH_RESULTS_SIZE);
- return DPAA_FD_DATA_ALIGNMENT ? ALIGN(headroom, - DPAA_FD_DATA_ALIGNMENT) : - headroom; + return ALIGN(headroom, DPAA_FD_DATA_ALIGNMENT); }
static int dpaa_eth_probe(struct platform_device *pdev) diff --combined drivers/net/ethernet/freescale/fec_main.c index ce154695f67c,23c5fef2f1ad..c1c267b61647 --- a/drivers/net/ethernet/freescale/fec_main.c +++ b/drivers/net/ethernet/freescale/fec_main.c @@@ -2128,6 -2128,7 +2128,6 @@@ static void fec_enet_get_drvinfo(struc
strlcpy(info->driver, fep->pdev->dev.driver->name, sizeof(info->driver)); - strlcpy(info->version, "Revision: 1.0", sizeof(info->version)); strlcpy(info->bus_info, dev_name(&ndev->dev), sizeof(info->bus_info)); }
@@@ -2528,15 -2529,15 +2528,15 @@@ fec_enet_set_coalesce(struct net_devic return -EINVAL; }
- 	cycle = fec_enet_us_to_itr_clock(ndev, fep->rx_time_itr);
+ 	cycle = fec_enet_us_to_itr_clock(ndev, ec->rx_coalesce_usecs);
  	if (cycle > 0xFFFF) {
  		dev_err(dev, "Rx coalesced usec exceed hardware limitation\n");
  		return -EINVAL;
  	}
 
- 	cycle = fec_enet_us_to_itr_clock(ndev, fep->tx_time_itr);
+ 	cycle = fec_enet_us_to_itr_clock(ndev, ec->tx_coalesce_usecs);
  	if (cycle > 0xFFFF) {
- 		dev_err(dev, "Rx coalesced usec exceed hardware limitation\n");
+ 		dev_err(dev, "Tx coalesced usec exceed hardware limitation\n");
  		return -EINVAL;
  	}
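[Note on the fix above: the old code validated the currently programmed values (fep->rx_time_itr / fep->tx_time_itr) instead of the values being requested (ec->rx_coalesce_usecs / ec->tx_coalesce_usecs), so an out-of-range request could be accepted whenever the current setting happened to be valid; it also corrects the copy-pasted "Rx" in the Tx error message. A user-visible way to exercise the path (hypothetical interface name and value; the exact cutoff depends on the board's ITR clock rate):

	# now correctly rejected when the request overflows the 0xFFFF cycle limit
	ethtool -C eth0 tx-usecs 70000
]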
@@@ -2641,8 -2642,6 +2641,8 @@@ fec_enet_set_wol(struct net_device *nde }
static const struct ethtool_ops fec_enet_ethtool_ops = { + .supported_coalesce_params = ETHTOOL_COALESCE_USECS | + ETHTOOL_COALESCE_MAX_FRAMES, .get_drvinfo = fec_enet_get_drvinfo, .get_regs_len = fec_enet_get_regs_len, .get_regs = fec_enet_get_regs, @@@ -3794,7 -3793,6 +3794,7 @@@ static struct platform_driver fec_drive .name = DRIVER_NAME, .pm = &fec_pm_ops, .of_match_table = fec_dt_ids, + .suppress_bind_attrs = true, }, .id_table = fec_devtype, .probe = fec_probe, diff --combined drivers/net/ethernet/hisilicon/hns3/hns3_enet.c index c54f262ac333,a7f40aa1a0ea..8e04d3909321 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c @@@ -1711,7 -1711,7 +1711,7 @@@ static int hns3_setup_tc(struct net_dev netif_dbg(h, drv, netdev, "setup tc: num_tc=%u\n", tc);
  	return (kinfo->dcb_ops && kinfo->dcb_ops->setup_tc) ?
- 		kinfo->dcb_ops->setup_tc(h, tc, prio_tc) : -EOPNOTSUPP;
+ 		kinfo->dcb_ops->setup_tc(h, tc ? tc : 1, prio_tc) : -EOPNOTSUPP;
  }
static int hns3_nic_setup_tc(struct net_device *dev, enum tc_setup_type type, @@@ -2228,7 -2228,7 +2228,7 @@@ static void hns3_reset_prepare(struct p { struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
- dev_info(&pdev->dev, "hns3 flr prepare\n"); + dev_info(&pdev->dev, "FLR prepare\n"); if (ae_dev && ae_dev->ops && ae_dev->ops->flr_prepare) ae_dev->ops->flr_prepare(ae_dev); } @@@ -2237,7 -2237,7 +2237,7 @@@ static void hns3_reset_done(struct pci_ { struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
- dev_info(&pdev->dev, "hns3 flr done\n"); + dev_info(&pdev->dev, "FLR done\n"); if (ae_dev && ae_dev->ops && ae_dev->ops->flr_done) ae_dev->ops->flr_done(ae_dev); } diff --combined drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c index cdf7f4bdef86,d3b0cd74ecd2..75d0d0fcd69b --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c @@@ -824,8 -824,6 +824,8 @@@ static void hclge_get_mac_stat(struct h static int hclge_parse_func_status(struct hclge_dev *hdev, struct hclge_func_status_cmd *status) { +#define HCLGE_MAC_ID_MASK 0xF + if (!(status->pf_state & HCLGE_PF_STATE_DONE)) return -EINVAL;
@@@ -835,7 -833,6 +835,7 @@@ else hdev->flag &= ~HCLGE_FLAG_MAIN;
+ hdev->hw.mac.mac_id = status->mac_id & HCLGE_MAC_ID_MASK; return 0; }
@@@ -2449,10 -2446,12 +2449,12 @@@ static int hclge_cfg_mac_speed_dup_hw(s
int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex) { + struct hclge_mac *mac = &hdev->hw.mac; int ret;
duplex = hclge_check_speed_dup(duplex, speed); - if (hdev->hw.mac.speed == speed && hdev->hw.mac.duplex == duplex) + if (!mac->support_autoneg && mac->speed == speed && + mac->duplex == duplex) return 0;
ret = hclge_cfg_mac_speed_dup_hw(hdev, speed, duplex); @@@ -3442,7 -3441,7 +3444,7 @@@ static void hclge_do_reset(struct hclge u32 val;
if (hclge_get_hw_reset_stat(handle)) { - dev_info(&pdev->dev, "Hardware reset not finish\n"); + dev_info(&pdev->dev, "hardware reset not finish\n"); dev_info(&pdev->dev, "func_rst_reg:0x%x, global_rst_reg:0x%x\n", hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING), hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG)); @@@ -3451,20 -3450,20 +3453,20 @@@
switch (hdev->reset_type) { case HNAE3_GLOBAL_RESET: + dev_info(&pdev->dev, "global reset requested\n"); val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG); hnae3_set_bit(val, HCLGE_GLOBAL_RESET_BIT, 1); hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val); - dev_info(&pdev->dev, "Global Reset requested\n"); break; case HNAE3_FUNC_RESET: - dev_info(&pdev->dev, "PF Reset requested\n"); + dev_info(&pdev->dev, "PF reset requested\n"); /* schedule again to check later */ set_bit(HNAE3_FUNC_RESET, &hdev->reset_pending); hclge_reset_task_schedule(hdev); break; default: dev_warn(&pdev->dev, - "Unsupported reset type: %d\n", hdev->reset_type); + "unsupported reset type: %d\n", hdev->reset_type); break; } } @@@ -7354,6 -7353,7 +7356,6 @@@ int hclge_add_mc_addr_common(struct hcl return -EINVAL; } memset(&req, 0, sizeof(req)); - hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0); hclge_prepare_mac_addr(&req, addr, true); status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true); if (status) { @@@ -7398,6 -7398,7 +7400,6 @@@ int hclge_rm_mc_addr_common(struct hclg }
memset(&req, 0, sizeof(req)); - hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0); hclge_prepare_mac_addr(&req, addr, true); status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true); if (!status) { @@@ -7617,17 -7618,11 +7619,17 @@@ static int hclge_set_vf_mac(struct hnae }
ether_addr_copy(vport->vf_info.mac, mac_addr); - dev_info(&hdev->pdev->dev, - "MAC of VF %d has been set to %pM, and it will be reinitialized!\n", - vf, mac_addr);
- return hclge_inform_reset_assert_to_vf(vport); + if (test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state)) { + dev_info(&hdev->pdev->dev, + "MAC of VF %d has been set to %pM, and it will be reinitialized!\n", + vf, mac_addr); + return hclge_inform_reset_assert_to_vf(vport); + } + + dev_info(&hdev->pdev->dev, "MAC of VF %d has been set to %pM\n", + vf, mac_addr); + return 0; }
static int hclge_add_mgr_tbl(struct hclge_dev *hdev, @@@ -7750,16 -7745,27 +7752,27 @@@ static int hclge_set_vlan_filter_ctrl(s struct hclge_desc desc; int ret;
- hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_CTRL, false); - + /* read current vlan filter parameter */ + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_CTRL, true); req = (struct hclge_vlan_filter_ctrl_cmd *)desc.data; req->vlan_type = vlan_type; - req->vlan_fe = filter_en ? fe_type : 0; req->vf_id = vf_id;
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, + "failed to get vlan filter config, ret = %d.\n", ret); + return ret; + } + + /* modify and write new config parameter */ + hclge_cmd_reuse_desc(&desc, false); + req->vlan_fe = filter_en ? + (req->vlan_fe | fe_type) : (req->vlan_fe & ~fe_type); + ret = hclge_cmd_send(&hdev->hw, &desc, 1); if (ret) - dev_err(&hdev->pdev->dev, "set vlan filter fail, ret =%d.\n", + dev_err(&hdev->pdev->dev, "failed to set vlan filter, ret = %d.\n", ret);
return ret; @@@ -8277,6 -8283,7 +8290,7 @@@ void hclge_rm_vport_all_vlan_table(stru kfree(vlan); } } + clear_bit(vport->vport_id, hdev->vf_vlan_full); }
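The VLAN-filter control change above replaces a blind register write with a read-modify-write sequence, so enabling or disabling one filter type no longer clobbers the enable bits owned by other types. A minimal sketch of the bit update, assuming fe_type is a single-type mask (helper name illustrative):

	#include <linux/types.h>

	/* Sketch of the read-modify-write step: only the requested filter
	 * type's bits change; all other bits read back from hardware are kept.
	 */
	static u8 vlan_fe_update(u8 current_fe, u8 fe_type, bool filter_en)
	{
		return filter_en ? (current_fe | fe_type) : (current_fe & ~fe_type);
	}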
void hclge_uninit_vport_vlan_table(struct hclge_dev *hdev) @@@ -8493,6 -8500,28 +8507,28 @@@ static int hclge_set_vf_vlan_filter(str } }
+ static void hclge_clear_vf_vlan(struct hclge_dev *hdev) + { + struct hclge_vlan_info *vlan_info; + struct hclge_vport *vport; + int ret; + int vf; + + /* clear port base vlan for all vf */ + for (vf = HCLGE_VF_VPORT_START_NUM; vf < hdev->num_alloc_vport; vf++) { + vport = &hdev->vport[vf]; + vlan_info = &vport->port_base_vlan_cfg.vlan_info; + + ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q), + vport->vport_id, + vlan_info->vlan_tag, true); + if (ret) + dev_err(&hdev->pdev->dev, + "failed to clear vf vlan for vf%d, ret = %d\n", + vf - HCLGE_VF_VPORT_START_NUM, ret); + } + } + int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto, u16 vlan_id, bool is_kill) { @@@ -9080,8 -9109,8 +9116,8 @@@ init_nic_err static int hclge_init_roce_client_instance(struct hnae3_ae_dev *ae_dev, struct hclge_vport *vport) { - struct hnae3_client *client = vport->roce.client; struct hclge_dev *hdev = ae_dev->priv; + struct hnae3_client *client; int rst_cnt; int ret;
@@@ -9902,6 -9931,7 +9938,7 @@@ static void hclge_uninit_ae_dev(struct struct hclge_mac *mac = &hdev->hw.mac;
hclge_reset_vf_rate(hdev); + hclge_clear_vf_vlan(hdev); hclge_misc_affinity_teardown(hdev); hclge_state_uninit(hdev);
@@@ -10256,9 -10286,8 +10293,9 @@@ static int hclge_dfx_reg_fetch_data(str static int hclge_get_dfx_reg_len(struct hclge_dev *hdev, int *len) { u32 dfx_reg_type_num = ARRAY_SIZE(hclge_dfx_bd_offset_list); - int data_len_per_desc, data_len, bd_num, i; + int data_len_per_desc, bd_num, i; int bd_num_list[BD_LIST_MAX_NUM]; + u32 data_len; int ret;
ret = hclge_get_dfx_reg_bd_num(hdev, bd_num_list, dfx_reg_type_num); diff --combined drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c index f199a2383a75,0510d85a7f6a..bd4bbcdde7d1 --- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c @@@ -2002,10 -2002,7 +2002,10 @@@ static enum hclgevf_evt_cause hclgevf_c return HCLGEVF_VECTOR0_EVENT_MBX; }
- dev_dbg(&hdev->pdev->dev, "vector 0 interrupt from unknown source\n"); + /* print other vector0 event source */ + dev_info(&hdev->pdev->dev, + "vector 0 interrupt from unknown source, cmdq_src = %#x\n", + cmdq_stat_reg);
return HCLGEVF_VECTOR0_EVENT_OTHER; } @@@ -2806,6 -2803,9 +2806,9 @@@ static void hclgevf_uninit_hdev(struct { hclgevf_state_uninit(hdev);
+ hclgevf_send_mbx_msg(hdev, HCLGE_MBX_VF_UNINIT, 0, NULL, 0, + false, NULL, 0); + if (test_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state)) { hclgevf_misc_irq_uninit(hdev); hclgevf_uninit_msi(hdev); diff --combined drivers/net/ethernet/mscc/ocelot.c index 06f9d013f807,d3b7373c5961..18e9ffa21cd4 --- a/drivers/net/ethernet/mscc/ocelot.c +++ b/drivers/net/ethernet/mscc/ocelot.c @@@ -442,23 -442,8 +442,23 @@@ void ocelot_adjust_link(struct ocelot * ocelot_port_writel(ocelot_port, DEV_MAC_MODE_CFG_FDX_ENA | mode, DEV_MAC_MODE_CFG);
- if (ocelot->ops->pcs_init) - ocelot->ops->pcs_init(ocelot, port); + /* Disable HDX fast control */ + ocelot_port_writel(ocelot_port, DEV_PORT_MISC_HDX_FAST_DIS, + DEV_PORT_MISC); + + /* SGMII only for now */ + ocelot_port_writel(ocelot_port, PCS1G_MODE_CFG_SGMII_MODE_ENA, + PCS1G_MODE_CFG); + ocelot_port_writel(ocelot_port, PCS1G_SD_CFG_SD_SEL, PCS1G_SD_CFG); + + /* Enable PCS */ + ocelot_port_writel(ocelot_port, PCS1G_CFG_PCS_ENA, PCS1G_CFG); + + /* No aneg on SGMII */ + ocelot_port_writel(ocelot_port, 0, PCS1G_ANEG_CFG); + + /* No loopback */ + ocelot_port_writel(ocelot_port, 0, PCS1G_LB_CFG);
/* Enable MAC module */ ocelot_port_writel(ocelot_port, DEV_MAC_ENA_CFG_RX_ENA | @@@ -1413,7 -1398,7 +1413,7 @@@ void ocelot_bridge_stp_state_set(struc * a source for the other ports. */ for (p = 0; p < ocelot->num_phys_ports; p++) { - if (p == ocelot->cpu || (ocelot->bridge_fwd_mask & BIT(p))) { + if (ocelot->bridge_fwd_mask & BIT(p)) { unsigned long mask = ocelot->bridge_fwd_mask & ~BIT(p);
for (i = 0; i < ocelot->num_phys_ports; i++) { @@@ -1428,10 -1413,18 +1428,10 @@@ } }
- /* Avoid the NPI port from looping back to itself */ - if (p != ocelot->cpu) - mask |= BIT(ocelot->cpu); - ocelot_write_rix(ocelot, mask, ANA_PGID_PGID, PGID_SRC + p); } else { - /* Only the CPU port, this is compatible with link - * aggregation. - */ - ocelot_write_rix(ocelot, - BIT(ocelot->cpu), + ocelot_write_rix(ocelot, 0, ANA_PGID_PGID, PGID_SRC + p); } } @@@ -2183,24 -2176,29 +2183,29 @@@ static int ocelot_init_timestamp(struc return 0; }
- static void ocelot_port_set_mtu(struct ocelot *ocelot, int port, size_t mtu) + /* Configure the maximum SDU (L2 payload) on RX to the value specified in @sdu. + * The length of VLAN tags is accounted for automatically via DEV_MAC_TAGS_CFG. + */ + static void ocelot_port_set_maxlen(struct ocelot *ocelot, int port, size_t sdu) { struct ocelot_port *ocelot_port = ocelot->ports[port]; + int maxlen = sdu + ETH_HLEN + ETH_FCS_LEN; int atop_wm;
- ocelot_port_writel(ocelot_port, mtu, DEV_MAC_MAXLEN_CFG); + ocelot_port_writel(ocelot_port, maxlen, DEV_MAC_MAXLEN_CFG);
/* Set Pause WM hysteresis - * 152 = 6 * mtu / OCELOT_BUFFER_CELL_SZ - * 101 = 4 * mtu / OCELOT_BUFFER_CELL_SZ + * 152 = 6 * maxlen / OCELOT_BUFFER_CELL_SZ + * 101 = 4 * maxlen / OCELOT_BUFFER_CELL_SZ */ ocelot_write_rix(ocelot, SYS_PAUSE_CFG_PAUSE_ENA | SYS_PAUSE_CFG_PAUSE_STOP(101) | SYS_PAUSE_CFG_PAUSE_START(152), SYS_PAUSE_CFG, port);
/* Tail dropping watermark */ - atop_wm = (ocelot->shared_queue_sz - 9 * mtu) / OCELOT_BUFFER_CELL_SZ; - ocelot_write_rix(ocelot, ocelot_wm_enc(9 * mtu), + atop_wm = (ocelot->shared_queue_sz - 9 * maxlen) / + OCELOT_BUFFER_CELL_SZ; + ocelot_write_rix(ocelot, ocelot_wm_enc(9 * maxlen), SYS_ATOP, port); ocelot_write(ocelot, ocelot_wm_enc(atop_wm), SYS_ATOP_TOT_CFG); } @@@ -2229,9 -2227,10 +2234,10 @@@ void ocelot_init_port(struct ocelot *oc DEV_MAC_HDX_CFG);
/* Set Max Length and maximum tags allowed */ - ocelot_port_set_mtu(ocelot, port, VLAN_ETH_FRAME_LEN); + ocelot_port_set_maxlen(ocelot, port, ETH_DATA_LEN); ocelot_port_writel(ocelot_port, DEV_MAC_TAGS_CFG_TAG_ID(ETH_P_8021AD) | DEV_MAC_TAGS_CFG_VLAN_AWR_ENA | + DEV_MAC_TAGS_CFG_VLAN_DBL_AWR_ENA | DEV_MAC_TAGS_CFG_VLAN_LEN_AWR_ENA, DEV_MAC_TAGS_CFG);
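ocelot_port_set_maxlen() now takes the L2 payload size (SDU) and derives the full frame length itself, with VLAN tag overhead handled separately via DEV_MAC_TAGS_CFG. A sketch of the arithmetic, using the standard constants from <linux/if_ether.h> (helper name illustrative):

	#include <linux/if_ether.h>

	/* For the standard 1500-byte MTU:
	 * 1500 (SDU) + 14 (ETH_HLEN) + 4 (ETH_FCS_LEN) = 1518 bytes on the wire.
	 */
	static int maxlen_from_sdu(size_t sdu)
	{
		return sdu + ETH_HLEN + ETH_FCS_LEN;
	}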
@@@ -2300,62 -2299,42 +2306,62 @@@ int ocelot_probe_port(struct ocelot *oc } EXPORT_SYMBOL(ocelot_probe_port);
-void ocelot_set_cpu_port(struct ocelot *ocelot, int cpu, - enum ocelot_tag_prefix injection, - enum ocelot_tag_prefix extraction) +/* Configure and enable the CPU port module, which is a set of queues. + * If @npi contains a valid port index, the CPU port module is connected + * to the Node Processor Interface (NPI). This is the mode through which + * frames can be injected from and extracted to an external CPU, + * over Ethernet. + */ +void ocelot_configure_cpu(struct ocelot *ocelot, int npi, + enum ocelot_tag_prefix injection, + enum ocelot_tag_prefix extraction) { - /* Configure and enable the CPU port. */ + int cpu = ocelot->num_phys_ports; + + /* The unicast destination PGID for the CPU port module is unused */ ocelot_write_rix(ocelot, 0, ANA_PGID_PGID, cpu); + /* Instead set up a multicast destination PGID for traffic copied to + * the CPU. Whitelisted MAC addresses like the port netdevice MAC + * addresses will be copied to the CPU via this PGID. + */ ocelot_write_rix(ocelot, BIT(cpu), ANA_PGID_PGID, PGID_CPU); ocelot_write_gix(ocelot, ANA_PORT_PORT_CFG_RECV_ENA | ANA_PORT_PORT_CFG_PORTID_VAL(cpu), ANA_PORT_PORT_CFG, cpu);
- /* If the CPU port is a physical port, set up the port in Node - * Processor Interface (NPI) mode. This is the mode through which - * frames can be injected from and extracted to an external CPU. - * Only one port can be an NPI at the same time. - */ - if (cpu < ocelot->num_phys_ports) { + if (npi >= 0 && npi < ocelot->num_phys_ports) { - int mtu = VLAN_ETH_FRAME_LEN + OCELOT_TAG_LEN; + int sdu = ETH_DATA_LEN + OCELOT_TAG_LEN;
ocelot_write(ocelot, QSYS_EXT_CPU_CFG_EXT_CPUQ_MSK_M | - QSYS_EXT_CPU_CFG_EXT_CPU_PORT(cpu), + QSYS_EXT_CPU_CFG_EXT_CPU_PORT(npi), QSYS_EXT_CPU_CFG);
if (injection == OCELOT_TAG_PREFIX_SHORT) - mtu += OCELOT_SHORT_PREFIX_LEN; + sdu += OCELOT_SHORT_PREFIX_LEN; else if (injection == OCELOT_TAG_PREFIX_LONG) - mtu += OCELOT_LONG_PREFIX_LEN; + sdu += OCELOT_LONG_PREFIX_LEN;
- ocelot_port_set_mtu(ocelot, npi, mtu); + ocelot_port_set_maxlen(ocelot, cpu, sdu); + + /* Enable NPI port */ + ocelot_write_rix(ocelot, + QSYS_SWITCH_PORT_MODE_INGRESS_DROP_MODE | + QSYS_SWITCH_PORT_MODE_SCH_NEXT_CFG(1) | + QSYS_SWITCH_PORT_MODE_PORT_ENA, + QSYS_SWITCH_PORT_MODE, npi); + /* NPI port Injection/Extraction configuration */ + ocelot_write_rix(ocelot, + SYS_PORT_MODE_INCL_XTR_HDR(extraction) | + SYS_PORT_MODE_INCL_INJ_HDR(injection), + SYS_PORT_MODE, npi); }
- /* CPU port Injection/Extraction configuration */ + /* Enable CPU port module */ ocelot_write_rix(ocelot, QSYS_SWITCH_PORT_MODE_INGRESS_DROP_MODE | QSYS_SWITCH_PORT_MODE_SCH_NEXT_CFG(1) | QSYS_SWITCH_PORT_MODE_PORT_ENA, QSYS_SWITCH_PORT_MODE, cpu); + /* CPU port Injection/Extraction configuration */ ocelot_write_rix(ocelot, SYS_PORT_MODE_INCL_XTR_HDR(extraction) | SYS_PORT_MODE_INCL_INJ_HDR(injection), SYS_PORT_MODE, cpu); @@@ -2365,8 -2344,10 +2371,8 @@@ ANA_PORT_VLAN_CFG_VLAN_AWARE_ENA | ANA_PORT_VLAN_CFG_VLAN_POP_CNT(1), ANA_PORT_VLAN_CFG, cpu); - - ocelot->cpu = cpu; } -EXPORT_SYMBOL(ocelot_set_cpu_port); +EXPORT_SYMBOL(ocelot_configure_cpu);
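A hypothetical call site for the renamed helper, sketching the two modes the comment above describes; the port index and prefix values are illustrative, not taken from an in-tree user:

	/* CPU port module only, no NPI connection: pass a negative index. */
	ocelot_configure_cpu(ocelot, -1,
			     OCELOT_TAG_PREFIX_NONE, OCELOT_TAG_PREFIX_NONE);

	/* Or connect the module to physical port 4 as the NPI port, with a
	 * long injection/extraction prefix:
	 */
	ocelot_configure_cpu(ocelot, 4,
			     OCELOT_TAG_PREFIX_LONG, OCELOT_TAG_PREFIX_LONG);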
int ocelot_init(struct ocelot *ocelot) { @@@ -2518,6 -2499,7 +2524,6 @@@ void ocelot_deinit(struct ocelot *ocelo cancel_delayed_work(&ocelot->stats_work); destroy_workqueue(ocelot->stats_queue); mutex_destroy(&ocelot->stats_lock); - ocelot_ace_deinit(); if (ocelot->ptp_clock) ptp_clock_unregister(ocelot->ptp_clock);
diff --combined drivers/net/ethernet/pensando/ionic/ionic_lif.c index aaf4a40fa98b,c2f5b691e0fa..b903016193df --- a/drivers/net/ethernet/pensando/ionic/ionic_lif.c +++ b/drivers/net/ethernet/pensando/ionic/ionic_lif.c @@@ -84,7 -84,7 +84,7 @@@ static void ionic_link_status_check(str netdev_info(netdev, "Link up - %d Gbps\n", le32_to_cpu(lif->info->status.link_speed) / 1000);
- if (test_bit(IONIC_LIF_UP, lif->state)) { + if (test_bit(IONIC_LIF_F_UP, lif->state)) { netif_tx_wake_all_queues(lif->netdev); netif_carrier_on(netdev); } @@@ -93,12 -93,12 +93,12 @@@
/* carrier off first to avoid watchdog timeout */ netif_carrier_off(netdev); - if (test_bit(IONIC_LIF_UP, lif->state)) + if (test_bit(IONIC_LIF_F_UP, lif->state)) netif_tx_stop_all_queues(netdev); }
link_out: - clear_bit(IONIC_LIF_LINK_CHECK_REQUESTED, lif->state); + clear_bit(IONIC_LIF_F_LINK_CHECK_REQUESTED, lif->state); }
static void ionic_link_status_check_request(struct ionic_lif *lif) @@@ -106,7 -106,7 +106,7 @@@ struct ionic_deferred_work *work;
/* we only need one request outstanding at a time */ - if (test_and_set_bit(IONIC_LIF_LINK_CHECK_REQUESTED, lif->state)) + if (test_and_set_bit(IONIC_LIF_F_LINK_CHECK_REQUESTED, lif->state)) return;
if (in_interrupt()) { @@@ -424,9 -424,8 +424,9 @@@ static int ionic_qcq_alloc(struct ionic ionic_intr_mask_assert(idev->intr_ctrl, new->intr.index, IONIC_INTR_MASK_SET);
- new->intr.cpu = new->intr.index % num_online_cpus(); - if (cpu_online(new->intr.cpu)) + new->intr.cpu = cpumask_local_spread(new->intr.index, + dev_to_node(dev)); + if (new->intr.cpu != -1) cpumask_set_cpu(new->intr.cpu, &new->intr.affinity_mask); } else { @@@ -1094,7 -1093,6 +1094,7 @@@ static int ionic_set_nic_features(struc u64 vlan_flags = IONIC_ETH_HW_VLAN_TX_TAG | IONIC_ETH_HW_VLAN_RX_STRIP | IONIC_ETH_HW_VLAN_RX_FILTER; + u64 old_hw_features; int err;
ctx.cmd.lif_setattr.features = ionic_netdev_features_to_nic(features); @@@ -1102,13 -1100,9 +1102,13 @@@ if (err) return err;
+ old_hw_features = lif->hw_features; lif->hw_features = le64_to_cpu(ctx.cmd.lif_setattr.features & ctx.comp.lif_setattr.features);
+ if ((old_hw_features ^ lif->hw_features) & IONIC_ETH_HW_RX_HASH) + ionic_lif_rss_config(lif, lif->rss_types, NULL, NULL); + if ((vlan_flags & features) && !(vlan_flags & le64_to_cpu(ctx.comp.lif_setattr.features))) dev_info_once(lif->ionic->dev, "NIC is not supporting vlan offload, likely in SmartNIC mode\n"); @@@ -1155,10 -1149,6 +1155,10 @@@ static int ionic_init_nic_features(stru netdev_features_t features; int err;
+ /* no netdev features on the management device */ + if (lif->ionic->is_mgmt_nic) + return 0; + /* set up what we expect to support by default */ features = NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX | @@@ -1366,15 -1356,13 +1366,15 @@@ int ionic_lif_rss_config(struct ionic_l .cmd.lif_setattr = { .opcode = IONIC_CMD_LIF_SETATTR, .attr = IONIC_LIF_ATTR_RSS, - .rss.types = cpu_to_le16(types), .rss.addr = cpu_to_le64(lif->rss_ind_tbl_pa), }, }; unsigned int i, tbl_sz;
- lif->rss_types = types; + if (lif->hw_features & IONIC_ETH_HW_RX_HASH) { + lif->rss_types = types; + ctx.cmd.lif_setattr.rss.types = cpu_to_le16(types); + }
if (key) memcpy(lif->rss_hash_key, key, IONIC_RSS_HASH_KEY_SIZE); @@@ -1590,7 -1578,7 +1590,7 @@@ int ionic_open(struct net_device *netde netif_set_real_num_tx_queues(netdev, lif->nxqs); netif_set_real_num_rx_queues(netdev, lif->nxqs);
- set_bit(IONIC_LIF_UP, lif->state); + set_bit(IONIC_LIF_F_UP, lif->state);
ionic_link_status_check_request(lif); if (netif_carrier_ok(netdev)) @@@ -1610,13 -1598,13 +1610,13 @@@ int ionic_stop(struct net_device *netde struct ionic_lif *lif = netdev_priv(netdev); int err = 0;
- if (!test_bit(IONIC_LIF_UP, lif->state)) { + if (!test_bit(IONIC_LIF_F_UP, lif->state)) { dev_dbg(lif->ionic->dev, "%s: %s state=DOWN\n", __func__, lif->name); return 0; } dev_dbg(lif->ionic->dev, "%s: %s state=UP\n", __func__, lif->name); - clear_bit(IONIC_LIF_UP, lif->state); + clear_bit(IONIC_LIF_F_UP, lif->state);
/* carrier off before disabling queues to avoid watchdog timeout */ netif_carrier_off(netdev); @@@ -1700,7 -1688,7 +1700,7 @@@ static int ionic_set_vf_mac(struct net_ if (!(is_zero_ether_addr(mac) || is_valid_ether_addr(mac))) return -EINVAL;
- down_read(&ionic->vf_op_lock); + down_write(&ionic->vf_op_lock);
if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) { ret = -EINVAL; @@@ -1710,7 -1698,7 +1710,7 @@@ ether_addr_copy(ionic->vfs[vf].macaddr, mac); }
- up_read(&ionic->vf_op_lock); + up_write(&ionic->vf_op_lock); return ret; }
@@@ -1731,7 -1719,7 +1731,7 @@@ static int ionic_set_vf_vlan(struct net if (proto != htons(ETH_P_8021Q)) return -EPROTONOSUPPORT;
- down_read(&ionic->vf_op_lock); + down_write(&ionic->vf_op_lock);
if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) { ret = -EINVAL; @@@ -1742,7 -1730,7 +1742,7 @@@ ionic->vfs[vf].vlanid = vlan; }
- up_read(&ionic->vf_op_lock); + up_write(&ionic->vf_op_lock); return ret; }
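The ionic VF setters above switch from down_read() to down_write(): they modify ionic->vfs[], so they must hold the rwsem exclusively, while query-only paths can keep sharing it. A minimal sketch of that discipline (names and field types illustrative):

	/* Query paths may share the semaphore; anything touching
	 * ionic->vfs[] must hold it exclusively, as the setters now do.
	 */
	static u16 demo_vf_get_vlan(struct ionic *ionic, int vf)
	{
		u16 vlan;

		down_read(&ionic->vf_op_lock);	/* shared: read-only access */
		vlan = ionic->vfs[vf].vlanid;
		up_read(&ionic->vf_op_lock);

		return vlan;
	}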
@@@ -1883,7 -1871,7 +1883,7 @@@ int ionic_reset_queues(struct ionic_li /* Put off the next watchdog timeout */ netif_trans_update(lif->netdev);
- err = ionic_wait_for_bit(lif, IONIC_LIF_QUEUE_RESET); + err = ionic_wait_for_bit(lif, IONIC_LIF_F_QUEUE_RESET); if (err) return err;
@@@ -1893,7 -1881,7 +1893,7 @@@ if (!err && running) ionic_open(lif->netdev);
- clear_bit(IONIC_LIF_QUEUE_RESET, lif->state); + clear_bit(IONIC_LIF_F_QUEUE_RESET, lif->state);
return err; } @@@ -2060,10 -2048,10 +2060,10 @@@ void ionic_lifs_free(struct ionic *ioni
static void ionic_lif_deinit(struct ionic_lif *lif) { - if (!test_bit(IONIC_LIF_INITED, lif->state)) + if (!test_bit(IONIC_LIF_F_INITED, lif->state)) return;
- clear_bit(IONIC_LIF_INITED, lif->state); + clear_bit(IONIC_LIF_F_INITED, lif->state);
ionic_rx_filters_deinit(lif); ionic_lif_rss_deinit(lif); @@@ -2299,7 -2287,7 +2299,7 @@@ static int ionic_lif_init(struct ionic_
lif->rx_copybreak = IONIC_RX_COPYBREAK_DEFAULT;
- set_bit(IONIC_LIF_INITED, lif->state); + set_bit(IONIC_LIF_F_INITED, lif->state);
INIT_WORK(&lif->tx_timeout_work, ionic_tx_timeout_work);
@@@ -2387,12 -2375,6 +2387,12 @@@ int ionic_lifs_register(struct ionic *i { int err;
+ /* the netdev is not registered on the management device, it is + * only used as a vehicle for napi operations on the adminq + */ + if (ionic->is_mgmt_nic) + return 0; + INIT_WORK(&ionic->nb_work, ionic_lif_notify_work);
ionic->nb.notifier_call = ionic_lif_notify; @@@ -2426,9 -2408,6 +2426,9 @@@ void ionic_lifs_unregister(struct ioni * current model, so don't bother searching the * ionic->lif for candidates to unregister */ + if (!ionic->master_lif) + return; + cancel_work_sync(&ionic->master_lif->deferred.work); cancel_work_sync(&ionic->master_lif->tx_timeout_work); if (ionic->master_lif->netdev->reg_state == NETREG_REGISTERED) diff --combined drivers/net/ethernet/sfc/efx.h index da54afaa3c44,95395d67ea2d..66dcab140449 --- a/drivers/net/ethernet/sfc/efx.h +++ b/drivers/net/ethernet/sfc/efx.h @@@ -20,6 -20,7 +20,7 @@@ netdev_tx_t efx_hard_start_xmit(struct struct net_device *net_dev); netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb); void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index); + void efx_xmit_done_single(struct efx_tx_queue *tx_queue); int efx_setup_tc(struct net_device *net_dev, enum tc_setup_type type, void *type_data); extern unsigned int efx_piobuf_size; @@@ -150,6 -151,24 +151,6 @@@ static inline s32 efx_filter_get_rx_ids int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb, u16 rxq_index, u32 flow_id); bool __efx_filter_rfs_expire(struct efx_channel *channel, unsigned int quota); -static inline void efx_filter_rfs_expire(struct work_struct *data) -{ - struct delayed_work *dwork = to_delayed_work(data); - struct efx_channel *channel; - unsigned int time, quota; - - channel = container_of(dwork, struct efx_channel, filter_work); - time = jiffies - channel->rfs_last_expiry; - quota = channel->rfs_filter_count * time / (30 * HZ); - if (quota > 20 && __efx_filter_rfs_expire(channel, min(channel->rfs_filter_count, quota))) - channel->rfs_last_expiry += time; - /* Ensure we do more work eventually even if NAPI poll is not happening */ - schedule_delayed_work(dwork, 30 * HZ); -} -#define efx_filter_rfs_enabled() 1 -#else -static inline void efx_filter_rfs_expire(struct work_struct *data) {} -#define efx_filter_rfs_enabled() 0 #endif
/* RSS contexts */ diff --combined drivers/net/ethernet/sfc/efx_channels.c index d2d738314c50,73d4e39b5b16..c492523b986c --- a/drivers/net/ethernet/sfc/efx_channels.c +++ b/drivers/net/ethernet/sfc/efx_channels.c @@@ -485,23 -485,6 +485,23 @@@ void efx_remove_eventq(struct efx_chann * *************************************************************************/
+#ifdef CONFIG_RFS_ACCEL +static void efx_filter_rfs_expire(struct work_struct *data) +{ + struct delayed_work *dwork = to_delayed_work(data); + struct efx_channel *channel; + unsigned int time, quota; + + channel = container_of(dwork, struct efx_channel, filter_work); + time = jiffies - channel->rfs_last_expiry; + quota = channel->rfs_filter_count * time / (30 * HZ); + if (quota >= 20 && __efx_filter_rfs_expire(channel, min(channel->rfs_filter_count, quota))) + channel->rfs_last_expiry += time; + /* Ensure we do more work eventually even if NAPI poll is not happening */ + schedule_delayed_work(dwork, 30 * HZ); +} +#endif + /* Allocate and initialise a channel structure. */ struct efx_channel * efx_alloc_channel(struct efx_nic *efx, int i, struct efx_channel *old_channel) @@@ -600,6 -583,7 +600,7 @@@ struct efx_channel *efx_copy_channel(co if (tx_queue->channel) tx_queue->channel = channel; tx_queue->buffer = NULL; + tx_queue->cb_page = NULL; memset(&tx_queue->txd, 0, sizeof(tx_queue->txd)); }
@@@ -1183,9 -1167,6 +1184,9 @@@ static int efx_poll(struct napi_struct struct efx_channel *channel = container_of(napi, struct efx_channel, napi_str); struct efx_nic *efx = channel->efx; +#ifdef CONFIG_RFS_ACCEL + unsigned int time; +#endif int spent;
netif_vdbg(efx, intr, efx->net_dev, @@@ -1205,10 -1186,7 +1206,10 @@@
#ifdef CONFIG_RFS_ACCEL /* Perhaps expire some ARFS filters */ - mod_delayed_work(system_wq, &channel->filter_work, 0); + time = jiffies - channel->rfs_last_expiry; + /* Would our quota be >= 20? */ + if (channel->rfs_filter_count * time >= 600 * HZ) + mod_delayed_work(system_wq, &channel->filter_work, 0); #endif
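The NAPI-poll trigger above avoids a division on the fast path: the worker's quota is rfs_filter_count * time / (30 * HZ), so testing quota >= 20 is equivalent to testing rfs_filter_count * time >= 600 * HZ. A sketch of the equivalence (helper name illustrative):

	/* quota = count * time / (30 * HZ), so
	 * quota >= 20  <=>  count * time >= 20 * 30 * HZ == 600 * HZ,
	 * which keeps the per-poll check down to a single multiply.
	 */
	static bool arfs_expiry_due(unsigned int count, unsigned int time)
	{
		return (unsigned long)count * time >= 600 * HZ;
	}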
/* There is no race here; although napi_disable() will diff --combined drivers/net/ethernet/sfc/net_driver.h index 392bd5b7017e,8164f0edcbf0..b836315bac87 --- a/drivers/net/ethernet/sfc/net_driver.h +++ b/drivers/net/ethernet/sfc/net_driver.h @@@ -208,8 -208,6 +208,6 @@@ struct efx_tx_buffer * avoid cache-line ping-pong between the xmit path and the * completion path. * @merge_events: Number of TX merged completion events - * @completed_desc_ptr: Most recent completed pointer - only used with - * timestamping. * @completed_timestamp_major: Top part of the most recent tx timestamp. * @completed_timestamp_minor: Low part of the most recent tx timestamp. * @insert_count: Current insert pointer @@@ -269,7 -267,6 +267,6 @@@ struct efx_tx_queue unsigned int merge_events; unsigned int bytes_compl; unsigned int pkts_compl; - unsigned int completed_desc_ptr; u32 completed_timestamp_major; u32 completed_timestamp_minor;
@@@ -336,7 -333,7 +333,7 @@@ struct efx_rx_buffer struct efx_rx_page_state { dma_addr_t dma_addr;
- unsigned int __pad[0] ____cacheline_aligned; + unsigned int __pad[] ____cacheline_aligned; };
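The __pad[0] to __pad[] change here (and the matching r8152 firmware-header changes further down) converts GNU zero-length arrays to C99 flexible array members. Both occupy no space in sizeof(), but the flexible form lets the compiler diagnose invalid uses. A sketch of the usual allocation pattern with struct_size() from <linux/overflow.h> (struct and helper names illustrative):

	#include <linux/overflow.h>
	#include <linux/slab.h>

	struct demo_hdr {
		u32 len;
		u8 data[];	/* flexible array member, formerly data[0] */
	};

	/* sizeof(struct demo_hdr) excludes data[]; struct_size() adds the
	 * trailing payload with overflow checking.
	 */
	static struct demo_hdr *demo_alloc(size_t payload)
	{
		struct demo_hdr *h;

		h = kmalloc(struct_size(h, data, payload), GFP_KERNEL);
		if (h)
			h->len = payload;
		return h;
	}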
/** diff --combined drivers/net/ethernet/sfc/tx.c index 696a77c20cb7,8aafc54a4684..19b58563cb78 --- a/drivers/net/ethernet/sfc/tx.c +++ b/drivers/net/ethernet/sfc/tx.c @@@ -287,8 -287,9 +287,8 @@@ static int efx_tx_tso_fallback(struct e return PTR_ERR(segments);
dev_consume_skb_any(skb); - skb = segments;
- skb_list_walk_safe(skb, skb, next) { + skb_list_walk_safe(segments, skb, next) { skb_mark_not_on_list(skb); efx_enqueue_skb(tx_queue, skb); } @@@ -534,6 -535,44 +534,44 @@@ netdev_tx_t efx_hard_start_xmit(struct return efx_enqueue_skb(tx_queue, skb); }
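In the TSO fallback above, the GSO segment list is now walked from segments directly rather than first aliasing it through the just-consumed skb. A minimal sketch of the iteration pattern, with the enqueue step stubbed out (function name illustrative):

	#include <linux/skbuff.h>

	static void demo_xmit_segments(struct sk_buff *segments)
	{
		struct sk_buff *skb, *next;

		/* skb_list_walk_safe() keeps a lookahead cursor, so each
		 * segment can be unlinked and handed off while iteration
		 * continues safely.
		 */
		skb_list_walk_safe(segments, skb, next) {
			skb_mark_not_on_list(skb);
			/* ...enqueue the segment on the TX ring... */
		}
	}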
+ void efx_xmit_done_single(struct efx_tx_queue *tx_queue) + { + unsigned int pkts_compl = 0, bytes_compl = 0; + unsigned int read_ptr; + bool finished = false; + + read_ptr = tx_queue->read_count & tx_queue->ptr_mask; + + while (!finished) { + struct efx_tx_buffer *buffer = &tx_queue->buffer[read_ptr]; + + if (!efx_tx_buffer_in_use(buffer)) { + struct efx_nic *efx = tx_queue->efx; + + netif_err(efx, hw, efx->net_dev, + "TX queue %d spurious single TX completion\n", + tx_queue->queue); + efx_schedule_reset(efx, RESET_TYPE_TX_SKIP); + return; + } + + /* Need to check the flag before dequeueing. */ + if (buffer->flags & EFX_TX_BUF_SKB) + finished = true; + efx_dequeue_buffer(tx_queue, buffer, &pkts_compl, &bytes_compl); + + ++tx_queue->read_count; + read_ptr = tx_queue->read_count & tx_queue->ptr_mask; + } + + tx_queue->pkts_compl += pkts_compl; + tx_queue->bytes_compl += bytes_compl; + + EFX_WARN_ON_PARANOID(pkts_compl != 1); + + efx_xmit_done_check_empty(tx_queue); + } + void efx_init_tx_queue_core_txq(struct efx_tx_queue *tx_queue) { struct efx_nic *efx = tx_queue->efx; diff --combined drivers/net/phy/phy_device.c index 7c00b029ebfb,28e3c5c0e3c3..a585faf8b844 --- a/drivers/net/phy/phy_device.c +++ b/drivers/net/phy/phy_device.c @@@ -286,6 -286,8 +286,8 @@@ static int mdio_bus_phy_suspend(struct if (!mdio_bus_phy_may_suspend(phydev)) return 0;
+ phydev->suspended_by_mdio_bus = 1; + return phy_suspend(phydev); }
@@@ -294,9 -296,11 +296,11 @@@ static int mdio_bus_phy_resume(struct d struct phy_device *phydev = to_phy_device(dev); int ret;
- if (!mdio_bus_phy_may_suspend(phydev)) + if (!phydev->suspended_by_mdio_bus) goto no_resume;
+ phydev->suspended_by_mdio_bus = 0; + ret = phy_resume(phydev); if (ret < 0) return ret; @@@ -1931,10 -1935,9 +1935,10 @@@ int genphy_update_link(struct phy_devic
/* The link state is latched low so that momentary link * drops can be detected. Do not double-read the status - * in polling mode to detect such short link drops. + * in polling mode to detect such short link drops unless + * the link was already down. */ - if (!phy_polling_mode(phydev)) { + if (!phy_polling_mode(phydev) || !phydev->link) { status = phy_read(phydev, MII_BMSR); if (status < 0) return status; @@@ -2363,7 -2366,22 +2367,7 @@@ void phy_set_asym_pause(struct phy_devi __ETHTOOL_DECLARE_LINK_MODE_MASK(oldadv);
linkmode_copy(oldadv, phydev->advertising); - - linkmode_clear_bit(ETHTOOL_LINK_MODE_Pause_BIT, - phydev->advertising); - linkmode_clear_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, - phydev->advertising); - - if (rx) { - linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, - phydev->advertising); - linkmode_set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, - phydev->advertising); - } - - if (tx) - linkmode_change_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, - phydev->advertising); + linkmode_set_pause(phydev->advertising, tx, rx);
if (!linkmode_equal(oldadv, phydev->advertising) && phydev->autoneg) @@@ -2396,32 -2414,6 +2400,32 @@@ bool phy_validate_pause(struct phy_devi } EXPORT_SYMBOL(phy_validate_pause);
+/** + * phy_get_pause - resolve negotiated pause modes + * @phydev: phy_device struct + * @tx_pause: pointer to bool to indicate whether transmit pause should be + * enabled. + * @rx_pause: pointer to bool to indicate whether receive pause should be + * enabled. + * + * Resolve and return the flow control modes according to the negotiation + * result. This includes checking that we are operating in full duplex mode. + * See linkmode_resolve_pause() for further details. + */ +void phy_get_pause(struct phy_device *phydev, bool *tx_pause, bool *rx_pause) +{ + if (phydev->duplex != DUPLEX_FULL) { + *tx_pause = false; + *rx_pause = false; + return; + } + + return linkmode_resolve_pause(phydev->advertising, + phydev->lp_advertising, + tx_pause, rx_pause); +} +EXPORT_SYMBOL(phy_get_pause); + static bool phy_drv_supports_irq(struct phy_driver *phydrv) { return phydrv->config_intr && phydrv->ack_interrupt; diff --combined drivers/net/phy/phylink.c index 19db68d74cb4,6e66b8e77ec7..a8eeaabb2d18 --- a/drivers/net/phy/phylink.c +++ b/drivers/net/phy/phylink.c @@@ -181,11 -181,9 +181,11 @@@ static int phylink_parse_fixedlink(stru /* We treat the "pause" and "asym-pause" terminology as * defining the link partner's ability. */ if (fwnode_property_read_bool(fixed_node, "pause")) - pl->link_config.pause |= MLO_PAUSE_SYM; + __set_bit(ETHTOOL_LINK_MODE_Pause_BIT, + pl->link_config.lp_advertising); if (fwnode_property_read_bool(fixed_node, "asym-pause")) - pl->link_config.pause |= MLO_PAUSE_ASYM; + __set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, + pl->link_config.lp_advertising);
if (ret == 0) { desc = fwnode_gpiod_get_index(fixed_node, "link", 0, @@@ -217,11 -215,9 +217,11 @@@ DUPLEX_FULL : DUPLEX_HALF; pl->link_config.speed = prop[2]; if (prop[3]) - pl->link_config.pause |= MLO_PAUSE_SYM; + __set_bit(ETHTOOL_LINK_MODE_Pause_BIT, + pl->link_config.lp_advertising); if (prop[4]) - pl->link_config.pause |= MLO_PAUSE_ASYM; + __set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, + pl->link_config.lp_advertising); } }
@@@ -312,13 -308,11 +312,13 @@@ static int phylink_parse_mode(struct ph phylink_set(pl->supported, 1000baseT_Half); phylink_set(pl->supported, 1000baseT_Full); phylink_set(pl->supported, 1000baseX_Full); + phylink_set(pl->supported, 1000baseKX_Full); phylink_set(pl->supported, 2500baseT_Full); phylink_set(pl->supported, 2500baseX_Full); phylink_set(pl->supported, 5000baseT_Full); phylink_set(pl->supported, 10000baseT_Full); phylink_set(pl->supported, 10000baseKR_Full); + phylink_set(pl->supported, 10000baseKX4_Full); phylink_set(pl->supported, 10000baseCR_Full); phylink_set(pl->supported, 10000baseSR_Full); phylink_set(pl->supported, 10000baseLR_Full); @@@ -340,42 -334,11 +340,42 @@@ "failed to validate link configuration for in-band status\n"); return -EINVAL; } + + /* Check if MAC/PCS also supports Autoneg. */ + pl->link_config.an_enabled = phylink_test(pl->supported, Autoneg); }
return 0; }
+static void phylink_apply_manual_flow(struct phylink *pl, + struct phylink_link_state *state) +{ + /* If autoneg is disabled, pause AN is also disabled */ + if (!state->an_enabled) + state->pause &= ~MLO_PAUSE_AN; + + /* Manual configuration of pause modes */ + if (!(pl->link_config.pause & MLO_PAUSE_AN)) + state->pause = pl->link_config.pause; +} + +static void phylink_resolve_flow(struct phylink_link_state *state) +{ + bool tx_pause, rx_pause; + + state->pause = MLO_PAUSE_NONE; + if (state->duplex == DUPLEX_FULL) { + linkmode_resolve_pause(state->advertising, + state->lp_advertising, + &tx_pause, &rx_pause); + if (tx_pause) + state->pause |= MLO_PAUSE_TX; + if (rx_pause) + state->pause |= MLO_PAUSE_RX; + } +} + static void phylink_mac_config(struct phylink *pl, const struct phylink_link_state *state) { @@@ -424,45 -387,49 +424,45 @@@ static void phylink_mac_pcs_get_state(s /* The fixed state is... fixed except for the link state, * which may be determined by a GPIO or a callback. */ -static void phylink_get_fixed_state(struct phylink *pl, struct phylink_link_state *state) +static void phylink_get_fixed_state(struct phylink *pl, + struct phylink_link_state *state) { *state = pl->link_config; if (pl->get_fixed_state) pl->get_fixed_state(pl->netdev, state); else if (pl->link_gpio) state->link = !!gpiod_get_value_cansleep(pl->link_gpio); + + phylink_resolve_flow(state); }
-/* Flow control is resolved according to our and the link partners - * advertisements using the following drawn from the 802.3 specs: - * Local device Link partner - * Pause AsymDir Pause AsymDir Result - * 1 X 1 X TX+RX - * 0 1 1 1 TX - * 1 1 0 1 RX - */ -static void phylink_resolve_flow(struct phylink *pl, - struct phylink_link_state *state) +static void phylink_mac_initial_config(struct phylink *pl) { - int new_pause = 0; + struct phylink_link_state link_state;
- if (pl->link_config.pause & MLO_PAUSE_AN) { - int pause = 0; + switch (pl->cur_link_an_mode) { + case MLO_AN_PHY: + link_state = pl->phy_state; + break;
- if (phylink_test(pl->link_config.advertising, Pause)) - pause |= MLO_PAUSE_SYM; - if (phylink_test(pl->link_config.advertising, Asym_Pause)) - pause |= MLO_PAUSE_ASYM; + case MLO_AN_FIXED: + phylink_get_fixed_state(pl, &link_state); + break;
- pause &= state->pause; + case MLO_AN_INBAND: + link_state = pl->link_config; + if (link_state.interface == PHY_INTERFACE_MODE_SGMII) + link_state.pause = MLO_PAUSE_NONE; + break;
- if (pause & MLO_PAUSE_SYM) - new_pause = MLO_PAUSE_TX | MLO_PAUSE_RX; - else if (pause & MLO_PAUSE_ASYM) - new_pause = state->pause & MLO_PAUSE_SYM ? - MLO_PAUSE_TX : MLO_PAUSE_RX; - } else { - new_pause = pl->link_config.pause & MLO_PAUSE_TXRX_MASK; + default: /* can't happen */ + return; }
- state->pause &= ~MLO_PAUSE_TXRX_MASK; - state->pause |= new_pause; + link_state.link = false; + + phylink_apply_manual_flow(pl, &link_state); + phylink_mac_config(pl, &link_state); }
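phylink_resolve_flow() now delegates to linkmode_resolve_pause(), whose behaviour matches the 802.3 table from the comment deleted above. A truth-table sketch of that resolution (helper name illustrative, logic verified against the table):

	/* Local Pause/AsymDir  Partner Pause/AsymDir  Result (local view)
	 *       1 / x                1 / x            TX+RX
	 *       0 / 1                1 / 1            TX
	 *       1 / 1                0 / 1            RX
	 */
	static void resolve_pause(bool lp, bool la, bool pp, bool pa,
				  bool *tx, bool *rx)
	{
		*tx = (lp && pp) || (!lp && la && pp && pa);
		*rx = (lp && pp) || (lp && la && !pp && pa);
	}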
static const char *phylink_pause_to_str(int pause) @@@ -485,11 -452,8 +485,11 @@@ static void phylink_mac_link_up(struct struct net_device *ndev = pl->netdev;
pl->cur_interface = link_state.interface; - pl->ops->mac_link_up(pl->config, pl->cur_link_an_mode, - pl->cur_interface, pl->phydev); + pl->ops->mac_link_up(pl->config, pl->phydev, + pl->cur_link_an_mode, pl->cur_interface, + link_state.speed, link_state.duplex, + !!(link_state.pause & MLO_PAUSE_TX), + !!(link_state.pause & MLO_PAUSE_RX));
if (ndev) netif_carrier_on(ndev); @@@ -529,7 -493,7 +529,7 @@@ static void phylink_resolve(struct work switch (pl->cur_link_an_mode) { case MLO_AN_PHY: link_state = pl->phy_state; - phylink_resolve_flow(pl, &link_state); + phylink_apply_manual_flow(pl, &link_state); phylink_mac_config_up(pl, &link_state); break;
@@@ -551,12 -515,10 +551,12 @@@ link_state.interface = pl->phy_state.interface;
/* If we have a PHY, we need to update with - * the pause mode bits. */ - link_state.pause |= pl->phy_state.pause; - phylink_resolve_flow(pl, &link_state); + * the PHY flow control bits. */ + link_state.pause = pl->phy_state.pause; + phylink_apply_manual_flow(pl, &link_state); phylink_mac_config(pl, &link_state); + } else { + phylink_apply_manual_flow(pl, &link_state); } break; } @@@ -743,18 -705,15 +743,18 @@@ static void phylink_phy_change(struct p bool do_carrier) { struct phylink *pl = phydev->phylink; + bool tx_pause, rx_pause; + + phy_get_pause(phydev, &tx_pause, &rx_pause);
mutex_lock(&pl->state_mutex); pl->phy_state.speed = phydev->speed; pl->phy_state.duplex = phydev->duplex; pl->phy_state.pause = MLO_PAUSE_NONE; - if (phydev->pause) - pl->phy_state.pause |= MLO_PAUSE_SYM; - if (phydev->asym_pause) - pl->phy_state.pause |= MLO_PAUSE_ASYM; + if (tx_pause) + pl->phy_state.pause |= MLO_PAUSE_TX; + if (rx_pause) + pl->phy_state.pause |= MLO_PAUSE_RX; pl->phy_state.interface = phydev->interface; pl->phy_state.link = up; mutex_unlock(&pl->state_mutex); @@@ -802,8 -761,14 +802,14 @@@ static int phylink_bringup_phy(struct p config.interface = interface;
ret = phylink_validate(pl, supported, &config); - if (ret) + if (ret) { + phylink_warn(pl, "validation of %s with support %*pb and advertisement %*pb failed: %d\n", + phy_modes(config.interface), + __ETHTOOL_LINK_MODE_MASK_NBITS, phy->supported, + __ETHTOOL_LINK_MODE_MASK_NBITS, config.advertising, + ret); return ret; + }
phy->phylink = pl; phy->phy_link_change = phylink_phy_change; @@@ -818,9 -783,6 +824,9 @@@ mutex_lock(&pl->state_mutex); pl->phydev = phy; pl->phy_state.interface = interface; + pl->phy_state.pause = MLO_PAUSE_NONE; + pl->phy_state.speed = SPEED_UNKNOWN; + pl->phy_state.duplex = DUPLEX_UNKNOWN; linkmode_copy(pl->supported, supported); linkmode_copy(pl->link_config.advertising, config.advertising);
@@@ -1050,7 -1012,8 +1056,7 @@@ void phylink_start(struct phylink *pl * a fixed-link to start with the correct parameters, and also * ensures that we set the appropriate advertisement for Serdes links. */ - phylink_resolve_flow(pl, &pl->link_config); - phylink_mac_config(pl, &pl->link_config); + phylink_mac_initial_config(pl);
/* Restart autonegotiation if using 802.3z to ensure that the link * parameters are properly negotiated. This is necessary for DSA @@@ -1416,9 -1379,6 +1422,9 @@@ int phylink_ethtool_set_pauseparam(stru
ASSERT_RTNL();
+ if (pl->cur_link_an_mode == MLO_AN_FIXED) + return -EOPNOTSUPP; + if (!phylink_test(pl->supported, Pause) && !phylink_test(pl->supported, Asym_Pause)) return -EOPNOTSUPP; @@@ -1427,8 -1387,8 +1433,8 @@@ !pause->autoneg && pause->rx_pause != pause->tx_pause) return -EINVAL;
- config->pause &= ~(MLO_PAUSE_AN | MLO_PAUSE_TXRX_MASK); - + mutex_lock(&pl->state_mutex); + config->pause = 0; if (pause->autoneg) config->pause |= MLO_PAUSE_AN; if (pause->rx_pause) @@@ -1436,22 -1396,6 +1442,22 @@@ if (pause->tx_pause) config->pause |= MLO_PAUSE_TX;
+ /* + * See the comments for linkmode_set_pause(), wrt the deficiencies + * with the current implementation. A solution to this issue would + * be: + * ethtool Local device + * rx tx Pause AsymDir + * 0 0 0 0 + * 1 0 1 1 + * 0 1 0 1 + * 1 1 1 1 + * and then use the ethtool rx/tx enablement status to mask the + * rx/tx pause resolution. + */ + linkmode_set_pause(config->advertising, pause->tx_pause, + pause->rx_pause); + /* If we have a PHY, phylib will call our link state function if the * mode has changed, which will trigger a resolve and update the MAC * configuration. @@@ -1461,10 -1405,19 +1467,10 @@@ pause->tx_pause); } else if (!test_bit(PHYLINK_DISABLE_STOPPED, &pl->phylink_disable_state)) { - switch (pl->cur_link_an_mode) { - case MLO_AN_FIXED: - /* Should we allow fixed links to change against the config? */ - phylink_resolve_flow(pl, config); - phylink_mac_config(pl, config); - break; - - case MLO_AN_INBAND: - phylink_mac_config(pl, config); - phylink_mac_an_restart(pl); - break; - } + phylink_mac_config(pl, &pl->link_config); + phylink_mac_an_restart(pl); } + mutex_unlock(&pl->state_mutex);
return 0; } @@@ -1556,14 -1509,13 +1562,14 @@@ static int phylink_mii_emul_read(unsign struct phylink_link_state *state) { struct fixed_phy_status fs; + unsigned long *lpa = state->lp_advertising; int val;
fs.link = state->link; fs.speed = state->speed; fs.duplex = state->duplex; - fs.pause = state->pause & MLO_PAUSE_SYM; - fs.asym_pause = state->pause & MLO_PAUSE_ASYM; + fs.pause = test_bit(ETHTOOL_LINK_MODE_Pause_BIT, lpa); + fs.asym_pause = test_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, lpa);
val = swphy_read_reg(reg, &fs); if (reg == MII_BMSR) { @@@ -1868,7 -1820,7 +1874,7 @@@ static int phylink_sfp_config(struct ph
if (changed && !test_bit(PHYLINK_DISABLE_STOPPED, &pl->phylink_disable_state)) - phylink_mac_config(pl, &pl->link_config); + phylink_mac_initial_config(pl);
return ret; } diff --combined drivers/net/usb/r8152.c index f27fdd6ab86f,95b19ce96513..8f8d9883d363 --- a/drivers/net/usb/r8152.c +++ b/drivers/net/usb/r8152.c @@@ -891,7 -891,7 +891,7 @@@ struct fw_block struct fw_header { u8 checksum[32]; char version[RTL_VER_SIZE]; - struct fw_block blocks[0]; + struct fw_block blocks[]; } __packed;
/** @@@ -930,7 -930,7 +930,7 @@@ struct fw_mac __le32 reserved; __le16 fw_ver_reg; u8 fw_ver_data; - char info[0]; + char info[]; } __packed;
/** @@@ -982,7 -982,7 +982,7 @@@ struct fw_phy_nc __le16 bp_start; __le16 bp_num; __le16 bp[4]; - char info[0]; + char info[]; } __packed;
enum rtl_fw_type { @@@ -1948,6 -1948,29 +1948,6 @@@ drop } }
-/* msdn_giant_send_check() - * According to the document of microsoft, the TCP Pseudo Header excludes the - * packet length for IPv6 TCP large packets. - */ -static int msdn_giant_send_check(struct sk_buff *skb) -{ - const struct ipv6hdr *ipv6h; - struct tcphdr *th; - int ret; - - ret = skb_cow_head(skb, 0); - if (ret) - return ret; - - ipv6h = ipv6_hdr(skb); - th = tcp_hdr(skb); - - th->check = 0; - th->check = ~tcp_v6_check(0, &ipv6h->saddr, &ipv6h->daddr, 0); - - return ret; -} - static inline void rtl_tx_vlan_tag(struct tx_desc *desc, struct sk_buff *skb) { if (skb_vlan_tag_present(skb)) { @@@ -1993,11 -2016,10 +1993,11 @@@ static int r8152_tx_csum(struct r8152 * break;
case htons(ETH_P_IPV6): - if (msdn_giant_send_check(skb)) { + if (skb_cow_head(skb, 0)) { ret = TX_CSUM_TSO; goto unavailable; } + tcp_v6_gso_csum_prep(skb); opts1 |= GTSENDV6; break;
@@@ -3199,6 -3221,8 +3199,8 @@@ static u16 r8153_phy_status(struct r815 }
msleep(20); + if (test_bit(RTL8152_UNPLUG, &tp->flags)) + break; }
return data; @@@ -5380,7 -5404,10 +5382,10 @@@ static void r8153_init(struct r8152 *tp if (ocp_read_word(tp, MCU_TYPE_PLA, PLA_BOOT_CTRL) & AUTOLOAD_DONE) break; + msleep(20); + if (test_bit(RTL8152_UNPLUG, &tp->flags)) + break; }
data = r8153_phy_status(tp, 0); @@@ -5517,7 -5544,10 +5522,10 @@@ static void r8153b_init(struct r8152 *t if (ocp_read_word(tp, MCU_TYPE_PLA, PLA_BOOT_CTRL) & AUTOLOAD_DONE) break; + msleep(20); + if (test_bit(RTL8152_UNPLUG, &tp->flags)) + break; }
data = r8153_phy_status(tp, 0); @@@ -6345,7 -6375,6 +6353,7 @@@ static int rtl8152_set_ringparam(struc }
static const struct ethtool_ops ops = { + .supported_coalesce_params = ETHTOOL_COALESCE_USECS, .get_drvinfo = rtl8152_get_drvinfo, .get_link = ethtool_op_get_link, .nway_reset = rtl8152_nway_reset, diff --combined drivers/net/wireless/mediatek/mt76/dma.c index e5dd7080e88e,1847f55e199b..75e659774e07 --- a/drivers/net/wireless/mediatek/mt76/dma.c +++ b/drivers/net/wireless/mediatek/mt76/dma.c @@@ -132,11 -132,6 +132,11 @@@ mt76_dma_sync_idx(struct mt76_dev *dev writel(q->ndesc, &q->regs->ring_size); q->head = readl(&q->regs->dma_idx); q->tail = q->head; +} + +static void +mt76_dma_kick_queue(struct mt76_dev *dev, struct mt76_queue *q) +{ writel(q->head, &q->regs->cpu_idx); }
@@@ -146,7 -141,7 +146,7 @@@ mt76_dma_tx_cleanup(struct mt76_dev *de struct mt76_sw_queue *sq = &dev->q_tx[qid]; struct mt76_queue *q = sq->q; struct mt76_queue_entry entry; - unsigned int n_swq_queued[4] = {}; + unsigned int n_swq_queued[8] = {}; unsigned int n_queued = 0; bool wake = false; int i, last; @@@ -183,25 -178,15 +183,25 @@@ spin_lock_bh(&q->lock);
q->queued -= n_queued; - for (i = 0; i < ARRAY_SIZE(n_swq_queued); i++) { + for (i = 0; i < 4; i++) { if (!n_swq_queued[i]) continue;
dev->q_tx[i].swq_queued -= n_swq_queued[i]; }
- if (flush) + /* ext PHY */ + for (i = 0; i < 4; i++) { + if (!n_swq_queued[4 + i]) + continue; + + dev->q_tx[__MT_TXQ_MAX + i].swq_queued -= n_swq_queued[4 + i]; + } + + if (flush) { mt76_dma_sync_idx(dev, q); + mt76_dma_kick_queue(dev, q); + }
wake = wake && q->stopped && qid < IEEE80211_NUM_ACS && q->queued < q->ndesc - 8; @@@ -253,9 -238,7 +253,9 @@@ mt76_dma_dequeue(struct mt76_dev *dev, if (!q->queued) return NULL;
- if (!flush && !(q->desc[idx].ctrl & cpu_to_le32(MT_DMA_CTL_DMA_DONE))) + if (flush) + q->desc[idx].ctrl |= cpu_to_le32(MT_DMA_CTL_DMA_DONE); + else if (!(q->desc[idx].ctrl & cpu_to_le32(MT_DMA_CTL_DMA_DONE))) return NULL;
q->tail = (q->tail + 1) % q->ndesc; @@@ -264,6 -247,12 +264,6 @@@ return mt76_dma_get_buf(dev, q, idx, len, info, more); }
-static void -mt76_dma_kick_queue(struct mt76_dev *dev, struct mt76_queue *q) -{ - writel(q->head, &q->regs->cpu_idx); -} - static int mt76_dma_tx_queue_skb_raw(struct mt76_dev *dev, enum mt76_txq_id qid, struct sk_buff *skb, u32 tx_info) @@@ -272,13 -261,10 +272,13 @@@ struct mt76_queue_buf buf; dma_addr_t addr;
+ if (q->queued + 1 >= q->ndesc - 1) + goto error; + addr = dma_map_single(dev->dev, skb->data, skb->len, DMA_TO_DEVICE); if (unlikely(dma_mapping_error(dev->dev, addr))) - return -ENOMEM; + goto error;
buf.addr = addr; buf.len = skb->len; @@@ -289,10 -275,6 +289,10 @@@ spin_unlock_bh(&q->lock);
return 0; + +error: + dev_kfree_skb(skb); + return -ENOMEM; }
static int @@@ -304,7 -286,6 +304,7 @@@ mt76_dma_tx_queue_skb(struct mt76_dev * struct mt76_tx_info tx_info = { .skb = skb, }; + struct ieee80211_hw *hw; int len, n = 0, ret = -ENOMEM; struct mt76_queue_entry e; struct mt76_txwi_cache *t; @@@ -314,8 -295,7 +314,8 @@@
t = mt76_get_txwi(dev); if (!t) { - ieee80211_free_txskb(dev->hw, skb); + hw = mt76_tx_status_get_hw(dev, skb); + ieee80211_free_txskb(hw, skb); return -ENOMEM; } txwi = mt76_get_txwi_ptr(dev, t); @@@ -447,7 -427,7 +447,7 @@@ mt76_dma_rx_reset(struct mt76_dev *dev int i;
for (i = 0; i < q->ndesc; i++) - q->desc[i].ctrl &= ~cpu_to_le32(MT_DMA_CTL_DMA_DONE); + q->desc[i].ctrl = cpu_to_le32(MT_DMA_CTL_DMA_DONE);
mt76_dma_rx_cleanup(dev, q); mt76_dma_sync_idx(dev, q); @@@ -467,10 -447,13 +467,13 @@@ mt76_add_fragment(struct mt76_dev *dev struct page *page = virt_to_head_page(data); int offset = data - page_address(page); struct sk_buff *skb = q->rx_head; + struct skb_shared_info *shinfo = skb_shinfo(skb);
- offset += q->buf_offset; - skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, offset, len, - q->buf_size); + if (shinfo->nr_frags < ARRAY_SIZE(shinfo->frags)) { + offset += q->buf_offset; + skb_add_rx_frag(skb, shinfo->nr_frags, page, offset, len, + q->buf_size); + }
if (more) return; @@@ -548,7 -531,6 +551,7 @@@ mt76_dma_rx_poll(struct napi_struct *na dev = container_of(napi->dev, struct mt76_dev, napi_dev); qid = napi - dev->napi;
+ local_bh_disable(); rcu_read_lock();
do { @@@ -558,7 -540,6 +561,7 @@@ } while (cur && done < budget);
rcu_read_unlock(); + local_bh_enable();
if (done < budget && napi_complete(napi)) dev->drv->rx_poll_complete(dev, qid); @@@ -577,6 -558,7 +580,6 @@@ mt76_dma_init(struct mt76_dev *dev netif_napi_add(&dev->napi_dev, &dev->napi[i], mt76_dma_rx_poll, 64); mt76_dma_rx_fill(dev, &dev->q_rx[i]); - skb_queue_head_init(&dev->rx_skb[i]); napi_enable(&dev->napi[i]); }
diff --combined drivers/s390/net/qeth_core.h index b7d64690ea38,468cada49e72..962be94ed3ca --- a/drivers/s390/net/qeth_core.h +++ b/drivers/s390/net/qeth_core.h @@@ -189,8 -189,6 +189,8 @@@ struct qeth_vnicc_info #define QETH_IQD_MIN_TXQ 2 /* One for ucast, one for mcast. */ #define QETH_IQD_MCAST_TXQ 0 #define QETH_IQD_MIN_UCAST_TXQ 1 + +#define QETH_RX_COPYBREAK (PAGE_SIZE >> 1) #define QETH_IN_BUF_SIZE_DEFAULT 65536 #define QETH_IN_BUF_COUNT_DEFAULT 64 #define QETH_IN_BUF_COUNT_HSDEFAULT 128 @@@ -221,6 -219,9 +221,6 @@@ #define QETH_HIGH_WATERMARK_PACK 5 #define QETH_WATERMARK_PACK_FUZZ 1
-/* large receive scatter gather copy break */ -#define QETH_RX_SG_CB (PAGE_SIZE >> 1) - struct qeth_hdr_layer3 { __u8 id; __u8 flags; @@@ -368,7 -369,7 +368,7 @@@ enum qeth_qdio_info_states struct qeth_buffer_pool_entry { struct list_head list; struct list_head init_list; - void *elements[QDIO_MAX_ELEMENTS_PER_BUFFER]; + struct page *elements[QDIO_MAX_ELEMENTS_PER_BUFFER]; };
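Typing the qeth pool elements as struct page * (rather than void *) removes the virt_to_page() round-trips in the buffer-recycling path: page_count() and __free_page() now take the stored pointer directly, while page_address() still yields the CPU-addressable view when the data is touched. A short sketch of the two views of one page (function name illustrative):

	#include <linux/gfp.h>
	#include <linux/mm.h>

	static int demo_page_cycle(void)
	{
		struct page *pg = alloc_page(GFP_KERNEL);

		if (!pg)
			return -ENOMEM;

		memset(page_address(pg), 0, PAGE_SIZE);	/* CPU view */

		/* page_count(pg) > 1 would mean another holder still
		 * references the page; the recycling check skips such
		 * entries rather than freeing them.
		 */
		__free_page(pg);
		return 0;
	}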
struct qeth_qdio_buffer_pool { @@@ -710,6 -711,7 +710,6 @@@ struct qeth_card_options struct qeth_vnicc_info vnicc; /* VNICC options */ int fake_broadcast; enum qeth_discipline_id layer; - int rx_sg_cb; enum qeth_ipa_isolation_modes isolation; enum qeth_ipa_isolation_modes prev_isolation; int sniffer; @@@ -768,10 -770,6 +768,10 @@@ struct qeth_switch_info __u32 settings; };
+struct qeth_priv { + unsigned int rx_copybreak; +}; + #define QETH_NAPI_WEIGHT NAPI_POLL_WEIGHT
struct qeth_card { @@@ -985,7 -983,7 +985,7 @@@ extern const struct attribute_group qet extern const struct device_type qeth_generic_devtype;
const char *qeth_get_cardname_short(struct qeth_card *); - int qeth_realloc_buffer_pool(struct qeth_card *, int); + int qeth_resize_buffer_pool(struct qeth_card *card, unsigned int count); int qeth_core_load_discipline(struct qeth_card *, enum qeth_discipline_id); void qeth_core_free_discipline(struct qeth_card *);
diff --combined drivers/s390/net/qeth_core_main.c index 37c17ad8ee25,6d3f2f14b414..6caa78d51bd1 --- a/drivers/s390/net/qeth_core_main.c +++ b/drivers/s390/net/qeth_core_main.c @@@ -65,7 -65,6 +65,6 @@@ static struct lock_class_key qdio_out_s static void qeth_issue_next_read_cb(struct qeth_card *card, struct qeth_cmd_buffer *iob, unsigned int data_length); - static void qeth_free_buffer_pool(struct qeth_card *); static int qeth_qdio_establish(struct qeth_card *); static void qeth_free_qdio_queues(struct qeth_card *card); static void qeth_notify_skbs(struct qeth_qdio_out_q *queue, @@@ -212,49 -211,121 +211,121 @@@ void qeth_clear_working_pool_list(struc } EXPORT_SYMBOL_GPL(qeth_clear_working_pool_list);
+ static void qeth_free_pool_entry(struct qeth_buffer_pool_entry *entry) + { + unsigned int i; + + for (i = 0; i < ARRAY_SIZE(entry->elements); i++) { + if (entry->elements[i]) + __free_page(entry->elements[i]); + } + + kfree(entry); + } + + static void qeth_free_buffer_pool(struct qeth_card *card) + { + struct qeth_buffer_pool_entry *entry, *tmp; + + list_for_each_entry_safe(entry, tmp, &card->qdio.init_pool.entry_list, + init_list) { + list_del(&entry->init_list); + qeth_free_pool_entry(entry); + } + } + + static struct qeth_buffer_pool_entry *qeth_alloc_pool_entry(unsigned int pages) + { + struct qeth_buffer_pool_entry *entry; + unsigned int i; + + entry = kzalloc(sizeof(*entry), GFP_KERNEL); + if (!entry) + return NULL; + + for (i = 0; i < pages; i++) { + entry->elements[i] = alloc_page(GFP_KERNEL); + + if (!entry->elements[i]) { + qeth_free_pool_entry(entry); + return NULL; + } + } + + return entry; + } + static int qeth_alloc_buffer_pool(struct qeth_card *card) { - struct qeth_buffer_pool_entry *pool_entry; - void *ptr; - int i, j; + unsigned int buf_elements = QETH_MAX_BUFFER_ELEMENTS(card); + unsigned int i;
QETH_CARD_TEXT(card, 5, "alocpool"); for (i = 0; i < card->qdio.init_pool.buf_count; ++i) { - pool_entry = kzalloc(sizeof(*pool_entry), GFP_KERNEL); - if (!pool_entry) { + struct qeth_buffer_pool_entry *entry; + + entry = qeth_alloc_pool_entry(buf_elements); + if (!entry) { qeth_free_buffer_pool(card); return -ENOMEM; } - for (j = 0; j < QETH_MAX_BUFFER_ELEMENTS(card); ++j) { - ptr = (void *) __get_free_page(GFP_KERNEL); - if (!ptr) { - while (j > 0) - free_page((unsigned long) - pool_entry->elements[--j]); - kfree(pool_entry); - qeth_free_buffer_pool(card); - return -ENOMEM; - } - pool_entry->elements[j] = ptr; - } - list_add(&pool_entry->init_list, - &card->qdio.init_pool.entry_list); + + list_add(&entry->init_list, &card->qdio.init_pool.entry_list); } return 0; }
- int qeth_realloc_buffer_pool(struct qeth_card *card, int bufcnt) + int qeth_resize_buffer_pool(struct qeth_card *card, unsigned int count) { + unsigned int buf_elements = QETH_MAX_BUFFER_ELEMENTS(card); + struct qeth_qdio_buffer_pool *pool = &card->qdio.init_pool; + struct qeth_buffer_pool_entry *entry, *tmp; + int delta = count - pool->buf_count; + LIST_HEAD(entries); + QETH_CARD_TEXT(card, 2, "realcbp");
- /* TODO: steel/add buffers from/to a running card's buffer pool (?) */ - qeth_clear_working_pool_list(card); - qeth_free_buffer_pool(card); - card->qdio.in_buf_pool.buf_count = bufcnt; - card->qdio.init_pool.buf_count = bufcnt; - return qeth_alloc_buffer_pool(card); + /* Defer until queue is allocated: */ + if (!card->qdio.in_q) + goto out; + + /* Remove entries from the pool: */ + while (delta < 0) { + entry = list_first_entry(&pool->entry_list, + struct qeth_buffer_pool_entry, + init_list); + list_del(&entry->init_list); + qeth_free_pool_entry(entry); + + delta++; + } + + /* Allocate additional entries: */ + while (delta > 0) { + entry = qeth_alloc_pool_entry(buf_elements); + if (!entry) { + list_for_each_entry_safe(entry, tmp, &entries, + init_list) { + list_del(&entry->init_list); + qeth_free_pool_entry(entry); + } + + return -ENOMEM; + } + + list_add(&entry->init_list, &entries); + + delta--; + } + + list_splice(&entries, &pool->entry_list); + + out: + card->qdio.in_buf_pool.buf_count = count; + pool->buf_count = count; + return 0; } - EXPORT_SYMBOL_GPL(qeth_realloc_buffer_pool); + EXPORT_SYMBOL_GPL(qeth_resize_buffer_pool);
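qeth_resize_buffer_pool() grows the pool all-or-nothing: new entries are collected on a private list and spliced in only once every allocation has succeeded, so an ENOMEM leaves the running pool untouched. The same staging idiom in miniature, with illustrative names:

	#include <linux/list.h>
	#include <linux/slab.h>

	struct demo_entry {
		struct list_head list;
	};

	static int demo_grow_pool(struct list_head *pool, int delta)
	{
		struct demo_entry *entry, *tmp;
		LIST_HEAD(staging);

		while (delta-- > 0) {
			entry = kzalloc(sizeof(*entry), GFP_KERNEL);
			if (!entry)
				goto rollback;
			list_add(&entry->list, &staging);
		}

		list_splice(&staging, pool);	/* commit only on full success */
		return 0;

	rollback:
		list_for_each_entry_safe(entry, tmp, &staging, list) {
			list_del(&entry->list);
			kfree(entry);
		}
		return -ENOMEM;
	}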
static void qeth_free_qdio_queue(struct qeth_qdio_q *q) { @@@ -742,7 -813,7 +813,7 @@@ static void qeth_issue_next_read_cb(str /* fall through */ default: qeth_clear_ipacmd_list(card); - goto out; + goto err_idx; }
cmd = __ipa_reply(iob); @@@ -795,9 -866,8 +866,9 @@@ out memcpy(&card->seqno.pdu_hdr_ack, QETH_PDU_HEADER_SEQ_NO(iob->data), QETH_SEQ_NO_LENGTH); - qeth_put_cmd(iob); __qeth_issue_next_read(card); +err_idx: + qeth_put_cmd(iob); }
static int qeth_set_thread_start_bit(struct qeth_card *card, @@@ -1171,19 -1241,6 +1242,6 @@@ void qeth_drain_output_queues(struct qe } EXPORT_SYMBOL_GPL(qeth_drain_output_queues);
- static void qeth_free_buffer_pool(struct qeth_card *card) - { - struct qeth_buffer_pool_entry *pool_entry, *tmp; - int i = 0; - list_for_each_entry_safe(pool_entry, tmp, - &card->qdio.init_pool.entry_list, init_list){ - for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(card); ++i) - free_page((unsigned long)pool_entry->elements[i]); - list_del(&pool_entry->init_list); - kfree(pool_entry); - } - } - static int qeth_osa_set_output_queues(struct qeth_card *card, bool single) { unsigned int count = single ? 1 : card->dev->num_tx_queues; @@@ -1205,7 -1262,6 +1263,6 @@@ if (count == 1) dev_info(&card->gdev->dev, "Priority Queueing not supported\n");
- card->qdio.default_out_queue = single ? 0 : QETH_DEFAULT_QUEUE; card->qdio.no_out_queues = count; return 0; } @@@ -1258,6 -1314,7 +1315,6 @@@ static void qeth_set_initial_options(st { card->options.route4.type = NO_ROUTER; card->options.route6.type = NO_ROUTER; - card->options.rx_sg_cb = QETH_RX_SG_CB; card->options.isolation = ISOLATION_MODE_NONE; card->options.cq = QETH_CQ_DISABLED; card->options.layer = QETH_DISCIPLINE_UNDETERMINED; @@@ -1625,16 -1682,17 +1682,16 @@@ static void qeth_set_blkt_defaults(stru } }
-static void qeth_init_tokens(struct qeth_card *card) +static void qeth_idx_init(struct qeth_card *card) { + memset(&card->seqno, 0, sizeof(card->seqno)); + card->token.issuer_rm_w = 0x00010103UL; card->token.cm_filter_w = 0x00010108UL; card->token.cm_connection_w = 0x0001010aUL; card->token.ulp_filter_w = 0x0001010bUL; card->token.ulp_connection_w = 0x0001010dUL; -}
-static void qeth_init_func_level(struct qeth_card *card) -{ switch (card->info.type) { case QETH_CARD_TYPE_IQD: card->info.func_level = QETH_IDX_FUNC_LEVEL_IQD; @@@ -2392,7 -2450,6 +2449,6 @@@ static void qeth_free_qdio_queues(struc return;
qeth_free_cq(card); - cancel_delayed_work_sync(&card->buffer_reclaim_work); for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j) { if (card->qdio.in_q->bufs[j].rx_skb) dev_kfree_skb_any(card->qdio.in_q->bufs[j].rx_skb); @@@ -2574,7 -2631,6 +2630,6 @@@ static struct qeth_buffer_pool_entry *q struct list_head *plh; struct qeth_buffer_pool_entry *entry; int i, free; - struct page *page;
if (list_empty(&card->qdio.in_buf_pool.entry_list)) return NULL; @@@ -2583,7 -2639,7 +2638,7 @@@ entry = list_entry(plh, struct qeth_buffer_pool_entry, list); free = 1; for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(card); ++i) { - if (page_count(virt_to_page(entry->elements[i])) > 1) { + if (page_count(entry->elements[i]) > 1) { free = 0; break; } @@@ -2598,15 -2654,15 +2653,15 @@@ entry = list_entry(card->qdio.in_buf_pool.entry_list.next, struct qeth_buffer_pool_entry, list); for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(card); ++i) { - if (page_count(virt_to_page(entry->elements[i])) > 1) { - page = alloc_page(GFP_ATOMIC); - if (!page) { + if (page_count(entry->elements[i]) > 1) { + struct page *page = alloc_page(GFP_ATOMIC); + + if (!page) return NULL; - } else { - free_page((unsigned long)entry->elements[i]); - entry->elements[i] = page_address(page); - QETH_CARD_STAT_INC(card, rx_sg_alloc_page); - } + + __free_page(entry->elements[i]); + entry->elements[i] = page; + QETH_CARD_STAT_INC(card, rx_sg_alloc_page); } } list_del_init(&entry->list); @@@ -2624,12 -2680,12 +2679,12 @@@ static int qeth_init_input_buffer(struc ETH_HLEN + sizeof(struct ipv6hdr)); if (!buf->rx_skb) - return 1; + return -ENOMEM; }
pool_entry = qeth_find_free_buffer_pool_entry(card); if (!pool_entry) - return 1; + return -ENOBUFS;
/* * since the buffer is accessed only from the input_tasklet @@@ -2642,7 -2698,7 +2697,7 @@@ for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(card); ++i) { buf->buffer->element[i].length = PAGE_SIZE; buf->buffer->element[i].addr = - virt_to_phys(pool_entry->elements[i]); + page_to_phys(pool_entry->elements[i]); if (i == QETH_MAX_BUFFER_ELEMENTS(card) - 1) buf->buffer->element[i].eflags = SBAL_EFLAGS_LAST_ENTRY; else @@@ -2674,10 -2730,15 +2729,15 @@@ static int qeth_init_qdio_queues(struc /* inbound queue */ qdio_reset_buffers(card->qdio.in_q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q); memset(&card->rx, 0, sizeof(struct qeth_rx)); + qeth_initialize_working_pool_list(card); /*give only as many buffers to hardware as we have buffer pool entries*/ - for (i = 0; i < card->qdio.in_buf_pool.buf_count - 1; ++i) - qeth_init_input_buffer(card, &card->qdio.in_q->bufs[i]); + for (i = 0; i < card->qdio.in_buf_pool.buf_count - 1; i++) { + rc = qeth_init_input_buffer(card, &card->qdio.in_q->bufs[i]); + if (rc) + return rc; + } + card->qdio.in_q->next_buf_to_init = card->qdio.in_buf_pool.buf_count - 1; rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT, 0, 0, @@@ -3406,7 -3467,8 +3466,7 @@@ static void qeth_qdio_start_poll(struc { struct qeth_card *card = (struct qeth_card *)card_ptr;
- if (card->dev->flags & IFF_UP) - napi_schedule_irqoff(&card->napi); + napi_schedule_irqoff(&card->napi); }
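qeth_qdio_start_poll() above drops the IFF_UP test and schedules NAPI unconditionally. As a reminder of the calling convention, napi_schedule_irqoff() is the variant for contexts where hardware interrupts are already disabled; a hedged sketch with illustrative names:

	#include <linux/interrupt.h>
	#include <linux/netdevice.h>

	struct demo_priv {
		struct napi_struct napi;
	};

	/* Sketch: in a hard-IRQ handler interrupts are already off, so the
	 * _irqoff variant skips a redundant local_irq_save()/restore() pair.
	 */
	static irqreturn_t demo_isr(int irq, void *dev_id)
	{
		struct demo_priv *priv = dev_id;

		napi_schedule_irqoff(&priv->napi);
		return IRQ_HANDLED;
	}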
int qeth_configure_cq(struct qeth_card *card, enum qeth_cq cq) @@@ -4950,9 -5012,9 +5010,9 @@@ retriable else goto retry; } + qeth_determine_capabilities(card); - qeth_init_tokens(card); - qeth_init_func_level(card); + qeth_idx_init(card);
rc = qeth_idx_activate_read_channel(card); if (rc == -EINTR) { @@@ -5267,7 -5329,6 +5327,7 @@@ static int qeth_extract_skb(struct qeth int *__offset) { struct qdio_buffer_element *element = *__element; + struct qeth_priv *priv = netdev_priv(card->dev); struct qdio_buffer *buffer = qethbuffer->buffer; struct napi_struct *napi = &card->napi; unsigned int linear_len = 0; @@@ -5343,7 -5404,7 +5403,7 @@@ next_packet }
use_rx_sg = (card->options.cq == QETH_CQ_ENABLED) || - (skb_len > card->options.rx_sg_cb && + (skb_len > READ_ONCE(priv->rx_copybreak) && !atomic_read(&card->force_alloc_skb) && !IS_OSN(card));
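rx_copybreak moves from the card-wide options into the new per-netdev qeth_priv, and the RX path reads it with READ_ONCE() because ethtool may rewrite the tunable concurrently. A hedged sketch of the resulting decision (helper name hypothetical; qeth_priv as introduced later in this patch):

#include <linux/compiler.h>

/* Sketch: frames at or below the copybreak are copied into a fresh
 * linear skb; larger ones keep the RX page as a fragment.
 */
static bool qeth_rx_wants_frag(const struct qeth_priv *priv,
			       unsigned int skb_len)
{
	/* READ_ONCE() pairs with the ethtool writer of rx_copybreak */
	return skb_len > READ_ONCE(priv->rx_copybreak);
}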
@@@ -5892,30 -5953,25 +5952,30 @@@ static void qeth_clear_dbf_list(void static struct net_device *qeth_alloc_netdev(struct qeth_card *card) { struct net_device *dev; + struct qeth_priv *priv;
switch (card->info.type) { case QETH_CARD_TYPE_IQD: - dev = alloc_netdev_mqs(0, "hsi%d", NET_NAME_UNKNOWN, + dev = alloc_netdev_mqs(sizeof(*priv), "hsi%d", NET_NAME_UNKNOWN, ether_setup, QETH_MAX_QUEUES, 1); break; case QETH_CARD_TYPE_OSM: - dev = alloc_etherdev(0); + dev = alloc_etherdev(sizeof(*priv)); break; case QETH_CARD_TYPE_OSN: - dev = alloc_netdev(0, "osn%d", NET_NAME_UNKNOWN, ether_setup); + dev = alloc_netdev(sizeof(*priv), "osn%d", NET_NAME_UNKNOWN, + ether_setup); break; default: - dev = alloc_etherdev_mqs(0, QETH_MAX_QUEUES, 1); + dev = alloc_etherdev_mqs(sizeof(*priv), QETH_MAX_QUEUES, 1); }
if (!dev) return NULL;
+ priv = netdev_priv(dev); + priv->rx_copybreak = QETH_RX_COPYBREAK; + dev->ml_priv = card; dev->watchdog_timeo = QETH_TX_TIMEOUT; dev->min_mtu = IS_OSN(card) ? 64 : 576; @@@ -6582,6 -6638,9 +6642,6 @@@ int qeth_open(struct net_device *dev
QETH_CARD_TEXT(card, 4, "qethopen");
- if (qdio_stop_irq(CARD_DDEV(card), 0) < 0) - return -EIO; - card->data.state = CH_STATE_UP; netif_tx_start_all_queues(dev);
@@@ -6631,8 -6690,6 +6691,8 @@@ int qeth_stop(struct net_device *dev }
napi_disable(&card->napi); + qdio_stop_irq(CARD_DDEV(card), 0); + return 0; } EXPORT_SYMBOL_GPL(qeth_stop); diff --combined drivers/s390/net/qeth_l2_main.c index 0bf5e7133229,8fb29371788b..4c8e93132e08 --- a/drivers/s390/net/qeth_l2_main.c +++ b/drivers/s390/net/qeth_l2_main.c @@@ -284,6 -284,7 +284,7 @@@ static void qeth_l2_stop_card(struct qe if (card->state == CARD_STATE_SOFTSETUP) { qeth_clear_ipacmd_list(card); qeth_drain_output_queues(card); + cancel_delayed_work_sync(&card->buffer_reclaim_work); card->state = CARD_STATE_DOWN; }
@@@ -1567,11 -1568,23 +1568,11 @@@ static int qeth_l2_vnicc_makerc(struct return rc; }
-/* generic VNICC request call back control */ -struct _qeth_l2_vnicc_request_cbctl { - struct { - union{ - u32 *sup_cmds; - u32 *timeout; - }; - } result; -}; - /* generic VNICC request call back */ static int qeth_l2_vnicc_request_cb(struct qeth_card *card, struct qeth_reply *reply, unsigned long data) { - struct _qeth_l2_vnicc_request_cbctl *cbctl = - (struct _qeth_l2_vnicc_request_cbctl *) reply->param; struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data; struct qeth_ipacmd_vnicc *rep = &cmd->data.vnicc; u32 sub_cmd = cmd->data.vnicc.hdr.sub_command; @@@ -1584,9 -1597,9 +1585,9 @@@ card->options.vnicc.cur_chars = rep->vnicc_cmds.enabled;
if (sub_cmd == IPA_VNICC_QUERY_CMDS) - *cbctl->result.sup_cmds = rep->data.query_cmds.sup_cmds; + *(u32 *)reply->param = rep->data.query_cmds.sup_cmds; else if (sub_cmd == IPA_VNICC_GET_TIMEOUT) - *cbctl->result.timeout = rep->data.getset_timeout.timeout; + *(u32 *)reply->param = rep->data.getset_timeout.timeout;
return 0; } @@@ -1627,6 -1640,7 +1628,6 @@@ static int qeth_l2_vnicc_query_chars(st static int qeth_l2_vnicc_query_cmds(struct qeth_card *card, u32 vnic_char, u32 *sup_cmds) { - struct _qeth_l2_vnicc_request_cbctl cbctl; struct qeth_cmd_buffer *iob;
QETH_CARD_TEXT(card, 2, "vniccqcm"); @@@ -1637,7 -1651,10 +1638,7 @@@
__ipa_cmd(iob)->data.vnicc.data.query_cmds.vnic_char = vnic_char;
- /* prepare callback control */ - cbctl.result.sup_cmds = sup_cmds; - - return qeth_send_ipa_cmd(card, iob, qeth_l2_vnicc_request_cb, &cbctl); + return qeth_send_ipa_cmd(card, iob, qeth_l2_vnicc_request_cb, sup_cmds); }
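With the _qeth_l2_vnicc_request_cbctl wrapper gone, the caller's result pointer travels directly as reply->param and the callback casts it back to u32 *. The convention in isolation, names hypothetical:

/* Sketch of the reply->param convention: the caller hands over the
 * address where the result belongs, the callback fills it in.
 */
static int sample_vnicc_cb(struct qeth_card *card, struct qeth_reply *reply,
			   unsigned long data)
{
	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *)data;

	*(u32 *)reply->param = cmd->data.vnicc.data.query_cmds.sup_cmds;
	return 0;
}

/* caller side: qeth_send_ipa_cmd(card, iob, sample_vnicc_cb, sup_cmds); */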
/* VNICC enable/disable characteristic request */ @@@ -1661,6 -1678,7 +1662,6 @@@ static int qeth_l2_vnicc_getset_timeout u32 cmd, u32 *timeout) { struct qeth_vnicc_getset_timeout *getset_timeout; - struct _qeth_l2_vnicc_request_cbctl cbctl; struct qeth_cmd_buffer *iob;
QETH_CARD_TEXT(card, 2, "vniccgst"); @@@ -1675,7 -1693,11 +1676,7 @@@ if (cmd == IPA_VNICC_SET_TIMEOUT) getset_timeout->timeout = *timeout;
- /* prepare callback control */ - if (cmd == IPA_VNICC_GET_TIMEOUT) - cbctl.result.timeout = timeout; - - return qeth_send_ipa_cmd(card, iob, qeth_l2_vnicc_request_cb, &cbctl); + return qeth_send_ipa_cmd(card, iob, qeth_l2_vnicc_request_cb, timeout); }
/* set current VNICC flag state; called from sysfs store function */ diff --combined drivers/s390/net/qeth_l3_main.c index 1000e18c1090,82f800d1d7b3..8a803d6c9357 --- a/drivers/s390/net/qeth_l3_main.c +++ b/drivers/s390/net/qeth_l3_main.c @@@ -920,11 -920,9 +920,11 @@@ static int qeth_l3_iqd_read_initial_mac
if (cmd->hdr.return_code) return -EIO; + if (!is_valid_ether_addr(cmd->data.create_destroy_addr.mac_addr)) + return -EADDRNOTAVAIL;
ether_addr_copy(card->dev->dev_addr, - cmd->data.create_destroy_addr.unique_id); + cmd->data.create_destroy_addr.mac_addr); return 0; }
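The read-MAC callback now validates the address before committing it, returning -EADDRNOTAVAIL for an all-zero or multicast address. The same guard as a standalone sketch:

#include <linux/etherdevice.h>

/* Sketch: never copy an unchecked, device-provided MAC into dev_addr. */
static int set_dev_addr_checked(struct net_device *dev, const u8 *mac)
{
	if (!is_valid_ether_addr(mac))	/* all-zero, or multicast bit set */
		return -EADDRNOTAVAIL;

	ether_addr_copy(dev->dev_addr, mac);
	return 0;
}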
@@@ -932,6 -930,7 +932,6 @@@ static int qeth_l3_iqd_read_initial_mac { int rc = 0; struct qeth_cmd_buffer *iob; - struct qeth_ipa_cmd *cmd;
QETH_CARD_TEXT(card, 2, "hsrmac");
@@@ -939,6 -938,9 +939,6 @@@ IPA_DATA_SIZEOF(create_destroy_addr)); if (!iob) return -ENOMEM; - cmd = __ipa_cmd(iob); - *((__u16 *) &cmd->data.create_destroy_addr.unique_id[6]) = - card->info.unique_id;
rc = qeth_send_ipa_cmd(card, iob, qeth_l3_iqd_read_initial_mac_cb, NULL); @@@ -951,7 -953,8 +951,7 @@@ static int qeth_l3_get_unique_id_cb(str struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
if (cmd->hdr.return_code == 0) { - card->info.unique_id = *((__u16 *) - &cmd->data.create_destroy_addr.unique_id[6]); + card->info.unique_id = cmd->data.create_destroy_addr.uid; return 0; }
@@@ -965,6 -968,7 +965,6 @@@ static int qeth_l3_get_unique_id(struc { int rc = 0; struct qeth_cmd_buffer *iob; - struct qeth_ipa_cmd *cmd;
QETH_CARD_TEXT(card, 2, "guniqeid");
@@@ -978,8 -982,10 +978,8 @@@ IPA_DATA_SIZEOF(create_destroy_addr)); if (!iob) return -ENOMEM; - cmd = __ipa_cmd(iob); - *((__u16 *) &cmd->data.create_destroy_addr.unique_id[6]) = - card->info.unique_id;
+ __ipa_cmd(iob)->data.create_destroy_addr.uid = card->info.unique_id; rc = qeth_send_ipa_cmd(card, iob, qeth_l3_get_unique_id_cb, NULL); return rc; } @@@ -1172,6 -1178,7 +1172,7 @@@ static void qeth_l3_stop_card(struct qe qeth_l3_clear_ip_htable(card, 1); qeth_clear_ipacmd_list(card); qeth_drain_output_queues(card); + cancel_delayed_work_sync(&card->buffer_reclaim_work); card->state = CARD_STATE_DOWN; }
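Both unique-id paths stop writing a __u16 into byte 6 of a raw unique_id[] buffer and use the dedicated mac_addr/uid fields instead. A sketch of the layout change; the exact struct shape beyond the fields named in the hunks is an assumption:

#include <linux/types.h>

/* Before (hypothetical reconstruction): an opaque buffer plus a cast. */
struct create_destroy_addr_old {
	__u8 unique_id[8];	/* id lived at *(__u16 *)&unique_id[6] */
};

/* After: named, typed fields - no magic offsets, no aliasing casts. */
struct create_destroy_addr_new {
	__u8  mac_addr[6];
	__u16 uid;
};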
diff --combined include/linux/device.h index 3e40533d2037,fa04dfd22bbc..1311f276f533 --- a/include/linux/device.h +++ b/include/linux/device.h @@@ -798,6 -798,17 +798,17 @@@ static inline struct device_node *dev_o return dev->of_node; }
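The next addition, dev_has_sync_state(), lets the driver core ask whether anyone will act on a sync_state() call before doing the bookkeeping around it. A usage sketch (caller name hypothetical):

/* Sketch: gate the call on the helper, then dispatch to whichever
 * hook exists - the same order the helper itself checks.
 */
static void try_sync_state(struct device *dev)
{
	if (!dev_has_sync_state(dev))
		return;

	if (dev->driver && dev->driver->sync_state)
		dev->driver->sync_state(dev);
	else if (dev->bus && dev->bus->sync_state)
		dev->bus->sync_state(dev);
}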
+ static inline bool dev_has_sync_state(struct device *dev) + { + if (!dev) + return false; + if (dev->driver && dev->driver->sync_state) + return true; + if (dev->bus && dev->bus->sync_state) + return true; + return false; + } + /* * High level routines for use by the bus drivers */ @@@ -817,7 -828,6 +828,7 @@@ extern struct device *device_find_child extern int device_rename(struct device *dev, const char *new_name); extern int device_move(struct device *dev, struct device *new_parent, enum dpm_order dpm_order); +extern int device_change_owner(struct device *dev, kuid_t kuid, kgid_t kgid); extern const char *device_get_devnode(struct device *dev, umode_t *mode, kuid_t *uid, kgid_t *gid, const char **tmp); diff --combined include/linux/inet_diag.h index e4ba25d63913,c91cf2dee12a..ce9ed1c0602f --- a/include/linux/inet_diag.h +++ b/include/linux/inet_diag.h @@@ -2,22 -2,19 +2,17 @@@ #ifndef _INET_DIAG_H_ #define _INET_DIAG_H_ 1
+ #include <net/netlink.h>
  #include <uapi/linux/inet_diag.h>
- struct net;
- struct sock;
  struct inet_hashinfo;
- struct nlattr;
- struct nlmsghdr;
- struct sk_buff;
- struct netlink_callback;
struct inet_diag_handler { void (*dump)(struct sk_buff *skb, struct netlink_callback *cb, - const struct inet_diag_req_v2 *r, - struct nlattr *bc); + const struct inet_diag_req_v2 *r);
- int (*dump_one)(struct sk_buff *in_skb, - const struct nlmsghdr *nlh, + int (*dump_one)(struct netlink_callback *cb, const struct inet_diag_req_v2 *req);
void (*idiag_get_info)(struct sock *sk, @@@ -38,25 -35,18 +33,25 @@@ __u16 idiag_info_size; };
+struct bpf_sk_storage_diag; +struct inet_diag_dump_data { + struct nlattr *req_nlas[__INET_DIAG_REQ_MAX]; +#define inet_diag_nla_bc req_nlas[INET_DIAG_REQ_BYTECODE] +#define inet_diag_nla_bpf_stgs req_nlas[INET_DIAG_REQ_SK_BPF_STORAGES] + + struct bpf_sk_storage_diag *bpf_stg_diag; +}; + struct inet_connection_sock; int inet_sk_diag_fill(struct sock *sk, struct inet_connection_sock *icsk, - struct sk_buff *skb, const struct inet_diag_req_v2 *req, - struct user_namespace *user_ns, - u32 pid, u32 seq, u16 nlmsg_flags, - const struct nlmsghdr *unlh, bool net_admin); + struct sk_buff *skb, struct netlink_callback *cb, + const struct inet_diag_req_v2 *req, + u16 nlmsg_flags, bool net_admin); void inet_diag_dump_icsk(struct inet_hashinfo *h, struct sk_buff *skb, struct netlink_callback *cb, - const struct inet_diag_req_v2 *r, - struct nlattr *bc); + const struct inet_diag_req_v2 *r); int inet_diag_dump_one_icsk(struct inet_hashinfo *hashinfo, - struct sk_buff *in_skb, const struct nlmsghdr *nlh, + struct netlink_callback *cb, const struct inet_diag_req_v2 *req);
struct sock *inet_diag_find_one_icsk(struct net *net, @@@ -67,6 -57,17 +62,17 @@@ int inet_diag_bc_sk(const struct nlatt
void inet_diag_msg_common_fill(struct inet_diag_msg *r, struct sock *sk);
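The inet_diag_msg_attrs_size() helper added below centralizes the per-socket attribute budget that inet_diag, raw_diag and udp_diag previously open-coded. Worked through once: nla_total_size(n) is NLA_ALIGN(NLA_HDRLEN + n), so with the 4-byte header and 4-byte alignment even a one-byte attribute costs 8 bytes.

/* nla_total_size(1) == NLA_ALIGN(4 + 1) == 8
 * nla_total_size(4) == NLA_ALIGN(4 + 4) == 8
 * => the helper below reserves 4 attrs (32 bytes) without IPv6,
 *    6 attrs (48 bytes) with CONFIG_IPV6 enabled.
 */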
+ static inline size_t inet_diag_msg_attrs_size(void) + { + return nla_total_size(1) /* INET_DIAG_SHUTDOWN */ + + nla_total_size(1) /* INET_DIAG_TOS */ + #if IS_ENABLED(CONFIG_IPV6) + + nla_total_size(1) /* INET_DIAG_TCLASS */ + + nla_total_size(1) /* INET_DIAG_SKV6ONLY */ + #endif + + nla_total_size(4) /* INET_DIAG_MARK */ + + nla_total_size(4); /* INET_DIAG_CLASS_ID */ + } int inet_diag_msg_attrs_fill(struct sock *sk, struct sk_buff *skb, struct inet_diag_msg *r, int ext, struct user_namespace *user_ns, bool net_admin); diff --combined include/linux/phy.h index e72dbd0d2d6a,452e8ba8665f..7a08023bdbc5 --- a/include/linux/phy.h +++ b/include/linux/phy.h @@@ -289,7 -289,6 +289,7 @@@ static inline struct mii_bus *devm_mdio return devm_mdiobus_alloc_size(dev, 0); }
+struct mii_bus *mdio_find_bus(const char *mdio_name); void devm_mdiobus_free(struct device *dev, struct mii_bus *bus); struct phy_device *mdiobus_scan(struct mii_bus *bus, int addr);
@@@ -358,6 -357,7 +358,7 @@@ struct macsec_ops * is_gigabit_capable: Set to true if PHY supports 1000Mbps * has_fixups: Set to true if this phy has fixups/quirks. * suspended: Set to true if this phy has been suspended successfully. + * suspended_by_mdio_bus: Set to true if this phy was suspended by MDIO bus. * sysfs_links: Internal boolean tracking sysfs symbolic links setup/removal. * loopback_enabled: Set true if this phy has been loopbacked successfully. * state: state of the PHY for management purposes @@@ -397,6 -397,7 +398,7 @@@ struct phy_device unsigned is_gigabit_capable:1; unsigned has_fixups:1; unsigned suspended:1; + unsigned suspended_by_mdio_bus:1; unsigned sysfs_links:1; unsigned loopback_enabled:1;
@@@ -558,6 -559,7 +560,7 @@@ struct phy_driver /* * Checks if the PHY generated an interrupt. * For multi-PHY devices with shared PHY interrupt pin + * Set interrupt bits have to be cleared. */ int (*did_interrupt)(struct phy_device *phydev);
@@@ -1258,9 -1260,6 +1261,9 @@@ void phy_set_sym_pause(struct phy_devic void phy_set_asym_pause(struct phy_device *phydev, bool rx, bool tx); bool phy_validate_pause(struct phy_device *phydev, struct ethtool_pauseparam *pp); +void phy_get_pause(struct phy_device *phydev, bool *tx_pause, bool *rx_pause); +void phy_resolve_pause(unsigned long *local_adv, unsigned long *partner_adv, + bool *tx_pause, bool *rx_pause);
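phy_get_pause() and phy_resolve_pause() give MAC drivers one place to turn the negotiated pause state into tx/rx enables instead of each driver decoding advertisement bits itself. A caller sketch; the MAC side is hypothetical:

/* Sketch: a MAC driver's link-up handler consuming the new helper. */
static void mac_config_flow_control(struct phy_device *phydev)
{
	bool tx_pause, rx_pause;

	phy_get_pause(phydev, &tx_pause, &rx_pause);

	/* program the (hypothetical) MAC flow-control registers here:
	 * send pause frames iff tx_pause, honour them iff rx_pause
	 */
}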
int phy_register_fixup(const char *bus_id, u32 phy_uid, u32 phy_uid_mask, int (*run)(struct phy_device *)); diff --combined net/core/devlink.c index e8ccea9035c8,b831c5545d6a..f51bebc8c33f --- a/net/core/devlink.c +++ b/net/core/devlink.c @@@ -545,7 -545,6 +545,7 @@@ static int devlink_nl_port_attrs_put(st case DEVLINK_PORT_FLAVOUR_PHYSICAL: case DEVLINK_PORT_FLAVOUR_CPU: case DEVLINK_PORT_FLAVOUR_DSA: + case DEVLINK_PORT_FLAVOUR_VIRTUAL: if (nla_put_u32(msg, DEVLINK_ATTR_PORT_NUMBER, attrs->phys.port_number)) return -EMSGSIZE; @@@ -3353,34 -3352,41 +3353,41 @@@ devlink_param_value_get_from_info(cons struct genl_info *info, union devlink_param_value *value) { + struct nlattr *param_data; int len;
- if (param->type != DEVLINK_PARAM_TYPE_BOOL && - !info->attrs[DEVLINK_ATTR_PARAM_VALUE_DATA]) + param_data = info->attrs[DEVLINK_ATTR_PARAM_VALUE_DATA]; + + if (param->type != DEVLINK_PARAM_TYPE_BOOL && !param_data) return -EINVAL;
switch (param->type) { case DEVLINK_PARAM_TYPE_U8: - value->vu8 = nla_get_u8(info->attrs[DEVLINK_ATTR_PARAM_VALUE_DATA]); + if (nla_len(param_data) != sizeof(u8)) + return -EINVAL; + value->vu8 = nla_get_u8(param_data); break; case DEVLINK_PARAM_TYPE_U16: - value->vu16 = nla_get_u16(info->attrs[DEVLINK_ATTR_PARAM_VALUE_DATA]); + if (nla_len(param_data) != sizeof(u16)) + return -EINVAL; + value->vu16 = nla_get_u16(param_data); break; case DEVLINK_PARAM_TYPE_U32: - value->vu32 = nla_get_u32(info->attrs[DEVLINK_ATTR_PARAM_VALUE_DATA]); + if (nla_len(param_data) != sizeof(u32)) + return -EINVAL; + value->vu32 = nla_get_u32(param_data); break; case DEVLINK_PARAM_TYPE_STRING: - len = strnlen(nla_data(info->attrs[DEVLINK_ATTR_PARAM_VALUE_DATA]), - nla_len(info->attrs[DEVLINK_ATTR_PARAM_VALUE_DATA])); - if (len == nla_len(info->attrs[DEVLINK_ATTR_PARAM_VALUE_DATA]) || + len = strnlen(nla_data(param_data), nla_len(param_data)); + if (len == nla_len(param_data) || len >= __DEVLINK_PARAM_MAX_STRING_VALUE) return -EINVAL; - strcpy(value->vstr, - nla_data(info->attrs[DEVLINK_ATTR_PARAM_VALUE_DATA])); + strcpy(value->vstr, nla_data(param_data)); break; case DEVLINK_PARAM_TYPE_BOOL: - value->vbool = info->attrs[DEVLINK_ATTR_PARAM_VALUE_DATA] ? - true : false; + if (param_data && nla_len(param_data)) + return -EINVAL; + value->vbool = nla_get_flag(param_data); break; } return 0; @@@ -4233,17 -4239,11 +4240,17 @@@ struct devlink_fmsg_item int attrtype; u8 nla_type; u16 len; - int value[0]; + int value[]; };
 struct devlink_fmsg {
 	struct list_head item_list;
+	bool putting_binary; /* This flag forces enclosing of binary data
+			      * in an array brackets. It forces using
+			      * of designated API:
+			      * devlink_fmsg_binary_pair_nest_start()
+			      * devlink_fmsg_binary_pair_nest_end()
+			      */
 };
static struct devlink_fmsg *devlink_fmsg_alloc(void) @@@ -4287,26 -4287,17 +4294,26 @@@ static int devlink_fmsg_nest_common(str
int devlink_fmsg_obj_nest_start(struct devlink_fmsg *fmsg) { + if (fmsg->putting_binary) + return -EINVAL; + return devlink_fmsg_nest_common(fmsg, DEVLINK_ATTR_FMSG_OBJ_NEST_START); } EXPORT_SYMBOL_GPL(devlink_fmsg_obj_nest_start);
static int devlink_fmsg_nest_end(struct devlink_fmsg *fmsg) { + if (fmsg->putting_binary) + return -EINVAL; + return devlink_fmsg_nest_common(fmsg, DEVLINK_ATTR_FMSG_NEST_END); }
int devlink_fmsg_obj_nest_end(struct devlink_fmsg *fmsg) { + if (fmsg->putting_binary) + return -EINVAL; + return devlink_fmsg_nest_end(fmsg); } EXPORT_SYMBOL_GPL(devlink_fmsg_obj_nest_end); @@@ -4317,9 -4308,6 +4324,9 @@@ static int devlink_fmsg_put_name(struc { struct devlink_fmsg_item *item;
+ if (fmsg->putting_binary) + return -EINVAL; + if (strlen(name) + 1 > DEVLINK_FMSG_MAX_SIZE) return -EMSGSIZE;
@@@ -4340,9 -4328,6 +4347,9 @@@ int devlink_fmsg_pair_nest_start(struc { int err;
+ if (fmsg->putting_binary) + return -EINVAL; + err = devlink_fmsg_nest_common(fmsg, DEVLINK_ATTR_FMSG_PAIR_NEST_START); if (err) return err; @@@ -4357,9 -4342,6 +4364,9 @@@ EXPORT_SYMBOL_GPL(devlink_fmsg_pair_nes
int devlink_fmsg_pair_nest_end(struct devlink_fmsg *fmsg) { + if (fmsg->putting_binary) + return -EINVAL; + return devlink_fmsg_nest_end(fmsg); } EXPORT_SYMBOL_GPL(devlink_fmsg_pair_nest_end); @@@ -4369,9 -4351,6 +4376,9 @@@ int devlink_fmsg_arr_pair_nest_start(st { int err;
+ if (fmsg->putting_binary) + return -EINVAL; + err = devlink_fmsg_pair_nest_start(fmsg, name); if (err) return err; @@@ -4388,9 -4367,6 +4395,9 @@@ int devlink_fmsg_arr_pair_nest_end(stru { int err;
+ if (fmsg->putting_binary) + return -EINVAL; + err = devlink_fmsg_nest_end(fmsg); if (err) return err; @@@ -4403,30 -4379,6 +4410,30 @@@ } EXPORT_SYMBOL_GPL(devlink_fmsg_arr_pair_nest_end);
+int devlink_fmsg_binary_pair_nest_start(struct devlink_fmsg *fmsg, + const char *name) +{ + int err; + + err = devlink_fmsg_arr_pair_nest_start(fmsg, name); + if (err) + return err; + + fmsg->putting_binary = true; + return err; +} +EXPORT_SYMBOL_GPL(devlink_fmsg_binary_pair_nest_start); + +int devlink_fmsg_binary_pair_nest_end(struct devlink_fmsg *fmsg) +{ + if (!fmsg->putting_binary) + return -EINVAL; + + fmsg->putting_binary = false; + return devlink_fmsg_arr_pair_nest_end(fmsg); +} +EXPORT_SYMBOL_GPL(devlink_fmsg_binary_pair_nest_end); + static int devlink_fmsg_put_value(struct devlink_fmsg *fmsg, const void *value, u16 value_len, u8 value_nla_type) @@@ -4451,59 -4403,40 +4458,59 @@@
int devlink_fmsg_bool_put(struct devlink_fmsg *fmsg, bool value) { + if (fmsg->putting_binary) + return -EINVAL; + return devlink_fmsg_put_value(fmsg, &value, sizeof(value), NLA_FLAG); } EXPORT_SYMBOL_GPL(devlink_fmsg_bool_put);
int devlink_fmsg_u8_put(struct devlink_fmsg *fmsg, u8 value) { + if (fmsg->putting_binary) + return -EINVAL; + return devlink_fmsg_put_value(fmsg, &value, sizeof(value), NLA_U8); } EXPORT_SYMBOL_GPL(devlink_fmsg_u8_put);
int devlink_fmsg_u32_put(struct devlink_fmsg *fmsg, u32 value) { + if (fmsg->putting_binary) + return -EINVAL; + return devlink_fmsg_put_value(fmsg, &value, sizeof(value), NLA_U32); } EXPORT_SYMBOL_GPL(devlink_fmsg_u32_put);
int devlink_fmsg_u64_put(struct devlink_fmsg *fmsg, u64 value) { + if (fmsg->putting_binary) + return -EINVAL; + return devlink_fmsg_put_value(fmsg, &value, sizeof(value), NLA_U64); } EXPORT_SYMBOL_GPL(devlink_fmsg_u64_put);
int devlink_fmsg_string_put(struct devlink_fmsg *fmsg, const char *value) { + if (fmsg->putting_binary) + return -EINVAL; + return devlink_fmsg_put_value(fmsg, value, strlen(value) + 1, NLA_NUL_STRING); } EXPORT_SYMBOL_GPL(devlink_fmsg_string_put);
-static int devlink_fmsg_binary_put(struct devlink_fmsg *fmsg, const void *value, - u16 value_len) +int devlink_fmsg_binary_put(struct devlink_fmsg *fmsg, const void *value, + u16 value_len) { + if (!fmsg->putting_binary) + return -EINVAL; + return devlink_fmsg_put_value(fmsg, value, value_len, NLA_BINARY); } +EXPORT_SYMBOL_GPL(devlink_fmsg_binary_put);
int devlink_fmsg_bool_pair_put(struct devlink_fmsg *fmsg, const char *name, bool value) @@@ -4614,11 -4547,10 +4621,11 @@@ int devlink_fmsg_binary_pair_put(struc const void *value, u32 value_len) { u32 data_size; + int end_err; u32 offset; int err;
- err = devlink_fmsg_arr_pair_nest_start(fmsg, name); + err = devlink_fmsg_binary_pair_nest_start(fmsg, name); if (err) return err;
@@@ -4628,18 -4560,14 +4635,18 @@@ data_size = DEVLINK_FMSG_MAX_SIZE; err = devlink_fmsg_binary_put(fmsg, value + offset, data_size); if (err) - return err; + break; + /* Exit from loop with a break (instead of + * return) to make sure putting_binary is turned off in + * devlink_fmsg_binary_pair_nest_end + */ }
- err = devlink_fmsg_arr_pair_nest_end(fmsg); - if (err) - return err; + end_err = devlink_fmsg_binary_pair_nest_end(fmsg); + if (end_err) + err = end_err;
- return 0; + return err; } EXPORT_SYMBOL_GPL(devlink_fmsg_binary_pair_put);
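The putting_binary flag plus the exported devlink_fmsg_binary_pair_nest_start/_end() and devlink_fmsg_binary_put() let a driver stream a large dump in chunks, as devlink_fmsg_binary_pair_put() above now does internally. A caller sketch with a hypothetical blob source:

/* Sketch: emit one binary pair as several chunked binary_put()s.
 * Note the nest_end still runs on error so putting_binary is cleared.
 */
static int report_fw_dump(struct devlink_fmsg *fmsg, const u8 *buf, u32 len)
{
	u32 off, chunk;
	int err;

	err = devlink_fmsg_binary_pair_nest_start(fmsg, "fw_dump");
	if (err)
		return err;

	for (off = 0; off < len; off += chunk) {
		chunk = min_t(u32, len - off, DEVLINK_FMSG_MAX_SIZE);
		err = devlink_fmsg_binary_put(fmsg, buf + off, chunk);
		if (err)
			break;
	}

	return devlink_fmsg_binary_pair_nest_end(fmsg) ?: err;
}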
@@@ -5541,9 -5469,6 +5548,9 @@@ static int devlink_trap_metadata_put(st if ((trap->metadata_cap & DEVLINK_TRAP_METADATA_TYPE_F_IN_PORT) && nla_put_flag(msg, DEVLINK_ATTR_TRAP_METADATA_TYPE_IN_PORT)) goto nla_put_failure; + if ((trap->metadata_cap & DEVLINK_TRAP_METADATA_TYPE_F_FA_COOKIE) && + nla_put_flag(msg, DEVLINK_ATTR_TRAP_METADATA_TYPE_FA_COOKIE)) + goto nla_put_failure;
nla_nest_end(msg, attr);
@@@ -6033,6 -5958,8 +6040,8 @@@ static const struct nla_policy devlink_ [DEVLINK_ATTR_PARAM_VALUE_CMODE] = { .type = NLA_U8 }, [DEVLINK_ATTR_REGION_NAME] = { .type = NLA_NUL_STRING }, [DEVLINK_ATTR_REGION_SNAPSHOT_ID] = { .type = NLA_U32 }, + [DEVLINK_ATTR_REGION_CHUNK_ADDR] = { .type = NLA_U64 }, + [DEVLINK_ATTR_REGION_CHUNK_LEN] = { .type = NLA_U64 }, [DEVLINK_ATTR_HEALTH_REPORTER_NAME] = { .type = NLA_NUL_STRING }, [DEVLINK_ATTR_HEALTH_REPORTER_GRACEFUL_PERIOD] = { .type = NLA_U64 }, [DEVLINK_ATTR_HEALTH_REPORTER_AUTO_RECOVER] = { .type = NLA_U8 }, @@@ -6807,7 -6734,6 +6816,7 @@@ static int __devlink_port_phys_port_nam
switch (attrs->flavour) { case DEVLINK_PORT_FLAVOUR_PHYSICAL: + case DEVLINK_PORT_FLAVOUR_VIRTUAL: if (!attrs->split) n = snprintf(name, len, "p%u", attrs->phys.port_number); else @@@ -7808,8 -7734,6 +7817,8 @@@ static const struct devlink_trap devlin DEVLINK_TRAP(NON_ROUTABLE, DROP), DEVLINK_TRAP(DECAP_ERROR, EXCEPTION), DEVLINK_TRAP(OVERLAY_SMAC_MC, DROP), + DEVLINK_TRAP(INGRESS_FLOW_ACTION_DROP, DROP), + DEVLINK_TRAP(EGRESS_FLOW_ACTION_DROP, DROP), };
#define DEVLINK_TRAP_GROUP(_id) \ @@@ -7823,7 -7747,6 +7832,7 @@@ static const struct devlink_trap_group DEVLINK_TRAP_GROUP(L3_DROPS), DEVLINK_TRAP_GROUP(BUFFER_DROPS), DEVLINK_TRAP_GROUP(TUNNEL_DROPS), + DEVLINK_TRAP_GROUP(ACL_DROPS), };
static int devlink_trap_generic_verify(const struct devlink_trap *trap) @@@ -8215,14 -8138,12 +8224,14 @@@ devlink_trap_stats_update(struct devlin static void devlink_trap_report_metadata_fill(struct net_dm_hw_metadata *hw_metadata, const struct devlink_trap_item *trap_item, - struct devlink_port *in_devlink_port) + struct devlink_port *in_devlink_port, + const struct flow_action_cookie *fa_cookie) { struct devlink_trap_group_item *group_item = trap_item->group_item;
hw_metadata->trap_group_name = group_item->group->name; hw_metadata->trap_name = trap_item->trap->name; + hw_metadata->fa_cookie = fa_cookie;
spin_lock(&in_devlink_port->type_lock); if (in_devlink_port->type == DEVLINK_PORT_TYPE_ETH) @@@ -8236,12 -8157,9 +8245,12 @@@ * @skb: Trapped packet. * @trap_ctx: Trap context. * @in_devlink_port: Input devlink port. + * @fa_cookie: Flow action cookie. Could be NULL. */ void devlink_trap_report(struct devlink *devlink, struct sk_buff *skb, - void *trap_ctx, struct devlink_port *in_devlink_port) + void *trap_ctx, struct devlink_port *in_devlink_port, + const struct flow_action_cookie *fa_cookie) + { struct devlink_trap_item *trap_item = trap_ctx; struct net_dm_hw_metadata hw_metadata = {}; @@@ -8250,7 -8168,7 +8259,7 @@@ devlink_trap_stats_update(trap_item->group_item->stats, skb->len);
devlink_trap_report_metadata_fill(&hw_metadata, trap_item, - in_devlink_port); + in_devlink_port, fa_cookie); net_dm_hw_report(skb, &hw_metadata); } EXPORT_SYMBOL_GPL(devlink_trap_report); diff --combined net/core/sock.c index e4af4dbc1c9e,8f71684305c3..0fc8937a7ff4 --- a/net/core/sock.c +++ b/net/core/sock.c @@@ -1572,14 -1572,13 +1572,14 @@@ static inline void sock_lock_init(struc */ static void sock_copy(struct sock *nsk, const struct sock *osk) { + const struct proto *prot = READ_ONCE(osk->sk_prot); #ifdef CONFIG_SECURITY_NETWORK void *sptr = nsk->sk_security; #endif memcpy(nsk, osk, offsetof(struct sock, sk_dontcopy_begin));
memcpy(&nsk->sk_dontcopy_end, &osk->sk_dontcopy_end, - osk->sk_prot->obj_size - offsetof(struct sock, sk_dontcopy_end)); + prot->obj_size - offsetof(struct sock, sk_dontcopy_end));
#ifdef CONFIG_SECURITY_NETWORK nsk->sk_security = sptr; @@@ -1793,17 -1792,16 +1793,17 @@@ static void sk_init_common(struct sock */ struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority) { + struct proto *prot = READ_ONCE(sk->sk_prot); struct sock *newsk; bool is_charged = true;
- newsk = sk_prot_alloc(sk->sk_prot, priority, sk->sk_family); + newsk = sk_prot_alloc(prot, priority, sk->sk_family); if (newsk != NULL) { struct sk_filter *filter;
sock_copy(newsk, sk);
- newsk->sk_prot_creator = sk->sk_prot; + newsk->sk_prot_creator = prot;
/* SANITY */ if (likely(newsk->sk_net_refcnt)) @@@ -1832,7 -1830,10 +1832,10 @@@ atomic_set(&newsk->sk_zckey, 0);
sock_reset_flag(newsk, SOCK_DONE); - mem_cgroup_sk_alloc(newsk); + + /* sk->sk_memcg will be populated at accept() time */ + newsk->sk_memcg = NULL; + cgroup_sk_alloc(&newsk->sk_cgrp_data);
rcu_read_lock(); @@@ -1865,12 -1866,6 +1868,12 @@@ goto out; }
+ /* Clear sk_user_data if parent had the pointer tagged + * as not suitable for copying when cloning. + */ + if (sk_user_data_is_nocopy(newsk)) + RCU_INIT_POINTER(newsk->sk_user_data, NULL); + newsk->sk_err = 0; newsk->sk_err_soft = 0; newsk->sk_priority = 0; diff --combined net/dsa/port.c index d4450a454249,ec13dc666788..e6875d8f944d --- a/net/dsa/port.c +++ b/net/dsa/port.c @@@ -63,7 -63,7 +63,7 @@@ static void dsa_port_set_state_now(stru pr_err("DSA: failed to set STP state %u (%d)\n", state, err); }
- int dsa_port_enable(struct dsa_port *dp, struct phy_device *phy) + int dsa_port_enable_rt(struct dsa_port *dp, struct phy_device *phy) { struct dsa_switch *ds = dp->ds; int port = dp->index; @@@ -78,14 -78,31 +78,31 @@@ if (!dp->bridge_dev) dsa_port_set_state_now(dp, BR_STATE_FORWARDING);
+ if (dp->pl) + phylink_start(dp->pl); + return 0; }
- void dsa_port_disable(struct dsa_port *dp) + int dsa_port_enable(struct dsa_port *dp, struct phy_device *phy) + { + int err; + + rtnl_lock(); + err = dsa_port_enable_rt(dp, phy); + rtnl_unlock(); + + return err; + } + + void dsa_port_disable_rt(struct dsa_port *dp) { struct dsa_switch *ds = dp->ds; int port = dp->index;
+ if (dp->pl) + phylink_stop(dp->pl); + if (!dp->bridge_dev) dsa_port_set_state_now(dp, BR_STATE_DISABLED);
@@@ -93,6 -110,13 +110,13 @@@ ds->ops->port_disable(ds, port); }
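phylink_start()/phylink_stop() must run under RTNL, so port bring-up/teardown splits into bare *_rt variants for callers that already hold the lock (the slave open/close paths below) and locked wrappers for everyone else. Sketch of picking the right variant, caller hypothetical:

#include <linux/rtnetlink.h>

/* Sketch: choose the variant that matches the caller's locking. */
static int bring_up_port(struct dsa_port *dp, struct phy_device *phy,
			 bool rtnl_held)
{
	if (rtnl_held) {
		ASSERT_RTNL();			/* debug-time check */
		return dsa_port_enable_rt(dp, phy);
	}
	return dsa_port_enable(dp, phy);	/* takes rtnl_lock itself */
}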
+ void dsa_port_disable(struct dsa_port *dp) + { + rtnl_lock(); + dsa_port_disable_rt(dp); + rtnl_unlock(); + } + int dsa_port_bridge_join(struct dsa_port *dp, struct net_device *br) { struct dsa_notifier_bridge_info info = { @@@ -489,11 -513,9 +513,11 @@@ static void dsa_port_phylink_mac_link_d }
static void dsa_port_phylink_mac_link_up(struct phylink_config *config, + struct phy_device *phydev, unsigned int mode, phy_interface_t interface, - struct phy_device *phydev) + int speed, int duplex, + bool tx_pause, bool rx_pause) { struct dsa_port *dp = container_of(config, struct dsa_port, pl_config); struct dsa_switch *ds = dp->ds; @@@ -504,8 -526,7 +528,8 @@@ return; }
- ds->ops->phylink_mac_link_up(ds, dp->index, mode, interface, phydev); + ds->ops->phylink_mac_link_up(ds, dp->index, mode, interface, phydev, + speed, duplex, tx_pause, rx_pause); }
const struct phylink_mac_ops dsa_port_phylink_mac_ops = { @@@ -617,10 -638,6 +641,6 @@@ static int dsa_port_phylink_register(st goto err_phy_connect; }
- rtnl_lock(); - phylink_start(dp->pl); - rtnl_unlock(); - return 0;
err_phy_connect: @@@ -631,9 -648,14 +651,14 @@@ int dsa_port_link_register_of(struct dsa_port *dp) { struct dsa_switch *ds = dp->ds; + struct device_node *phy_np;
- if (!ds->ops->adjust_link) - return dsa_port_phylink_register(dp); + if (!ds->ops->adjust_link) { + phy_np = of_parse_phandle(dp->dn, "phy-handle", 0); + if (of_phy_is_fixed_link(dp->dn) || phy_np) + return dsa_port_phylink_register(dp); + return 0; + }
dev_warn(ds->dev, "Using legacy PHYLIB callbacks. Please migrate to PHYLINK!\n"); @@@ -648,11 -670,12 +673,12 @@@ void dsa_port_link_unregister_of(struc { struct dsa_switch *ds = dp->ds;
- if (!ds->ops->adjust_link) { + if (!ds->ops->adjust_link && dp->pl) { rtnl_lock(); phylink_disconnect_phy(dp->pl); rtnl_unlock(); phylink_destroy(dp->pl); + dp->pl = NULL; return; }
diff --combined net/dsa/slave.c index fca9bfa8437e,ddc0f9236928..c5beb3031a72 --- a/net/dsa/slave.c +++ b/net/dsa/slave.c @@@ -88,12 -88,10 +88,10 @@@ static int dsa_slave_open(struct net_de goto clear_allmulti; }
- err = dsa_port_enable(dp, dev->phydev); + err = dsa_port_enable_rt(dp, dev->phydev); if (err) goto clear_promisc;
- phylink_start(dp->pl); - return 0;
clear_promisc: @@@ -114,9 -112,7 +112,7 @@@ static int dsa_slave_close(struct net_d struct net_device *master = dsa_slave_to_master(dev); struct dsa_port *dp = dsa_slave_to_port(dev);
- phylink_stop(dp->pl); - - dsa_port_disable(dp); + dsa_port_disable_rt(dp);
dev_mc_unsync(master, dev); dev_uc_unsync(master, dev); @@@ -865,10 -861,6 +861,10 @@@ static int dsa_slave_add_cls_matchall(s if (!flow_offload_has_one_action(&cls->rule->action)) return err;
+ if (!flow_action_basic_hw_stats_types_check(&cls->rule->action, + cls->common.extack)) + return err; + act = &cls->rule->action.entries[0];
if (act->id == FLOW_ACTION_MIRRED && protocol == htons(ETH_P_ALL)) { @@@ -950,64 -942,6 +946,64 @@@ static int dsa_slave_setup_tc_cls_match } }
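The next hunk wires TC_SETUP_CLSFLOWER into DSA through thin helpers that defer to optional cls_flower_add/del/stats switch ops. A driver-side sketch of one such op; the hardware capability check is hypothetical:

/* Sketch: a switch driver's ->cls_flower_add(), refusing matches the
 * TCAM cannot express instead of silently accepting them.
 */
static int sample_cls_flower_add(struct dsa_switch *ds, int port,
				 struct flow_cls_offload *cls, bool ingress)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(cls);

	/* hypothetical limitation: no IPv6 address matching in hw */
	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV6_ADDRS))
		return -EOPNOTSUPP;

	/* ... program the TCAM entry for (port, rule) here ... */
	return 0;
}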
+static int dsa_slave_add_cls_flower(struct net_device *dev, + struct flow_cls_offload *cls, + bool ingress) +{ + struct dsa_port *dp = dsa_slave_to_port(dev); + struct dsa_switch *ds = dp->ds; + int port = dp->index; + + if (!ds->ops->cls_flower_add) + return -EOPNOTSUPP; + + return ds->ops->cls_flower_add(ds, port, cls, ingress); +} + +static int dsa_slave_del_cls_flower(struct net_device *dev, + struct flow_cls_offload *cls, + bool ingress) +{ + struct dsa_port *dp = dsa_slave_to_port(dev); + struct dsa_switch *ds = dp->ds; + int port = dp->index; + + if (!ds->ops->cls_flower_del) + return -EOPNOTSUPP; + + return ds->ops->cls_flower_del(ds, port, cls, ingress); +} + +static int dsa_slave_stats_cls_flower(struct net_device *dev, + struct flow_cls_offload *cls, + bool ingress) +{ + struct dsa_port *dp = dsa_slave_to_port(dev); + struct dsa_switch *ds = dp->ds; + int port = dp->index; + + if (!ds->ops->cls_flower_stats) + return -EOPNOTSUPP; + + return ds->ops->cls_flower_stats(ds, port, cls, ingress); +} + +static int dsa_slave_setup_tc_cls_flower(struct net_device *dev, + struct flow_cls_offload *cls, + bool ingress) +{ + switch (cls->command) { + case FLOW_CLS_REPLACE: + return dsa_slave_add_cls_flower(dev, cls, ingress); + case FLOW_CLS_DESTROY: + return dsa_slave_del_cls_flower(dev, cls, ingress); + case FLOW_CLS_STATS: + return dsa_slave_stats_cls_flower(dev, cls, ingress); + default: + return -EOPNOTSUPP; + } +} + static int dsa_slave_setup_tc_block_cb(enum tc_setup_type type, void *type_data, void *cb_priv, bool ingress) { @@@ -1019,8 -953,6 +1015,8 @@@ switch (type) { case TC_SETUP_CLSMATCHALL: return dsa_slave_setup_tc_cls_matchall(dev, type_data, ingress); + case TC_SETUP_CLSFLOWER: + return dsa_slave_setup_tc_cls_flower(dev, type_data, ingress); default: return -EOPNOTSUPP; } diff --combined net/ipv4/inet_connection_sock.c index 3b4f81790e3e,d545fb99a8a1..5f34eb951627 --- a/net/ipv4/inet_connection_sock.c +++ b/net/ipv4/inet_connection_sock.c @@@ -131,7 -131,7 +131,7 @@@ static int inet_csk_bind_conflict(cons { struct sock *sk2; bool reuse = sk->sk_reuse; - bool reuseport = !!sk->sk_reuseport && reuseport_ok; + bool reuseport = !!sk->sk_reuseport; kuid_t uid = sock_i_uid((struct sock *)sk);
/* @@@ -146,21 -146,17 +146,21 @@@ (!sk->sk_bound_dev_if || !sk2->sk_bound_dev_if || sk->sk_bound_dev_if == sk2->sk_bound_dev_if)) { - if ((!reuse || !sk2->sk_reuse || - sk2->sk_state == TCP_LISTEN) && - (!reuseport || !sk2->sk_reuseport || - rcu_access_pointer(sk->sk_reuseport_cb) || - (sk2->sk_state != TCP_TIME_WAIT && - !uid_eq(uid, sock_i_uid(sk2))))) { - if (inet_rcv_saddr_equal(sk, sk2, true)) - break; - } - if (!relax && reuse && sk2->sk_reuse && + if (reuse && sk2->sk_reuse && sk2->sk_state != TCP_LISTEN) { + if ((!relax || + (!reuseport_ok && + reuseport && sk2->sk_reuseport && + !rcu_access_pointer(sk->sk_reuseport_cb) && + (sk2->sk_state == TCP_TIME_WAIT || + uid_eq(uid, sock_i_uid(sk2))))) && + inet_rcv_saddr_equal(sk, sk2, true)) + break; + } else if (!reuseport_ok || + !reuseport || !sk2->sk_reuseport || + rcu_access_pointer(sk->sk_reuseport_cb) || + (sk2->sk_state != TCP_TIME_WAIT && + !uid_eq(uid, sock_i_uid(sk2)))) { if (inet_rcv_saddr_equal(sk, sk2, true)) break; } @@@ -180,14 -176,12 +180,14 @@@ inet_csk_find_open_port(struct sock *sk int port = 0; struct inet_bind_hashbucket *head; struct net *net = sock_net(sk); + bool relax = false; int i, low, high, attempt_half; struct inet_bind_bucket *tb; u32 remaining, offset; int l3mdev;
l3mdev = inet_sk_bound_l3mdev(sk); +ports_exhausted: attempt_half = (sk->sk_reuse == SK_CAN_REUSE) ? 1 : 0; other_half_scan: inet_get_local_port_range(net, &low, &high); @@@ -225,7 -219,7 +225,7 @@@ other_parity_scan inet_bind_bucket_for_each(tb, &head->chain) if (net_eq(ib_net(tb), net) && tb->l3mdev == l3mdev && tb->port == port) { - if (!inet_csk_bind_conflict(sk, tb, false, false)) + if (!inet_csk_bind_conflict(sk, tb, relax, false)) goto success; goto next_port; } @@@ -245,12 -239,6 +245,12 @@@ next_port attempt_half = 2; goto other_half_scan; } + + if (net->ipv4.sysctl_ip_autobind_reuse && !relax) { + /* We still have a chance to connect to different destinations */ + relax = true; + goto ports_exhausted; + } return NULL; success: *port_ret = port; @@@ -494,8 -482,28 +494,28 @@@ struct sock *inet_csk_accept(struct soc } spin_unlock_bh(&queue->fastopenq.lock); } + out: release_sock(sk); + if (newsk && mem_cgroup_sockets_enabled) { + int amt; + + /* atomically get the memory usage, set and charge the + * newsk->sk_memcg. + */ + lock_sock(newsk); + + /* The socket has not been accepted yet, no need to look at + * newsk->sk_wmem_queued. + */ + amt = sk_mem_pages(newsk->sk_forward_alloc + + atomic_read(&newsk->sk_rmem_alloc)); + mem_cgroup_sk_alloc(newsk); + if (newsk->sk_memcg && amt) + mem_cgroup_charge_skmem(newsk->sk_memcg, amt); + + release_sock(newsk); + } if (req) reqsk_put(req); return newsk; diff --combined net/ipv4/inet_diag.c index e1cad25909df,8c8377568a78..5d50aad3cdbf --- a/net/ipv4/inet_diag.c +++ b/net/ipv4/inet_diag.c @@@ -23,7 -23,6 +23,7 @@@ #include <net/inet_hashtables.h> #include <net/inet_timewait_sock.h> #include <net/inet6_hashtables.h> +#include <net/bpf_sk_storage.h> #include <net/netlink.h>
#include <linux/inet.h> @@@ -101,13 -100,9 +101,9 @@@ static size_t inet_sk_attr_size(struct aux = handler->idiag_get_aux_size(sk, net_admin);
return nla_total_size(sizeof(struct tcp_info)) - + nla_total_size(1) /* INET_DIAG_SHUTDOWN */ - + nla_total_size(1) /* INET_DIAG_TOS */ - + nla_total_size(1) /* INET_DIAG_TCLASS */ - + nla_total_size(4) /* INET_DIAG_MARK */ - + nla_total_size(4) /* INET_DIAG_CLASS_ID */ - + nla_total_size(sizeof(struct inet_diag_meminfo)) + nla_total_size(sizeof(struct inet_diag_msg)) + + inet_diag_msg_attrs_size() + + nla_total_size(sizeof(struct inet_diag_meminfo)) + nla_total_size(SK_MEMINFO_VARS * sizeof(u32)) + nla_total_size(TCP_CA_NAME_MAX) + nla_total_size(sizeof(struct tcpvegas_info)) @@@ -148,6 -143,24 +144,24 @@@ int inet_diag_msg_attrs_fill(struct soc if (net_admin && nla_put_u32(skb, INET_DIAG_MARK, sk->sk_mark)) goto errout;
+ if (ext & (1 << (INET_DIAG_CLASS_ID - 1)) || + ext & (1 << (INET_DIAG_TCLASS - 1))) { + u32 classid = 0; + + #ifdef CONFIG_SOCK_CGROUP_DATA + classid = sock_cgroup_classid(&sk->sk_cgrp_data); + #endif + /* Fallback to socket priority if class id isn't set. + * Classful qdiscs use it as direct reference to class. + * For cgroup2 classid is always zero. + */ + if (!classid) + classid = sk->sk_priority; + + if (nla_put_u32(skb, INET_DIAG_CLASS_ID, classid)) + goto errout; + } + r->idiag_uid = from_kuid_munged(user_ns, sock_i_uid(sk)); r->idiag_inode = sock_i_ino(sk);
@@@ -157,28 -170,26 +171,28 @@@ errout } EXPORT_SYMBOL_GPL(inet_diag_msg_attrs_fill);
+#define MAX_DUMP_ALLOC_SIZE (KMALLOC_MAX_SIZE - SKB_DATA_ALIGN(sizeof(struct skb_shared_info))) + int inet_sk_diag_fill(struct sock *sk, struct inet_connection_sock *icsk, - struct sk_buff *skb, const struct inet_diag_req_v2 *req, - struct user_namespace *user_ns, - u32 portid, u32 seq, u16 nlmsg_flags, - const struct nlmsghdr *unlh, - bool net_admin) + struct sk_buff *skb, struct netlink_callback *cb, + const struct inet_diag_req_v2 *req, + u16 nlmsg_flags, bool net_admin) { const struct tcp_congestion_ops *ca_ops; const struct inet_diag_handler *handler; + struct inet_diag_dump_data *cb_data; int ext = req->idiag_ext; struct inet_diag_msg *r; struct nlmsghdr *nlh; struct nlattr *attr; void *info = NULL;
+ cb_data = cb->data; handler = inet_diag_table[req->sdiag_protocol]; BUG_ON(!handler);
- nlh = nlmsg_put(skb, portid, seq, unlh->nlmsg_type, sizeof(*r), - nlmsg_flags); + nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq, + cb->nlh->nlmsg_type, sizeof(*r), nlmsg_flags); if (!nlh) return -EMSGSIZE;
@@@ -190,9 -201,7 +204,9 @@@ r->idiag_timer = 0; r->idiag_retrans = 0;
- if (inet_diag_msg_attrs_fill(sk, skb, r, ext, user_ns, net_admin)) + if (inet_diag_msg_attrs_fill(sk, skb, r, ext, + sk_user_ns(NETLINK_CB(cb->skb).sk), + net_admin)) goto errout;
if (ext & (1 << (INET_DIAG_MEMINFO - 1))) { @@@ -289,66 -298,6 +303,48 @@@ goto errout; }
- if (ext & (1 << (INET_DIAG_CLASS_ID - 1)) || - ext & (1 << (INET_DIAG_TCLASS - 1))) { - u32 classid = 0; - - #ifdef CONFIG_SOCK_CGROUP_DATA - classid = sock_cgroup_classid(&sk->sk_cgrp_data); - #endif - /* Fallback to socket priority if class id isn't set. - * Classful qdiscs use it as direct reference to class. - * For cgroup2 classid is always zero. - */ - if (!classid) - classid = sk->sk_priority; - - if (nla_put_u32(skb, INET_DIAG_CLASS_ID, classid)) - goto errout; - } - + /* Keep it at the end for potential retry with a larger skb, + * or else do best-effort fitting, which is only done for the + * first_nlmsg. + */ + if (cb_data->bpf_stg_diag) { + bool first_nlmsg = ((unsigned char *)nlh == skb->data); + unsigned int prev_min_dump_alloc; + unsigned int total_nla_size = 0; + unsigned int msg_len; + int err; + + msg_len = skb_tail_pointer(skb) - (unsigned char *)nlh; + err = bpf_sk_storage_diag_put(cb_data->bpf_stg_diag, sk, skb, + INET_DIAG_SK_BPF_STORAGES, + &total_nla_size); + + if (!err) + goto out; + + total_nla_size += msg_len; + prev_min_dump_alloc = cb->min_dump_alloc; + if (total_nla_size > prev_min_dump_alloc) + cb->min_dump_alloc = min_t(u32, total_nla_size, + MAX_DUMP_ALLOC_SIZE); + + if (!first_nlmsg) + goto errout; + + if (cb->min_dump_alloc > prev_min_dump_alloc) + /* Retry with pskb_expand_head() with + * __GFP_DIRECT_RECLAIM + */ + goto errout; + + WARN_ON_ONCE(total_nla_size <= prev_min_dump_alloc); + + /* Send what we have for this sk + * and move on to the next sk in the following + * dump() + */ + } + out: nlmsg_end(skb, nlh); return 0; @@@ -359,19 -308,30 +355,19 @@@ errout } EXPORT_SYMBOL_GPL(inet_sk_diag_fill);
-static int inet_csk_diag_fill(struct sock *sk, - struct sk_buff *skb, - const struct inet_diag_req_v2 *req, - struct user_namespace *user_ns, - u32 portid, u32 seq, u16 nlmsg_flags, - const struct nlmsghdr *unlh, - bool net_admin) -{ - return inet_sk_diag_fill(sk, inet_csk(sk), skb, req, user_ns, - portid, seq, nlmsg_flags, unlh, net_admin); -} - static int inet_twsk_diag_fill(struct sock *sk, struct sk_buff *skb, - u32 portid, u32 seq, u16 nlmsg_flags, - const struct nlmsghdr *unlh) + struct netlink_callback *cb, + u16 nlmsg_flags) { struct inet_timewait_sock *tw = inet_twsk(sk); struct inet_diag_msg *r; struct nlmsghdr *nlh; long tmo;
- nlh = nlmsg_put(skb, portid, seq, unlh->nlmsg_type, sizeof(*r), - nlmsg_flags); + nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid, + cb->nlh->nlmsg_seq, cb->nlh->nlmsg_type, + sizeof(*r), nlmsg_flags); if (!nlh) return -EMSGSIZE;
@@@ -395,16 -355,16 +391,16 @@@ }
static int inet_req_diag_fill(struct sock *sk, struct sk_buff *skb, - u32 portid, u32 seq, u16 nlmsg_flags, - const struct nlmsghdr *unlh, bool net_admin) + struct netlink_callback *cb, + u16 nlmsg_flags, bool net_admin) { struct request_sock *reqsk = inet_reqsk(sk); struct inet_diag_msg *r; struct nlmsghdr *nlh; long tmo;
- nlh = nlmsg_put(skb, portid, seq, unlh->nlmsg_type, sizeof(*r), - nlmsg_flags); + nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq, + cb->nlh->nlmsg_type, sizeof(*r), nlmsg_flags); if (!nlh) return -EMSGSIZE;
@@@ -433,18 -393,21 +429,18 @@@ }
static int sk_diag_fill(struct sock *sk, struct sk_buff *skb, + struct netlink_callback *cb, const struct inet_diag_req_v2 *r, - struct user_namespace *user_ns, - u32 portid, u32 seq, u16 nlmsg_flags, - const struct nlmsghdr *unlh, bool net_admin) + u16 nlmsg_flags, bool net_admin) { if (sk->sk_state == TCP_TIME_WAIT) - return inet_twsk_diag_fill(sk, skb, portid, seq, - nlmsg_flags, unlh); + return inet_twsk_diag_fill(sk, skb, cb, nlmsg_flags);
if (sk->sk_state == TCP_NEW_SYN_RECV) - return inet_req_diag_fill(sk, skb, portid, seq, - nlmsg_flags, unlh, net_admin); + return inet_req_diag_fill(sk, skb, cb, nlmsg_flags, net_admin);
- return inet_csk_diag_fill(sk, skb, r, user_ns, portid, seq, - nlmsg_flags, unlh, net_admin); + return inet_sk_diag_fill(sk, inet_csk(sk), skb, cb, r, nlmsg_flags, + net_admin); }
struct sock *inet_diag_find_one_icsk(struct net *net, @@@ -492,10 -455,10 +488,10 @@@ EXPORT_SYMBOL_GPL(inet_diag_find_one_icsk);
int inet_diag_dump_one_icsk(struct inet_hashinfo *hashinfo, - struct sk_buff *in_skb, - const struct nlmsghdr *nlh, + struct netlink_callback *cb, const struct inet_diag_req_v2 *req) { + struct sk_buff *in_skb = cb->skb; bool net_admin = netlink_net_capable(in_skb, CAP_NET_ADMIN); struct net *net = sock_net(in_skb->sk); struct sk_buff *rep; @@@ -512,7 -475,10 +508,7 @@@ goto out; }
- err = sk_diag_fill(sk, rep, req, - sk_user_ns(NETLINK_CB(in_skb).sk), - NETLINK_CB(in_skb).portid, - nlh->nlmsg_seq, 0, nlh, net_admin); + err = sk_diag_fill(sk, rep, cb, req, 0, net_admin); if (err < 0) { WARN_ON(err == -EMSGSIZE); nlmsg_free(rep); @@@ -539,21 -505,14 +535,21 @@@ static int inet_diag_cmd_exact(int cmd int err;
handler = inet_diag_lock_handler(req->sdiag_protocol); - if (IS_ERR(handler)) + if (IS_ERR(handler)) { err = PTR_ERR(handler); - else if (cmd == SOCK_DIAG_BY_FAMILY) - err = handler->dump_one(in_skb, nlh, req); - else if (cmd == SOCK_DESTROY && handler->destroy) + } else if (cmd == SOCK_DIAG_BY_FAMILY) { + struct inet_diag_dump_data empty_dump_data = {}; + struct netlink_callback cb = { + .nlh = nlh, + .skb = in_skb, + .data = &empty_dump_data, + }; + err = handler->dump_one(&cb, req); + } else if (cmd == SOCK_DESTROY && handler->destroy) { err = handler->destroy(in_skb, req); - else + } else { err = -EOPNOTSUPP; + } inet_diag_unlock_handler(handler);
return err; @@@ -884,6 -843,23 +880,6 @@@ static int inet_diag_bc_audit(const str return len == 0 ? 0 : -EINVAL; }
-static int inet_csk_diag_dump(struct sock *sk, - struct sk_buff *skb, - struct netlink_callback *cb, - const struct inet_diag_req_v2 *r, - const struct nlattr *bc, - bool net_admin) -{ - if (!inet_diag_bc_sk(bc, sk)) - return 0; - - return inet_csk_diag_fill(sk, skb, r, - sk_user_ns(NETLINK_CB(cb->skb).sk), - NETLINK_CB(cb->skb).portid, - cb->nlh->nlmsg_seq, NLM_F_MULTI, cb->nlh, - net_admin); -} - static void twsk_build_assert(void) { BUILD_BUG_ON(offsetof(struct inet_timewait_sock, tw_family) != @@@ -912,17 -888,14 +908,17 @@@
void inet_diag_dump_icsk(struct inet_hashinfo *hashinfo, struct sk_buff *skb, struct netlink_callback *cb, - const struct inet_diag_req_v2 *r, struct nlattr *bc) + const struct inet_diag_req_v2 *r) { bool net_admin = netlink_net_capable(cb->skb, CAP_NET_ADMIN); + struct inet_diag_dump_data *cb_data = cb->data; struct net *net = sock_net(skb->sk); u32 idiag_states = r->idiag_states; int i, num, s_i, s_num; + struct nlattr *bc; struct sock *sk;
+ bc = cb_data->inet_diag_nla_bc; if (idiag_states & TCPF_SYN_RECV) idiag_states |= TCPF_NEW_SYN_RECV; s_i = cb->args[1]; @@@ -958,12 -931,8 +954,12 @@@ r->id.idiag_sport) goto next_listen;
- if (inet_csk_diag_dump(sk, skb, cb, r, - bc, net_admin) < 0) { + if (!inet_diag_bc_sk(bc, sk)) + goto next_listen; + + if (inet_sk_diag_fill(sk, inet_csk(sk), skb, + cb, r, NLM_F_MULTI, + net_admin) < 0) { spin_unlock(&ilb->lock); goto done; } @@@ -1041,8 -1010,11 +1037,8 @@@ next_normal res = 0; for (idx = 0; idx < accum; idx++) { if (res >= 0) { - res = sk_diag_fill(sk_arr[idx], skb, r, - sk_user_ns(NETLINK_CB(cb->skb).sk), - NETLINK_CB(cb->skb).portid, - cb->nlh->nlmsg_seq, NLM_F_MULTI, - cb->nlh, net_admin); + res = sk_diag_fill(sk_arr[idx], skb, cb, r, + NLM_F_MULTI, net_admin); if (res < 0) num = num_arr[idx]; } @@@ -1066,101 -1038,31 +1062,101 @@@ out EXPORT_SYMBOL_GPL(inet_diag_dump_icsk);
static int __inet_diag_dump(struct sk_buff *skb, struct netlink_callback *cb, - const struct inet_diag_req_v2 *r, - struct nlattr *bc) + const struct inet_diag_req_v2 *r) { const struct inet_diag_handler *handler; + u32 prev_min_dump_alloc; int err = 0;
+again: + prev_min_dump_alloc = cb->min_dump_alloc; handler = inet_diag_lock_handler(r->sdiag_protocol); if (!IS_ERR(handler)) - handler->dump(skb, cb, r, bc); + handler->dump(skb, cb, r); else err = PTR_ERR(handler); inet_diag_unlock_handler(handler);
+ /* The skb is not large enough to fit one sk info and + * inet_sk_diag_fill() has requested for a larger skb. + */ + if (!skb->len && cb->min_dump_alloc > prev_min_dump_alloc) { + err = pskb_expand_head(skb, 0, cb->min_dump_alloc, GFP_KERNEL); + if (!err) + goto again; + } + return err ? : skb->len; }
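The !skb->len guard is the crux of the retry: growing and re-running the dump is only safe while nothing has been written into this skb. Once a message is in flight, the partial dump is returned and the netlink core re-enters ->dump() with the enlarged allocation. The control flow, sketched:

/* again:
 *   handler->dump() fills sockets until one does not fit and
 *   inet_sk_diag_fill() raises cb->min_dump_alloc
 *
 *   skb still empty  -> pskb_expand_head() to min_dump_alloc, retry
 *   skb partly full  -> return skb->len; the netlink core allocates
 *                       the next skb at min_dump_alloc and calls back
 */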
static int inet_diag_dump(struct sk_buff *skb, struct netlink_callback *cb) { - int hdrlen = sizeof(struct inet_diag_req_v2); - struct nlattr *bc = NULL; + return __inet_diag_dump(skb, cb, nlmsg_data(cb->nlh)); +}
- if (nlmsg_attrlen(cb->nlh, hdrlen)) - bc = nlmsg_find_attr(cb->nlh, hdrlen, INET_DIAG_REQ_BYTECODE); +static int __inet_diag_dump_start(struct netlink_callback *cb, int hdrlen) +{ + const struct nlmsghdr *nlh = cb->nlh; + struct inet_diag_dump_data *cb_data; + struct sk_buff *skb = cb->skb; + struct nlattr *nla; + int rem, err; + + cb_data = kzalloc(sizeof(*cb_data), GFP_KERNEL); + if (!cb_data) + return -ENOMEM; + + nla_for_each_attr(nla, nlmsg_attrdata(nlh, hdrlen), + nlmsg_attrlen(nlh, hdrlen), rem) { + int type = nla_type(nla); + + if (type < __INET_DIAG_REQ_MAX) + cb_data->req_nlas[type] = nla; + } + + nla = cb_data->inet_diag_nla_bc; + if (nla) { + err = inet_diag_bc_audit(nla, skb); + if (err) { + kfree(cb_data); + return err; + } + } + + nla = cb_data->inet_diag_nla_bpf_stgs; + if (nla) { + struct bpf_sk_storage_diag *bpf_stg_diag; + + bpf_stg_diag = bpf_sk_storage_diag_alloc(nla); + if (IS_ERR(bpf_stg_diag)) { + kfree(cb_data); + return PTR_ERR(bpf_stg_diag); + } + cb_data->bpf_stg_diag = bpf_stg_diag; + } + + cb->data = cb_data; + return 0; +} + +static int inet_diag_dump_start(struct netlink_callback *cb) +{ + return __inet_diag_dump_start(cb, sizeof(struct inet_diag_req_v2)); +}
- return __inet_diag_dump(skb, cb, nlmsg_data(cb->nlh), bc); +static int inet_diag_dump_start_compat(struct netlink_callback *cb) +{ + return __inet_diag_dump_start(cb, sizeof(struct inet_diag_req)); +} + +static int inet_diag_dump_done(struct netlink_callback *cb) +{ + struct inet_diag_dump_data *cb_data = cb->data; + + bpf_sk_storage_diag_free(cb_data->bpf_stg_diag); + kfree(cb->data); + + return 0; }
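With .start and .done in place, the parsed request attributes and the optional bpf_sk_storage context live in cb->data for the lifetime of the dump instead of being re-parsed on every ->dump() pass. Registration sketch reusing the handlers above; the surrounding function is hypothetical:

/* Sketch: the start/dump/done triple with per-dump state in cb->data. */
static int sample_diag_req(struct sk_buff *skb, struct nlmsghdr *h)
{
	struct netlink_dump_control c = {
		.start	= inet_diag_dump_start,	/* alloc + parse cb->data */
		.dump	= inet_diag_dump,	/* may run many times */
		.done	= inet_diag_dump_done,	/* free cb->data once */
	};

	return netlink_dump_start(sock_net(skb->sk)->diag_nlsk, skb, h, &c);
}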
static int inet_diag_type2proto(int type) @@@ -1179,7 -1081,9 +1175,7 @@@ static int inet_diag_dump_compat(struc struct netlink_callback *cb) { struct inet_diag_req *rc = nlmsg_data(cb->nlh); - int hdrlen = sizeof(struct inet_diag_req); struct inet_diag_req_v2 req; - struct nlattr *bc = NULL;
req.sdiag_family = AF_UNSPEC; /* compatibility */ req.sdiag_protocol = inet_diag_type2proto(cb->nlh->nlmsg_type); @@@ -1187,7 -1091,10 +1183,7 @@@ req.idiag_states = rc->idiag_states; req.id = rc->id;
- if (nlmsg_attrlen(cb->nlh, hdrlen)) - bc = nlmsg_find_attr(cb->nlh, hdrlen, INET_DIAG_REQ_BYTECODE); - - return __inet_diag_dump(skb, cb, &req, bc); + return __inet_diag_dump(skb, cb, &req); }
static int inet_diag_get_exact_compat(struct sk_buff *in_skb, @@@ -1215,12 -1122,22 +1211,12 @@@ static int inet_diag_rcv_msg_compat(str return -EINVAL;
if (nlh->nlmsg_flags & NLM_F_DUMP) { - if (nlmsg_attrlen(nlh, hdrlen)) { - struct nlattr *attr; - int err; - - attr = nlmsg_find_attr(nlh, hdrlen, - INET_DIAG_REQ_BYTECODE); - err = inet_diag_bc_audit(attr, skb); - if (err) - return err; - } - { - struct netlink_dump_control c = { - .dump = inet_diag_dump_compat, - }; - return netlink_dump_start(net->diag_nlsk, skb, nlh, &c); - } + struct netlink_dump_control c = { + .start = inet_diag_dump_start_compat, + .done = inet_diag_dump_done, + .dump = inet_diag_dump_compat, + }; + return netlink_dump_start(net->diag_nlsk, skb, nlh, &c); }
return inet_diag_get_exact_compat(skb, nlh); @@@ -1236,12 -1153,22 +1232,12 @@@ static int inet_diag_handler_cmd(struc
if (h->nlmsg_type == SOCK_DIAG_BY_FAMILY && h->nlmsg_flags & NLM_F_DUMP) { - if (nlmsg_attrlen(h, hdrlen)) { - struct nlattr *attr; - int err; - - attr = nlmsg_find_attr(h, hdrlen, - INET_DIAG_REQ_BYTECODE); - err = inet_diag_bc_audit(attr, skb); - if (err) - return err; - } - { - struct netlink_dump_control c = { - .dump = inet_diag_dump, - }; - return netlink_dump_start(net->diag_nlsk, skb, h, &c); - } + struct netlink_dump_control c = { + .start = inet_diag_dump_start, + .done = inet_diag_dump_done, + .dump = inet_diag_dump, + }; + return netlink_dump_start(net->diag_nlsk, skb, h, &c); }
return inet_diag_cmd_exact(h->nlmsg_type, skb, h, nlmsg_data(h)); diff --combined net/ipv4/raw_diag.c index d19cce39be1b,a93e7d1e1251..1b5b8af27aaf --- a/net/ipv4/raw_diag.c +++ b/net/ipv4/raw_diag.c @@@ -87,29 -87,32 +87,30 @@@ out_unlock return sk ? sk : ERR_PTR(-ENOENT); }
-static int raw_diag_dump_one(struct sk_buff *in_skb, - const struct nlmsghdr *nlh, +static int raw_diag_dump_one(struct netlink_callback *cb, const struct inet_diag_req_v2 *r) { - struct net *net = sock_net(in_skb->sk); + struct sk_buff *in_skb = cb->skb; struct sk_buff *rep; struct sock *sk; + struct net *net; int err;
+ net = sock_net(in_skb->sk); sk = raw_sock_get(net, r); if (IS_ERR(sk)) return PTR_ERR(sk);
- rep = nlmsg_new(sizeof(struct inet_diag_msg) + - sizeof(struct inet_diag_meminfo) + 64, + rep = nlmsg_new(nla_total_size(sizeof(struct inet_diag_msg)) + + inet_diag_msg_attrs_size() + + nla_total_size(sizeof(struct inet_diag_meminfo)) + 64, GFP_KERNEL); if (!rep) { sock_put(sk); return -ENOMEM; }
- err = inet_sk_diag_fill(sk, NULL, rep, r, - sk_user_ns(NETLINK_CB(in_skb).sk), - NETLINK_CB(in_skb).portid, - nlh->nlmsg_seq, 0, nlh, + err = inet_sk_diag_fill(sk, NULL, rep, cb, r, 0, netlink_net_capable(in_skb, CAP_NET_ADMIN)); sock_put(sk);
@@@ -134,25 -137,25 +135,25 @@@ static int sk_diag_dump(struct sock *sk if (!inet_diag_bc_sk(bc, sk)) return 0;
- return inet_sk_diag_fill(sk, NULL, skb, r, - sk_user_ns(NETLINK_CB(cb->skb).sk), - NETLINK_CB(cb->skb).portid, - cb->nlh->nlmsg_seq, NLM_F_MULTI, - cb->nlh, net_admin); + return inet_sk_diag_fill(sk, NULL, skb, cb, r, NLM_F_MULTI, net_admin); }
static void raw_diag_dump(struct sk_buff *skb, struct netlink_callback *cb, - const struct inet_diag_req_v2 *r, struct nlattr *bc) + const struct inet_diag_req_v2 *r) { bool net_admin = netlink_net_capable(cb->skb, CAP_NET_ADMIN); struct raw_hashinfo *hashinfo = raw_get_hashinfo(r); struct net *net = sock_net(skb->sk); + struct inet_diag_dump_data *cb_data; int num, s_num, slot, s_slot; struct sock *sk = NULL; + struct nlattr *bc;
if (IS_ERR(hashinfo)) return;
+ cb_data = cb->data; + bc = cb_data->inet_diag_nla_bc; s_slot = cb->args[0]; num = s_num = cb->args[1];
diff --combined net/ipv4/udp_diag.c index 93884696abdd,dccd2286bc28..1dbece34496e --- a/net/ipv4/udp_diag.c +++ b/net/ipv4/udp_diag.c @@@ -21,15 -21,16 +21,15 @@@ static int sk_diag_dump(struct sock *sk if (!inet_diag_bc_sk(bc, sk)) return 0;
- return inet_sk_diag_fill(sk, NULL, skb, req, - sk_user_ns(NETLINK_CB(cb->skb).sk), - NETLINK_CB(cb->skb).portid, - cb->nlh->nlmsg_seq, NLM_F_MULTI, cb->nlh, net_admin); + return inet_sk_diag_fill(sk, NULL, skb, cb, req, NLM_F_MULTI, + net_admin); }
-static int udp_dump_one(struct udp_table *tbl, struct sk_buff *in_skb, - const struct nlmsghdr *nlh, +static int udp_dump_one(struct udp_table *tbl, + struct netlink_callback *cb, const struct inet_diag_req_v2 *req) { + struct sk_buff *in_skb = cb->skb; int err = -EINVAL; struct sock *sk = NULL; struct sk_buff *rep; @@@ -63,14 -64,18 +63,15 @@@ goto out;
err = -ENOMEM; - rep = nlmsg_new(sizeof(struct inet_diag_msg) + - sizeof(struct inet_diag_meminfo) + 64, + rep = nlmsg_new(nla_total_size(sizeof(struct inet_diag_msg)) + + inet_diag_msg_attrs_size() + + nla_total_size(sizeof(struct inet_diag_meminfo)) + 64, GFP_KERNEL); if (!rep) goto out;
- err = inet_sk_diag_fill(sk, NULL, rep, req, - sk_user_ns(NETLINK_CB(in_skb).sk), - NETLINK_CB(in_skb).portid, - nlh->nlmsg_seq, 0, nlh, - netlink_net_capable(in_skb, CAP_NET_ADMIN)); + err = inet_sk_diag_fill(sk, NULL, rep, cb, req, 0, + netlink_net_capable(in_skb, CAP_NET_ADMIN)); if (err < 0) { WARN_ON(err == -EMSGSIZE); kfree_skb(rep); @@@ -89,16 -94,12 +90,16 @@@ out_nosk
static void udp_dump(struct udp_table *table, struct sk_buff *skb, struct netlink_callback *cb, - const struct inet_diag_req_v2 *r, struct nlattr *bc) + const struct inet_diag_req_v2 *r) { bool net_admin = netlink_net_capable(cb->skb, CAP_NET_ADMIN); struct net *net = sock_net(skb->sk); + struct inet_diag_dump_data *cb_data; int num, s_num, slot, s_slot; + struct nlattr *bc;
+ cb_data = cb->data; + bc = cb_data->inet_diag_nla_bc; s_slot = cb->args[0]; num = s_num = cb->args[1];
@@@ -146,15 -147,15 +147,15 @@@ done }
static void udp_diag_dump(struct sk_buff *skb, struct netlink_callback *cb, - const struct inet_diag_req_v2 *r, struct nlattr *bc) + const struct inet_diag_req_v2 *r) { - udp_dump(&udp_table, skb, cb, r, bc); + udp_dump(&udp_table, skb, cb, r); }
-static int udp_diag_dump_one(struct sk_buff *in_skb, const struct nlmsghdr *nlh, +static int udp_diag_dump_one(struct netlink_callback *cb, const struct inet_diag_req_v2 *req) { - return udp_dump_one(&udp_table, in_skb, nlh, req); + return udp_dump_one(&udp_table, cb, req); }
static void udp_diag_get_info(struct sock *sk, struct inet_diag_msg *r, @@@ -249,15 -250,16 +250,15 @@@ static const struct inet_diag_handler u };
static void udplite_diag_dump(struct sk_buff *skb, struct netlink_callback *cb, - const struct inet_diag_req_v2 *r, - struct nlattr *bc) + const struct inet_diag_req_v2 *r) { - udp_dump(&udplite_table, skb, cb, r, bc); + udp_dump(&udplite_table, skb, cb, r); }
-static int udplite_diag_dump_one(struct sk_buff *in_skb, const struct nlmsghdr *nlh, +static int udplite_diag_dump_one(struct netlink_callback *cb, const struct inet_diag_req_v2 *req) { - return udp_dump_one(&udplite_table, in_skb, nlh, req); + return udp_dump_one(&udplite_table, cb, req); }
static const struct inet_diag_handler udplite_diag_handler = { diff --combined net/ipv6/addrconf.c index c614249dfb7d,46d614b611db..5b9de773ce73 --- a/net/ipv6/addrconf.c +++ b/net/ipv6/addrconf.c @@@ -1226,11 -1226,13 +1226,13 @@@ check_cleanup_prefix_route(struct inet6 }
static void - cleanup_prefix_route(struct inet6_ifaddr *ifp, unsigned long expires, bool del_rt) + cleanup_prefix_route(struct inet6_ifaddr *ifp, unsigned long expires, + bool del_rt, bool del_peer) { struct fib6_info *f6i;
- f6i = addrconf_get_prefix_route(&ifp->addr, ifp->prefix_len, + f6i = addrconf_get_prefix_route(del_peer ? &ifp->peer_addr : &ifp->addr, + ifp->prefix_len, ifp->idev->dev, 0, RTF_DEFAULT, true); if (f6i) { if (del_rt) @@@ -1293,7 -1295,7 +1295,7 @@@ static void ipv6_del_addr(struct inet6_
if (action != CLEANUP_PREFIX_RT_NOP) { cleanup_prefix_route(ifp, expires, - action == CLEANUP_PREFIX_RT_DEL); + action == CLEANUP_PREFIX_RT_DEL, false); }
/* clean up prefsrc entries */ @@@ -3299,7 -3301,7 +3301,7 @@@ static void addrconf_addr_gen(struct in switch (idev->cnf.addr_gen_mode) { case IN6_ADDR_GEN_MODE_RANDOM: ipv6_gen_mode_random_init(idev); - /* fallthrough */ + fallthrough; case IN6_ADDR_GEN_MODE_STABLE_PRIVACY: if (!ipv6_generate_stable_address(&addr, 0, idev)) addrconf_add_linklocal(idev, &addr, @@@ -3345,6 -3347,10 +3347,10 @@@ static void addrconf_dev_config(struct (dev->type != ARPHRD_NONE) && (dev->type != ARPHRD_RAWIP)) { /* Alas, we support only Ethernet autoconfiguration. */ + idev = __in6_dev_get(dev); + if (!IS_ERR_OR_NULL(idev) && dev->flags & IFF_UP && + dev->flags & IFF_MULTICAST) + ipv6_mc_up(idev); return; }
@@@ -3517,7 -3523,9 +3523,7 @@@ static int addrconf_notify(struct notif break;
run_pending = 1; - - /* fall through */ - + fallthrough; case NETDEV_UP: case NETDEV_CHANGE: if (dev->flags & IFF_SLAVE) @@@ -4584,12 -4592,14 +4590,14 @@@ inet6_rtm_deladdr(struct sk_buff *skb, }
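These conversions replace the free-form /* fall through */ comments with the fallthrough pseudo-keyword, so -Wimplicit-fallthrough can verify intent. A sketch of the mechanism, assuming the macro is defined along the lines of include/linux/compiler_attributes.h in this tree (case labels and the helper are illustrative):

    /* Sketch: fallthrough maps to a statement attribute where the
     * compiler supports it, and to a harmless no-op otherwise.
     */
    #if __has_attribute(__fallthrough__)
    # define fallthrough    __attribute__((__fallthrough__))
    #else
    # define fallthrough    do {} while (0)  /* fallthrough */
    #endif

    switch (event) {
    case NETDEV_REGISTER:           /* illustrative case */
            run_pending = 1;
            fallthrough;            /* deliberate: share the UP path */
    case NETDEV_UP:
            handle_up();            /* hypothetical helper */
            break;
    }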
static int modify_prefix_route(struct inet6_ifaddr *ifp, - unsigned long expires, u32 flags) + unsigned long expires, u32 flags, + bool modify_peer) { struct fib6_info *f6i; u32 prio;
- f6i = addrconf_get_prefix_route(&ifp->addr, ifp->prefix_len, + f6i = addrconf_get_prefix_route(modify_peer ? &ifp->peer_addr : &ifp->addr, + ifp->prefix_len, ifp->idev->dev, 0, RTF_DEFAULT, true); if (!f6i) return -ENOENT; @@@ -4600,7 -4610,8 +4608,8 @@@ ip6_del_rt(dev_net(ifp->idev->dev), f6i);
/* add new one */ - addrconf_prefix_route(&ifp->addr, ifp->prefix_len, + addrconf_prefix_route(modify_peer ? &ifp->peer_addr : &ifp->addr, + ifp->prefix_len, ifp->rt_priority, ifp->idev->dev, expires, flags, GFP_KERNEL); } else { @@@ -4622,6 -4633,7 +4631,7 @@@ static int inet6_addr_modify(struct ine unsigned long timeout; bool was_managetempaddr; bool had_prefixroute; + bool new_peer = false;
ASSERT_RTNL();
@@@ -4653,6 -4665,13 +4663,13 @@@ cfg->preferred_lft = timeout; }
+ if (cfg->peer_pfx && + memcmp(&ifp->peer_addr, cfg->peer_pfx, sizeof(struct in6_addr))) { + if (!ipv6_addr_any(&ifp->peer_addr)) + cleanup_prefix_route(ifp, expires, true, true); + new_peer = true; + } + spin_lock_bh(&ifp->lock); was_managetempaddr = ifp->flags & IFA_F_MANAGETEMPADDR; had_prefixroute = ifp->flags & IFA_F_PERMANENT && @@@ -4668,6 -4687,9 +4685,9 @@@ if (cfg->rt_priority && cfg->rt_priority != ifp->rt_priority) ifp->rt_priority = cfg->rt_priority;
+ if (new_peer) + ifp->peer_addr = *cfg->peer_pfx; + spin_unlock_bh(&ifp->lock); if (!(ifp->flags&IFA_F_TENTATIVE)) ipv6_ifa_notify(0, ifp); @@@ -4676,7 -4698,7 +4696,7 @@@ int rc = -ENOENT;
if (had_prefixroute) - rc = modify_prefix_route(ifp, expires, flags); + rc = modify_prefix_route(ifp, expires, flags, false);
/* prefix route could have been deleted; if so restore it */ if (rc == -ENOENT) { @@@ -4684,6 -4706,15 +4704,15 @@@ ifp->rt_priority, ifp->idev->dev, expires, flags, GFP_KERNEL); } + + if (had_prefixroute && !ipv6_addr_any(&ifp->peer_addr)) + rc = modify_prefix_route(ifp, expires, flags, true); + + if (rc == -ENOENT && !ipv6_addr_any(&ifp->peer_addr)) { + addrconf_prefix_route(&ifp->peer_addr, ifp->prefix_len, + ifp->rt_priority, ifp->idev->dev, + expires, flags, GFP_KERNEL); + } } else if (had_prefixroute) { enum cleanup_prefix_rt_t action; unsigned long rt_expires; @@@ -4694,7 -4725,7 +4723,7 @@@
if (action != CLEANUP_PREFIX_RT_NOP) { cleanup_prefix_route(ifp, rt_expires, - action == CLEANUP_PREFIX_RT_DEL); + action == CLEANUP_PREFIX_RT_DEL, false); } }
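Taken together with the modify_prefix_route() change, the address-modify path now maintains the peer prefix route alongside the local one. A condensed sketch of the resulting flow (guards and locking elided; calls as in the hunks above):

    /* Sketch: update the local prefix route, then the peer route when a
     * peer address is set; -ENOENT means the route was deleted underneath
     * and gets re-added.
     */
    rc = modify_prefix_route(ifp, expires, flags, false);
    if (rc == -ENOENT)
            addrconf_prefix_route(&ifp->addr, ifp->prefix_len,
                                  ifp->rt_priority, ifp->idev->dev,
                                  expires, flags, GFP_KERNEL);

    if (!ipv6_addr_any(&ifp->peer_addr)) {
            rc = modify_prefix_route(ifp, expires, flags, true);
            if (rc == -ENOENT)
                    addrconf_prefix_route(&ifp->peer_addr, ifp->prefix_len,
                                          ifp->rt_priority, ifp->idev->dev,
                                          expires, flags, GFP_KERNEL);
    }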
@@@ -5981,9 -6012,9 +6010,9 @@@ static void __ipv6_ifa_notify(int event if (ifp->idev->cnf.forwarding) addrconf_join_anycast(ifp); if (!ipv6_addr_any(&ifp->peer_addr)) - addrconf_prefix_route(&ifp->peer_addr, 128, 0, - ifp->idev->dev, 0, 0, - GFP_ATOMIC); + addrconf_prefix_route(&ifp->peer_addr, 128, + ifp->rt_priority, ifp->idev->dev, + 0, 0, GFP_ATOMIC); break; case RTM_DELADDR: if (ifp->idev->cnf.forwarding) diff --combined net/ipv6/seg6_iptunnel.c index d8afe7290de8,8c52efe299cc..ac837afb9040 --- a/net/ipv6/seg6_iptunnel.c +++ b/net/ipv6/seg6_iptunnel.c @@@ -29,7 -29,7 +29,7 @@@
struct seg6_lwt { struct dst_cache cache; - struct seg6_iptunnel_encap tuninfo[0]; + struct seg6_iptunnel_encap tuninfo[]; };
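This is part of the tree-wide move from GNU zero-length arrays ([0]) to C99 flexible array members ([]), which tell the compiler and fortify checks that the trailing array has no fixed bound. A self-contained userspace sketch of the same allocation pattern, with names invented for illustration:

    #include <stdlib.h>
    #include <string.h>

    /* Illustrative type: fixed header followed by a variable payload. */
    struct blob {
            size_t len;
            unsigned char data[];   /* flexible array member, not data[0] */
    };

    static struct blob *blob_new(const void *src, size_t len)
    {
            /* in-kernel code would use struct_size(b, data, len) */
            struct blob *b = malloc(sizeof(*b) + len);

            if (!b)
                    return NULL;
            b->len = len;
            memcpy(b->data, src, len);
            return b;
    }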
static inline struct seg6_lwt *seg6_lwt_lwtunnel(struct lwtunnel_state *lwt) @@@ -268,7 -268,7 +268,7 @@@ static int seg6_do_srh(struct sk_buff * skb_mac_header_rebuild(skb); skb_push(skb, skb->mac_len);
- err = seg6_do_srh_encap(skb, tinfo->srh, NEXTHDR_NONE); + err = seg6_do_srh_encap(skb, tinfo->srh, IPPROTO_ETHERNET); if (err) return err;
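The fix replaces NEXTHDR_NONE with IPPROTO_ETHERNET (the IANA-assigned protocol number 143 for Ethernet) as the inner protocol of an SRv6 L2 encapsulation, so the decapsulating endpoint can recognize the payload as an Ethernet frame. A sketch of the protocol selection, assuming the surrounding mode switch of seg6_do_srh(); only the L2ENCAP line is taken from this hunk:

    /* Sketch: pick the inner protocol for the outer SRH by tunnel mode. */
    switch (tinfo->mode) {
    case SEG6_IPTUN_MODE_ENCAP:
            proto = skb->protocol == htons(ETH_P_IPV6) ? IPPROTO_IPV6
                                                       : IPPROTO_IPIP;
            break;
    case SEG6_IPTUN_MODE_L2ENCAP:
            proto = IPPROTO_ETHERNET;       /* was NEXTHDR_NONE (59) */
            break;
    }
    err = seg6_do_srh_encap(skb, tinfo->srh, proto);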
diff --combined net/mptcp/options.c index b9a8305bd934,fd2c3150e591..9c71f427e6e3 --- a/net/mptcp/options.c +++ b/net/mptcp/options.c @@@ -304,22 -304,21 +304,22 @@@ static bool mptcp_established_options_m static void mptcp_write_data_fin(struct mptcp_subflow_context *subflow, struct mptcp_ext *ext) { - ext->data_fin = 1; - if (!ext->use_map) { /* RFC6824 requires a DSS mapping with specific values * if DATA_FIN is set but no data payload is mapped */ + ext->data_fin = 1; ext->use_map = 1; ext->dsn64 = 1; - ext->data_seq = mptcp_sk(subflow->conn)->write_seq; + ext->data_seq = subflow->data_fin_tx_seq; ext->subflow_seq = 0; ext->data_len = 1; - } else { - /* If there's an existing DSS mapping, DATA_FIN consumes - * 1 additional byte of mapping space. + } else if (ext->data_seq + ext->data_len == subflow->data_fin_tx_seq) { + /* If there's an existing DSS mapping and it is the + * final mapping, DATA_FIN consumes 1 additional byte of + * mapping space. */ + ext->data_fin = 1; ext->data_len++; } } @@@ -335,6 -334,8 +335,8 @@@ static bool mptcp_established_options_d struct mptcp_sock *msk; unsigned int ack_size; bool ret = false; + bool can_ack; + u64 ack_seq; u8 tcp_fin;
if (skb) { @@@ -355,14 -356,28 +357,27 @@@ if (mpext) opts->ext_copy = *mpext;
- if (skb && tcp_fin && - subflow->conn->sk_state != TCP_ESTABLISHED) + if (skb && tcp_fin && subflow->data_fin_tx_enable) mptcp_write_data_fin(subflow, &opts->ext_copy); ret = true; }
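The mptcp_write_data_fin() rework above follows RFC 6824's rule that DATA_FIN consumes one octet of data-sequence space: with no mapping on the skb, a dedicated length-1 mapping is emitted with subflow_seq 0; with an existing mapping, it is extended only when it ends exactly at the DATA_FIN sequence. A sketch of that second case with concrete numbers (values illustrative):

    /* Sketch: DATA_FIN owns one octet of data-sequence space.
     * Existing mapping: data_seq = 1000, data_len = 100, ending at
     * 1100.  If data_fin_tx_seq == 1100, the mapping absorbs DATA_FIN:
     */
    if (ext->data_seq + ext->data_len == subflow->data_fin_tx_seq) {
            ext->data_fin = 1;
            ext->data_len++;        /* 100 -> 101 */
    }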
+ /* passive sockets msk will set the 'can_ack' after accept(), even + * if the first subflow may already have the remote key handy + */ + can_ack = true; opts->ext_copy.use_ack = 0; msk = mptcp_sk(subflow->conn); - if (!msk || !READ_ONCE(msk->can_ack)) { + if (likely(msk && READ_ONCE(msk->can_ack))) { + ack_seq = msk->ack_seq; + } else if (subflow->can_ack) { + mptcp_crypto_key_sha(subflow->remote_key, NULL, &ack_seq); + ack_seq++; + } else { + can_ack = false; + } + + if (unlikely(!can_ack)) { *size = ALIGN(dss_size, 4); return ret; } @@@ -375,7 -390,7 +390,7 @@@
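The DATA_ACK fallback mirrors the handshake: an established msk supplies ack_seq directly, while a passive first subflow that only has the remote key derives the initial value from it. A sketch of the derivation, following the hunk above; the increment reflects that the data-level ACK points one past the peer's initial data sequence number:

    u64 ack_seq;
    bool can_ack = true;

    if (likely(msk && READ_ONCE(msk->can_ack))) {
            ack_seq = msk->ack_seq;         /* normal, established path */
    } else if (subflow->can_ack) {
            /* derive the peer IDSN from its key, as the handshake does */
            mptcp_crypto_key_sha(subflow->remote_key, NULL, &ack_seq);
            ack_seq++;
    } else {
            can_ack = false;                /* no key yet: omit DATA_ACK */
    }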
dss_size += ack_size;
- opts->ext_copy.data_ack = msk->ack_seq; + opts->ext_copy.data_ack = ack_seq; opts->ext_copy.ack64 = 1; opts->ext_copy.use_ack = 1;
diff --combined net/netlink/af_netlink.c index 19df49a6ad15,5313f1cec170..ed77c75bf63f --- a/net/netlink/af_netlink.c +++ b/net/netlink/af_netlink.c @@@ -71,7 -71,7 +71,7 @@@
struct listeners { struct rcu_head rcu; - unsigned long masks[0]; + unsigned long masks[]; };
/* state bits */ @@@ -2434,7 -2434,7 +2434,7 @@@ void netlink_ack(struct sk_buff *in_skb in_skb->len)) WARN_ON(nla_put_u32(skb, NLMSGERR_ATTR_OFFS, (u8 *)extack->bad_attr - - in_skb->data)); + (u8 *)nlh)); } else { if (extack->cookie_len) WARN_ON(nla_put(skb, NLMSGERR_ATTR_COOKIE, @@@ -2583,7 -2583,6 +2583,7 @@@ static void *__netlink_seq_next(struct }
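The netlink_ack() fix above changes the reference point of NLMSGERR_ATTR_OFFS: userspace only receives the echoed nlmsghdr in the error payload, so the bad-attribute offset must be computed against nlh rather than against the start of the kernel's receive buffer. In sketch form:

    /* Sketch: offset relative to the header echoed back to userspace. */
    u32 offs = (u8 *)extack->bad_attr - (u8 *)nlh;  /* was: - in_skb->data */

    WARN_ON(nla_put_u32(skb, NLMSGERR_ATTR_OFFS, offs));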
static void *netlink_seq_start(struct seq_file *seq, loff_t *posp) + __acquires(RCU) { struct nl_seq_iter *iter = seq->private; void *obj = SEQ_START_TOKEN; diff --combined net/sctp/diag.c index 69743a6aaf6f,1069d7af3672..493fc01e5d2b --- a/net/sctp/diag.c +++ b/net/sctp/diag.c @@@ -237,15 -237,11 +237,11 @@@ static size_t inet_assoc_attr_size(stru addrcnt++;
return nla_total_size(sizeof(struct sctp_info)) - + nla_total_size(1) /* INET_DIAG_SHUTDOWN */ - + nla_total_size(1) /* INET_DIAG_TOS */ - + nla_total_size(1) /* INET_DIAG_TCLASS */ - + nla_total_size(4) /* INET_DIAG_MARK */ - + nla_total_size(4) /* INET_DIAG_CLASS_ID */ + nla_total_size(addrlen * asoc->peer.transport_count) + nla_total_size(addrlen * addrcnt) - + nla_total_size(sizeof(struct inet_diag_meminfo)) + nla_total_size(sizeof(struct inet_diag_msg)) + + inet_diag_msg_attrs_size() + + nla_total_size(sizeof(struct inet_diag_meminfo)) + 64; }
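The open-coded per-attribute nla_total_size() terms are folded into the shared inet_diag_msg_attrs_size() helper so every diag family sizes the common attributes identically. The helper's body is not part of this diff; presumably it aggregates roughly the following (a hedged reconstruction, to be checked against include/linux/inet_diag.h):

    static inline size_t inet_diag_msg_attrs_size(void)
    {
            return    nla_total_size(1)     /* INET_DIAG_SHUTDOWN */
                    + nla_total_size(1)     /* INET_DIAG_TOS */
    #if IS_ENABLED(CONFIG_IPV6)
                    + nla_total_size(1)     /* INET_DIAG_TCLASS */
                    + nla_total_size(1)     /* INET_DIAG_SKV6ONLY */
    #endif
                    + nla_total_size(4)     /* INET_DIAG_MARK */
                    + nla_total_size(4);    /* INET_DIAG_CLASS_ID */
    }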
@@@ -432,12 -428,11 +428,12 @@@ static void sctp_diag_get_info(struct s sctp_get_sctp_info(sk, infox->asoc, infox->sctpinfo); }
-static int sctp_diag_dump_one(struct sk_buff *in_skb, - const struct nlmsghdr *nlh, +static int sctp_diag_dump_one(struct netlink_callback *cb, const struct inet_diag_req_v2 *req) { + struct sk_buff *in_skb = cb->skb; struct net *net = sock_net(in_skb->sk); + const struct nlmsghdr *nlh = cb->nlh; union sctp_addr laddr, paddr; struct sctp_comm_param commp = { .skb = in_skb, @@@ -471,7 -466,7 +467,7 @@@ }
static void sctp_diag_dump(struct sk_buff *skb, struct netlink_callback *cb, - const struct inet_diag_req_v2 *r, struct nlattr *bc) + const struct inet_diag_req_v2 *r) { u32 idiag_states = r->idiag_states; struct net *net = sock_net(skb->sk); diff --combined net/smc/smc_ib.c index 9239cf881f21,05b825b3cfa4..04b6fefb8bce --- a/net/smc/smc_ib.c +++ b/net/smc/smc_ib.c @@@ -37,7 -37,11 +37,7 @@@ struct smc_ib_devices smc_ib_devices = .list = LIST_HEAD_INIT(smc_ib_devices.list), };
-#define SMC_LOCAL_SYSTEMID_RESET "%%%%%%%" - -u8 local_systemid[SMC_SYSTEMID_LEN] = SMC_LOCAL_SYSTEMID_RESET; /* unique system - * identifier - */ +u8 local_systemid[SMC_SYSTEMID_LEN]; /* unique system identifier */
static int smc_ib_modify_qp_init(struct smc_link *lnk) { @@@ -164,15 -168,6 +164,15 @@@ static inline void smc_ib_define_local_ { memcpy(&local_systemid[2], &smcibdev->mac[ibport - 1], sizeof(smcibdev->mac[ibport - 1])); +} + +bool smc_ib_is_valid_local_systemid(void) +{ + return !is_zero_ether_addr(&local_systemid[2]); +} + +static void smc_ib_init_local_systemid(void) +{ get_random_bytes(&local_systemid[0], 2); }
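Rather than comparing against the old '%%%%%%%' reset pattern, validity of the system identifier is now defined by its MAC portion being non-zero, with the two random prefix bytes seeded once at client registration (see smc_ib_register_client() below). The lifecycle, condensed from the hunks above:

    /* Sketch: bytes 0-1 hold random data, bytes 2-7 a port MAC; the id
     * counts as set once the MAC portion is non-zero.
     */
    u8 local_systemid[SMC_SYSTEMID_LEN];    /* zero-initialized */

    static void smc_ib_init_local_systemid(void)
    {
            get_random_bytes(&local_systemid[0], 2);
    }

    bool smc_ib_is_valid_local_systemid(void)
    {
            return !is_zero_ether_addr(&local_systemid[2]);
    }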
@@@ -229,7 -224,8 +229,7 @@@ static int smc_ib_remember_port_attr(st rc = smc_ib_fill_mac(smcibdev, ibport); if (rc) goto out; - if (!strncmp(local_systemid, SMC_LOCAL_SYSTEMID_RESET, - sizeof(local_systemid)) && + if (!smc_ib_is_valid_local_systemid() && smc_ib_port_active(smcibdev, ibport)) /* create unique system identifier */ smc_ib_define_local_systemid(smcibdev, ibport); @@@ -261,7 -257,6 +261,7 @@@ static void smc_ib_global_event_handler struct ib_event *ibevent) { struct smc_ib_device *smcibdev; + bool schedule = false; u8 port_idx;
smcibdev = container_of(handler, struct smc_ib_device, event_handler); @@@ -271,35 -266,22 +271,35 @@@ /* terminate all ports on device */ for (port_idx = 0; port_idx < SMC_MAX_PORTS; port_idx++) { set_bit(port_idx, &smcibdev->port_event_mask); - set_bit(port_idx, smcibdev->ports_going_away); + if (!test_and_set_bit(port_idx, + smcibdev->ports_going_away)) + schedule = true; } - schedule_work(&smcibdev->port_event_work); + if (schedule) + schedule_work(&smcibdev->port_event_work); break; - case IB_EVENT_PORT_ERR: case IB_EVENT_PORT_ACTIVE: - case IB_EVENT_GID_CHANGE: port_idx = ibevent->element.port_num - 1; - if (port_idx < SMC_MAX_PORTS) { - set_bit(port_idx, &smcibdev->port_event_mask); - if (ibevent->event == IB_EVENT_PORT_ERR) - set_bit(port_idx, smcibdev->ports_going_away); - else if (ibevent->event == IB_EVENT_PORT_ACTIVE) - clear_bit(port_idx, smcibdev->ports_going_away); + if (port_idx >= SMC_MAX_PORTS) + break; + set_bit(port_idx, &smcibdev->port_event_mask); + if (test_and_clear_bit(port_idx, smcibdev->ports_going_away)) schedule_work(&smcibdev->port_event_work); - } + break; + case IB_EVENT_PORT_ERR: + port_idx = ibevent->element.port_num - 1; + if (port_idx >= SMC_MAX_PORTS) + break; + set_bit(port_idx, &smcibdev->port_event_mask); + if (!test_and_set_bit(port_idx, smcibdev->ports_going_away)) + schedule_work(&smcibdev->port_event_work); + break; + case IB_EVENT_GID_CHANGE: + port_idx = ibevent->element.port_num - 1; + if (port_idx >= SMC_MAX_PORTS) + break; + set_bit(port_idx, &smcibdev->port_event_mask); + schedule_work(&smcibdev->port_event_work); break; default: break; @@@ -334,11 -316,11 +334,11 @@@ static void smc_ib_qp_event_handler(str case IB_EVENT_QP_FATAL: case IB_EVENT_QP_ACCESS_ERR: port_idx = ibevent->element.qp->port - 1; - if (port_idx < SMC_MAX_PORTS) { - set_bit(port_idx, &smcibdev->port_event_mask); - set_bit(port_idx, smcibdev->ports_going_away); + if (port_idx >= SMC_MAX_PORTS) + break; + set_bit(port_idx, &smcibdev->port_event_mask); + if (!test_and_set_bit(port_idx, smcibdev->ports_going_away)) schedule_work(&smcibdev->port_event_work); - } break; default: break; @@@ -600,6 -582,7 +600,7 @@@ static void smc_ib_remove_dev(struct ib smc_smcr_terminate_all(smcibdev); smc_ib_cleanup_per_ibdev(smcibdev); ib_unregister_event_handler(&smcibdev->event_handler); + cancel_work_sync(&smcibdev->port_event_work); kfree(smcibdev); }
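The event handlers switch to test_and_set_bit()/test_and_clear_bit() so the port worker is scheduled only when a bit actually transitions, coalescing bursts of identical events instead of queueing the work once per event. The pattern, sketched from the hunks above:

    /* Sketch: atomically observe-and-update; schedule work only on a
     * real state change.
     */
    if (!test_and_set_bit(port_idx, smcibdev->ports_going_away))
            schedule_work(&smcibdev->port_event_work);  /* going down */

    if (test_and_clear_bit(port_idx, smcibdev->ports_going_away))
            schedule_work(&smcibdev->port_event_work);  /* came back */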
@@@ -611,7 -594,6 +612,7 @@@ static struct ib_client smc_ib_client
int __init smc_ib_register_client(void) { + smc_ib_init_local_systemid(); return ib_register_client(&smc_ib_client); }
diff --combined net/wireless/nl80211.c index 15000275b32d,ec5d67794aab..750b73a52fd8 --- a/net/wireless/nl80211.c +++ b/net/wireless/nl80211.c @@@ -322,29 -322,6 +322,29 @@@ he_obss_pd_policy[NL80211_HE_OBSS_PD_AT NLA_POLICY_RANGE(NLA_U8, 1, 20), };
+static const struct nla_policy +he_bss_color_policy[NL80211_HE_BSS_COLOR_ATTR_MAX + 1] = { + [NL80211_HE_BSS_COLOR_ATTR_COLOR] = NLA_POLICY_RANGE(NLA_U8, 1, 63), + [NL80211_HE_BSS_COLOR_ATTR_DISABLED] = { .type = NLA_FLAG }, + [NL80211_HE_BSS_COLOR_ATTR_PARTIAL] = { .type = NLA_FLAG }, +}; + +static const struct nla_policy +nl80211_tid_config_attr_policy[NL80211_TID_CONFIG_ATTR_MAX + 1] = { + [NL80211_TID_CONFIG_ATTR_VIF_SUPP] = { .type = NLA_U64 }, + [NL80211_TID_CONFIG_ATTR_PEER_SUPP] = { .type = NLA_U64 }, + [NL80211_TID_CONFIG_ATTR_OVERRIDE] = { .type = NLA_FLAG }, + [NL80211_TID_CONFIG_ATTR_TIDS] = NLA_POLICY_RANGE(NLA_U16, 1, 0xff), + [NL80211_TID_CONFIG_ATTR_NOACK] = + NLA_POLICY_MAX(NLA_U8, NL80211_TID_CONFIG_DISABLE), + [NL80211_TID_CONFIG_ATTR_RETRY_SHORT] = NLA_POLICY_MIN(NLA_U8, 1), + [NL80211_TID_CONFIG_ATTR_RETRY_LONG] = NLA_POLICY_MIN(NLA_U8, 1), + [NL80211_TID_CONFIG_ATTR_AMPDU_CTRL] = + NLA_POLICY_MAX(NLA_U8, NL80211_TID_CONFIG_DISABLE), + [NL80211_TID_CONFIG_ATTR_RTSCTS_CTRL] = + NLA_POLICY_MAX(NLA_U8, NL80211_TID_CONFIG_DISABLE), +}; + const struct nla_policy nl80211_policy[NUM_NL80211_ATTR] = { [0] = { .strict_start_type = NL80211_ATTR_HE_OBSS_PD }, [NL80211_ATTR_WIPHY] = { .type = NLA_U32 }, @@@ -385,7 -362,7 +385,7 @@@ [NL80211_ATTR_KEY] = { .type = NLA_NESTED, }, [NL80211_ATTR_KEY_DATA] = { .type = NLA_BINARY, .len = WLAN_MAX_KEY_LEN }, - [NL80211_ATTR_KEY_IDX] = NLA_POLICY_MAX(NLA_U8, 5), + [NL80211_ATTR_KEY_IDX] = NLA_POLICY_MAX(NLA_U8, 7), [NL80211_ATTR_KEY_CIPHER] = { .type = NLA_U32 }, [NL80211_ATTR_KEY_DEFAULT] = { .type = NLA_FLAG }, [NL80211_ATTR_KEY_SEQ] = { .type = NLA_BINARY, .len = 16 }, @@@ -493,6 -470,8 +493,8 @@@ [NL80211_ATTR_WOWLAN_TRIGGERS] = { .type = NLA_NESTED }, [NL80211_ATTR_STA_PLINK_STATE] = NLA_POLICY_MAX(NLA_U8, NUM_NL80211_PLINK_STATES - 1), + [NL80211_ATTR_MEASUREMENT_DURATION] = { .type = NLA_U16 }, + [NL80211_ATTR_MEASUREMENT_DURATION_MANDATORY] = { .type = NLA_FLAG }, [NL80211_ATTR_MESH_PEER_AID] = NLA_POLICY_RANGE(NLA_U16, 1, IEEE80211_MAX_AID), [NL80211_ATTR_SCHED_SCAN_INTERVAL] = { .type = NLA_U32 }, @@@ -554,6 -533,8 +556,8 @@@ [NL80211_ATTR_MDID] = { .type = NLA_U16 }, [NL80211_ATTR_IE_RIC] = { .type = NLA_BINARY, .len = IEEE80211_MAX_DATA_LEN }, + [NL80211_ATTR_CRIT_PROT_ID] = { .type = NLA_U16 }, + [NL80211_ATTR_MAX_CRIT_PROT_DURATION] = { .type = NLA_U16 }, [NL80211_ATTR_PEER_AID] = NLA_POLICY_RANGE(NLA_U16, 1, IEEE80211_MAX_AID), [NL80211_ATTR_CH_SWITCH_COUNT] = { .type = NLA_U32 }, @@@ -584,6 -565,7 +588,7 @@@ NLA_POLICY_MAX(NLA_U8, IEEE80211_NUM_UPS - 1), [NL80211_ATTR_ADMITTED_TIME] = { .type = NLA_U16 }, [NL80211_ATTR_SMPS_MODE] = { .type = NLA_U8 }, + [NL80211_ATTR_OPER_CLASS] = { .type = NLA_U8 }, [NL80211_ATTR_MAC_MASK] = { .type = NLA_EXACT_LEN_WARN, .len = ETH_ALEN @@@ -650,9 -632,6 +655,9 @@@ [NL80211_ATTR_TWT_RESPONDER] = { .type = NLA_FLAG }, [NL80211_ATTR_HE_OBSS_PD] = NLA_POLICY_NESTED(he_obss_pd_policy), [NL80211_ATTR_VLAN_ID] = NLA_POLICY_RANGE(NLA_U16, 1, VLAN_N_VID - 2), + [NL80211_ATTR_HE_BSS_COLOR] = NLA_POLICY_NESTED(he_bss_color_policy), + [NL80211_ATTR_TID_CONFIG] = + NLA_POLICY_NESTED_ARRAY(nl80211_tid_config_attr_policy), };
/* policy for the key attributes */ @@@ -992,9 -971,6 +997,9 @@@ static int nl80211_msg_put_channel(stru if ((chan->flags & IEEE80211_CHAN_NO_10MHZ) && nla_put_flag(msg, NL80211_FREQUENCY_ATTR_NO_10MHZ)) goto nla_put_failure; + if ((chan->flags & IEEE80211_CHAN_NO_HE) && + nla_put_flag(msg, NL80211_FREQUENCY_ATTR_NO_HE)) + goto nla_put_failure; }
if (nla_put_u32(msg, NL80211_FREQUENCY_ATTR_MAX_TX_POWER, @@@ -1056,7 -1032,7 +1061,7 @@@ struct key_parse struct key_params p; int idx; int type; - bool def, defmgmt; + bool def, defmgmt, defbeacon; bool def_uni, def_multi; };
@@@ -1072,13 -1048,12 +1077,13 @@@ static int nl80211_parse_key_new(struc
k->def = !!tb[NL80211_KEY_DEFAULT]; k->defmgmt = !!tb[NL80211_KEY_DEFAULT_MGMT]; + k->defbeacon = !!tb[NL80211_KEY_DEFAULT_BEACON];
if (k->def) { k->def_uni = true; k->def_multi = true; } - if (k->defmgmt) + if (k->defmgmt || k->defbeacon) k->def_multi = true;
if (tb[NL80211_KEY_IDX]) @@@ -1185,17 -1160,14 +1190,17 @@@ static int nl80211_parse_key(struct gen if (err) return err;
- if (k->def && k->defmgmt) { - GENL_SET_ERR_MSG(info, "key with def && defmgmt is invalid"); + if ((k->def ? 1 : 0) + (k->defmgmt ? 1 : 0) + + (k->defbeacon ? 1 : 0) > 1) { + GENL_SET_ERR_MSG(info, + "key with multiple default flags is invalid"); return -EINVAL; }
- if (k->defmgmt) { + if (k->defmgmt || k->defbeacon) { if (k->def_uni || !k->def_multi) { - GENL_SET_ERR_MSG(info, "defmgmt key must be mcast"); + GENL_SET_ERR_MSG(info, + "defmgmt/defbeacon key must be mcast"); return -EINVAL; } } @@@ -1207,20 -1179,14 +1212,20 @@@ "defmgmt key idx not 4 or 5"); return -EINVAL; } + } else if (k->defbeacon) { + if (k->idx < 6 || k->idx > 7) { + GENL_SET_ERR_MSG(info, + "defbeacon key idx not 6 or 7"); + return -EINVAL; + } } else if (k->def) { if (k->idx < 0 || k->idx > 3) { GENL_SET_ERR_MSG(info, "def key idx not 0-3"); return -EINVAL; } } else { - if (k->idx < 0 || k->idx > 5) { - GENL_SET_ERR_MSG(info, "key idx not 0-5"); + if (k->idx < 0 || k->idx > 7) { + GENL_SET_ERR_MSG(info, "key idx not 0-7"); return -EINVAL; } } @@@ -1926,88 -1892,6 +1931,88 @@@ static int nl80211_send_pmsr_capa(struc return 0; }
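Raising NL80211_ATTR_KEY_IDX to 7 makes room for beacon-protection (BIGTK) keys, and the parser partitions the widened index space by key role. A sketch of the ranges it now enforces, condensed from the hunk above:

    /* Sketch: key index ranges by role after this change.
     *   def       (pairwise/group)  0..3
     *   defmgmt   (IGTK)            4..5
     *   defbeacon (BIGTK)           6..7
     *   no role specified           0..7
     */
    if (k->defmgmt && (k->idx < 4 || k->idx > 5))
            return -EINVAL;
    if (k->defbeacon && (k->idx < 6 || k->idx > 7))
            return -EINVAL;
    if (k->def && (k->idx < 0 || k->idx > 3))
            return -EINVAL;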
+static int +nl80211_put_iftype_akm_suites(struct cfg80211_registered_device *rdev, + struct sk_buff *msg) +{ + int i; + struct nlattr *nested, *nested_akms; + const struct wiphy_iftype_akm_suites *iftype_akms; + + if (!rdev->wiphy.num_iftype_akm_suites || + !rdev->wiphy.iftype_akm_suites) + return 0; + + nested = nla_nest_start(msg, NL80211_ATTR_IFTYPE_AKM_SUITES); + if (!nested) + return -ENOBUFS; + + for (i = 0; i < rdev->wiphy.num_iftype_akm_suites; i++) { + nested_akms = nla_nest_start(msg, i + 1); + if (!nested_akms) + return -ENOBUFS; + + iftype_akms = &rdev->wiphy.iftype_akm_suites[i]; + + if (nl80211_put_iftypes(msg, NL80211_IFTYPE_AKM_ATTR_IFTYPES, + iftype_akms->iftypes_mask)) + return -ENOBUFS; + + if (nla_put(msg, NL80211_IFTYPE_AKM_ATTR_SUITES, + sizeof(u32) * iftype_akms->n_akm_suites, + iftype_akms->akm_suites)) { + return -ENOBUFS; + } + nla_nest_end(msg, nested_akms); + } + + nla_nest_end(msg, nested); + + return 0; +} + +static int +nl80211_put_tid_config_support(struct cfg80211_registered_device *rdev, + struct sk_buff *msg) +{ + struct nlattr *supp; + + if (!rdev->wiphy.tid_config_support.vif && + !rdev->wiphy.tid_config_support.peer) + return 0; + + supp = nla_nest_start(msg, NL80211_ATTR_TID_CONFIG); + if (!supp) + return -ENOSPC; + + if (rdev->wiphy.tid_config_support.vif && + nla_put_u64_64bit(msg, NL80211_TID_CONFIG_ATTR_VIF_SUPP, + rdev->wiphy.tid_config_support.vif, + NL80211_TID_CONFIG_ATTR_PAD)) + goto fail; + + if (rdev->wiphy.tid_config_support.peer && + nla_put_u64_64bit(msg, NL80211_TID_CONFIG_ATTR_PEER_SUPP, + rdev->wiphy.tid_config_support.peer, + NL80211_TID_CONFIG_ATTR_PAD)) + goto fail; + + /* for now we just use the same value ... makes more sense */ + if (nla_put_u8(msg, NL80211_TID_CONFIG_ATTR_RETRY_SHORT, + rdev->wiphy.tid_config_support.max_retry)) + goto fail; + if (nla_put_u8(msg, NL80211_TID_CONFIG_ATTR_RETRY_LONG, + rdev->wiphy.tid_config_support.max_retry)) + goto fail; + + nla_nest_end(msg, supp); + + return 0; +fail: + nla_nest_cancel(msg, supp); + return -ENOBUFS; +} + struct nl80211_dump_wiphy_state { s64 filter_wiphy; long start; @@@ -2566,12 -2450,6 +2571,12 @@@ static int nl80211_send_wiphy(struct cf rdev->wiphy.akm_suites)) goto nla_put_failure;
+ if (nl80211_put_iftype_akm_suites(rdev, msg)) + goto nla_put_failure; + + if (nl80211_put_tid_config_support(rdev, msg)) + goto nla_put_failure; + /* done */ state->split_start = 0; break; @@@ -3603,7 -3481,7 +3608,7 @@@ static int nl80211_valid_4addr(struct c enum nl80211_iftype iftype) { if (!use_4addr) { - if (netdev && (netdev->priv_flags & IFF_BRIDGE_PORT)) + if (netdev && netif_is_bridge_port(netdev)) return -EBUSY; return 0; } @@@ -3891,14 -3769,8 +3896,14 @@@ static int nl80211_get_key(struct sk_bu void *hdr; struct sk_buff *msg;
- if (info->attrs[NL80211_ATTR_KEY_IDX]) + if (info->attrs[NL80211_ATTR_KEY_IDX]) { key_idx = nla_get_u8(info->attrs[NL80211_ATTR_KEY_IDX]); + if (key_idx > 5 && + !wiphy_ext_feature_isset( + &rdev->wiphy, + NL80211_EXT_FEATURE_BEACON_PROTECTION)) + return -EINVAL; + }
if (info->attrs[NL80211_ATTR_MAC]) mac_addr = nla_data(info->attrs[NL80211_ATTR_MAC]); @@@ -3974,7 -3846,7 +3979,7 @@@ static int nl80211_set_key(struct sk_bu /* Only support setting default key and * Extended Key ID action NL80211_KEY_SET_TX. */ - if (!key.def && !key.defmgmt && + if (!key.def && !key.defmgmt && !key.defbeacon && !(key.p.mode == NL80211_KEY_SET_TX)) return -EINVAL;
@@@ -4021,24 -3893,6 +4026,24 @@@ #ifdef CONFIG_CFG80211_WEXT dev->ieee80211_ptr->wext.default_mgmt_key = key.idx; #endif + } else if (key.defbeacon) { + if (key.def_uni || !key.def_multi) { + err = -EINVAL; + goto out; + } + + if (!rdev->ops->set_default_beacon_key) { + err = -EOPNOTSUPP; + goto out; + } + + err = nl80211_key_allowed(dev->ieee80211_ptr); + if (err) + goto out; + + err = rdev_set_default_beacon_key(rdev, dev, key.idx); + if (err) + goto out; } else if (key.p.mode == NL80211_KEY_SET_TX && wiphy_ext_feature_isset(&rdev->wiphy, NL80211_EXT_FEATURE_EXT_KEY_ID)) { @@@ -4076,10 -3930,8 +4081,10 @@@ static int nl80211_new_key(struct sk_bu if (err) return err;
- if (!key.p.key) + if (!key.p.key) { + GENL_SET_ERR_MSG(info, "no key"); return -EINVAL; + }
if (info->attrs[NL80211_ATTR_MAC]) mac_addr = nla_data(info->attrs[NL80211_ATTR_MAC]); @@@ -4093,10 -3945,8 +4098,10 @@@
/* for now */ if (key.type != NL80211_KEYTYPE_PAIRWISE && - key.type != NL80211_KEYTYPE_GROUP) + key.type != NL80211_KEYTYPE_GROUP) { + GENL_SET_ERR_MSG(info, "key type not pairwise or group"); return -EINVAL; + }
if (key.type == NL80211_KEYTYPE_GROUP && info->attrs[NL80211_ATTR_VLAN_ID]) @@@ -4107,22 -3957,15 +4112,22 @@@
if (cfg80211_validate_key_settings(rdev, &key.p, key.idx, key.type == NL80211_KEYTYPE_PAIRWISE, - mac_addr)) + mac_addr)) { + GENL_SET_ERR_MSG(info, "key setting validation failed"); return -EINVAL; + }
wdev_lock(dev->ieee80211_ptr); err = nl80211_key_allowed(dev->ieee80211_ptr); - if (!err) + if (err) + GENL_SET_ERR_MSG(info, "key not allowed"); + if (!err) { err = rdev_add_key(rdev, dev, key.idx, key.type == NL80211_KEYTYPE_PAIRWISE, mac_addr, &key.p); + if (err) + GENL_SET_ERR_MSG(info, "key addition failed"); + } wdev_unlock(dev->ieee80211_ptr);
return err; @@@ -4675,30 -4518,6 +4680,30 @@@ static int nl80211_parse_he_obss_pd(str return 0; }
+static int nl80211_parse_he_bss_color(struct nlattr *attrs, + struct cfg80211_he_bss_color *he_bss_color) +{ + struct nlattr *tb[NL80211_HE_BSS_COLOR_ATTR_MAX + 1]; + int err; + + err = nla_parse_nested(tb, NL80211_HE_BSS_COLOR_ATTR_MAX, attrs, + he_bss_color_policy, NULL); + if (err) + return err; + + if (!tb[NL80211_HE_BSS_COLOR_ATTR_COLOR]) + return -EINVAL; + + he_bss_color->color = + nla_get_u8(tb[NL80211_HE_BSS_COLOR_ATTR_COLOR]); + he_bss_color->disabled = + nla_get_flag(tb[NL80211_HE_BSS_COLOR_ATTR_DISABLED]); + he_bss_color->partial = + nla_get_flag(tb[NL80211_HE_BSS_COLOR_ATTR_PARTIAL]); + + return 0; +} + static void nl80211_check_ap_rate_selectors(struct cfg80211_ap_settings *params, const u8 *rates) { @@@ -4990,14 -4809,6 +4995,14 @@@ static int nl80211_start_ap(struct sk_b goto out; }
+ if (info->attrs[NL80211_ATTR_HE_BSS_COLOR]) { + err = nl80211_parse_he_bss_color( + info->attrs[NL80211_ATTR_HE_BSS_COLOR], + &params.he_bss_color); + if (err) + return err; + } + nl80211_calculate_ap_params(&params);
if (info->attrs[NL80211_ATTR_EXTERNAL_AUTH_SUPPORT]) @@@ -10734,9 -10545,8 +10739,9 @@@ static int nl80211_register_mgmt(struc return -EOPNOTSUPP;
return cfg80211_mlme_register_mgmt(wdev, info->snd_portid, frame_type, - nla_data(info->attrs[NL80211_ATTR_FRAME_MATCH]), - nla_len(info->attrs[NL80211_ATTR_FRAME_MATCH])); + nla_data(info->attrs[NL80211_ATTR_FRAME_MATCH]), + nla_len(info->attrs[NL80211_ATTR_FRAME_MATCH]), + info->extack); }
static int nl80211_tx_mgmt(struct sk_buff *skb, struct genl_info *info) @@@ -13998,141 -13808,6 +14003,141 @@@ static int nl80211_probe_mesh_link(stru return rdev_probe_mesh_link(rdev, dev, dest, buf, len); }
+static int parse_tid_conf(struct cfg80211_registered_device *rdev, + struct nlattr *attrs[], struct net_device *dev, + struct cfg80211_tid_cfg *tid_conf, + struct genl_info *info, const u8 *peer) +{ + struct netlink_ext_ack *extack = info->extack; + u64 mask; + int err; + + if (!attrs[NL80211_TID_CONFIG_ATTR_TIDS]) + return -EINVAL; + + tid_conf->config_override = + nla_get_flag(attrs[NL80211_TID_CONFIG_ATTR_OVERRIDE]); + tid_conf->tids = nla_get_u16(attrs[NL80211_TID_CONFIG_ATTR_TIDS]); + + if (tid_conf->config_override) { + if (rdev->ops->reset_tid_config) { + err = rdev_reset_tid_config(rdev, dev, peer, + tid_conf->tids); + /* If peer is there no other configuration will be + * allowed + */ + if (err || peer) + return err; + } else { + return -EINVAL; + } + } + + if (attrs[NL80211_TID_CONFIG_ATTR_NOACK]) { + tid_conf->mask |= BIT(NL80211_TID_CONFIG_ATTR_NOACK); + tid_conf->noack = + nla_get_u8(attrs[NL80211_TID_CONFIG_ATTR_NOACK]); + } + + if (attrs[NL80211_TID_CONFIG_ATTR_RETRY_SHORT]) { + tid_conf->mask |= BIT(NL80211_TID_CONFIG_ATTR_RETRY_SHORT); + tid_conf->retry_short = + nla_get_u8(attrs[NL80211_TID_CONFIG_ATTR_RETRY_SHORT]); + + if (tid_conf->retry_short > rdev->wiphy.max_data_retry_count) + return -EINVAL; + } + + if (attrs[NL80211_TID_CONFIG_ATTR_RETRY_LONG]) { + tid_conf->mask |= BIT(NL80211_TID_CONFIG_ATTR_RETRY_LONG); + tid_conf->retry_long = + nla_get_u8(attrs[NL80211_TID_CONFIG_ATTR_RETRY_LONG]); + + if (tid_conf->retry_long > rdev->wiphy.max_data_retry_count) + return -EINVAL; + } + + if (attrs[NL80211_TID_CONFIG_ATTR_AMPDU_CTRL]) { + tid_conf->mask |= BIT(NL80211_TID_CONFIG_ATTR_AMPDU_CTRL); + tid_conf->ampdu = + nla_get_u8(attrs[NL80211_TID_CONFIG_ATTR_AMPDU_CTRL]); + } + + if (attrs[NL80211_TID_CONFIG_ATTR_RTSCTS_CTRL]) { + tid_conf->mask |= BIT(NL80211_TID_CONFIG_ATTR_RTSCTS_CTRL); + tid_conf->rtscts = + nla_get_u8(attrs[NL80211_TID_CONFIG_ATTR_RTSCTS_CTRL]); + } + + if (peer) + mask = rdev->wiphy.tid_config_support.peer; + else + mask = rdev->wiphy.tid_config_support.vif; + + if (tid_conf->mask & ~mask) { + NL_SET_ERR_MSG(extack, "unsupported TID configuration"); + return -ENOTSUPP; + } + + return 0; +} + +static int nl80211_set_tid_config(struct sk_buff *skb, + struct genl_info *info) +{ + struct cfg80211_registered_device *rdev = info->user_ptr[0]; + struct nlattr *attrs[NL80211_TID_CONFIG_ATTR_MAX + 1]; + struct net_device *dev = info->user_ptr[1]; + struct cfg80211_tid_config *tid_config; + struct nlattr *tid; + int conf_idx = 0, rem_conf; + int ret = -EINVAL; + u32 num_conf = 0; + + if (!info->attrs[NL80211_ATTR_TID_CONFIG]) + return -EINVAL; + + if (!rdev->ops->set_tid_config) + return -EOPNOTSUPP; + + nla_for_each_nested(tid, info->attrs[NL80211_ATTR_TID_CONFIG], + rem_conf) + num_conf++; + + tid_config = kzalloc(struct_size(tid_config, tid_conf, num_conf), + GFP_KERNEL); + if (!tid_config) + return -ENOMEM; + + tid_config->n_tid_conf = num_conf; + + if (info->attrs[NL80211_ATTR_MAC]) + tid_config->peer = nla_data(info->attrs[NL80211_ATTR_MAC]); + + nla_for_each_nested(tid, info->attrs[NL80211_ATTR_TID_CONFIG], + rem_conf) { + ret = nla_parse_nested(attrs, NL80211_TID_CONFIG_ATTR_MAX, + tid, NULL, NULL); + + if (ret) + goto bad_tid_conf; + + ret = parse_tid_conf(rdev, attrs, dev, + &tid_config->tid_conf[conf_idx], + info, tid_config->peer); + if (ret) + goto bad_tid_conf; + + conf_idx++; + } + + ret = rdev_set_tid_config(rdev, dev, tid_config); + +bad_tid_conf: + kfree(tid_config); + return ret; +} + #define NL80211_FLAG_NEED_WIPHY 0x01 #define 
NL80211_FLAG_NEED_NETDEV 0x02 #define NL80211_FLAG_NEED_RTNL 0x04 @@@ -15087,13 -14762,6 +15092,13 @@@ static const struct genl_ops nl80211_op .internal_flags = NL80211_FLAG_NEED_NETDEV_UP | NL80211_FLAG_NEED_RTNL, }, + { + .cmd = NL80211_CMD_SET_TID_CONFIG, + .doit = nl80211_set_tid_config, + .flags = GENL_UNS_ADMIN_PERM, + .internal_flags = NL80211_FLAG_NEED_NETDEV | + NL80211_FLAG_NEED_RTNL, + }, };
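nl80211_set_tid_config() above first counts the nested TID entries, then allocates the variable-length cfg80211_tid_config in one go with struct_size(), which checks the size arithmetic for overflow. The idiom, as used in the new code:

    /* Sketch: struct_size(p, member, n) expands to
     * sizeof(*p) + n * sizeof(p->member[0]) with overflow checking.
     */
    num_conf = 0;
    nla_for_each_nested(tid, info->attrs[NL80211_ATTR_TID_CONFIG], rem_conf)
            num_conf++;

    tid_config = kzalloc(struct_size(tid_config, tid_conf, num_conf),
                         GFP_KERNEL);
    if (!tid_config)
            return -ENOMEM;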
static struct genl_family nl80211_fam __ro_after_init = { diff --combined tools/testing/selftests/tc-testing/config index c812faa29f36,c03af4600281..c33a7aac27ff --- a/tools/testing/selftests/tc-testing/config +++ b/tools/testing/selftests/tc-testing/config @@@ -31,7 -31,6 +31,7 @@@ CONFIG_NET_EMATCH_U32= CONFIG_NET_EMATCH_META=m CONFIG_NET_EMATCH_TEXT=m CONFIG_NET_EMATCH_IPSET=m +CONFIG_NET_EMATCH_CANID=m CONFIG_NET_EMATCH_IPT=m CONFIG_NET_CLS_ACT=y CONFIG_NET_ACT_POLICE=m @@@ -58,8 -57,4 +58,9 @@@ CONFIG_NET_IFE_SKBMARK= CONFIG_NET_IFE_SKBPRIO=m CONFIG_NET_IFE_SKBTCINDEX=m CONFIG_NET_SCH_FIFO=y + CONFIG_NET_SCH_ETS=m + +# +## Network testing +# +CONFIG_CAN=m