The following commit has been merged in the master branch:

commit 0140a7168f8b2732f622fa2c500f1f8be212382a
Merge: f8b2cce430d92ec927915ba4bc8088fe99659dbc 504c25cb76a9cb805407f7701b25a1fbd48605fa
Author: Jakub Kicinski <kuba@kernel.org>
Date:   Thu Sep 22 13:02:10 2022 -0700
Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net

drivers/net/ethernet/freescale/fec.h
  7b15515fc1ca ("Revert "fec: Restart PPS after link state change"")
  40c79ce13b03 ("net: fec: add stop mode support for imx8 platform")
https://lore.kernel.org/all/20220921105337.62b41047@canb.auug.org.au/

drivers/pinctrl/pinctrl-ocelot.c
  c297561bc98a ("pinctrl: ocelot: Fix interrupt controller")
  181f604b33cd ("pinctrl: ocelot: add ability to be used in a non-mmio configuration")
https://lore.kernel.org/all/20220921110032.7cd28114@canb.auug.org.au/

tools/testing/selftests/drivers/net/bonding/Makefile
  bbb774d921e2 ("net: Add tests for bonding and team address list management")
  152e8ec77640 ("selftests/bonding: add a test for bonding lladdr target")
https://lore.kernel.org/all/20220921110437.5b7dbd82@canb.auug.org.au/

drivers/net/can/usb/gs_usb.c
  5440428b3da6 ("can: gs_usb: gs_can_open(): fix race dev->can.state condition")
  45dfa45f52e6 ("can: gs_usb: add RX and TX hardware timestamp support")
https://lore.kernel.org/all/84f45a7d-92b6-4dc5-d7a1-072152fab6ff@tessares.ne...
Signed-off-by: Jakub Kicinski <kuba@kernel.org>

diff --combined MAINTAINERS index 6705fb8bfd3a,efada49e2e2e..4c980c4e3a41 --- a/MAINTAINERS +++ b/MAINTAINERS @@@ -878,13 -878,6 +878,13 @@@ L: netdev@vger.kernel.or S: Maintained F: drivers/net/ethernet/altera/
+ALTERA TSE PCS +M: Maxime Chevallier maxime.chevallier@bootlin.com +L: netdev@vger.kernel.org +S: Supported +F: drivers/net/pcs/pcs-altera-tse.c +F: include/linux/pcs-altera-tse.h + ALTERA UART/JTAG UART SERIAL DRIVERS M: Tobias Klauser tklauser@distanz.ch L: linux-serial@vger.kernel.org @@@ -1810,7 -1803,7 +1810,7 @@@ N: sun[x456789] N: sun50i
ARM/Amlogic Meson SoC CLOCK FRAMEWORK - M: Neil Armstrong narmstrong@baylibre.com + M: Neil Armstrong neil.armstrong@linaro.org M: Jerome Brunet jbrunet@baylibre.com L: linux-amlogic@lists.infradead.org S: Maintained @@@ -1835,7 -1828,7 +1835,7 @@@ F: Documentation/devicetree/bindings/so F: sound/soc/meson/
ARM/Amlogic Meson SoC support - M: Neil Armstrong narmstrong@baylibre.com + M: Neil Armstrong neil.armstrong@linaro.org M: Kevin Hilman khilman@baylibre.com R: Jerome Brunet jbrunet@baylibre.com R: Martin Blumenstingl martin.blumenstingl@googlemail.com @@@ -2538,7 -2531,7 +2538,7 @@@ W: http://www.digriz.org.uk/ts78xx/kern F: arch/arm/mach-orion5x/ts78xx-*
ARM/OXNAS platform support - M: Neil Armstrong narmstrong@baylibre.com + M: Neil Armstrong neil.armstrong@linaro.org L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) L: linux-oxnas@groups.io (moderated for non-subscribers) S: Maintained @@@ -5729,6 -5722,13 +5729,6 @@@ F: include/linux/tfrc. F: include/uapi/linux/dccp.h F: net/dccp/
-DECnet NETWORK LAYER -L: linux-decnet-user@lists.sourceforge.net -S: Orphan -W: http://linux-decnet.sourceforge.net -F: Documentation/networking/decnet.rst -F: net/decnet/ - DECSTATION PLATFORM SUPPORT M: "Maciej W. Rozycki" macro@orcam.me.uk L: linux-mips@vger.kernel.org @@@ -6792,7 -6792,7 +6792,7 @@@ F: Documentation/devicetree/bindings/di F: drivers/gpu/drm/sun4i/
DRM DRIVERS FOR AMLOGIC SOCS - M: Neil Armstrong narmstrong@baylibre.com + M: Neil Armstrong neil.armstrong@linaro.org L: dri-devel@lists.freedesktop.org L: linux-amlogic@lists.infradead.org S: Supported @@@ -6814,7 -6814,7 +6814,7 @@@ F: drivers/gpu/drm/atmel-hlcdc
DRM DRIVERS FOR BRIDGE CHIPS M: Andrzej Hajda andrzej.hajda@intel.com - M: Neil Armstrong narmstrong@baylibre.com + M: Neil Armstrong neil.armstrong@linaro.org M: Robert Foss robert.foss@linaro.org R: Laurent Pinchart Laurent.pinchart@ideasonboard.com R: Jonas Karlman jonas@kwiboo.se @@@ -8652,8 -8652,8 +8652,8 @@@ F: drivers/input/touchscreen/goodix
GOOGLE ETHERNET DRIVERS M: Jeroen de Borst jeroendb@google.com - R: Catherine Sullivan csully@google.com - R: David Awogbemila awogbemila@google.com + M: Catherine Sullivan csully@google.com + R: Shailend Chand shailend@google.com L: netdev@vger.kernel.org S: Supported F: Documentation/networking/device_drivers/ethernet/google/gve.rst @@@ -9122,7 -9122,7 +9122,7 @@@ S: Maintaine F: drivers/dma/hisi_dma.c
HISILICON GPIO DRIVER - M: Luo Jiaxing luojiaxing@huawei.com + M: Jay Fang f.fangjian@huawei.com L: linux-gpio@vger.kernel.org S: Maintained F: drivers/gpio/gpio-hisi.c @@@ -9208,8 -9208,8 +9208,8 @@@ F: Documentation/ABI/testing/debugfs-hi F: drivers/crypto/hisilicon/zip/
HISILICON ROCE DRIVER + M: Haoyue Xu xuhaoyue1@hisilicon.com M: Wenpeng Liang liangwenpeng@huawei.com - M: Weihang Li liweihang@huawei.com L: linux-rdma@vger.kernel.org S: Maintained F: Documentation/devicetree/bindings/infiniband/hisilicon-hns-roce.txt @@@ -10828,7 -10828,7 +10828,7 @@@ F: drivers/media/tuners/it913x
ITE IT66121 HDMI BRIDGE DRIVER M: Phong LE ple@baylibre.com - M: Neil Armstrong narmstrong@baylibre.com + M: Neil Armstrong neil.armstrong@linaro.org S: Maintained T: git git://anongit.freedesktop.org/drm/drm-misc F: Documentation/devicetree/bindings/display/bridge/ite,it66121.yaml @@@ -11347,7 -11347,7 +11347,7 @@@ F: kernel/debug F: kernel/module/kdb.c
KHADAS MCU MFD DRIVER - M: Neil Armstrong narmstrong@baylibre.com + M: Neil Armstrong neil.armstrong@linaro.org L: linux-amlogic@lists.infradead.org S: Maintained F: Documentation/devicetree/bindings/mfd/khadas,mcu.yaml @@@ -13218,7 -13218,7 +13218,7 @@@ S: Maintaine F: drivers/watchdog/menz69_wdt.c
MESON AO CEC DRIVER FOR AMLOGIC SOCS - M: Neil Armstrong narmstrong@baylibre.com + M: Neil Armstrong neil.armstrong@linaro.org L: linux-media@vger.kernel.org L: linux-amlogic@lists.infradead.org S: Supported @@@ -13229,7 -13229,7 +13229,7 @@@ F: drivers/media/cec/platform/meson/ao- F: drivers/media/cec/platform/meson/ao-cec.c
MESON GE2D DRIVER FOR AMLOGIC SOCS - M: Neil Armstrong narmstrong@baylibre.com + M: Neil Armstrong neil.armstrong@linaro.org L: linux-media@vger.kernel.org L: linux-amlogic@lists.infradead.org S: Supported @@@ -13245,7 -13245,7 +13245,7 @@@ F: Documentation/devicetree/bindings/mt F: drivers/mtd/nand/raw/meson_*
MESON VIDEO DECODER DRIVER FOR AMLOGIC SOCS - M: Neil Armstrong narmstrong@baylibre.com + M: Neil Armstrong neil.armstrong@linaro.org L: linux-media@vger.kernel.org L: linux-amlogic@lists.infradead.org S: Supported @@@ -14746,13 -14746,6 +14746,13 @@@ F: net/dsa/tag_ocelot. F: net/dsa/tag_ocelot_8021q.c F: tools/testing/selftests/drivers/net/ocelot/*
+OCELOT EXTERNAL SWITCH CONTROL +M: Colin Foster colin.foster@in-advantage.com +S: Supported +F: Documentation/devicetree/bindings/mfd/mscc,ocelot.yaml +F: drivers/mfd/ocelot* +F: include/linux/mfd/ocelot.h + OCXL (Open Coherent Accelerator Processor Interface OpenCAPI) DRIVER M: Frederic Barrat fbarrat@linux.ibm.com M: Andrew Donnellan ajd@linux.ibm.com @@@ -16864,6 -16857,7 +16864,7 @@@ F: drivers/net/ethernet/qualcomm/emac
QUALCOMM ETHQOS ETHERNET DRIVER M: Vinod Koul vkoul@kernel.org + R: Bhupesh Sharma bhupesh.sharma@linaro.org L: netdev@vger.kernel.org S: Maintained F: Documentation/devicetree/bindings/net/qcom,ethqos.txt @@@ -17752,6 -17746,17 +17753,17 @@@ L: linux-rdma@vger.kernel.or S: Maintained F: drivers/infiniband/ulp/rtrs/
+ RUNTIME VERIFICATION (RV) + M: Daniel Bristot de Oliveira bristot@kernel.org + M: Steven Rostedt rostedt@goodmis.org + L: linux-trace-devel@vger.kernel.org + S: Maintained + F: Documentation/trace/rv/ + F: include/linux/rv.h + F: include/rv/ + F: kernel/trace/rv/ + F: tools/verification/ + RXRPC SOCKETS (AF_RXRPC) M: David Howells dhowells@redhat.com M: Marc Dionne marc.dionne@auristor.com @@@ -19955,6 -19960,7 +19967,7 @@@ S: Supporte F: drivers/net/team/ F: include/linux/if_team.h F: include/uapi/linux/if_team.h + F: tools/testing/selftests/net/team/
TECHNOLOGIC SYSTEMS TS-5500 PLATFORM SUPPORT M: "Savoir-faire Linux Inc." kernel@savoirfairelinux.com @@@ -20618,6 -20624,7 +20631,7 @@@ F: include/*/ftrace. F: include/linux/trace*.h F: include/trace/ F: kernel/trace/ + F: scripts/tracing/ F: tools/testing/selftests/ftrace/
TRACING MMIO ACCESSES (MMIOTRACE) @@@ -21866,11 -21873,9 +21880,11 @@@ F: drivers/input/tablet/wacom_serial4.
WANGXUN ETHERNET DRIVER M: Jiawen Wu jiawenwu@trustnetic.com +M: Mengyuan Lou mengyuanlou@net-swift.com +W: https://www.net-swift.com L: netdev@vger.kernel.org S: Maintained -F: Documentation/networking/device_drivers/ethernet/wangxun/txgbe.rst +F: Documentation/networking/device_drivers/ethernet/wangxun/* F: drivers/net/ethernet/wangxun/
WATCHDOG DEVICE DRIVERS diff --combined drivers/net/bonding/bond_main.c index ddd07395827a,86d42306aa5e..24bb50dfd362 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@@ -865,12 -865,8 +865,8 @@@ static void bond_hw_addr_flush(struct n dev_uc_unsync(slave_dev, bond_dev); dev_mc_unsync(slave_dev, bond_dev);
- if (BOND_MODE(bond) == BOND_MODE_8023AD) { - /* del lacpdu mc addr from mc list */ - u8 lacpdu_multicast[ETH_ALEN] = MULTICAST_LACPDU_ADDR; - - dev_mc_del(slave_dev, lacpdu_multicast); - } + if (BOND_MODE(bond) == BOND_MODE_8023AD) + dev_mc_del(slave_dev, lacpdu_mcast_addr); }
/*--------------------------- Active slave change ---------------------------*/ @@@ -890,7 -886,8 +886,8 @@@ static void bond_hw_addr_swap(struct bo if (bond->dev->flags & IFF_ALLMULTI) dev_set_allmulti(old_active->dev, -1);
- bond_hw_addr_flush(bond->dev, old_active->dev); + if (bond->dev->flags & IFF_UP) + bond_hw_addr_flush(bond->dev, old_active->dev); }
if (new_active) { @@@ -901,10 -898,12 +898,12 @@@ if (bond->dev->flags & IFF_ALLMULTI) dev_set_allmulti(new_active->dev, 1);
- netif_addr_lock_bh(bond->dev); - dev_uc_sync(new_active->dev, bond->dev); - dev_mc_sync(new_active->dev, bond->dev); - netif_addr_unlock_bh(bond->dev); + if (bond->dev->flags & IFF_UP) { + netif_addr_lock_bh(bond->dev); + dev_uc_sync(new_active->dev, bond->dev); + dev_mc_sync(new_active->dev, bond->dev); + netif_addr_unlock_bh(bond->dev); + } } }
@@@ -2166,16 -2165,14 +2165,14 @@@ int bond_enslave(struct net_device *bon } }
- netif_addr_lock_bh(bond_dev); - dev_mc_sync_multiple(slave_dev, bond_dev); - dev_uc_sync_multiple(slave_dev, bond_dev); - netif_addr_unlock_bh(bond_dev); - - if (BOND_MODE(bond) == BOND_MODE_8023AD) { - /* add lacpdu mc addr to mc list */ - u8 lacpdu_multicast[ETH_ALEN] = MULTICAST_LACPDU_ADDR; + if (bond_dev->flags & IFF_UP) { + netif_addr_lock_bh(bond_dev); + dev_mc_sync_multiple(slave_dev, bond_dev); + dev_uc_sync_multiple(slave_dev, bond_dev); + netif_addr_unlock_bh(bond_dev);
- dev_mc_add(slave_dev, lacpdu_multicast); + if (BOND_MODE(bond) == BOND_MODE_8023AD) + dev_mc_add(slave_dev, lacpdu_mcast_addr); } }
@@@ -2447,7 -2444,8 +2444,8 @@@ static int __bond_release_one(struct ne if (old_flags & IFF_ALLMULTI) dev_set_allmulti(slave_dev, -1);
- bond_hw_addr_flush(bond_dev, slave_dev); + if (old_flags & IFF_UP) + bond_hw_addr_flush(bond_dev, slave_dev); }
slave_disable_netpoll(slave); @@@ -4184,6 -4182,12 +4182,12 @@@ static int bond_open(struct net_device struct list_head *iter; struct slave *slave;
+ if (BOND_MODE(bond) == BOND_MODE_ROUNDROBIN && !bond->rr_tx_counter) { + bond->rr_tx_counter = alloc_percpu(u32); + if (!bond->rr_tx_counter) + return -ENOMEM; + } + /* reset slave->backup and slave->inactive */ if (bond_has_slaves(bond)) { bond_for_each_slave(bond, slave, iter) { @@@ -4221,6 -4225,9 +4225,9 @@@ /* register to receive LACPDUs */ bond->recv_probe = bond_3ad_lacpdu_recv; bond_3ad_initiate_agg_selection(bond, 1); + + bond_for_each_slave(bond, slave, iter) + dev_mc_add(slave->dev, lacpdu_mcast_addr); }
if (bond_mode_can_use_xmit_hash(bond)) @@@ -4232,6 -4239,7 +4239,7 @@@ static int bond_close(struct net_device *bond_dev) { struct bonding *bond = netdev_priv(bond_dev); + struct slave *slave;
bond_work_cancel_all(bond); bond->send_peer_notif = 0; @@@ -4239,6 -4247,19 +4247,19 @@@ bond_alb_deinitialize(bond); bond->recv_probe = NULL;
+ if (bond_uses_primary(bond)) { + rcu_read_lock(); + slave = rcu_dereference(bond->curr_active_slave); + if (slave) + bond_hw_addr_flush(bond_dev, slave->dev); + rcu_read_unlock(); + } else { + struct list_head *iter; + + bond_for_each_slave(bond, slave, iter) + bond_hw_addr_flush(bond_dev, slave->dev); + } + return 0; }
@@@ -5629,7 -5650,7 +5650,7 @@@ static int bond_ethtool_get_link_ksetti static void bond_ethtool_get_drvinfo(struct net_device *bond_dev, struct ethtool_drvinfo *drvinfo) { - strlcpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver)); + strscpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver)); snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version), "%d", BOND_ABI_VERSION); } @@@ -6228,15 -6249,6 +6249,6 @@@ static int bond_init(struct net_device if (!bond->wq) return -ENOMEM;
- if (BOND_MODE(bond) == BOND_MODE_ROUNDROBIN) { - bond->rr_tx_counter = alloc_percpu(u32); - if (!bond->rr_tx_counter) { - destroy_workqueue(bond->wq); - bond->wq = NULL; - return -ENOMEM; - } - } - spin_lock_init(&bond->stats_lock); netdev_lockdep_set_classes(bond_dev);
diff --combined drivers/net/can/flexcan/flexcan-core.c index 5e27944b8a21,ccb438eca517..5ee38e586fd8 --- a/drivers/net/can/flexcan/flexcan-core.c +++ b/drivers/net/can/flexcan/flexcan-core.c @@@ -295,45 -295,45 +295,45 @@@ static_assert(sizeof(struct flexcan_reg static const struct flexcan_devtype_data fsl_mcf5441x_devtype_data = { .quirks = FLEXCAN_QUIRK_BROKEN_PERR_STATE | FLEXCAN_QUIRK_NR_IRQ_3 | FLEXCAN_QUIRK_NR_MB_16 | - FLEXCAN_QUIRK_SUPPPORT_RX_MAILBOX | - FLEXCAN_QUIRK_SUPPPORT_RX_FIFO, + FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX | + FLEXCAN_QUIRK_SUPPORT_RX_FIFO, };
static const struct flexcan_devtype_data fsl_p1010_devtype_data = { .quirks = FLEXCAN_QUIRK_BROKEN_WERR_STATE | FLEXCAN_QUIRK_BROKEN_PERR_STATE | FLEXCAN_QUIRK_DEFAULT_BIG_ENDIAN | - FLEXCAN_QUIRK_SUPPPORT_RX_MAILBOX | - FLEXCAN_QUIRK_SUPPPORT_RX_FIFO, + FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX | + FLEXCAN_QUIRK_SUPPORT_RX_FIFO, };
static const struct flexcan_devtype_data fsl_imx25_devtype_data = { .quirks = FLEXCAN_QUIRK_BROKEN_WERR_STATE | FLEXCAN_QUIRK_BROKEN_PERR_STATE | - FLEXCAN_QUIRK_SUPPPORT_RX_MAILBOX | - FLEXCAN_QUIRK_SUPPPORT_RX_FIFO, + FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX | + FLEXCAN_QUIRK_SUPPORT_RX_FIFO, };
static const struct flexcan_devtype_data fsl_imx28_devtype_data = { .quirks = FLEXCAN_QUIRK_BROKEN_PERR_STATE | - FLEXCAN_QUIRK_SUPPPORT_RX_MAILBOX | - FLEXCAN_QUIRK_SUPPPORT_RX_FIFO, + FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX | + FLEXCAN_QUIRK_SUPPORT_RX_FIFO, };
static const struct flexcan_devtype_data fsl_imx6q_devtype_data = { .quirks = FLEXCAN_QUIRK_DISABLE_RXFG | FLEXCAN_QUIRK_ENABLE_EACEN_RRS | FLEXCAN_QUIRK_USE_RX_MAILBOX | FLEXCAN_QUIRK_BROKEN_PERR_STATE | FLEXCAN_QUIRK_SETUP_STOP_MODE_GPR | - FLEXCAN_QUIRK_SUPPPORT_RX_MAILBOX | - FLEXCAN_QUIRK_SUPPPORT_RX_MAILBOX_RTR, + FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX | + FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX_RTR, };
static const struct flexcan_devtype_data fsl_imx8qm_devtype_data = { .quirks = FLEXCAN_QUIRK_DISABLE_RXFG | FLEXCAN_QUIRK_ENABLE_EACEN_RRS | FLEXCAN_QUIRK_USE_RX_MAILBOX | FLEXCAN_QUIRK_BROKEN_PERR_STATE | FLEXCAN_QUIRK_SUPPORT_FD | FLEXCAN_QUIRK_SETUP_STOP_MODE_SCFW | - FLEXCAN_QUIRK_SUPPPORT_RX_MAILBOX | - FLEXCAN_QUIRK_SUPPPORT_RX_MAILBOX_RTR, + FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX | + FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX_RTR, };
static struct flexcan_devtype_data fsl_imx8mp_devtype_data = { @@@ -341,23 -341,23 +341,23 @@@ FLEXCAN_QUIRK_DISABLE_MECR | FLEXCAN_QUIRK_USE_RX_MAILBOX | FLEXCAN_QUIRK_BROKEN_PERR_STATE | FLEXCAN_QUIRK_SETUP_STOP_MODE_GPR | FLEXCAN_QUIRK_SUPPORT_FD | FLEXCAN_QUIRK_SUPPORT_ECC | - FLEXCAN_QUIRK_SUPPPORT_RX_MAILBOX | - FLEXCAN_QUIRK_SUPPPORT_RX_MAILBOX_RTR, + FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX | + FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX_RTR, };
static const struct flexcan_devtype_data fsl_vf610_devtype_data = { .quirks = FLEXCAN_QUIRK_DISABLE_RXFG | FLEXCAN_QUIRK_ENABLE_EACEN_RRS | FLEXCAN_QUIRK_DISABLE_MECR | FLEXCAN_QUIRK_USE_RX_MAILBOX | FLEXCAN_QUIRK_BROKEN_PERR_STATE | FLEXCAN_QUIRK_SUPPORT_ECC | - FLEXCAN_QUIRK_SUPPPORT_RX_MAILBOX | - FLEXCAN_QUIRK_SUPPPORT_RX_MAILBOX_RTR, + FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX | + FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX_RTR, };
static const struct flexcan_devtype_data fsl_ls1021a_r2_devtype_data = { .quirks = FLEXCAN_QUIRK_DISABLE_RXFG | FLEXCAN_QUIRK_ENABLE_EACEN_RRS | FLEXCAN_QUIRK_BROKEN_PERR_STATE | FLEXCAN_QUIRK_USE_RX_MAILBOX | - FLEXCAN_QUIRK_SUPPPORT_RX_MAILBOX | - FLEXCAN_QUIRK_SUPPPORT_RX_MAILBOX_RTR, + FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX | + FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX_RTR, };
static const struct flexcan_devtype_data fsl_lx2160a_r1_devtype_data = { @@@ -365,8 -365,8 +365,8 @@@ FLEXCAN_QUIRK_DISABLE_MECR | FLEXCAN_QUIRK_BROKEN_PERR_STATE | FLEXCAN_QUIRK_USE_RX_MAILBOX | FLEXCAN_QUIRK_SUPPORT_FD | FLEXCAN_QUIRK_SUPPORT_ECC | - FLEXCAN_QUIRK_SUPPPORT_RX_MAILBOX | - FLEXCAN_QUIRK_SUPPPORT_RX_MAILBOX_RTR, + FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX | + FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX_RTR, };
static const struct can_bittiming_const flexcan_bittiming_const = { @@@ -941,11 -941,6 +941,6 @@@ static struct sk_buff *flexcan_mailbox_ u32 reg_ctrl, reg_id, reg_iflag1; int i;
- if (unlikely(drop)) { - skb = ERR_PTR(-ENOBUFS); - goto mark_as_read; - } - mb = flexcan_get_mb(priv, n);
if (priv->devtype_data.quirks & FLEXCAN_QUIRK_USE_RX_MAILBOX) { @@@ -974,6 -969,11 +969,11 @@@ reg_ctrl = priv->read(&mb->can_ctrl); }
+ if (unlikely(drop)) { + skb = ERR_PTR(-ENOBUFS); + goto mark_as_read; + } + if (reg_ctrl & FLEXCAN_MB_CNT_EDL) skb = alloc_canfd_skb(offload->dev, &cfd); else @@@ -2085,20 -2085,20 +2085,20 @@@ static int flexcan_probe(struct platfor if ((devtype_data->quirks & FLEXCAN_QUIRK_SUPPORT_FD) && !((devtype_data->quirks & (FLEXCAN_QUIRK_USE_RX_MAILBOX | - FLEXCAN_QUIRK_SUPPPORT_RX_MAILBOX | - FLEXCAN_QUIRK_SUPPPORT_RX_MAILBOX_RTR | - FLEXCAN_QUIRK_SUPPPORT_RX_FIFO)) == + FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX | + FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX_RTR | + FLEXCAN_QUIRK_SUPPORT_RX_FIFO)) == (FLEXCAN_QUIRK_USE_RX_MAILBOX | - FLEXCAN_QUIRK_SUPPPORT_RX_MAILBOX | - FLEXCAN_QUIRK_SUPPPORT_RX_MAILBOX_RTR))) { + FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX | + FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX_RTR))) { dev_err(&pdev->dev, "CAN-FD mode doesn't work in RX-FIFO mode!\n"); return -EINVAL; }
if ((devtype_data->quirks & - (FLEXCAN_QUIRK_SUPPPORT_RX_MAILBOX | - FLEXCAN_QUIRK_SUPPPORT_RX_MAILBOX_RTR)) == - FLEXCAN_QUIRK_SUPPPORT_RX_MAILBOX_RTR) { + (FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX | + FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX_RTR)) == + FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX_RTR) { dev_err(&pdev->dev, "Quirks (0x%08x) inconsistent: RX_MAILBOX_RX supported but not RX_MAILBOX\n", devtype_data->quirks); @@@ -2177,7 -2177,8 +2177,7 @@@
err = flexcan_setup_stop_mode(pdev); if (err < 0) { - if (err != -EPROBE_DEFER) - dev_err(&pdev->dev, "setup stop mode failed\n"); + dev_err_probe(&pdev->dev, err, "setup stop mode failed\n"); goto failed_setup_stop_mode; }
diff --combined drivers/net/can/usb/gs_usb.c index cc363f1935ce,c1ff3c046d62..5e0d280b0cd3 --- a/drivers/net/can/usb/gs_usb.c +++ b/drivers/net/can/usb/gs_usb.c @@@ -10,24 -10,20 +10,24 @@@ */
#include <linux/bitfield.h> +#include <linux/clocksource.h> #include <linux/ethtool.h> #include <linux/init.h> #include <linux/module.h> #include <linux/netdevice.h> #include <linux/signal.h> +#include <linux/timecounter.h> +#include <linux/units.h> #include <linux/usb.h> +#include <linux/workqueue.h>
#include <linux/can.h> #include <linux/can/dev.h> #include <linux/can/error.h>
/* Device specific constants */ -#define USB_GSUSB_1_VENDOR_ID 0x1d50 -#define USB_GSUSB_1_PRODUCT_ID 0x606f +#define USB_GS_USB_1_VENDOR_ID 0x1d50 +#define USB_GS_USB_1_PRODUCT_ID 0x606f
#define USB_CANDLELIGHT_VENDOR_ID 0x1209 #define USB_CANDLELIGHT_PRODUCT_ID 0x2323 @@@ -38,16 -34,8 +38,16 @@@ #define USB_ABE_CANDEBUGGER_FD_VENDOR_ID 0x16d0 #define USB_ABE_CANDEBUGGER_FD_PRODUCT_ID 0x10b8
-#define GSUSB_ENDPOINT_IN 1
-#define GSUSB_ENDPOINT_OUT 2
+#define GS_USB_ENDPOINT_IN 1
+#define GS_USB_ENDPOINT_OUT 2
+
+/* Timestamp 32 bit timer runs at 1 MHz (1 µs tick). Worker accounts
+ * for timer overflow (will be after ~71 minutes)
+ */
+#define GS_USB_TIMESTAMP_TIMER_HZ (1 * HZ_PER_MHZ)
+#define GS_USB_TIMESTAMP_WORK_DELAY_SEC 1800
+static_assert(GS_USB_TIMESTAMP_WORK_DELAY_SEC <
+	      CYCLECOUNTER_MASK(32) / GS_USB_TIMESTAMP_TIMER_HZ / 2);
/* Device specific constants */ enum gs_usb_breq { @@@ -211,11 -199,6 +211,11 @@@ struct classic_can u8 data[8]; } __packed;
+struct classic_can_ts { + u8 data[8]; + __le32 timestamp_us; +} __packed; + struct classic_can_quirk { u8 data[8]; u8 quirk; @@@ -225,11 -208,6 +225,11 @@@ struct canfd u8 data[64]; } __packed;
+struct canfd_ts { + u8 data[64]; + __le32 timestamp_us; +} __packed; + struct canfd_quirk { u8 data[64]; u8 quirk; @@@ -246,10 -224,8 +246,10 @@@ struct gs_host_frame
union { DECLARE_FLEX_ARRAY(struct classic_can, classic_can); + DECLARE_FLEX_ARRAY(struct classic_can_ts, classic_can_ts); DECLARE_FLEX_ARRAY(struct classic_can_quirk, classic_can_quirk); DECLARE_FLEX_ARRAY(struct canfd, canfd); + DECLARE_FLEX_ARRAY(struct canfd_ts, canfd_ts); DECLARE_FLEX_ARRAY(struct canfd_quirk, canfd_quirk); }; } __packed; @@@ -283,11 -259,6 +283,11 @@@ struct gs_can struct can_bittiming_const bt_const, data_bt_const; unsigned int channel; /* channel number */
+ /* time counter for hardware timestamps */ + struct cyclecounter cc; + struct timecounter tc; + struct delayed_work timestamp; + u32 feature; unsigned int hf_size_tx;
@@@ -380,87 -351,6 +380,87 @@@ static int gs_cmd_reset(struct gs_can * return rc; }
+static inline int gs_usb_get_timestamp(const struct gs_can *dev, + u32 *timestamp_p) +{ + __le32 timestamp; + int rc; + + rc = usb_control_msg_recv(interface_to_usbdev(dev->iface), + usb_sndctrlpipe(interface_to_usbdev(dev->iface), 0), + GS_USB_BREQ_TIMESTAMP, + USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_INTERFACE, + dev->channel, 0, + ×tamp, sizeof(timestamp), + USB_CTRL_GET_TIMEOUT, + GFP_KERNEL); + if (rc) + return rc; + + *timestamp_p = le32_to_cpu(timestamp); + + return 0; +} + +static u64 gs_usb_timestamp_read(const struct cyclecounter *cc) +{ + const struct gs_can *dev; + u32 timestamp = 0; + int err; + + dev = container_of(cc, struct gs_can, cc); + err = gs_usb_get_timestamp(dev, ×tamp); + if (err) + netdev_err(dev->netdev, + "Error %d while reading timestamp. HW timestamps may be inaccurate.", + err); + + return timestamp; +} + +static void gs_usb_timestamp_work(struct work_struct *work) +{ + struct delayed_work *delayed_work = to_delayed_work(work); + struct gs_can *dev; + + dev = container_of(delayed_work, struct gs_can, timestamp); + timecounter_read(&dev->tc); + + schedule_delayed_work(&dev->timestamp, + GS_USB_TIMESTAMP_WORK_DELAY_SEC * HZ); +} + +static void gs_usb_skb_set_timestamp(const struct gs_can *dev, + struct sk_buff *skb, u32 timestamp) +{ + struct skb_shared_hwtstamps *hwtstamps = skb_hwtstamps(skb); + u64 ns; + + ns = timecounter_cyc2time(&dev->tc, timestamp); + hwtstamps->hwtstamp = ns_to_ktime(ns); +} + +static void gs_usb_timestamp_init(struct gs_can *dev) +{ + struct cyclecounter *cc = &dev->cc; + + cc->read = gs_usb_timestamp_read; + cc->mask = CYCLECOUNTER_MASK(32); + cc->shift = 32 - bits_per(NSEC_PER_SEC / GS_USB_TIMESTAMP_TIMER_HZ); + cc->mult = clocksource_hz2mult(GS_USB_TIMESTAMP_TIMER_HZ, cc->shift); + + timecounter_init(&dev->tc, &dev->cc, ktime_get_real_ns()); + + INIT_DELAYED_WORK(&dev->timestamp, gs_usb_timestamp_work); + schedule_delayed_work(&dev->timestamp, + GS_USB_TIMESTAMP_WORK_DELAY_SEC * HZ); +} + +static void gs_usb_timestamp_stop(struct gs_can *dev) +{ + cancel_delayed_work_sync(&dev->timestamp); +} + static void gs_update_state(struct gs_can *dev, struct can_frame *cf) { struct can_device_stats *can_stats = &dev->can.can_stats; @@@ -486,24 -376,6 +486,24 @@@ } }
+static void gs_usb_set_timestamp(const struct gs_can *dev, struct sk_buff *skb, + const struct gs_host_frame *hf) +{ + u32 timestamp; + + if (!(dev->feature & GS_CAN_FEATURE_HW_TIMESTAMP)) + return; + + if (hf->flags & GS_CAN_FLAG_FD) + timestamp = le32_to_cpu(hf->canfd_ts->timestamp_us); + else + timestamp = le32_to_cpu(hf->classic_can_ts->timestamp_us); + + gs_usb_skb_set_timestamp(dev, skb, timestamp); + + return; +} + static void gs_usb_receive_bulk_callback(struct urb *urb) { struct gs_usb *usbcan = urb->context; @@@ -571,8 -443,6 +571,8 @@@ gs_update_state(dev, cf); }
+ gs_usb_set_timestamp(dev, skb, hf); + netdev->stats.rx_packets++; netdev->stats.rx_bytes += hf->can_dlc;
@@@ -595,9 -465,6 +595,9 @@@ goto resubmit_urb; }
+ skb = dev->can.echo_skb[hf->echo_id]; + gs_usb_set_timestamp(dev, skb, hf); + netdev->stats.tx_packets++; netdev->stats.tx_bytes += can_get_echo_skb(netdev, hf->echo_id, NULL); @@@ -624,7 -491,7 +624,7 @@@
resubmit_urb: usb_fill_bulk_urb(urb, usbcan->udev, - usb_rcvbulkpipe(usbcan->udev, GSUSB_ENDPOINT_IN), + usb_rcvbulkpipe(usbcan->udev, GS_USB_ENDPOINT_IN), hf, dev->parent->hf_size_rx, gs_usb_receive_bulk_callback, usbcan);
@@@ -792,7 -659,7 +792,7 @@@ static netdev_tx_t gs_can_start_xmit(st }
usb_fill_bulk_urb(urb, dev->udev, - usb_sndbulkpipe(dev->udev, GSUSB_ENDPOINT_OUT), + usb_sndbulkpipe(dev->udev, GS_USB_ENDPOINT_OUT), hf, dev->hf_size_tx, gs_usb_xmit_callback, txc);
@@@ -902,7 -769,7 +902,7 @@@ static int gs_can_open(struct net_devic usb_fill_bulk_urb(urb, dev->udev, usb_rcvbulkpipe(dev->udev, - GSUSB_ENDPOINT_IN), + GS_USB_ENDPOINT_IN), buf, dev->parent->hf_size_rx, gs_usb_receive_bulk_callback, parent); @@@ -956,11 -823,8 +956,12 @@@ if (ctrlmode & CAN_CTRLMODE_3_SAMPLES) flags |= GS_CAN_MODE_TRIPLE_SAMPLE;
+ /* if hardware supports timestamps, enable it */ + if (dev->feature & GS_CAN_FEATURE_HW_TIMESTAMP) + flags |= GS_CAN_MODE_HW_TIMESTAMP; + /* finally start device */ + dev->can.state = CAN_STATE_ERROR_ACTIVE; dm->mode = cpu_to_le32(GS_CAN_MODE_START); dm->flags = cpu_to_le32(flags); rc = usb_control_msg(interface_to_usbdev(dev->iface), @@@ -972,17 -836,12 +973,16 @@@ if (rc < 0) { netdev_err(netdev, "Couldn't start device (err=%d)\n", rc); kfree(dm); + dev->can.state = CAN_STATE_STOPPED; return rc; }
kfree(dm);
+ /* start polling timestamp */ + if (dev->feature & GS_CAN_FEATURE_HW_TIMESTAMP) + gs_usb_timestamp_init(dev); + - dev->can.state = CAN_STATE_ERROR_ACTIVE; - parent->active_channels++; if (!(dev->can.ctrlmode & CAN_CTRLMODE_LISTENONLY)) netif_start_queue(netdev); @@@ -999,10 -858,6 +999,10 @@@ static int gs_can_close(struct net_devi
netif_stop_queue(netdev);
+ /* stop polling timestamp */ + if (dev->feature & GS_CAN_FEATURE_HW_TIMESTAMP) + gs_usb_timestamp_stop(dev); + /* Stop polling */ parent->active_channels--; if (!parent->active_channels) { @@@ -1035,22 -890,11 +1035,22 @@@ return 0; }
+static int gs_can_eth_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) +{ + const struct gs_can *dev = netdev_priv(netdev); + + if (dev->feature & GS_CAN_FEATURE_HW_TIMESTAMP) + return can_eth_ioctl_hwts(netdev, ifr, cmd); + + return -EOPNOTSUPP; +} + static const struct net_device_ops gs_usb_netdev_ops = { .ndo_open = gs_can_open, .ndo_stop = gs_can_close, .ndo_start_xmit = gs_can_start_xmit, .ndo_change_mtu = can_change_mtu, + .ndo_eth_ioctl = gs_can_eth_ioctl, };
static int gs_usb_set_identify(struct net_device *netdev, bool do_identify) @@@ -1081,17 -925,21 +1081,21 @@@ }
/* blink LED's for finding the this interface */ - static int gs_usb_set_phys_id(struct net_device *dev, + static int gs_usb_set_phys_id(struct net_device *netdev, enum ethtool_phys_id_state state) { + const struct gs_can *dev = netdev_priv(netdev); int rc = 0;
+ if (!(dev->feature & GS_CAN_FEATURE_IDENTIFY)) + return -EOPNOTSUPP; + switch (state) { case ETHTOOL_ID_ACTIVE: - rc = gs_usb_set_identify(dev, GS_CAN_IDENTIFY_ON); + rc = gs_usb_set_identify(netdev, GS_CAN_IDENTIFY_ON); break; case ETHTOOL_ID_INACTIVE: - rc = gs_usb_set_identify(dev, GS_CAN_IDENTIFY_OFF); + rc = gs_usb_set_identify(netdev, GS_CAN_IDENTIFY_OFF); break; default: break; @@@ -1100,21 -948,9 +1104,21 @@@ return rc; }
+static int gs_usb_get_ts_info(struct net_device *netdev, + struct ethtool_ts_info *info) +{ + struct gs_can *dev = netdev_priv(netdev); + + /* report if device supports HW timestamps */ + if (dev->feature & GS_CAN_FEATURE_HW_TIMESTAMP) + return can_ethtool_op_get_ts_info_hwts(netdev, info); + + return ethtool_op_get_ts_info(netdev, info); +} + static const struct ethtool_ops gs_usb_ethtool_ops = { .set_phys_id = gs_usb_set_phys_id, - .get_ts_info = ethtool_op_get_ts_info, + .get_ts_info = gs_usb_get_ts_info, };
static struct gs_can *gs_make_candev(unsigned int channel, @@@ -1231,8 -1067,8 +1235,8 @@@ * GS_CAN_FEATURE_QUIRK_BREQ_CANTACT_PRO to workaround this * issue. */ - if (dev->udev->descriptor.idVendor == cpu_to_le16(USB_GSUSB_1_VENDOR_ID) && - dev->udev->descriptor.idProduct == cpu_to_le16(USB_GSUSB_1_PRODUCT_ID) && + if (dev->udev->descriptor.idVendor == cpu_to_le16(USB_GS_USB_1_VENDOR_ID) && + dev->udev->descriptor.idProduct == cpu_to_le16(USB_GS_USB_1_PRODUCT_ID) && dev->udev->manufacturer && dev->udev->product && !strcmp(dev->udev->manufacturer, "LinkLayer Labs") && !strcmp(dev->udev->product, "CANtact Pro") && @@@ -1240,9 -1076,10 +1244,10 @@@ dev->feature |= GS_CAN_FEATURE_REQ_USB_QUIRK_LPC546XX | GS_CAN_FEATURE_QUIRK_BREQ_CANTACT_PRO;
- if (le32_to_cpu(dconf->sw_version) > 1) - if (feature & GS_CAN_FEATURE_IDENTIFY) - netdev->ethtool_ops = &gs_usb_ethtool_ops; + /* GS_CAN_FEATURE_IDENTIFY is only supported for sw_version > 1 */ + if (!(le32_to_cpu(dconf->sw_version) > 1 && + feature & GS_CAN_FEATURE_IDENTIFY)) + dev->feature &= ~GS_CAN_FEATURE_IDENTIFY;
kfree(bt_const);
@@@ -1370,13 -1207,15 +1375,13 @@@ static int gs_usb_probe(struct usb_inte }
init_usb_anchor(&dev->rx_submitted); - /* default to classic CAN, switch to CAN-FD if at least one of - * our channels support CAN-FD. - */ - dev->hf_size_rx = struct_size(hf, classic_can, 1);
usb_set_intfdata(intf, dev); dev->udev = udev;
for (i = 0; i < icount; i++) { + unsigned int hf_size_rx = 0; + dev->canch[i] = gs_make_candev(i, intf, dconf); if (IS_ERR_OR_NULL(dev->canch[i])) { /* save error code to return later */ @@@ -1394,21 -1233,8 +1399,21 @@@ } dev->canch[i]->parent = dev;
- if (dev->canch[i]->can.ctrlmode_supported & CAN_CTRLMODE_FD) - dev->hf_size_rx = struct_size(hf, canfd, 1); + /* set RX packet size based on FD and if hardware + * timestamps are supported. + */ + if (dev->canch[i]->can.ctrlmode_supported & CAN_CTRLMODE_FD) { + if (dev->canch[i]->feature & GS_CAN_FEATURE_HW_TIMESTAMP) + hf_size_rx = struct_size(hf, canfd_ts, 1); + else + hf_size_rx = struct_size(hf, canfd, 1); + } else { + if (dev->canch[i]->feature & GS_CAN_FEATURE_HW_TIMESTAMP) + hf_size_rx = struct_size(hf, classic_can_ts, 1); + else + hf_size_rx = struct_size(hf, classic_can, 1); + } + dev->hf_size_rx = max(dev->hf_size_rx, hf_size_rx); }
kfree(dconf); @@@ -1437,8 -1263,8 +1442,8 @@@ static void gs_usb_disconnect(struct us }
static const struct usb_device_id gs_usb_table[] = { - { USB_DEVICE_INTERFACE_NUMBER(USB_GSUSB_1_VENDOR_ID, - USB_GSUSB_1_PRODUCT_ID, 0) }, + { USB_DEVICE_INTERFACE_NUMBER(USB_GS_USB_1_VENDOR_ID, + USB_GS_USB_1_PRODUCT_ID, 0) }, { USB_DEVICE_INTERFACE_NUMBER(USB_CANDLELIGHT_VENDOR_ID, USB_CANDLELIGHT_PRODUCT_ID, 0) }, { USB_DEVICE_INTERFACE_NUMBER(USB_CES_CANEXT_FD_VENDOR_ID, diff --combined drivers/net/dsa/microchip/lan937x_main.c index 3e83f8ca0f09,5579644e8fde..cefe8517629a --- a/drivers/net/dsa/microchip/lan937x_main.c +++ b/drivers/net/dsa/microchip/lan937x_main.c @@@ -10,8 -10,6 +10,8 @@@ #include <linux/of_mdio.h> #include <linux/if_bridge.h> #include <linux/if_vlan.h> +#include <linux/irq.h> +#include <linux/irqdomain.h> #include <linux/math.h> #include <net/dsa.h> #include <net/switchdev.h> @@@ -20,8 -18,6 +20,8 @@@ #include "ksz_common.h" #include "lan937x.h"
+#define LAN937x_PNIRQS 6 + static int lan937x_cfg(struct ksz_device *dev, u32 addr, u8 bits, bool set) { return regmap_update_bits(dev->regmap[0], addr, bits, set ? bits : 0); @@@ -132,14 -128,14 +132,14 @@@ static int lan937x_internal_phy_read(st return ksz_read16(dev, REG_VPHY_IND_DATA__2, val); }
-void lan937x_r_phy(struct ksz_device *dev, u16 addr, u16 reg, u16 *data) +int lan937x_r_phy(struct ksz_device *dev, u16 addr, u16 reg, u16 *data) { - lan937x_internal_phy_read(dev, addr, reg, data); + return lan937x_internal_phy_read(dev, addr, reg, data); }
-void lan937x_w_phy(struct ksz_device *dev, u16 addr, u16 reg, u16 val) +int lan937x_w_phy(struct ksz_device *dev, u16 addr, u16 reg, u16 val) { - lan937x_internal_phy_write(dev, addr, reg, val); + return lan937x_internal_phy_write(dev, addr, reg, val); }
static int lan937x_sw_mdio_read(struct mii_bus *bus, int addr, int regnum) @@@ -169,45 -165,6 +169,45 @@@ static int lan937x_sw_mdio_write(struc return lan937x_internal_phy_write(dev, addr, regnum, val); }
+static int lan937x_irq_phy_setup(struct ksz_device *dev) +{ + struct dsa_switch *ds = dev->ds; + int phy, err_phy; + int irq; + int ret; + + for (phy = 0; phy < KSZ_MAX_NUM_PORTS; phy++) { + if (BIT(phy) & ds->phys_mii_mask) { + irq = irq_find_mapping(dev->ports[phy].pirq.domain, + PORT_SRC_PHY_INT); + if (irq < 0) { + ret = irq; + goto out; + } + ds->slave_mii_bus->irq[phy] = irq; + } + } + return 0; +out: + err_phy = phy; + + for (phy = 0; phy < err_phy; phy++) + if (BIT(phy) & ds->phys_mii_mask) + irq_dispose_mapping(ds->slave_mii_bus->irq[phy]); + + return ret; +} + +static void lan937x_irq_phy_free(struct ksz_device *dev) +{ + struct dsa_switch *ds = dev->ds; + int phy; + + for (phy = 0; phy < KSZ_MAX_NUM_PORTS; phy++) + if (BIT(phy) & ds->phys_mii_mask) + irq_dispose_mapping(ds->slave_mii_bus->irq[phy]); +} + static int lan937x_mdio_register(struct ksz_device *dev) { struct dsa_switch *ds = dev->ds; @@@ -237,17 -194,10 +237,17 @@@
ds->slave_mii_bus = bus;
+ ret = lan937x_irq_phy_setup(dev); + if (ret) { + of_node_put(mdio_np); + return ret; + } + ret = devm_of_mdiobus_register(ds->dev, bus, mdio_np); if (ret) { dev_err(ds->dev, "unable to register MDIO bus %s\n", bus->id); + lan937x_irq_phy_free(dev); }
of_node_put(mdio_np); @@@ -275,10 -225,6 +275,10 @@@ int lan937x_reset_switch(struct ksz_dev if (ret < 0) return ret;
+ ret = ksz_write32(dev, REG_SW_INT_STATUS__4, POR_READY_INT); + if (ret < 0) + return ret; + ret = ksz_write32(dev, REG_SW_PORT_INT_MASK__4, 0xFF); if (ret < 0) return ret; @@@ -298,10 -244,6 +298,6 @@@ void lan937x_port_setup(struct ksz_devi lan937x_port_cfg(dev, port, REG_PORT_CTRL_0, PORT_TAIL_TAG_ENABLE, true);
- /* disable frame check length field */ - lan937x_port_cfg(dev, port, REG_PORT_MAC_CTRL_0, PORT_CHECK_LENGTH, - false); - /* set back pressure for half duplex */ lan937x_port_cfg(dev, port, REG_PORT_MAC_CTRL_1, PORT_BACK_PRESSURE, true); @@@ -369,23 -311,6 +365,23 @@@ int lan937x_change_mtu(struct ksz_devic return 0; }
+int lan937x_set_ageing_time(struct ksz_device *dev, unsigned int msecs) +{ + u32 secs = msecs / 1000; + u32 value; + int ret; + + value = FIELD_GET(SW_AGE_PERIOD_7_0_M, secs); + + ret = ksz_write8(dev, REG_SW_AGE_PERIOD__1, value); + if (ret < 0) + return ret; + + value = FIELD_GET(SW_AGE_PERIOD_19_8_M, secs); + + return ksz_write16(dev, REG_SW_AGE_PERIOD__2, value); +} + static void lan937x_set_tune_adj(struct ksz_device *dev, int port, u16 reg, u8 val) { @@@ -454,289 -379,9 +450,289 @@@ void lan937x_setup_rgmii_delay(struct k } }
+int lan937x_switch_init(struct ksz_device *dev)
+{
+	dev->port_mask = (1 << dev->info->port_cnt) - 1;
+
+	return 0;
+}
+
+static void lan937x_girq_mask(struct irq_data *d)
+{
+	struct ksz_device *dev = irq_data_get_irq_chip_data(d);
+	unsigned int n = d->hwirq;
+
+	dev->girq.masked |= (1 << n);
+}
+
+static void lan937x_girq_unmask(struct irq_data *d)
+{
+	struct ksz_device *dev = irq_data_get_irq_chip_data(d);
+	unsigned int n = d->hwirq;
+
+	dev->girq.masked &= ~(1 << n);
+}
+
+static void lan937x_girq_bus_lock(struct irq_data *d)
+{
+	struct ksz_device *dev = irq_data_get_irq_chip_data(d);
+
+	mutex_lock(&dev->lock_irq);
+}
+
+static void lan937x_girq_bus_sync_unlock(struct irq_data *d)
+{
+	struct ksz_device *dev = irq_data_get_irq_chip_data(d);
+	int ret;
+
+	ret = ksz_write32(dev, REG_SW_PORT_INT_MASK__4, dev->girq.masked);
+	if (ret)
+		dev_err(dev->dev, "failed to change IRQ mask\n");
+
+	mutex_unlock(&dev->lock_irq);
+}
+
+static const struct irq_chip lan937x_girq_chip = {
+	.name = "lan937x-global",
+	.irq_mask = lan937x_girq_mask,
+	.irq_unmask = lan937x_girq_unmask,
+	.irq_bus_lock = lan937x_girq_bus_lock,
+	.irq_bus_sync_unlock = lan937x_girq_bus_sync_unlock,
+};
+
+static int lan937x_girq_domain_map(struct irq_domain *d,
+				   unsigned int irq, irq_hw_number_t hwirq)
+{
+	struct ksz_device *dev = d->host_data;
+
+	irq_set_chip_data(irq, d->host_data);
+	irq_set_chip_and_handler(irq, &dev->girq.chip, handle_level_irq);
+	irq_set_noprobe(irq);
+
+	return 0;
+}
+
+static const struct irq_domain_ops lan937x_girq_domain_ops = {
+	.map = lan937x_girq_domain_map,
+	.xlate = irq_domain_xlate_twocell,
+};
+
+static void lan937x_girq_free(struct ksz_device *dev)
+{
+	int irq, virq;
+
+	free_irq(dev->irq, dev);
+
+	for (irq = 0; irq < dev->girq.nirqs; irq++) {
+		virq = irq_find_mapping(dev->girq.domain, irq);
+		irq_dispose_mapping(virq);
+	}
+
+	irq_domain_remove(dev->girq.domain);
+}
+
+static irqreturn_t lan937x_girq_thread_fn(int irq, void *dev_id)
+{
+	struct ksz_device *dev = dev_id;
+	unsigned int nhandled = 0;
+	unsigned int sub_irq;
+	unsigned int n;
+	u32 data;
+	int ret;
+
+	/* Read global interrupt status register */
+	ret = ksz_read32(dev, REG_SW_PORT_INT_STATUS__4, &data);
+	if (ret)
+		goto out;
+
+	for (n = 0; n < dev->girq.nirqs; ++n) {
+		if (data & (1 << n)) {
+			sub_irq = irq_find_mapping(dev->girq.domain, n);
+			handle_nested_irq(sub_irq);
+			++nhandled;
+		}
+	}
+out:
+	return (nhandled > 0 ? IRQ_HANDLED : IRQ_NONE);
+}
+
+static int lan937x_girq_setup(struct ksz_device *dev)
+{
+	int ret, irq;
+
+	dev->girq.nirqs = dev->info->port_cnt;
+	dev->girq.domain = irq_domain_add_simple(NULL, dev->girq.nirqs, 0,
+						 &lan937x_girq_domain_ops, dev);
+	if (!dev->girq.domain)
+		return -ENOMEM;
+
+	for (irq = 0; irq < dev->girq.nirqs; irq++)
+		irq_create_mapping(dev->girq.domain, irq);
+
+	dev->girq.chip = lan937x_girq_chip;
+	dev->girq.masked = ~0;
+
+	ret = request_threaded_irq(dev->irq, NULL, lan937x_girq_thread_fn,
+				   IRQF_ONESHOT | IRQF_TRIGGER_FALLING,
+				   dev_name(dev->dev), dev);
+	if (ret)
+		goto out;
+
+	return 0;
+
+out:
+	lan937x_girq_free(dev);
+
+	return ret;
+}
+
+static void lan937x_pirq_mask(struct irq_data *d)
+{
+	struct ksz_port *port = irq_data_get_irq_chip_data(d);
+	unsigned int n = d->hwirq;
+
+	port->pirq.masked |= (1 << n);
+}
+
+static void lan937x_pirq_unmask(struct irq_data *d)
+{
+	struct ksz_port *port = irq_data_get_irq_chip_data(d);
+	unsigned int n = d->hwirq;
+
+	port->pirq.masked &= ~(1 << n);
+}
+
+static void lan937x_pirq_bus_lock(struct irq_data *d)
+{
+	struct ksz_port *port = irq_data_get_irq_chip_data(d);
+	struct ksz_device *dev = port->ksz_dev;
+
+	mutex_lock(&dev->lock_irq);
+}
+
+static void lan937x_pirq_bus_sync_unlock(struct irq_data *d)
+{
+	struct ksz_port *port = irq_data_get_irq_chip_data(d);
+	struct ksz_device *dev = port->ksz_dev;
+
+	ksz_pwrite8(dev, port->num, REG_PORT_INT_MASK, port->pirq.masked);
+	mutex_unlock(&dev->lock_irq);
+}
+
+static const struct irq_chip lan937x_pirq_chip = {
+	.name = "lan937x-port",
+	.irq_mask = lan937x_pirq_mask,
+	.irq_unmask = lan937x_pirq_unmask,
+	.irq_bus_lock = lan937x_pirq_bus_lock,
+	.irq_bus_sync_unlock = lan937x_pirq_bus_sync_unlock,
+};
+
+static int lan937x_pirq_domain_map(struct irq_domain *d, unsigned int irq,
+				   irq_hw_number_t hwirq)
+{
+	struct ksz_port *port = d->host_data;
+
+	irq_set_chip_data(irq, d->host_data);
+	irq_set_chip_and_handler(irq, &port->pirq.chip, handle_level_irq);
+	irq_set_noprobe(irq);
+
+	return 0;
+}
+
+static const struct irq_domain_ops lan937x_pirq_domain_ops = {
+	.map = lan937x_pirq_domain_map,
+	.xlate = irq_domain_xlate_twocell,
+};
+
+static void lan937x_pirq_free(struct ksz_device *dev, u8 p)
+{
+	struct ksz_port *port = &dev->ports[p];
+	int irq, virq;
+	int irq_num;
+
+	irq_num = irq_find_mapping(dev->girq.domain, p);
+	if (irq_num < 0)
+		return;
+
+	free_irq(irq_num, port);
+
+	for (irq = 0; irq < port->pirq.nirqs; irq++) {
+		virq = irq_find_mapping(port->pirq.domain, irq);
+		irq_dispose_mapping(virq);
+	}
+
+	irq_domain_remove(port->pirq.domain);
+}
+
+static irqreturn_t lan937x_pirq_thread_fn(int irq, void *dev_id)
+{
+	struct ksz_port *port = dev_id;
+	unsigned int nhandled = 0;
+	struct ksz_device *dev;
+	unsigned int sub_irq;
+	unsigned int n;
+	u8 data;
+
+	dev = port->ksz_dev;
+
+	/* Read port interrupt status register */
+	ksz_pread8(dev, port->num, REG_PORT_INT_STATUS, &data);
+
+	for (n = 0; n < port->pirq.nirqs; ++n) {
+		if (data & (1 << n)) {
+			sub_irq = irq_find_mapping(port->pirq.domain, n);
+			handle_nested_irq(sub_irq);
+			++nhandled;
+		}
+	}
+
+	return (nhandled > 0 ? IRQ_HANDLED : IRQ_NONE);
+}
+
+static int lan937x_pirq_setup(struct ksz_device *dev, u8 p)
+{
+	struct ksz_port *port = &dev->ports[p];
+	int ret, irq;
+	int irq_num;
+
+	port->pirq.nirqs = LAN937x_PNIRQS;
+	port->pirq.domain = irq_domain_add_simple(dev->dev->of_node,
+						  port->pirq.nirqs, 0,
+						  &lan937x_pirq_domain_ops,
+						  port);
+	if (!port->pirq.domain)
+		return -ENOMEM;
+
+	for (irq = 0; irq < port->pirq.nirqs; irq++)
+		irq_create_mapping(port->pirq.domain, irq);
+
+	port->pirq.chip = lan937x_pirq_chip;
+	port->pirq.masked = ~0;
+
+	irq_num = irq_find_mapping(dev->girq.domain, p);
+	if (irq_num < 0)
+		return irq_num;
+
+	snprintf(port->pirq.name, sizeof(port->pirq.name), "port_irq-%d", p);
+
+	ret = request_threaded_irq(irq_num, NULL, lan937x_pirq_thread_fn,
+				   IRQF_ONESHOT | IRQF_TRIGGER_FALLING,
+				   port->pirq.name, port);
+	if (ret)
+		goto out;
+
+	return 0;
+
+out:
+	lan937x_pirq_free(dev, p);
+
+	return ret;
+}
+
 int lan937x_setup(struct dsa_switch *ds)
 {
 	struct ksz_device *dev = ds->priv;
+	struct dsa_port *dp;
 	int ret;
/* enable Indirect Access from SPI to the VPHY registers */ @@@ -746,22 -391,10 +742,22 @@@ return ret; }
+ if (dev->irq > 0) { + ret = lan937x_girq_setup(dev); + if (ret) + return ret; + + dsa_switch_for_each_user_port(dp, dev->ds) { + ret = lan937x_pirq_setup(dev, dp->index); + if (ret) + goto out_girq; + } + } + ret = lan937x_mdio_register(dev); if (ret < 0) { dev_err(dev->dev, "failed to register the mdio"); - return ret; + goto out_pirq; }
/* The VLAN aware is a global setting. Mixed vlan @@@ -787,29 -420,13 +783,29 @@@ (SW_CLK125_ENB | SW_CLK25_ENB), true);
return 0; + +out_pirq: + if (dev->irq > 0) + dsa_switch_for_each_user_port(dp, dev->ds) + lan937x_pirq_free(dev, dp->index); +out_girq: + if (dev->irq > 0) + lan937x_girq_free(dev); + + return ret; }
-int lan937x_switch_init(struct ksz_device *dev) +void lan937x_teardown(struct dsa_switch *ds) { - dev->port_mask = (1 << dev->info->port_cnt) - 1; + struct ksz_device *dev = ds->priv; + struct dsa_port *dp;
- return 0; + if (dev->irq > 0) { + dsa_switch_for_each_user_port(dp, dev->ds) + lan937x_pirq_free(dev, dp->index); + + lan937x_girq_free(dev); + } }
void lan937x_switch_exit(struct ksz_device *dev) diff --combined drivers/net/ethernet/freescale/fec.h index dd055d734363,a5fed00cb971..b0100fe3c9e4 --- a/drivers/net/ethernet/freescale/fec.h +++ b/drivers/net/ethernet/freescale/fec.h @@@ -19,8 -19,6 +19,8 @@@ #include <linux/pm_qos.h> #include <linux/ptp_clock_kernel.h> #include <linux/timecounter.h> +#include <dt-bindings/firmware/imx/rsrc.h> +#include <linux/firmware/imx/sci.h>
#if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \ defined(CONFIG_M520x) || defined(CONFIG_M532x) || defined(CONFIG_ARM) || \ @@@ -563,6 -561,7 +563,7 @@@ struct fec_enet_private struct clk *clk_2x_txclk;
bool ptp_clk_on; + struct mutex ptp_clk_mutex; unsigned int num_tx_queues; unsigned int num_rx_queues;
@@@ -584,7 -583,6 +585,7 @@@ struct device_node *phy_node; bool rgmii_txc_dly; bool rgmii_rxc_dly; + bool rpm_active; int link; int full_duplex; int speed; @@@ -641,15 -639,6 +642,8 @@@ int pps_enable; unsigned int next_counter;
- struct { - struct timespec64 ts_phc; - u64 ns_sys; - u32 at_corr; - u8 at_inc_corr; - } ptp_saved_state; - + struct imx_sc_ipc *ipc_handle; + u64 ethtool_stats[]; };
@@@ -660,8 -649,5 +654,5 @@@ void fec_ptp_disable_hwts(struct net_de int fec_ptp_set(struct net_device *ndev, struct ifreq *ifr); int fec_ptp_get(struct net_device *ndev, struct ifreq *ifr);
- void fec_ptp_save_state(struct fec_enet_private *fep); - int fec_ptp_restore_state(struct fec_enet_private *fep); - /****************************************************************************/ #endif /* FEC_H */ diff --combined drivers/net/ethernet/freescale/fec_main.c index d8b487f5c1ca,92c55e1a5507..59921218a8a4 --- a/drivers/net/ethernet/freescale/fec_main.c +++ b/drivers/net/ethernet/freescale/fec_main.c @@@ -156,13 -156,6 +156,13 @@@ static const struct fec_devinfo fec_imx FEC_QUIRK_DELAYED_CLKS_SUPPORT, };
+static const struct fec_devinfo fec_s32v234_info = { + .quirks = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT | + FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM | + FEC_QUIRK_HAS_VLAN | FEC_QUIRK_HAS_AVB | + FEC_QUIRK_ERR007885 | FEC_QUIRK_BUG_CAPTURE, +}; + static struct platform_device_id fec_devtype[] = { { /* keep it for coldfire */ @@@ -195,9 -188,6 +195,9 @@@ }, { .name = "imx8qm-fec", .driver_data = (kernel_ulong_t)&fec_imx8qm_info, + }, { + .name = "s32v234-fec", + .driver_data = (kernel_ulong_t)&fec_s32v234_info, }, { /* sentinel */ } @@@ -214,7 -204,6 +214,7 @@@ enum imx_fec_type IMX6UL_FEC, IMX8MQ_FEC, IMX8QM_FEC, + S32V234_FEC, };
static const struct of_device_id fec_dt_ids[] = { @@@ -227,7 -216,6 +227,7 @@@ { .compatible = "fsl,imx6ul-fec", .data = &fec_devtype[IMX6UL_FEC], }, { .compatible = "fsl,imx8mq-fec", .data = &fec_devtype[IMX8MQ_FEC], }, { .compatible = "fsl,imx8qm-fec", .data = &fec_devtype[IMX8QM_FEC], }, + { .compatible = "fsl,s32v234-fec", .data = &fec_devtype[S32V234_FEC], }, { /* sentinel */ } }; MODULE_DEVICE_TABLE(of, fec_dt_ids); @@@ -298,11 -286,8 +298,8 @@@ MODULE_PARM_DESC(macaddr, "FEC Etherne #define FEC_MMFR_TA (2 << 16) #define FEC_MMFR_DATA(v) (v & 0xffff) /* FEC ECR bits definition */ - #define FEC_ECR_RESET BIT(0) - #define FEC_ECR_ETHEREN BIT(1) - #define FEC_ECR_MAGICEN BIT(2) - #define FEC_ECR_SLEEP BIT(3) - #define FEC_ECR_EN1588 BIT(4) + #define FEC_ECR_MAGICEN (1 << 2) + #define FEC_ECR_SLEEP (1 << 3)
#define FEC_MII_TIMEOUT 30000 /* us */
@@@ -998,9 -983,6 +995,6 @@@ fec_restart(struct net_device *ndev u32 temp_mac[2]; u32 rcntl = OPT_FRAME_SIZE | 0x04; u32 ecntl = 0x2; /* ETHEREN */ - struct ptp_clock_request ptp_rq = { .type = PTP_CLK_REQ_PPS }; - - fec_ptp_save_state(fep);
/* Whack a reset. We should wait for this. * For i.MX6SX SOC, enet use AXI bus, we use disable MAC @@@ -1154,7 -1136,7 +1148,7 @@@ }
if (fep->bufdesc_ex) - ecntl |= FEC_ECR_EN1588; + ecntl |= (1 << 4);
if (fep->quirks & FEC_QUIRK_DELAYED_CLKS_SUPPORT && fep->rgmii_txc_dly) @@@ -1175,14 -1157,6 +1169,6 @@@ if (fep->bufdesc_ex) fec_ptp_start_cyclecounter(ndev);
- /* Restart PPS if needed */ - if (fep->pps_enable) { - /* Clear flag so fec_ptp_enable_pps() doesn't return immediately */ - fep->pps_enable = 0; - fec_ptp_restore_state(fep); - fep->ptp_caps.enable(&fep->ptp_caps, &ptp_rq, 1); - } - /* Enable interrupts we wish to service */ if (fep->link) writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK); @@@ -1194,34 -1168,6 +1180,34 @@@
}
+static int fec_enet_ipc_handle_init(struct fec_enet_private *fep) +{ + if (!(of_machine_is_compatible("fsl,imx8qm") || + of_machine_is_compatible("fsl,imx8qxp") || + of_machine_is_compatible("fsl,imx8dxl"))) + return 0; + + return imx_scu_get_handle(&fep->ipc_handle); +} + +static void fec_enet_ipg_stop_set(struct fec_enet_private *fep, bool enabled) +{ + struct device_node *np = fep->pdev->dev.of_node; + u32 rsrc_id, val; + int idx; + + if (!np || !fep->ipc_handle) + return; + + idx = of_alias_get_id(np, "ethernet"); + if (idx < 0) + idx = 0; + rsrc_id = idx ? IMX_SC_R_ENET_1 : IMX_SC_R_ENET_0; + + val = enabled ? 1 : 0; + imx_sc_misc_set_control(fep->ipc_handle, rsrc_id, IMX_SC_C_IPG_STOP, val); +} + static void fec_enet_stop_mode(struct fec_enet_private *fep, bool enabled) { struct fec_platform_data *pdata = fep->pdev->dev.platform_data; @@@ -1237,8 -1183,6 +1223,8 @@@ BIT(stop_gpr->bit), 0); } else if (pdata && pdata->sleep_mode_enable) { pdata->sleep_mode_enable(enabled); + } else { + fec_enet_ipg_stop_set(fep, enabled); } }
@@@ -1263,8 -1207,6 +1249,6 @@@ fec_stop(struct net_device *ndev struct fec_enet_private *fep = netdev_priv(ndev); u32 rmii_mode = readl(fep->hwp + FEC_R_CNTRL) & (1 << 8); u32 val; - struct ptp_clock_request ptp_rq = { .type = PTP_CLK_REQ_PPS }; - u32 ecntl = 0;
/* We cannot expect a graceful transmit stop without link !!! */ if (fep->link) { @@@ -1274,8 -1216,6 +1258,6 @@@ netdev_err(ndev, "Graceful transmit stop did not complete!\n"); }
- fec_ptp_save_state(fep); - /* Whack a reset. We should wait for this. * For i.MX6SX SOC, enet use AXI bus, we use disable MAC * instead of reset MAC itself. @@@ -1295,28 -1235,12 +1277,12 @@@ writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED); writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK);
- if (fep->bufdesc_ex) - ecntl |= FEC_ECR_EN1588; - /* We have to keep ENET enabled to have MII interrupt stay working */ if (fep->quirks & FEC_QUIRK_ENET_MAC && !(fep->wol_flag & FEC_WOL_FLAG_SLEEP_ON)) { - ecntl |= FEC_ECR_ETHEREN; + writel(2, fep->hwp + FEC_ECNTRL); writel(rmii_mode, fep->hwp + FEC_R_CNTRL); } - - writel(ecntl, fep->hwp + FEC_ECNTRL); - - if (fep->bufdesc_ex) - fec_ptp_start_cyclecounter(ndev); - - /* Restart PPS if needed */ - if (fep->pps_enable) { - /* Clear flag so fec_ptp_enable_pps() doesn't return immediately */ - fep->pps_enable = 0; - fec_ptp_restore_state(fep); - fep->ptp_caps.enable(&fep->ptp_caps, &ptp_rq, 1); - } }
@@@ -2071,7 -1995,6 +2037,6 @@@ static void fec_enet_phy_reset_after_cl static int fec_enet_clk_enable(struct net_device *ndev, bool enable) { struct fec_enet_private *fep = netdev_priv(ndev); - unsigned long flags; int ret;
if (enable) { @@@ -2080,15 -2003,15 +2045,15 @@@ return ret;
if (fep->clk_ptp) { - spin_lock_irqsave(&fep->tmreg_lock, flags); + mutex_lock(&fep->ptp_clk_mutex); ret = clk_prepare_enable(fep->clk_ptp); if (ret) { - spin_unlock_irqrestore(&fep->tmreg_lock, flags); + mutex_unlock(&fep->ptp_clk_mutex); goto failed_clk_ptp; } else { fep->ptp_clk_on = true; } - spin_unlock_irqrestore(&fep->tmreg_lock, flags); + mutex_unlock(&fep->ptp_clk_mutex); }
ret = clk_prepare_enable(fep->clk_ref); @@@ -2103,10 -2026,10 +2068,10 @@@ } else { clk_disable_unprepare(fep->clk_enet_out); if (fep->clk_ptp) { - spin_lock_irqsave(&fep->tmreg_lock, flags); + mutex_lock(&fep->ptp_clk_mutex); clk_disable_unprepare(fep->clk_ptp); fep->ptp_clk_on = false; - spin_unlock_irqrestore(&fep->tmreg_lock, flags); + mutex_unlock(&fep->ptp_clk_mutex); } clk_disable_unprepare(fep->clk_ref); clk_disable_unprepare(fep->clk_2x_txclk); @@@ -2119,10 -2042,10 +2084,10 @@@ failed_clk_2x_txclk clk_disable_unprepare(fep->clk_ref); failed_clk_ref: if (fep->clk_ptp) { - spin_lock_irqsave(&fep->tmreg_lock, flags); + mutex_lock(&fep->ptp_clk_mutex); clk_disable_unprepare(fep->clk_ptp); fep->ptp_clk_on = false; - spin_unlock_irqrestore(&fep->tmreg_lock, flags); + mutex_unlock(&fep->ptp_clk_mutex); } failed_clk_ptp: clk_disable_unprepare(fep->clk_enet_out); @@@ -2182,13 -2105,13 +2147,13 @@@ static int fec_enet_mii_probe(struct ne continue; if (dev_id--) continue; - strlcpy(mdio_bus_id, fep->mii_bus->id, MII_BUS_ID_SIZE); + strscpy(mdio_bus_id, fep->mii_bus->id, MII_BUS_ID_SIZE); break; }
if (phy_id >= PHY_MAX_ADDR) { netdev_info(ndev, "no PHY, assuming direct connection to switch\n"); - strlcpy(mdio_bus_id, "fixed-0", MII_BUS_ID_SIZE); + strscpy(mdio_bus_id, "fixed-0", MII_BUS_ID_SIZE); phy_id = 0; }
@@@ -2372,9 -2295,9 +2337,9 @@@ static void fec_enet_get_drvinfo(struc { struct fec_enet_private *fep = netdev_priv(ndev);
- strlcpy(info->driver, fep->pdev->dev.driver->name, + strscpy(info->driver, fep->pdev->dev.driver->name, sizeof(info->driver)); - strlcpy(info->bus_info, dev_name(&ndev->dev), sizeof(info->bus_info)); + strscpy(info->bus_info, dev_name(&ndev->dev), sizeof(info->bus_info)); }
static int fec_enet_get_regs_len(struct net_device *ndev) @@@ -3901,10 -3824,6 +3866,10 @@@ fec_probe(struct platform_device *pdev !of_property_read_bool(np, "fsl,err006687-workaround-present")) fep->quirks |= FEC_QUIRK_ERR006687;
+ ret = fec_enet_ipc_handle_init(fep); + if (ret) + goto failed_ipc_init; + if (of_get_property(np, "fsl,magic-packet", NULL)) fep->wol_flag |= FEC_WOL_HAS_MAGIC_PACKET;
@@@ -3961,7 -3880,7 +3926,7 @@@ }
fep->ptp_clk_on = false; - spin_lock_init(&fep->tmreg_lock); + mutex_init(&fep->ptp_clk_mutex);
/* clk_ref is optional, depends on board */ fep->clk_ref = devm_clk_get_optional(&pdev->dev, "enet_clk_ref"); @@@ -4102,7 -4021,6 +4067,7 @@@ failed_rgmii_delay of_phy_deregister_fixed_link(np); of_node_put(phy_node); failed_stop_mode: +failed_ipc_init: failed_phy: dev_id--; failed_ioremap: @@@ -4147,7 -4065,6 +4112,7 @@@ static int __maybe_unused fec_suspend(s { struct net_device *ndev = dev_get_drvdata(dev); struct fec_enet_private *fep = netdev_priv(ndev); + int ret;
rtnl_lock(); if (netif_running(ndev)) { @@@ -4172,15 -4089,6 +4137,15 @@@ } /* It's safe to disable clocks since interrupts are masked */ fec_enet_clk_enable(ndev, false); + + fep->rpm_active = !pm_runtime_status_suspended(dev); + if (fep->rpm_active) { + ret = pm_runtime_force_suspend(dev); + if (ret < 0) { + rtnl_unlock(); + return ret; + } + } } rtnl_unlock();
@@@ -4211,9 -4119,6 +4176,9 @@@ static int __maybe_unused fec_resume(st
rtnl_lock(); if (netif_running(ndev)) { + if (fep->rpm_active) + pm_runtime_force_resume(dev); + ret = fec_enet_clk_enable(ndev, true); if (ret) { rtnl_unlock(); diff --combined drivers/net/ethernet/freescale/fec_ptp.c index 7be97ab84e50,3dc3c0b626c2..cffd9ad499dd --- a/drivers/net/ethernet/freescale/fec_ptp.c +++ b/drivers/net/ethernet/freescale/fec_ptp.c @@@ -365,19 -365,21 +365,21 @@@ static int fec_ptp_adjtime(struct ptp_c */ static int fec_ptp_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts) { - struct fec_enet_private *fep = + struct fec_enet_private *adapter = container_of(ptp, struct fec_enet_private, ptp_caps); u64 ns; unsigned long flags;
- spin_lock_irqsave(&fep->tmreg_lock, flags); + mutex_lock(&adapter->ptp_clk_mutex); /* Check the ptp clock */ - if (!fep->ptp_clk_on) { - spin_unlock_irqrestore(&fep->tmreg_lock, flags); + if (!adapter->ptp_clk_on) { + mutex_unlock(&adapter->ptp_clk_mutex); return -EINVAL; } - ns = timecounter_read(&fep->tc); - spin_unlock_irqrestore(&fep->tmreg_lock, flags); + spin_lock_irqsave(&adapter->tmreg_lock, flags); + ns = timecounter_read(&adapter->tc); + spin_unlock_irqrestore(&adapter->tmreg_lock, flags); + mutex_unlock(&adapter->ptp_clk_mutex);
*ts = ns_to_timespec64(ns);
@@@ -402,10 -404,10 +404,10 @@@ static int fec_ptp_settime(struct ptp_c unsigned long flags; u32 counter;
- spin_lock_irqsave(&fep->tmreg_lock, flags); + mutex_lock(&fep->ptp_clk_mutex); /* Check the ptp clock */ if (!fep->ptp_clk_on) { - spin_unlock_irqrestore(&fep->tmreg_lock, flags); + mutex_unlock(&fep->ptp_clk_mutex); return -EINVAL; }
@@@ -415,9 -417,11 +417,11 @@@ */ counter = ns & fep->cc.mask;
+ spin_lock_irqsave(&fep->tmreg_lock, flags); writel(counter, fep->hwp + FEC_ATIME); timecounter_init(&fep->tc, &fep->cc, ns); spin_unlock_irqrestore(&fep->tmreg_lock, flags); + mutex_unlock(&fep->ptp_clk_mutex); return 0; }
@@@ -514,11 -518,13 +518,13 @@@ static void fec_time_keep(struct work_s struct fec_enet_private *fep = container_of(dwork, struct fec_enet_private, time_keep); unsigned long flags;
- spin_lock_irqsave(&fep->tmreg_lock, flags); + mutex_lock(&fep->ptp_clk_mutex); if (fep->ptp_clk_on) { + spin_lock_irqsave(&fep->tmreg_lock, flags); timecounter_read(&fep->tc); + spin_unlock_irqrestore(&fep->tmreg_lock, flags); } - spin_unlock_irqrestore(&fep->tmreg_lock, flags); + mutex_unlock(&fep->ptp_clk_mutex);
schedule_delayed_work(&fep->time_keep, HZ); } @@@ -572,7 -578,7 +578,7 @@@ void fec_ptp_init(struct platform_devic int ret;
fep->ptp_caps.owner = THIS_MODULE; - strlcpy(fep->ptp_caps.name, "fec ptp", sizeof(fep->ptp_caps.name)); + strscpy(fep->ptp_caps.name, "fec ptp", sizeof(fep->ptp_caps.name));
fep->ptp_caps.max_adj = 250000000; fep->ptp_caps.n_alarm = 0; @@@ -593,6 -599,8 +599,8 @@@ } fep->ptp_inc = NSEC_PER_SEC / fep->cycle_speed;
+ spin_lock_init(&fep->tmreg_lock); + fec_ptp_start_cyclecounter(ndev);
INIT_DELAYED_WORK(&fep->time_keep, fec_time_keep); @@@ -625,36 -633,7 +633,7 @@@ void fec_ptp_stop(struct platform_devic struct net_device *ndev = platform_get_drvdata(pdev); struct fec_enet_private *fep = netdev_priv(ndev);
- if (fep->pps_enable) - fec_ptp_enable_pps(fep, 0); - cancel_delayed_work_sync(&fep->time_keep); if (fep->ptp_clock) ptp_clock_unregister(fep->ptp_clock); } - - void fec_ptp_save_state(struct fec_enet_private *fep) - { - u32 atime_inc_corr; - - fec_ptp_gettime(&fep->ptp_caps, &fep->ptp_saved_state.ts_phc); - fep->ptp_saved_state.ns_sys = ktime_get_ns(); - - fep->ptp_saved_state.at_corr = readl(fep->hwp + FEC_ATIME_CORR); - atime_inc_corr = readl(fep->hwp + FEC_ATIME_INC) & FEC_T_INC_CORR_MASK; - fep->ptp_saved_state.at_inc_corr = (u8)(atime_inc_corr >> FEC_T_INC_CORR_OFFSET); - } - - int fec_ptp_restore_state(struct fec_enet_private *fep) - { - u32 atime_inc = readl(fep->hwp + FEC_ATIME_INC) & FEC_T_INC_MASK; - u64 ns_sys; - - writel(fep->ptp_saved_state.at_corr, fep->hwp + FEC_ATIME_CORR); - atime_inc |= ((u32)fep->ptp_saved_state.at_inc_corr) << FEC_T_INC_CORR_OFFSET; - writel(atime_inc, fep->hwp + FEC_ATIME_INC); - - ns_sys = ktime_get_ns() - fep->ptp_saved_state.ns_sys; - timespec64_add_ns(&fep->ptp_saved_state.ts_phc, ns_sys); - return fec_ptp_settime(&fep->ptp_caps, &fep->ptp_saved_state.ts_phc); - } diff --combined drivers/net/ethernet/intel/i40e/i40e_main.c index 5e60ff79450d,e3d9804aeb25..3c8b70d31232 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@@ -66,7 -66,6 +66,7 @@@ static const struct pci_device_id i40e_ {PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_A), 0}, {PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_B), 0}, {PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_C), 0}, + {PCI_VDEVICE(INTEL, I40E_DEV_ID_1G_BASE_T_BC), 0}, {PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T), 0}, {PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T4), 0}, {PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T_BC), 0}, @@@ -3879,7 -3878,7 +3879,7 @@@ static void i40e_vsi_configure_msix(str wr32(hw, I40E_PFINT_RATEN(vector - 1), i40e_intrl_usec_to_reg(vsi->int_rate_limit));
- /* Linked list for the queuepairs assigned to this vector */ + /* beginning of the linked list for the RX queue assigned to this vector */ wr32(hw, I40E_PFINT_LNKLSTN(vector - 1), qp); for (q = 0; q < q_vector->num_ringpairs; q++) { u32 nextqp = has_xdp ? qp + vsi->alloc_queue_pairs : qp; @@@ -3895,7 -3894,6 +3895,7 @@@ wr32(hw, I40E_QINT_RQCTL(qp), val);
if (has_xdp) { + /* TX queue with next queue set to TX */ val = I40E_QINT_TQCTL_CAUSE_ENA_MASK | (I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) | (vector << I40E_QINT_TQCTL_MSIX_INDX_SHIFT) | @@@ -3905,7 -3903,7 +3905,7 @@@
wr32(hw, I40E_QINT_TQCTL(nextqp), val); } - + /* TX queue with the next queue set to RX, or end of the linked list */ val = I40E_QINT_TQCTL_CAUSE_ENA_MASK | (I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) | (vector << I40E_QINT_TQCTL_MSIX_INDX_SHIFT) | @@@ -3974,6 -3972,7 +3974,6 @@@ static void i40e_configure_msi_and_lega struct i40e_q_vector *q_vector = vsi->q_vectors[0]; struct i40e_pf *pf = vsi->back; struct i40e_hw *hw = &pf->hw; - u32 val;
/* set the ITR configuration */ q_vector->rx.next_update = jiffies + 1; @@@ -3990,20 -3989,28 +3990,20 @@@ /* FIRSTQ_INDX = 0, FIRSTQ_TYPE = 0 (rx) */ wr32(hw, I40E_PFINT_LNKLST0, 0);
- /* Associate the queue pair to the vector and enable the queue int */ - val = I40E_QINT_RQCTL_CAUSE_ENA_MASK | - (I40E_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT) | - (nextqp << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT)| - (I40E_QUEUE_TYPE_TX << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT); - - wr32(hw, I40E_QINT_RQCTL(0), val); + /* Associate the queue pair to the vector and enable the queue + * interrupt: RX queue in the linked list with the next queue set to TX + */ + wr32(hw, I40E_QINT_RQCTL(0), I40E_QINT_RQCTL_VAL(nextqp, 0, TX));
if (i40e_enabled_xdp_vsi(vsi)) { - val = I40E_QINT_TQCTL_CAUSE_ENA_MASK | - (I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT)| - (I40E_QUEUE_TYPE_TX - << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT); - - wr32(hw, I40E_QINT_TQCTL(nextqp), val); + /* TX queue in linked list with next queue set to TX */ + wr32(hw, I40E_QINT_TQCTL(nextqp), + I40E_QINT_TQCTL_VAL(nextqp, 0, TX)); }
- val = I40E_QINT_TQCTL_CAUSE_ENA_MASK | - (I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) | - (I40E_QUEUE_END_OF_LIST << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT); - - wr32(hw, I40E_QINT_TQCTL(0), val); + /* last TX queue so the next RX queue doesn't matter */ + wr32(hw, I40E_QINT_TQCTL(0), + I40E_QINT_TQCTL_VAL(I40E_QUEUE_END_OF_LIST, 0, RX)); i40e_flush(hw); }
@@@ -5901,6 -5908,26 +5901,26 @@@ static int i40e_get_link_speed(struct i } }
+ /** + * i40e_bw_bytes_to_mbits - Convert max_tx_rate from bytes to mbits + * @vsi: Pointer to vsi structure + * @max_tx_rate: max TX rate in bytes to be converted into Mbits + * + * Helper function to convert units before send to set BW limit + **/ + static u64 i40e_bw_bytes_to_mbits(struct i40e_vsi *vsi, u64 max_tx_rate) + { + if (max_tx_rate < I40E_BW_MBPS_DIVISOR) { + dev_warn(&vsi->back->pdev->dev, + "Setting max tx rate to minimum usable value of 50Mbps.\n"); + max_tx_rate = I40E_BW_CREDIT_DIVISOR; + } else { + do_div(max_tx_rate, I40E_BW_MBPS_DIVISOR); + } + + return max_tx_rate; + } + /** * i40e_set_bw_limit - setup BW limit for Tx traffic based on max_tx_rate * @vsi: VSI to be configured @@@ -5923,10 -5950,10 +5943,10 @@@ int i40e_set_bw_limit(struct i40e_vsi * max_tx_rate, seid); return -EINVAL; } - if (max_tx_rate && max_tx_rate < 50) { + if (max_tx_rate && max_tx_rate < I40E_BW_CREDIT_DIVISOR) { dev_warn(&pf->pdev->dev, "Setting max tx rate to minimum usable value of 50Mbps.\n"); - max_tx_rate = 50; + max_tx_rate = I40E_BW_CREDIT_DIVISOR; }
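The new helper centralizes a conversion that two call sites previously open-coded: mqprio hands the driver a byte rate, do_div() turns it into whole Mbit/s in place, and rates too small to divide are clamped to the 50 Mbit/s minimum the firmware accepts. Assuming the usual definitions I40E_BW_MBPS_DIVISOR == 125000 (bytes per second in 1 Mbit/s) and I40E_BW_CREDIT_DIVISOR == 50, a request of 125,000,000 bytes/s comes out as 1000 Mbit/s. A standalone sketch of the arithmetic, with illustrative MY_* names:

#include <asm/div64.h>
#include <linux/types.h>

#define MY_BW_MBPS_DIVISOR	125000U	/* bytes per second in 1 Mbit/s */
#define MY_BW_MIN_MBPS		50ULL	/* minimum usable TX rate */

static u64 my_bytes_to_mbits(u64 rate_bytes)
{
	if (rate_bytes < MY_BW_MBPS_DIVISOR)
		return MY_BW_MIN_MBPS;		/* clamp tiny requests up */

	do_div(rate_bytes, MY_BW_MBPS_DIVISOR);	/* in-place u64 division */
	return rate_bytes;			/* 125000000 -> 1000 Mbit/s */
}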
/* Tx rate credits are in values of 50Mbps, 0 is disabled */ @@@ -8217,9 -8244,9 +8237,9 @@@ config_tc
if (i40e_is_tc_mqprio_enabled(pf)) { if (vsi->mqprio_qopt.max_rate[0]) { - u64 max_tx_rate = vsi->mqprio_qopt.max_rate[0]; + u64 max_tx_rate = i40e_bw_bytes_to_mbits(vsi, + vsi->mqprio_qopt.max_rate[0]);
- do_div(max_tx_rate, I40E_BW_MBPS_DIVISOR); ret = i40e_set_bw_limit(vsi, vsi->seid, max_tx_rate); if (!ret) { u64 credits = max_tx_rate; @@@ -10697,7 -10724,7 +10717,7 @@@ static void i40e_send_version(struct i4 dv.minor_version = 0xff; dv.build_version = 0xff; dv.subbuild_version = 0; - strlcpy(dv.driver_string, UTS_RELEASE, sizeof(dv.driver_string)); + strscpy(dv.driver_string, UTS_RELEASE, sizeof(dv.driver_string)); i40e_aq_send_driver_version(&pf->hw, &dv, NULL); }
@@@ -10964,10 -10991,10 +10984,10 @@@ static void i40e_rebuild(struct i40e_p }
if (vsi->mqprio_qopt.max_rate[0]) { - u64 max_tx_rate = vsi->mqprio_qopt.max_rate[0]; + u64 max_tx_rate = i40e_bw_bytes_to_mbits(vsi, + vsi->mqprio_qopt.max_rate[0]); u64 credits = 0;
- do_div(max_tx_rate, I40E_BW_MBPS_DIVISOR); ret = i40e_set_bw_limit(vsi, vsi->seid, max_tx_rate); if (ret) goto end_unlock; @@@ -16045,23 -16072,23 +16065,23 @@@ static int i40e_probe(struct pci_dev *p
switch (hw->bus.speed) { case i40e_bus_speed_8000: - strlcpy(speed, "8.0", PCI_SPEED_SIZE); break; + strscpy(speed, "8.0", PCI_SPEED_SIZE); break; case i40e_bus_speed_5000: - strlcpy(speed, "5.0", PCI_SPEED_SIZE); break; + strscpy(speed, "5.0", PCI_SPEED_SIZE); break; case i40e_bus_speed_2500: - strlcpy(speed, "2.5", PCI_SPEED_SIZE); break; + strscpy(speed, "2.5", PCI_SPEED_SIZE); break; default: break; } switch (hw->bus.width) { case i40e_bus_width_pcie_x8: - strlcpy(width, "8", PCI_WIDTH_SIZE); break; + strscpy(width, "8", PCI_WIDTH_SIZE); break; case i40e_bus_width_pcie_x4: - strlcpy(width, "4", PCI_WIDTH_SIZE); break; + strscpy(width, "4", PCI_WIDTH_SIZE); break; case i40e_bus_width_pcie_x2: - strlcpy(width, "2", PCI_WIDTH_SIZE); break; + strscpy(width, "2", PCI_WIDTH_SIZE); break; case i40e_bus_width_pcie_x1: - strlcpy(width, "1", PCI_WIDTH_SIZE); break; + strscpy(width, "1", PCI_WIDTH_SIZE); break; default: break; } diff --combined drivers/net/ethernet/intel/iavf/iavf_main.c index 1671e52b6ba2,0c89f16bf1e2..79fef8c59d65 --- a/drivers/net/ethernet/intel/iavf/iavf_main.c +++ b/drivers/net/ethernet/intel/iavf/iavf_main.c @@@ -1077,7 -1077,6 +1077,6 @@@ static int iavf_set_mac(struct net_devi { struct iavf_adapter *adapter = netdev_priv(netdev); struct sockaddr *addr = p; - bool handle_mac = iavf_is_mac_set_handled(netdev, addr->sa_data); int ret;
if (!is_valid_ether_addr(addr->sa_data)) @@@ -1094,10 -1093,9 +1093,9 @@@ return 0; }
- if (handle_mac) - goto done; - - ret = wait_event_interruptible_timeout(adapter->vc_waitqueue, false, msecs_to_jiffies(2500)); + ret = wait_event_interruptible_timeout(adapter->vc_waitqueue, + iavf_is_mac_set_handled(netdev, addr->sa_data), + msecs_to_jiffies(2500));
/* If ret < 0 then the wait was interrupted. * If ret == 0 then the wait timed out. @@@ -1111,7 -1109,6 +1109,6 @@@ if (!ret) return -EAGAIN;
- done: if (!ether_addr_equal(netdev->dev_addr, addr->sa_data)) return -EACCES;
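The rewritten wait above relies on the three-way return convention of wait_event_interruptible_timeout(): a negative value means a signal interrupted the sleep, zero means the timeout expired with the condition still false, and a positive value is the jiffies remaining when the condition became true. A sketch of that dispatch, where the waitqueue and condition are stand-ins rather than the driver's real names:

#include <linux/jiffies.h>
#include <linux/sched.h>
#include <linux/wait.h>

static int my_wait_for_reply(wait_queue_head_t *wq, bool *done)
{
	long ret;

	ret = wait_event_interruptible_timeout(*wq, READ_ONCE(*done),
					       msecs_to_jiffies(2500));
	if (ret < 0)
		return ret;	/* a signal interrupted the wait */
	if (!ret)
		return -EAGAIN;	/* timed out, condition never came true */

	return 0;		/* condition satisfied in time */
}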
@@@ -1270,138 -1267,66 +1267,138 @@@ static void iavf_up_complete(struct iav }
/** - * iavf_down - Shutdown the connection processing + * iavf_clear_mac_vlan_filters - Remove mac and vlan filters not sent to PF + * yet and mark the others to be removed. * @adapter: board private structure - * - * Expects to be called while holding the __IAVF_IN_CRITICAL_TASK bit lock. **/ -void iavf_down(struct iavf_adapter *adapter) +static void iavf_clear_mac_vlan_filters(struct iavf_adapter *adapter) { - struct net_device *netdev = adapter->netdev; - struct iavf_vlan_filter *vlf; - struct iavf_cloud_filter *cf; - struct iavf_fdir_fltr *fdir; - struct iavf_mac_filter *f; - struct iavf_adv_rss *rss; - - if (adapter->state <= __IAVF_DOWN_PENDING) - return; - - netif_carrier_off(netdev); - netif_tx_disable(netdev); - adapter->link_up = false; - iavf_napi_disable_all(adapter); - iavf_irq_disable(adapter); + struct iavf_vlan_filter *vlf, *vlftmp; + struct iavf_mac_filter *f, *ftmp;
spin_lock_bh(&adapter->mac_vlan_list_lock); - /* clear the sync flag on all filters */ __dev_uc_unsync(adapter->netdev, NULL); __dev_mc_unsync(adapter->netdev, NULL);
/* remove all MAC filters */ - list_for_each_entry(f, &adapter->mac_filter_list, list) { - f->remove = true; + list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, + list) { + if (f->add) { + list_del(&f->list); + kfree(f); + } else { + f->remove = true; + } }
/* remove all VLAN filters */ - list_for_each_entry(vlf, &adapter->vlan_filter_list, list) { - vlf->remove = true; + list_for_each_entry_safe(vlf, vlftmp, &adapter->vlan_filter_list, + list) { + if (vlf->add) { + list_del(&vlf->list); + kfree(vlf); + } else { + vlf->remove = true; + } } - spin_unlock_bh(&adapter->mac_vlan_list_lock); +} + +/** + * iavf_clear_cloud_filters - Remove cloud filters not sent to PF yet and + * mark the others to be removed. + * @adapter: board private structure + **/ +static void iavf_clear_cloud_filters(struct iavf_adapter *adapter) +{ + struct iavf_cloud_filter *cf, *cftmp;
/* remove all cloud filters */ spin_lock_bh(&adapter->cloud_filter_list_lock); - list_for_each_entry(cf, &adapter->cloud_filter_list, list) { - cf->del = true; + list_for_each_entry_safe(cf, cftmp, &adapter->cloud_filter_list, + list) { + if (cf->add) { + list_del(&cf->list); + kfree(cf); + adapter->num_cloud_filters--; + } else { + cf->del = true; + } } spin_unlock_bh(&adapter->cloud_filter_list_lock); +} + +/** + * iavf_clear_fdir_filters - Remove fdir filters not sent to PF yet and mark + * the others to be removed. + * @adapter: board private structure + **/ +static void iavf_clear_fdir_filters(struct iavf_adapter *adapter) +{ + struct iavf_fdir_fltr *fdir, *fdirtmp;
/* remove all Flow Director filters */ spin_lock_bh(&adapter->fdir_fltr_lock); - list_for_each_entry(fdir, &adapter->fdir_list_head, list) { - fdir->state = IAVF_FDIR_FLTR_DEL_REQUEST; + list_for_each_entry_safe(fdir, fdirtmp, &adapter->fdir_list_head, + list) { + if (fdir->state == IAVF_FDIR_FLTR_ADD_REQUEST) { + list_del(&fdir->list); + kfree(fdir); + adapter->fdir_active_fltr--; + } else { + fdir->state = IAVF_FDIR_FLTR_DEL_REQUEST; + } } spin_unlock_bh(&adapter->fdir_fltr_lock); +} + +/** + * iavf_clear_adv_rss_conf - Remove adv rss conf not sent to PF yet and mark + * the others to be removed. + * @adapter: board private structure + **/ +static void iavf_clear_adv_rss_conf(struct iavf_adapter *adapter) +{ + struct iavf_adv_rss *rss, *rsstmp;
/* remove all advance RSS configuration */ spin_lock_bh(&adapter->adv_rss_lock); - list_for_each_entry(rss, &adapter->adv_rss_list_head, list) - rss->state = IAVF_ADV_RSS_DEL_REQUEST; + list_for_each_entry_safe(rss, rsstmp, &adapter->adv_rss_list_head, + list) { + if (rss->state == IAVF_ADV_RSS_ADD_REQUEST) { + list_del(&rss->list); + kfree(rss); + } else { + rss->state = IAVF_ADV_RSS_DEL_REQUEST; + } + } spin_unlock_bh(&adapter->adv_rss_lock); +} + +/** + * iavf_down - Shutdown the connection processing + * @adapter: board private structure + * + * Expects to be called while holding the __IAVF_IN_CRITICAL_TASK bit lock. + **/ +void iavf_down(struct iavf_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + + if (adapter->state <= __IAVF_DOWN_PENDING) + return; + + netif_carrier_off(netdev); + netif_tx_disable(netdev); + adapter->link_up = false; + iavf_napi_disable_all(adapter); + iavf_irq_disable(adapter); + + iavf_clear_mac_vlan_filters(adapter); + iavf_clear_cloud_filters(adapter); + iavf_clear_fdir_filters(adapter); + iavf_clear_adv_rss_conf(adapter);
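All four new helpers share one shape: walk the list with list_for_each_entry_safe(), which tolerates deleting the current node, free entries the PF has never been told about, and merely mark the rest so the normal delete request can tear them down. A generic sketch of the pattern with a hypothetical filter type:

#include <linux/list.h>
#include <linux/slab.h>

struct my_filter {
	struct list_head list;
	bool add;	/* queued for the PF, but never sent */
	bool remove;	/* known to the PF; needs a delete request */
};

static void my_clear_filters(struct list_head *head)
{
	struct my_filter *f, *tmp;

	list_for_each_entry_safe(f, tmp, head, list) {
		if (f->add) {
			/* The PF never saw this one: just drop it locally */
			list_del(&f->list);
			kfree(f);
		} else {
			/* PF state exists: mark it for a delete request */
			f->remove = true;
		}
	}
}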
if (!(adapter->flags & IAVF_FLAG_PF_COMMS_FAILED)) { /* cancel any current operation */ @@@ -1410,16 -1335,11 +1407,16 @@@ * here for this to complete. The watchdog is still running * and it will take care of this. */ - adapter->aq_required = IAVF_FLAG_AQ_DEL_MAC_FILTER; - adapter->aq_required |= IAVF_FLAG_AQ_DEL_VLAN_FILTER; - adapter->aq_required |= IAVF_FLAG_AQ_DEL_CLOUD_FILTER; - adapter->aq_required |= IAVF_FLAG_AQ_DEL_FDIR_FILTER; - adapter->aq_required |= IAVF_FLAG_AQ_DEL_ADV_RSS_CFG; + if (!list_empty(&adapter->mac_filter_list)) + adapter->aq_required |= IAVF_FLAG_AQ_DEL_MAC_FILTER; + if (!list_empty(&adapter->vlan_filter_list)) + adapter->aq_required |= IAVF_FLAG_AQ_DEL_VLAN_FILTER; + if (!list_empty(&adapter->cloud_filter_list)) + adapter->aq_required |= IAVF_FLAG_AQ_DEL_CLOUD_FILTER; + if (!list_empty(&adapter->fdir_list_head)) + adapter->aq_required |= IAVF_FLAG_AQ_DEL_FDIR_FILTER; + if (!list_empty(&adapter->adv_rss_list_head)) + adapter->aq_required |= IAVF_FLAG_AQ_DEL_ADV_RSS_CFG; adapter->aq_required |= IAVF_FLAG_AQ_DISABLE_QUEUES; }
@@@ -4258,7 -4178,6 +4255,7 @@@ err_unlock static int iavf_close(struct net_device *netdev) { struct iavf_adapter *adapter = netdev_priv(netdev); + u64 aq_to_restore; int status;
mutex_lock(&adapter->crit_lock); @@@ -4271,29 -4190,6 +4268,29 @@@ set_bit(__IAVF_VSI_DOWN, adapter->vsi.state); if (CLIENT_ENABLED(adapter)) adapter->flags |= IAVF_FLAG_CLIENT_NEEDS_CLOSE; + /* We cannot send IAVF_FLAG_AQ_GET_OFFLOAD_VLAN_V2_CAPS before + * IAVF_FLAG_AQ_DISABLE_QUEUES because in that case there is an rtnl + * deadlock with adminq_task() until iavf_close times out. We must send + * IAVF_FLAG_AQ_GET_CONFIG before IAVF_FLAG_AQ_DISABLE_QUEUES to make + * disabling queues possible for the VF. Give only the necessary flags + * to iavf_down and save the others to set right before iavf_close() + * returns, when IAVF_FLAG_AQ_DISABLE_QUEUES will already have been + * sent and iavf will be in the DOWN state. + */ + aq_to_restore = adapter->aq_required; + adapter->aq_required &= IAVF_FLAG_AQ_GET_CONFIG; + + /* Remove flags which we do not want to send after close, or which we + * want to send before disabling queues. + */ + aq_to_restore &= ~(IAVF_FLAG_AQ_GET_CONFIG | + IAVF_FLAG_AQ_ENABLE_QUEUES | + IAVF_FLAG_AQ_CONFIGURE_QUEUES | + IAVF_FLAG_AQ_ADD_VLAN_FILTER | + IAVF_FLAG_AQ_ADD_MAC_FILTER | + IAVF_FLAG_AQ_ADD_CLOUD_FILTER | + IAVF_FLAG_AQ_ADD_FDIR_FILTER | + IAVF_FLAG_AQ_ADD_ADV_RSS_CFG);
iavf_down(adapter); iavf_change_state(adapter, __IAVF_DOWN_PENDING); @@@ -4317,10 -4213,6 +4314,10 @@@ msecs_to_jiffies(500)); if (!status) netdev_warn(netdev, "Device resources not yet released\n"); + + mutex_lock(&adapter->crit_lock); + adapter->aq_required |= aq_to_restore; + mutex_unlock(&adapter->crit_lock); return 0; }
diff --combined drivers/net/ethernet/intel/ice/ice_lib.c index d126f4cb3ba8,58d483e2f539..8a80da8e910e --- a/drivers/net/ethernet/intel/ice/ice_lib.c +++ b/drivers/net/ethernet/intel/ice/ice_lib.c @@@ -914,7 -914,7 +914,7 @@@ static void ice_set_dflt_vsi_ctx(struc */ static int ice_vsi_setup_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt) { - u16 offset = 0, qmap = 0, tx_count = 0, pow = 0; + u16 offset = 0, qmap = 0, tx_count = 0, rx_count = 0, pow = 0; u16 num_txq_per_tc, num_rxq_per_tc; u16 qcount_tx = vsi->alloc_txq; u16 qcount_rx = vsi->alloc_rxq; @@@ -981,23 -981,25 +981,25 @@@ * at least 1) */ if (offset) - vsi->num_rxq = offset; + rx_count = offset; else - vsi->num_rxq = num_rxq_per_tc; + rx_count = num_rxq_per_tc;
- if (vsi->num_rxq > vsi->alloc_rxq) { + if (rx_count > vsi->alloc_rxq) { dev_err(ice_pf_to_dev(vsi->back), "Trying to use more Rx queues (%u), than were allocated (%u)!\n", - vsi->num_rxq, vsi->alloc_rxq); + rx_count, vsi->alloc_rxq); return -EINVAL; }
- vsi->num_txq = tx_count; - if (vsi->num_txq > vsi->alloc_txq) { + if (tx_count > vsi->alloc_txq) { dev_err(ice_pf_to_dev(vsi->back), "Trying to use more Tx queues (%u), than were allocated (%u)!\n", - vsi->num_txq, vsi->alloc_txq); + tx_count, vsi->alloc_txq); return -EINVAL; }
+ vsi->num_txq = tx_count; + vsi->num_rxq = rx_count; + if (vsi->type == ICE_VSI_VF && vsi->num_txq != vsi->num_rxq) { dev_dbg(ice_pf_to_dev(vsi->back), "VF VSI should have same number of Tx and Rx queues. Hence making them equal\n"); /* since there is a chance that num_rxq could have been changed @@@ -1522,7 -1524,6 +1524,7 @@@ static int ice_vsi_alloc_rings(struct i ring->netdev = vsi->netdev; ring->dev = dev; ring->count = vsi->num_rx_desc; + ring->cached_phctime = pf->ptp.cached_phc_time; WRITE_ONCE(vsi->rx_rings[i], ring); }
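The queue-map fix above is a validate-then-commit rewrite: the candidate counts are computed into locals (tx_count/rx_count), checked against the allocation, and written into vsi->num_txq/vsi->num_rxq only once both checks pass, so a rejected map can no longer leave the VSI half-updated. In miniature, with a hypothetical config struct:

#include <linux/errno.h>
#include <linux/types.h>

struct my_qcfg {
	u16 num_txq, num_rxq;	/* committed state */
	u16 alloc_txq, alloc_rxq;
};

static int my_setup_q_map(struct my_qcfg *c, u16 want_tx, u16 want_rx)
{
	/* Validate the candidates first; touch nothing on failure */
	if (want_tx > c->alloc_txq || want_rx > c->alloc_rxq)
		return -EINVAL;

	/* Commit only after every check has passed */
	c->num_txq = want_tx;
	c->num_rxq = want_rx;
	return 0;
}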
@@@ -1562,22 -1563,6 +1564,22 @@@ void ice_vsi_manage_rss_lut(struct ice_ kfree(lut); }
+/** + * ice_vsi_cfg_crc_strip - Configure CRC stripping for a VSI + * @vsi: VSI to be configured + * @disable: set to true to have FCS / CRC in the frame data + */ +void ice_vsi_cfg_crc_strip(struct ice_vsi *vsi, bool disable) +{ + int i; + + ice_for_each_rxq(vsi, i) + if (disable) + vsi->rx_rings[i]->flags |= ICE_RX_FLAGS_CRC_STRIP_DIS; + else + vsi->rx_rings[i]->flags &= ~ICE_RX_FLAGS_CRC_STRIP_DIS; +} + /** * ice_vsi_cfg_rss_lut_key - Configure RSS params for a VSI * @vsi: VSI to be configured @@@ -3293,12 -3278,6 +3295,12 @@@ int ice_vsi_rebuild(struct ice_vsi *vsi */ if (test_bit(ICE_FLAG_RSS_ENA, pf->flags)) ice_vsi_cfg_rss_lut_key(vsi); + + /* disable or enable CRC stripping */ + if (vsi->netdev) + ice_vsi_cfg_crc_strip(vsi, !!(vsi->netdev->features & + NETIF_F_RXFCS)); + break; case ICE_VSI_VF: ret = ice_vsi_alloc_q_vectors(vsi); @@@ -3513,6 -3492,7 +3515,7 @@@ ice_vsi_setup_q_map_mqprio(struct ice_v u16 pow, offset = 0, qcount_tx = 0, qcount_rx = 0, qmap; u16 tc0_offset = vsi->mqprio_qopt.qopt.offset[0]; int tc0_qcount = vsi->mqprio_qopt.qopt.count[0]; + u16 new_txq, new_rxq; u8 netdev_tc = 0; int i;
@@@ -3553,21 -3533,24 +3556,24 @@@ } }
- /* Set actual Tx/Rx queue pairs */ - vsi->num_txq = offset + qcount_tx; - if (vsi->num_txq > vsi->alloc_txq) { + new_txq = offset + qcount_tx; + if (new_txq > vsi->alloc_txq) { dev_err(ice_pf_to_dev(vsi->back), "Trying to use more Tx queues (%u), than were allocated (%u)!\n", - vsi->num_txq, vsi->alloc_txq); + new_txq, vsi->alloc_txq); return -EINVAL; }
- vsi->num_rxq = offset + qcount_rx; - if (vsi->num_rxq > vsi->alloc_rxq) { + new_rxq = offset + qcount_rx; + if (new_rxq > vsi->alloc_rxq) { dev_err(ice_pf_to_dev(vsi->back), "Trying to use more Rx queues (%u), than were allocated (%u)!\n", - vsi->num_rxq, vsi->alloc_rxq); + new_rxq, vsi->alloc_rxq); return -EINVAL; }
+ /* Set actual Tx/Rx queue pairs */ + vsi->num_txq = new_txq; + vsi->num_rxq = new_rxq; + /* Setup queue TC[0].qmap for given VSI context */ ctxt->info.tc_mapping[0] = cpu_to_le16(qmap); ctxt->info.q_mapping[0] = cpu_to_le16(vsi->rxq_map[0]); @@@ -3599,6 -3582,7 +3605,7 @@@ int ice_vsi_cfg_tc(struct ice_vsi *vsi { u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 }; struct ice_pf *pf = vsi->back; + struct ice_tc_cfg old_tc_cfg; struct ice_vsi_ctx *ctx; struct device *dev; int i, ret = 0; @@@ -3623,6 -3607,7 +3630,7 @@@ max_txqs[i] = vsi->num_txq; }
+ memcpy(&old_tc_cfg, &vsi->tc_cfg, sizeof(old_tc_cfg)); vsi->tc_cfg.ena_tc = ena_tc; vsi->tc_cfg.numtc = num_tc;
@@@ -3639,8 -3624,10 +3647,10 @@@ else ret = ice_vsi_setup_q_map(vsi, ctx);
- if (ret) + if (ret) { + memcpy(&vsi->tc_cfg, &old_tc_cfg, sizeof(vsi->tc_cfg)); goto out; + }
/* must to indicate which section of VSI context are being modified */ ctx->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_RXQ_MAP_VALID); diff --combined drivers/net/ethernet/intel/ice/ice_main.c index aa26672b7205,e109cb93886b..0ccc8a750374 --- a/drivers/net/ethernet/intel/ice/ice_main.c +++ b/drivers/net/ethernet/intel/ice/ice_main.c @@@ -2399,8 -2399,6 +2399,6 @@@ int ice_schedule_reset(struct ice_pf *p return -EBUSY; }
- ice_unplug_aux_dev(pf); - switch (reset) { case ICE_RESET_PFR: set_bit(ICE_PFR_REQ, pf->state); @@@ -3095,8 -3093,7 +3093,8 @@@ static irqreturn_t ice_misc_intr(int __
if (oicr & PFINT_OICR_TSYN_TX_M) { ena_mask &= ~PFINT_OICR_TSYN_TX_M; - ice_ptp_process_ts(pf); + if (!hw->reset_ongoing) + ret = IRQ_WAKE_THREAD; }
if (oicr & PFINT_OICR_TSYN_EVNT_M) { @@@ -3131,8 -3128,7 +3129,8 @@@ ice_service_task_schedule(pf); } } - ret = IRQ_HANDLED; + if (!ret) + ret = IRQ_HANDLED;
ice_service_task_schedule(pf); ice_irq_dynamic_ena(hw, NULL, NULL); @@@ -3140,24 -3136,6 +3138,24 @@@ return ret; }
+/** + * ice_misc_intr_thread_fn - misc interrupt thread function + * @irq: interrupt number + * @data: pointer to the PF private structure + */ +static irqreturn_t ice_misc_intr_thread_fn(int __always_unused irq, void *data) +{ + irqreturn_t ret = IRQ_HANDLED; + struct ice_pf *pf = data; + bool irq_handled; + + irq_handled = ice_ptp_process_ts(pf); + if (!irq_handled) + ret = IRQ_WAKE_THREAD; + + return ret; +} + /** + * ice_dis_ctrlq_interrupts - disable control queue interrupts + * @hw: pointer to HW structure @@@ -3270,12 -3248,10 +3268,12 @@@ static int ice_req_irq_msix_misc(struc pf->num_avail_sw_msix -= 1; pf->oicr_idx = (u16)oicr_idx;
- err = devm_request_irq(dev, pf->msix_entries[pf->oicr_idx].vector, - ice_misc_intr, 0, pf->int_name, pf); + err = devm_request_threaded_irq(dev, + pf->msix_entries[pf->oicr_idx].vector, + ice_misc_intr, ice_misc_intr_thread_fn, + 0, pf->int_name, pf); if (err) { - dev_err(dev, "devm_request_irq for %s failed: %d\n", + dev_err(dev, "devm_request_threaded_irq for %s failed: %d\n", pf->int_name, err); ice_free_res(pf->irq_tracker, 1, ICE_RES_MISC_VEC_ID); pf->num_avail_sw_msix += 1; @@@ -3415,11 -3391,6 +3413,11 @@@ static void ice_set_netdev_features(str if (is_dvm_ena) netdev->hw_features |= NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_STAG_TX; + + /* Leave CRC / FCS stripping enabled by default, but allow the value to + * be changed at runtime + */ + netdev->hw_features |= NETIF_F_RXFCS; }
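The switch to devm_request_threaded_irq() above gives the miscellaneous vector two halves: the hard handler acknowledges causes quickly and returns IRQ_WAKE_THREAD when timestamp work is pending, and the thread function does the sleepable processing. The usual shape of that split, sketched with hypothetical helpers:

#include <linux/interrupt.h>

struct my_priv { int unused; };

/* Hypothetical helpers standing in for the driver's real work */
static bool my_event_pending(struct my_priv *p) { return true; }
static void my_process_events(struct my_priv *p) { }

static irqreturn_t my_hardirq(int irq, void *data)
{
	/* Fast, non-sleeping check in hard-irq context */
	if (my_event_pending(data))
		return IRQ_WAKE_THREAD;	/* hand off to the thread */

	return IRQ_HANDLED;
}

static irqreturn_t my_thread_fn(int irq, void *data)
{
	my_process_events(data);	/* process context: may sleep */
	return IRQ_HANDLED;
}

/* Registered with:
 *	devm_request_threaded_irq(dev, irq, my_hardirq, my_thread_fn,
 *				  0, "my-misc", priv);
 */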
/** @@@ -3951,135 -3922,88 +3949,135 @@@ static int ice_init_pf(struct ice_pf *p return 0; }
+/** + * ice_reduce_msix_usage - Reduce usage of MSI-X vectors + * @pf: board private structure + * @v_remain: number of remaining MSI-X vectors to be distributed + * + * Reduce the usage of MSI-X vectors when entire request cannot be fulfilled. + * pf->num_lan_msix and pf->num_rdma_msix values are set based on number of + * remaining vectors. + */ +static void ice_reduce_msix_usage(struct ice_pf *pf, int v_remain) +{ + int v_rdma; + + if (!ice_is_rdma_ena(pf)) { + pf->num_lan_msix = v_remain; + return; + } + + /* RDMA needs at least 1 interrupt in addition to AEQ MSIX */ + v_rdma = ICE_RDMA_NUM_AEQ_MSIX + 1; + + if (v_remain < ICE_MIN_LAN_TXRX_MSIX + ICE_MIN_RDMA_MSIX) { + dev_warn(ice_pf_to_dev(pf), "Not enough MSI-X vectors to support RDMA.\n"); + clear_bit(ICE_FLAG_RDMA_ENA, pf->flags); + + pf->num_rdma_msix = 0; + pf->num_lan_msix = ICE_MIN_LAN_TXRX_MSIX; + } else if ((v_remain < ICE_MIN_LAN_TXRX_MSIX + v_rdma) || + (v_remain - v_rdma < v_rdma)) { + /* Support minimum RDMA and give remaining vectors to LAN MSIX */ + pf->num_rdma_msix = ICE_MIN_RDMA_MSIX; + pf->num_lan_msix = v_remain - ICE_MIN_RDMA_MSIX; + } else { + /* Split remaining MSIX with RDMA after accounting for AEQ MSIX + */ + pf->num_rdma_msix = (v_remain - ICE_RDMA_NUM_AEQ_MSIX) / 2 + + ICE_RDMA_NUM_AEQ_MSIX; + pf->num_lan_msix = v_remain - pf->num_rdma_msix; + } +} + /** * ice_ena_msix_range - Request a range of MSIX vectors from the OS * @pf: board private structure * - * compute the number of MSIX vectors required (v_budget) and request from - * the OS. Return the number of vectors reserved or negative on failure + * Compute the number of MSIX vectors wanted and request from the OS. Adjust + * device usage if there are not enough vectors. Return the number of vectors + * reserved or negative on failure. */ static int ice_ena_msix_range(struct ice_pf *pf) { - int num_cpus, v_left, v_actual, v_other, v_budget = 0; + int num_cpus, hw_num_msix, v_other, v_wanted, v_actual; struct device *dev = ice_pf_to_dev(pf); - int needed, err, i; + int err, i;
- v_left = pf->hw.func_caps.common_cap.num_msix_vectors; + hw_num_msix = pf->hw.func_caps.common_cap.num_msix_vectors; num_cpus = num_online_cpus();
- /* reserve for LAN miscellaneous handler */ - needed = ICE_MIN_LAN_OICR_MSIX; - if (v_left < needed) - goto no_hw_vecs_left_err; - v_budget += needed; - v_left -= needed; - - /* reserve for flow director */ - if (test_bit(ICE_FLAG_FD_ENA, pf->flags)) { - needed = ICE_FDIR_MSIX; - if (v_left < needed) - goto no_hw_vecs_left_err; - v_budget += needed; - v_left -= needed; - } - - /* reserve for switchdev */ - needed = ICE_ESWITCH_MSIX; - if (v_left < needed) - goto no_hw_vecs_left_err; - v_budget += needed; - v_left -= needed; - - /* total used for non-traffic vectors */ - v_other = v_budget; - - /* reserve vectors for LAN traffic */ - needed = num_cpus; - if (v_left < needed) - goto no_hw_vecs_left_err; - pf->num_lan_msix = needed; - v_budget += needed; - v_left -= needed; - - /* reserve vectors for RDMA auxiliary driver */ + /* LAN miscellaneous handler */ + v_other = ICE_MIN_LAN_OICR_MSIX; + + /* Flow Director */ + if (test_bit(ICE_FLAG_FD_ENA, pf->flags)) + v_other += ICE_FDIR_MSIX; + + /* switchdev */ + v_other += ICE_ESWITCH_MSIX; + + v_wanted = v_other; + + /* LAN traffic */ + pf->num_lan_msix = num_cpus; + v_wanted += pf->num_lan_msix; + + /* RDMA auxiliary driver */ if (ice_is_rdma_ena(pf)) { - needed = num_cpus + ICE_RDMA_NUM_AEQ_MSIX; - if (v_left < needed) - goto no_hw_vecs_left_err; - pf->num_rdma_msix = needed; - v_budget += needed; - v_left -= needed; + pf->num_rdma_msix = num_cpus + ICE_RDMA_NUM_AEQ_MSIX; + v_wanted += pf->num_rdma_msix; + } + + if (v_wanted > hw_num_msix) { + int v_remain; + + dev_warn(dev, "not enough device MSI-X vectors. wanted = %d, available = %d\n", + v_wanted, hw_num_msix); + + if (hw_num_msix < ICE_MIN_MSIX) { + err = -ERANGE; + goto exit_err; + } + + v_remain = hw_num_msix - v_other; + if (v_remain < ICE_MIN_LAN_TXRX_MSIX) { + v_other = ICE_MIN_MSIX - ICE_MIN_LAN_TXRX_MSIX; + v_remain = ICE_MIN_LAN_TXRX_MSIX; + } + + ice_reduce_msix_usage(pf, v_remain); + v_wanted = pf->num_lan_msix + pf->num_rdma_msix + v_other; + + dev_notice(dev, "Reducing request to %d MSI-X vectors for LAN traffic.\n", + pf->num_lan_msix); + if (ice_is_rdma_ena(pf)) + dev_notice(dev, "Reducing request to %d MSI-X vectors for RDMA.\n", + pf->num_rdma_msix); }
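With the rework above the driver first totals everything it wants (v_other plus LAN plus RDMA) and only calls ice_reduce_msix_usage() when the device cannot supply it. As a worked example, assuming ICE_RDMA_NUM_AEQ_MSIX is 4: if 21 vectors remain after the non-traffic ones, the final branch gives RDMA (21 - 4) / 2 + 4 = 12 and LAN the remaining 9. A standalone sketch of the split, where the MY_* constants are illustrative assumptions rather than the driver's real values:

#include <linux/types.h>

#define MY_RDMA_AEQ_MSIX	4	/* assumed, for illustration only */
#define MY_MIN_RDMA_MSIX	2	/* assumed minimum RDMA vectors */
#define MY_MIN_LAN_MSIX		2	/* assumed minimum LAN vectors */

/* Mirrors the shape of the ice_reduce_msix_usage() decision tree */
static void my_reduce_msix(int v_remain, bool rdma_ena,
			   u16 *lan_msix, u16 *rdma_msix)
{
	int v_rdma = MY_RDMA_AEQ_MSIX + 1;	/* AEQ plus at least one more */

	if (!rdma_ena) {
		*rdma_msix = 0;
		*lan_msix = v_remain;
	} else if (v_remain < MY_MIN_LAN_MSIX + MY_MIN_RDMA_MSIX) {
		*rdma_msix = 0;			/* give up on RDMA entirely */
		*lan_msix = MY_MIN_LAN_MSIX;
	} else if (v_remain < MY_MIN_LAN_MSIX + v_rdma ||
		   v_remain - v_rdma < v_rdma) {
		*rdma_msix = MY_MIN_RDMA_MSIX;	/* RDMA at its minimum */
		*lan_msix = v_remain - MY_MIN_RDMA_MSIX;
	} else {
		*rdma_msix = (v_remain - MY_RDMA_AEQ_MSIX) / 2 +
			     MY_RDMA_AEQ_MSIX;	/* e.g. 21 -> 12 */
		*lan_msix = v_remain - *rdma_msix;	/* e.g. 21 -> 9 */
	}
}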
- pf->msix_entries = devm_kcalloc(dev, v_budget, + pf->msix_entries = devm_kcalloc(dev, v_wanted, sizeof(*pf->msix_entries), GFP_KERNEL); if (!pf->msix_entries) { err = -ENOMEM; goto exit_err; }
- for (i = 0; i < v_budget; i++) + for (i = 0; i < v_wanted; i++) pf->msix_entries[i].entry = i;
/* actually reserve the vectors */ v_actual = pci_enable_msix_range(pf->pdev, pf->msix_entries, - ICE_MIN_MSIX, v_budget); + ICE_MIN_MSIX, v_wanted); if (v_actual < 0) { dev_err(dev, "unable to reserve MSI-X vectors\n"); err = v_actual; goto msix_err; }
- if (v_actual < v_budget) { + if (v_actual < v_wanted) { dev_warn(dev, "not enough OS MSI-X vectors. requested = %d, obtained = %d\n", - v_budget, v_actual); + v_wanted, v_actual);
if (v_actual < ICE_MIN_MSIX) { /* error if we can't get minimum vectors */ @@@ -4088,11 -4012,38 +4086,11 @@@ goto msix_err; } else { int v_remain = v_actual - v_other; - int v_rdma = 0, v_min_rdma = 0;
- if (ice_is_rdma_ena(pf)) { - /* Need at least 1 interrupt in addition to - * AEQ MSIX - */ - v_rdma = ICE_RDMA_NUM_AEQ_MSIX + 1; - v_min_rdma = ICE_MIN_RDMA_MSIX; - } + if (v_remain < ICE_MIN_LAN_TXRX_MSIX) + v_remain = ICE_MIN_LAN_TXRX_MSIX;
- if (v_actual == ICE_MIN_MSIX || - v_remain < ICE_MIN_LAN_TXRX_MSIX + v_min_rdma) { - dev_warn(dev, "Not enough MSI-X vectors to support RDMA.\n"); - clear_bit(ICE_FLAG_RDMA_ENA, pf->flags); - - pf->num_rdma_msix = 0; - pf->num_lan_msix = ICE_MIN_LAN_TXRX_MSIX; - } else if ((v_remain < ICE_MIN_LAN_TXRX_MSIX + v_rdma) || - (v_remain - v_rdma < v_rdma)) { - /* Support minimum RDMA and give remaining - * vectors to LAN MSIX - */ - pf->num_rdma_msix = v_min_rdma; - pf->num_lan_msix = v_remain - v_min_rdma; - } else { - /* Split remaining MSIX with RDMA after - * accounting for AEQ MSIX - */ - pf->num_rdma_msix = (v_remain - ICE_RDMA_NUM_AEQ_MSIX) / 2 + - ICE_RDMA_NUM_AEQ_MSIX; - pf->num_lan_msix = v_remain - pf->num_rdma_msix; - } + ice_reduce_msix_usage(pf, v_remain);
dev_notice(dev, "Enabled %d MSI-X vectors for LAN traffic.\n", pf->num_lan_msix); @@@ -4107,7 -4058,12 +4105,7 @@@
msix_err: devm_kfree(dev, pf->msix_entries); - goto exit_err;
-no_hw_vecs_left_err: - dev_err(dev, "not enough device MSI-X vectors. requested = %d, available = %d\n", - needed, v_left); - err = -ERANGE; exit_err: pf->num_rdma_msix = 0; pf->num_lan_msix = 0; @@@ -4726,6 -4682,8 +4724,6 @@@ ice_probe(struct pci_dev *pdev, const s ice_set_safe_mode_caps(hw); }
- hw->ucast_shared = true; - err = ice_init_pf(pf); if (err) { dev_err(dev, "ice_init_pf failed: %d\n", err); @@@ -5784,9 -5742,6 +5782,9 @@@ ice_fdb_del(struct ndmsg *ndm, __always NETIF_F_HW_VLAN_STAG_RX | \ NETIF_F_HW_VLAN_STAG_TX)
+#define NETIF_VLAN_STRIPPING_FEATURES (NETIF_F_HW_VLAN_CTAG_RX | \ + NETIF_F_HW_VLAN_STAG_RX) + #define NETIF_VLAN_FILTERING_FEATURES (NETIF_F_HW_VLAN_CTAG_FILTER | \ NETIF_F_HW_VLAN_STAG_FILTER)
@@@ -5873,14 -5828,6 +5871,14 @@@ ice_fix_features(struct net_device *net NETIF_F_HW_VLAN_STAG_TX); }
+ if (!(netdev->features & NETIF_F_RXFCS) && + (features & NETIF_F_RXFCS) && + (features & NETIF_VLAN_STRIPPING_FEATURES) && + !ice_vsi_has_non_zero_vlans(np->vsi)) { + netdev_warn(netdev, "Disabling VLAN stripping as FCS/CRC stripping is also disabled and there is no VLAN configured\n"); + features &= ~NETIF_VLAN_STRIPPING_FEATURES; + } + return features; }
@@@ -5974,13 -5921,6 +5972,13 @@@ ice_set_vlan_features(struct net_devic current_vlan_features = netdev->features & NETIF_VLAN_OFFLOAD_FEATURES; requested_vlan_features = features & NETIF_VLAN_OFFLOAD_FEATURES; if (current_vlan_features ^ requested_vlan_features) { + if ((features & NETIF_F_RXFCS) && + (features & NETIF_VLAN_STRIPPING_FEATURES)) { + dev_err(ice_pf_to_dev(vsi->back), + "To enable VLAN stripping, you must first enable FCS/CRC stripping\n"); + return -EIO; + } + err = ice_set_vlan_offload_features(vsi, features); if (err) return err; @@@ -6062,23 -6002,6 +6060,23 @@@ ice_set_features(struct net_device *net if (ret) return ret;
+ /* Turn on reception of the FCS (aka CRC); after setting this + * flag the packet data will have the 4-byte CRC appended + */ + if (changed & NETIF_F_RXFCS) { + if ((features & NETIF_F_RXFCS) && + (features & NETIF_VLAN_STRIPPING_FEATURES)) { + dev_err(ice_pf_to_dev(vsi->back), + "To disable FCS/CRC stripping, you must first disable VLAN stripping\n"); + return -EIO; + } + + ice_vsi_cfg_crc_strip(vsi, !!(features & NETIF_F_RXFCS)); + ret = ice_down_up(vsi); + if (ret) + return ret; + } + if (changed & NETIF_F_NTUPLE) { bool ena = !!(features & NETIF_F_NTUPLE);
@@@ -6726,7 -6649,7 +6724,7 @@@ static void ice_napi_disable_all(struc */ int ice_down(struct ice_vsi *vsi) { - int i, tx_err, rx_err, link_err = 0, vlan_err = 0; + int i, tx_err, rx_err, vlan_err = 0;
WARN_ON(!test_bit(ICE_VSI_DOWN, vsi->state));
@@@ -6760,20 -6683,13 +6758,13 @@@
ice_napi_disable_all(vsi);
- if (test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, vsi->back->flags)) { - link_err = ice_force_phys_link_state(vsi, false); - if (link_err) - netdev_err(vsi->netdev, "Failed to set physical link down, VSI %d error %d\n", - vsi->vsi_num, link_err); - } - ice_for_each_txq(vsi, i) ice_clean_tx_ring(vsi->tx_rings[i]);
ice_for_each_rxq(vsi, i) ice_clean_rx_ring(vsi->rx_rings[i]);
- if (tx_err || rx_err || link_err || vlan_err) { + if (tx_err || rx_err || vlan_err) { netdev_err(vsi->netdev, "Failed to close VSI 0x%04X on switch 0x%04X\n", vsi->vsi_num, vsi->vsw->sw_id); return -EIO; @@@ -6782,31 -6698,6 +6773,31 @@@ return 0; }
+/** + * ice_down_up - shutdown the VSI connection and bring it up + * @vsi: the VSI to be reconnected + */ +int ice_down_up(struct ice_vsi *vsi) +{ + int ret; + + /* if DOWN already set, nothing to do */ + if (test_and_set_bit(ICE_VSI_DOWN, vsi->state)) + return 0; + + ret = ice_down(vsi); + if (ret) + return ret; + + ret = ice_up(vsi); + if (ret) { + netdev_err(vsi->netdev, "reallocating resources failed during netdev features change, may need to reload driver\n"); + return ret; + } + + return 0; +} + /** * ice_vsi_setup_tx_rings - Allocate VSI Tx queue resources * @vsi: VSI having resources allocated @@@ -6960,6 -6851,8 +6951,8 @@@ int ice_vsi_open(struct ice_vsi *vsi if (err) goto err_setup_rx;
+ ice_vsi_cfg_netdev_tc(vsi, vsi->tc_cfg.ena_tc); + if (vsi->type == ICE_VSI_PF) { /* Notify the stack of the actual queue counts. */ err = netif_set_real_num_tx_queues(vsi->netdev, vsi->num_txq); @@@ -8992,6 -8885,16 +8985,16 @@@ int ice_stop(struct net_device *netdev return -EBUSY; }
+ if (test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, vsi->back->flags)) { + int link_err = ice_force_phys_link_state(vsi, false); + + if (link_err) { + netdev_err(vsi->netdev, "Failed to set physical link down, VSI %d error %d\n", + vsi->vsi_num, link_err); + return -EIO; + } + } + ice_vsi_close(vsi);
return 0; diff --combined drivers/net/ethernet/intel/ice/ice_txrx.c index 42b42f4b21ef,97453d1dfafe..a5a0c9706b5a --- a/drivers/net/ethernet/intel/ice/ice_txrx.c +++ b/drivers/net/ethernet/intel/ice/ice_txrx.c @@@ -610,7 -610,7 +610,7 @@@ ice_xdp_xmit(struct net_device *dev, in if (test_bit(ICE_VSI_DOWN, vsi->state)) return -ENETDOWN;
- if (!ice_is_xdp_ena_vsi(vsi) || queue_index >= vsi->num_xdp_txq) + if (!ice_is_xdp_ena_vsi(vsi)) return -ENXIO;
if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK)) @@@ -621,6 -621,9 +621,9 @@@ xdp_ring = vsi->xdp_rings[queue_index]; spin_lock(&xdp_ring->tx_lock); } else { + /* Generally, should not happen */ + if (unlikely(queue_index >= vsi->num_xdp_txq)) + return -ENXIO; xdp_ring = vsi->xdp_rings[queue_index]; }
@@@ -2255,10 -2258,8 +2258,10 @@@ ice_tstamp(struct ice_tx_ring *tx_ring
/* Grab an open timestamp slot */ idx = ice_ptp_request_ts(tx_ring->tx_tstamps, skb); - if (idx < 0) + if (idx < 0) { + tx_ring->vsi->back->ptp.tx_hwtstamp_skipped++; return; + }
off->cd_qw1 |= (u64)(ICE_TX_DESC_DTYPE_CTX | (ICE_TX_CTX_DESC_TSYN << ICE_TXD_CTX_QW1_CMD_S) | diff --combined drivers/net/ethernet/marvell/prestera/prestera_main.c index 3956d6d5df3c,a895862b4821..639d3e940a88 --- a/drivers/net/ethernet/marvell/prestera/prestera_main.c +++ b/drivers/net/ethernet/marvell/prestera/prestera_main.c @@@ -51,11 -51,6 +51,11 @@@ int prestera_port_mc_flood_set(struct p return prestera_hw_port_mc_flood_set(port, flood); }
+int prestera_port_br_locked_set(struct prestera_port *port, bool br_locked) +{ + return prestera_hw_port_br_locked_set(port, br_locked); +} + int prestera_port_pvid_set(struct prestera_port *port, u16 vid) { enum prestera_accept_frm_type frm_type; @@@ -373,6 -368,7 +373,7 @@@ static int prestera_port_sfp_bind(struc if (!sw->np) return 0;
+ of_node_get(sw->np); ports = of_find_node_by_name(sw->np, "ports");
for_each_child_of_node(ports, node) { @@@ -422,6 -418,7 +423,7 @@@ }
out: + of_node_put(node); of_node_put(ports); return err; } @@@ -802,30 -799,32 +804,30 @@@ static void prestera_port_handle_event(
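The two added lines are reference-count fixes: of_find_node_by_name() consumes a reference on its `from` argument, so the caller must take one with of_node_get() first, and breaking out of for_each_child_of_node() leaves the current child's reference held, which the new of_node_put(node) at the `out:` label releases. The pattern in isolation, with a hypothetical per-port helper:

#include <linux/of.h>

static int my_handle_port(struct device_node *port) { return 0; } /* stub */

static int my_scan_ports(struct device_node *np)
{
	struct device_node *ports, *node;
	int err = 0;

	of_node_get(np);	/* of_find_node_by_name() consumes one ref */
	ports = of_find_node_by_name(np, "ports");

	for_each_child_of_node(ports, node) {
		err = my_handle_port(node);
		if (err)
			goto out;	/* 'node' still holds a reference */
	}
	/* after a normal loop exit 'node' is NULL */
out:
	of_node_put(node);	/* of_node_put(NULL) is a no-op */
	of_node_put(ports);
	return err;
}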
caching_dw = &port->cached_hw_stats.caching_dw;
- if (port->phy_link) { - memset(&smac, 0, sizeof(smac)); - smac.valid = true; - smac.oper = pevt->data.mac.oper; - if (smac.oper) { - smac.mode = pevt->data.mac.mode; - smac.speed = pevt->data.mac.speed; - smac.duplex = pevt->data.mac.duplex; - smac.fc = pevt->data.mac.fc; - smac.fec = pevt->data.mac.fec; - phylink_mac_change(port->phy_link, true); - } else { - phylink_mac_change(port->phy_link, false); - } - prestera_port_mac_state_cache_write(port, &smac); + memset(&smac, 0, sizeof(smac)); + smac.valid = true; + smac.oper = pevt->data.mac.oper; + if (smac.oper) { + smac.mode = pevt->data.mac.mode; + smac.speed = pevt->data.mac.speed; + smac.duplex = pevt->data.mac.duplex; + smac.fc = pevt->data.mac.fc; + smac.fec = pevt->data.mac.fec; } + prestera_port_mac_state_cache_write(port, &smac);
if (port->state_mac.oper) { - if (!port->phy_link) + if (port->phy_link) + phylink_mac_change(port->phy_link, true); + else netif_carrier_on(port->dev);
if (!delayed_work_pending(caching_dw)) queue_delayed_work(prestera_wq, caching_dw, 0); - } else if (netif_running(port->dev) && - netif_carrier_ok(port->dev)) { - if (!port->phy_link) + } else { + if (port->phy_link) + phylink_mac_change(port->phy_link, false); + else if (netif_running(port->dev) && netif_carrier_ok(port->dev)) netif_carrier_off(port->dev);
if (delayed_work_pending(caching_dw)) diff --combined drivers/net/ethernet/mediatek/mtk_eth_soc.c index 516875cd698f,b344632beadd..49b7e8c1f4bd --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c @@@ -73,12 -73,6 +73,12 @@@ static const struct mtk_reg_map mtk_reg .fq_blen = 0x1b2c, }, .gdm1_cnt = 0x2400, + .gdma_to_ppe = 0x4444, + .ppe_base = 0x0c00, + .wdma_base = { + [0] = 0x2800, + [1] = 0x2c00, + }, };
static const struct mtk_reg_map mt7628_reg_map = { @@@ -132,12 -126,6 +132,12 @@@ static const struct mtk_reg_map mt7986_ .fq_blen = 0x472c, }, .gdm1_cnt = 0x1c00, + .gdma_to_ppe = 0x3333, + .ppe_base = 0x2000, + .wdma_base = { + [0] = 0x4800, + [1] = 0x4c00, + }, };
/* strings used by ethtool */ @@@ -1470,7 -1458,7 +1470,7 @@@ static void mtk_update_rx_cpu_idx(struc
static bool mtk_page_pool_enabled(struct mtk_eth *eth) { - return !eth->hwlro; + return MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2); }
static struct page_pool *mtk_create_page_pool(struct mtk_eth *eth, @@@ -1585,8 -1573,8 +1585,8 @@@ static int mtk_xdp_submit_frame(struct .last = !xdp_frame_has_frags(xdpf), }; int err, index = 0, n_desc = 1, nr_frags; - struct mtk_tx_dma *htxd, *txd, *txd_pdma; struct mtk_tx_buf *htx_buf, *tx_buf; + struct mtk_tx_dma *htxd, *txd; void *data = xdpf->data;
if (unlikely(test_bit(MTK_RESETTING, ð->state))) @@@ -1620,6 -1608,7 +1620,6 @@@
if (MTK_HAS_CAPS(soc->caps, MTK_QDMA) || (index & 0x1)) { txd = mtk_qdma_phys_to_virt(ring, txd->txd2); - txd_pdma = qdma_to_pdma(ring, txd); if (txd == ring->last_free) goto unmap;
@@@ -1640,8 -1629,7 +1640,8 @@@ htx_buf->data = xdpf;
if (!MTK_HAS_CAPS(soc->caps, MTK_QDMA)) { - txd_pdma = qdma_to_pdma(ring, txd); + struct mtk_tx_dma *txd_pdma = qdma_to_pdma(ring, txd); + if (index & 1) txd_pdma->txd2 |= TX_DMA_LS0; else @@@ -1672,15 -1660,13 +1672,15 @@@
unmap: while (htxd != txd) { - txd_pdma = qdma_to_pdma(ring, htxd); tx_buf = mtk_desc_to_tx_buf(ring, htxd, soc->txrx.txd_size); mtk_tx_unmap(eth, tx_buf, NULL, false);
htxd->txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU; - if (!MTK_HAS_CAPS(soc->caps, MTK_QDMA)) + if (!MTK_HAS_CAPS(soc->caps, MTK_QDMA)) { + struct mtk_tx_dma *txd_pdma = qdma_to_pdma(ring, htxd); + txd_pdma->txd2 = TX_DMA_DESP2_DEF; + }
htxd = mtk_qdma_phys_to_virt(ring, htxd->txd2); } @@@ -1906,14 -1892,12 +1906,14 @@@ static int mtk_poll_rx(struct napi_stru bytes += skb->len;
if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) { + reason = FIELD_GET(MTK_RXD5_PPE_CPU_REASON, trxd.rxd5); hash = trxd.rxd5 & MTK_RXD5_FOE_ENTRY; if (hash != MTK_RXD5_FOE_ENTRY) skb_set_hash(skb, jhash_1word(hash, 0), PKT_HASH_TYPE_L4); rxdcsum = &trxd.rxd3; } else { + reason = FIELD_GET(MTK_RXD4_PPE_CPU_REASON, trxd.rxd4); hash = trxd.rxd4 & MTK_RXD4_FOE_ENTRY; if (hash != MTK_RXD4_FOE_ENTRY) skb_set_hash(skb, jhash_1word(hash, 0), @@@ -1927,8 -1911,9 +1927,8 @@@ skb_checksum_none_assert(skb); skb->protocol = eth_type_trans(skb, netdev);
- reason = FIELD_GET(MTK_RXD4_PPE_CPU_REASON, trxd.rxd4); if (reason == MTK_PPE_CPU_REASON_HIT_UNBIND_RATE_REACHED) - mtk_ppe_check_skb(eth->ppe, skb, hash); + mtk_ppe_check_skb(eth->ppe[0], skb, hash);
if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX) { if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) { @@@ -2991,25 -2976,21 +2991,25 @@@ static int mtk_open(struct net_device *
/* we run 2 netdevs on the same dma ring so we only bring it up once */ if (!refcount_read(ð->dma_refcnt)) { - u32 gdm_config = MTK_GDMA_TO_PDMA; + const struct mtk_soc_data *soc = eth->soc; + u32 gdm_config; + int i;
err = mtk_start_dma(eth); if (err) return err;
- if (eth->soc->offload_version && mtk_ppe_start(eth->ppe) == 0) - gdm_config = MTK_GDMA_TO_PPE; + for (i = 0; i < ARRAY_SIZE(eth->ppe); i++) + mtk_ppe_start(eth->ppe[i]);
+ gdm_config = soc->offload_version ? soc->reg_map->gdma_to_ppe + : MTK_GDMA_TO_PDMA; mtk_gdm_config(eth, gdm_config);
napi_enable(ð->tx_napi); napi_enable(ð->rx_napi); mtk_tx_irq_enable(eth, MTK_TX_DONE_INT); - mtk_rx_irq_enable(eth, eth->soc->txrx.rx_irq_done_mask); + mtk_rx_irq_enable(eth, soc->txrx.rx_irq_done_mask); refcount_set(ð->dma_refcnt, 1); } else @@@ -3047,7 -3028,6 +3047,7 @@@ static int mtk_stop(struct net_device * { struct mtk_mac *mac = netdev_priv(dev); struct mtk_eth *eth = mac->hw; + int i;
phylink_stop(mac->phylink);
@@@ -3075,8 -3055,8 +3075,8 @@@
mtk_dma_free(eth);
- if (eth->soc->offload_version) - mtk_ppe_stop(eth->ppe); + for (i = 0; i < ARRAY_SIZE(eth->ppe); i++) + mtk_ppe_stop(eth->ppe[i]);
return 0; } @@@ -3576,8 -3556,8 +3576,8 @@@ static void mtk_get_drvinfo(struct net_ { struct mtk_mac *mac = netdev_priv(dev);
- strlcpy(info->driver, mac->hw->dev->driver->name, sizeof(info->driver)); - strlcpy(info->bus_info, dev_name(mac->hw->dev), sizeof(info->bus_info)); + strscpy(info->driver, mac->hw->dev->driver->name, sizeof(info->driver)); + strscpy(info->bus_info, dev_name(mac->hw->dev), sizeof(info->bus_info)); info->n_stats = ARRAY_SIZE(mtk_ethtool_stats); }
@@@ -3945,7 -3925,6 +3945,7 @@@ void mtk_eth_set_dma_device(struct mtk_
static int mtk_probe(struct platform_device *pdev) { + struct resource *res = NULL; struct device_node *mac_np; struct mtk_eth *eth; int err, i; @@@ -4026,31 -4005,20 +4026,31 @@@ } }
- for (i = 0;; i++) { - struct device_node *np = of_parse_phandle(pdev->dev.of_node, - "mediatek,wed", i); - static const u32 wdma_regs[] = { - MTK_WDMA0_BASE, - MTK_WDMA1_BASE - }; - void __iomem *wdma; - - if (!np || i >= ARRAY_SIZE(wdma_regs)) - break; + if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) { + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!res) + return -EINVAL; + }
- wdma = eth->base + wdma_regs[i]; - mtk_wed_add_hw(np, eth, wdma, i); + if (eth->soc->offload_version) { + for (i = 0;; i++) { + struct device_node *np; + phys_addr_t wdma_phy; + u32 wdma_base; + + if (i >= ARRAY_SIZE(eth->soc->reg_map->wdma_base)) + break; + + np = of_parse_phandle(pdev->dev.of_node, + "mediatek,wed", i); + if (!np) + break; + + wdma_base = eth->soc->reg_map->wdma_base[i]; + wdma_phy = res ? res->start + wdma_base : 0; + mtk_wed_add_hw(np, eth, eth->base + wdma_base, + wdma_phy, i); + } }
for (i = 0; i < 3; i++) { @@@ -4128,19 -4096,10 +4128,19 @@@ }
if (eth->soc->offload_version) { - eth->ppe = mtk_ppe_init(eth, eth->base + MTK_ETH_PPE_BASE, 2); - if (!eth->ppe) { - err = -ENOMEM; - goto err_free_dev; + u32 num_ppe; + + num_ppe = MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2) ? 2 : 1; + num_ppe = min_t(u32, ARRAY_SIZE(eth->ppe), num_ppe); + for (i = 0; i < num_ppe; i++) { + u32 ppe_addr = eth->soc->reg_map->ppe_base + i * 0x400; + + eth->ppe[i] = mtk_ppe_init(eth, eth->base + ppe_addr, + eth->soc->offload_version, i); + if (!eth->ppe[i]) { + err = -ENOMEM; + goto err_free_dev; + } }
err = mtk_eth_offload_init(eth); @@@ -4233,8 -4192,6 +4233,8 @@@ static const struct mtk_soc_data mt7621 .required_clks = MT7621_CLKS_BITMAP, .required_pctl = false, .offload_version = 2, + .hash_offset = 2, + .foe_entry_size = sizeof(struct mtk_foe_entry) - 16, .txrx = { .txd_size = sizeof(struct mtk_tx_dma), .rxd_size = sizeof(struct mtk_rx_dma), @@@ -4253,8 -4210,6 +4253,8 @@@ static const struct mtk_soc_data mt7622 .required_clks = MT7622_CLKS_BITMAP, .required_pctl = false, .offload_version = 2, + .hash_offset = 2, + .foe_entry_size = sizeof(struct mtk_foe_entry) - 16, .txrx = { .txd_size = sizeof(struct mtk_tx_dma), .rxd_size = sizeof(struct mtk_rx_dma), @@@ -4272,8 -4227,6 +4272,8 @@@ static const struct mtk_soc_data mt7623 .required_clks = MT7623_CLKS_BITMAP, .required_pctl = true, .offload_version = 2, + .hash_offset = 2, + .foe_entry_size = sizeof(struct mtk_foe_entry) - 16, .txrx = { .txd_size = sizeof(struct mtk_tx_dma), .rxd_size = sizeof(struct mtk_rx_dma), @@@ -4305,11 -4258,8 +4305,11 @@@ static const struct mtk_soc_data mt7986 .reg_map = &mt7986_reg_map, .ana_rgc3 = 0x128, .caps = MT7986_CAPS, + .hw_features = MTK_HW_FEATURES, .required_clks = MT7986_CLKS_BITMAP, .required_pctl = false, + .hash_offset = 4, + .foe_entry_size = sizeof(struct mtk_foe_entry), .txrx = { .txd_size = sizeof(struct mtk_tx_dma_v2), .rxd_size = sizeof(struct mtk_rx_dma_v2), diff --combined drivers/net/ethernet/mellanox/mlx5/core/main.c index 1986f1c715b5,89b2d9cea33f..b45cef89370e --- a/drivers/net/ethernet/mellanox/mlx5/core/main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c @@@ -494,6 -494,24 +494,24 @@@ static int max_uc_list_get_devlink_para return err; }
+ bool mlx5_is_roce_on(struct mlx5_core_dev *dev) + { + struct devlink *devlink = priv_to_devlink(dev); + union devlink_param_value val; + int err; + + err = devlink_param_driverinit_value_get(devlink, + DEVLINK_PARAM_GENERIC_ID_ENABLE_ROCE, + &val); + + if (!err) + return val.vbool; + + mlx5_core_dbg(dev, "Failed to get param. err = %d\n", err); + return MLX5_CAP_GEN(dev, roce); + } + EXPORT_SYMBOL(mlx5_is_roce_on); + static int handle_hca_cap_2(struct mlx5_core_dev *dev, void *set_ctx) { void *set_hca_cap; @@@ -597,7 -615,8 +615,8 @@@ static int handle_hca_cap(struct mlx5_c MLX5_CAP_GEN_MAX(dev, num_total_dynamic_vf_msix));
if (MLX5_CAP_GEN(dev, roce_rw_supported)) - MLX5_SET(cmd_hca_cap, set_hca_cap, roce, mlx5_is_roce_init_enabled(dev)); + MLX5_SET(cmd_hca_cap, set_hca_cap, roce, + mlx5_is_roce_on(dev));
max_uc_list = max_uc_list_get_devlink_param(dev); if (max_uc_list > 0) @@@ -623,7 -642,7 +642,7 @@@ */ static bool is_roce_fw_disabled(struct mlx5_core_dev *dev) { - return (MLX5_CAP_GEN(dev, roce_rw_supported) && !mlx5_is_roce_init_enabled(dev)) || + return (MLX5_CAP_GEN(dev, roce_rw_supported) && !mlx5_is_roce_on(dev)) || (!MLX5_CAP_GEN(dev, roce_rw_supported) && !MLX5_CAP_GEN(dev, roce)); }
@@@ -1488,7 -1507,6 +1507,7 @@@ static const int types[] = MLX5_CAP_IPSEC, MLX5_CAP_PORT_SELECTION, MLX5_CAP_DEV_SHAMPO, + MLX5_CAP_MACSEC, };
static void mlx5_hca_caps_free(struct mlx5_core_dev *dev) diff --combined drivers/net/ethernet/renesas/ravb_main.c index a550ffec9000,7e32b04eb0c7..c992bf1c711e --- a/drivers/net/ethernet/renesas/ravb_main.c +++ b/drivers/net/ethernet/renesas/ravb_main.c @@@ -540,13 -540,7 +540,13 @@@ static void ravb_emac_init_gbeth(struc /* E-MAC interrupt enable register */ ravb_write(ndev, ECSIPR_ICDIP, ECSIPR);
- ravb_modify(ndev, CXR31, CXR31_SEL_LINK0 | CXR31_SEL_LINK1, CXR31_SEL_LINK0); + if (priv->phy_interface == PHY_INTERFACE_MODE_MII) { + ravb_modify(ndev, CXR31, CXR31_SEL_LINK0 | CXR31_SEL_LINK1, 0); + ravb_write(ndev, (1000 << 16) | CXR35_SEL_XMII_MII, CXR35); + } else { + ravb_modify(ndev, CXR31, CXR31_SEL_LINK0 | CXR31_SEL_LINK1, + CXR31_SEL_LINK0); + } }
static void ravb_emac_init_rcar(struct net_device *ndev) @@@ -1455,6 -1449,8 +1455,8 @@@ static int ravb_phy_init(struct net_dev phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_100baseT_Half_BIT); }
+ /* Indicate that the MAC is responsible for managing PHY PM */ + phydev->mac_managed_pm = true; phy_attached_info(phydev);
return 0; @@@ -2518,7 -2514,6 +2520,7 @@@ static const struct of_device_id ravb_m { .compatible = "renesas,etheravb-rcar-gen2", .data = &ravb_gen2_hw_info }, { .compatible = "renesas,etheravb-r8a7795", .data = &ravb_gen3_hw_info }, { .compatible = "renesas,etheravb-rcar-gen3", .data = &ravb_gen3_hw_info }, + { .compatible = "renesas,etheravb-rcar-gen4", .data = &ravb_gen3_hw_info }, { .compatible = "renesas,etheravb-rzv2m", .data = &ravb_rzv2m_hw_info }, { .compatible = "renesas,rzg2l-gbeth", .data = &gbeth_hw_info }, { } diff --combined drivers/net/ethernet/sun/sunhme.c index 1921054b7f7d,88aa0d310aee..e660902cfdf7 --- a/drivers/net/ethernet/sun/sunhme.c +++ b/drivers/net/ethernet/sun/sunhme.c @@@ -2020,9 -2020,9 +2020,9 @@@ static void happy_meal_rx(struct happy_
skb_reserve(copy_skb, 2); skb_put(copy_skb, len); - dma_sync_single_for_cpu(hp->dma_dev, dma_addr, len, DMA_FROM_DEVICE); + dma_sync_single_for_cpu(hp->dma_dev, dma_addr, len + 2, DMA_FROM_DEVICE); skb_copy_from_linear_data(skb, copy_skb->data, len); - dma_sync_single_for_device(hp->dma_dev, dma_addr, len, DMA_FROM_DEVICE); + dma_sync_single_for_device(hp->dma_dev, dma_addr, len + 2, DMA_FROM_DEVICE); /* Reuse original ring buffer. */ hme_write_rxd(hp, this, (RXFLAG_OWN|((RX_BUF_ALLOC_SIZE-RX_OFFSET)<<16)), @@@ -2467,11 -2467,11 +2467,11 @@@ static void hme_get_drvinfo(struct net_ { struct happy_meal *hp = netdev_priv(dev);
- strlcpy(info->driver, "sunhme", sizeof(info->driver)); - strlcpy(info->version, "2.02", sizeof(info->version)); + strscpy(info->driver, "sunhme", sizeof(info->driver)); + strscpy(info->version, "2.02", sizeof(info->version)); if (hp->happy_flags & HFLAG_PCI) { struct pci_dev *pdev = hp->happy_dev; - strlcpy(info->bus_info, pci_name(pdev), sizeof(info->bus_info)); + strscpy(info->bus_info, pci_name(pdev), sizeof(info->bus_info)); } #ifdef CONFIG_SBUS else { diff --combined drivers/net/phy/micrel.c index f5c3c5c2fe11,38234d7e14c5..6a8c740dce65 --- a/drivers/net/phy/micrel.c +++ b/drivers/net/phy/micrel.c @@@ -92,15 -92,6 +92,15 @@@ #define KSZ9x31_LMD_VCT_DATA_HI_PULSE_MASK GENMASK(1, 0) #define KSZ9x31_LMD_VCT_DATA_MASK GENMASK(7, 0)
+#define KSZPHY_WIRE_PAIR_MASK 0x3 + +#define LAN8814_CABLE_DIAG 0x12 +#define LAN8814_CABLE_DIAG_STAT_MASK GENMASK(9, 8) +#define LAN8814_CABLE_DIAG_VCT_DATA_MASK GENMASK(7, 0) +#define LAN8814_PAIR_BIT_SHIFT 12 + +#define LAN8814_WIRE_PAIR_MASK 0xF + /* Lan8814 general Interrupt control/status reg in GPHY specific block. */ #define LAN8814_INTC 0x18 #define LAN8814_INTS 0x1B @@@ -266,8 -257,6 +266,8 @@@ static struct kszphy_hw_stat kszphy_hw_ struct kszphy_type { u32 led_mode_reg; u16 interrupt_level_mask; + u16 cable_diag_reg; + unsigned long pair_mask; bool has_broadcast_disable; bool has_nand_tree_disable; bool has_rmii_ref_clk_sel; @@@ -324,13 -313,6 +324,13 @@@ struct kszphy_priv
static const struct kszphy_type lan8814_type = { .led_mode_reg = ~LAN8814_LED_CTRL_1, + .cable_diag_reg = LAN8814_CABLE_DIAG, + .pair_mask = LAN8814_WIRE_PAIR_MASK, +}; + +static const struct kszphy_type ksz886x_type = { + .cable_diag_reg = KSZ8081_LMD, + .pair_mask = KSZPHY_WIRE_PAIR_MASK, };
static const struct kszphy_type ksz8021_type = { @@@ -1668,7 -1650,7 +1668,7 @@@ static void kszphy_get_strings(struct p int i;
for (i = 0; i < ARRAY_SIZE(kszphy_hw_stats); i++) { - strlcpy(data + i * ETH_GSTRING_LEN, + strscpy(data + i * ETH_GSTRING_LEN, kszphy_hw_stats[i].string, ETH_GSTRING_LEN); } } @@@ -1814,17 -1796,6 +1814,17 @@@ static int kszphy_probe(struct phy_devi return 0; }
+static int lan8814_cable_test_start(struct phy_device *phydev) +{ + /* If autoneg is enabled, we won't be able to test cross-pair + * shorts. In this case, the PHY will "detect" a link and + * confuse the internal state machine, so disable autoneg here. + * Set the speed to 1000 Mbit/s and full duplex. + */ + return phy_modify(phydev, MII_BMCR, BMCR_ANENABLE | BMCR_SPEED100, + BMCR_SPEED1000 | BMCR_FULLDPLX); +} + static int ksz886x_cable_test_start(struct phy_device *phydev) { if (phydev->dev_flags & MICREL_KSZ8_P1_ERRATA) @@@ -1838,9 -1809,9 +1838,9 @@@ return phy_clear_bits(phydev, MII_BMCR, BMCR_ANENABLE | BMCR_SPEED100); }
-static int ksz886x_cable_test_result_trans(u16 status) +static int ksz886x_cable_test_result_trans(u16 status, u16 mask) { - switch (FIELD_GET(KSZ8081_LMD_STAT_MASK, status)) { + switch (FIELD_GET(mask, status)) { case KSZ8081_LMD_STAT_NORMAL: return ETHTOOL_A_CABLE_RESULT_CODE_OK; case KSZ8081_LMD_STAT_SHORT: @@@ -1854,15 -1825,15 +1854,15 @@@ } }
-static bool ksz886x_cable_test_failed(u16 status) +static bool ksz886x_cable_test_failed(u16 status, u16 mask) { - return FIELD_GET(KSZ8081_LMD_STAT_MASK, status) == + return FIELD_GET(mask, status) == KSZ8081_LMD_STAT_FAIL; }
-static bool ksz886x_cable_test_fault_length_valid(u16 status) +static bool ksz886x_cable_test_fault_length_valid(u16 status, u16 mask) { - switch (FIELD_GET(KSZ8081_LMD_STAT_MASK, status)) { + switch (FIELD_GET(mask, status)) { case KSZ8081_LMD_STAT_OPEN: fallthrough; case KSZ8081_LMD_STAT_SHORT: @@@ -1871,79 -1842,29 +1871,79 @@@ return false; }
-static int ksz886x_cable_test_fault_length(u16 status) +static int ksz886x_cable_test_fault_length(struct phy_device *phydev, u16 status, u16 data_mask) { int dt;
/* According to the data sheet the distance to the fault is - * DELTA_TIME * 0.4 meters. + * DELTA_TIME * 0.4 meters for the KSZ PHYs and + * (DELTA_TIME - 22) * 0.8 meters for the LAN8814 PHY. */ - dt = FIELD_GET(KSZ8081_LMD_DELTA_TIME_MASK, status); + dt = FIELD_GET(data_mask, status);
- return (dt * 400) / 10; + if ((phydev->phy_id & MICREL_PHY_ID_MASK) == PHY_ID_LAN8814) + return ((dt - 22) * 800) / 10; + else + return (dt * 400) / 10; }
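Since ethnl_cable_test_fault_length() takes centimetres, both branches scale the datasheet formulas accordingly: 0.4 m per DELTA_TIME unit for the KSZ parts and 0.8 m per unit, offset by 22, for the LAN8814. For DELTA_TIME = 100 that is (100 * 400) / 10 = 4000 cm = 40 m on a KSZ PHY and ((100 - 22) * 800) / 10 = 6240 cm = 62.4 m on the LAN8814. The two formulas in isolation:

/* Standalone versions of the distance formulas; results in centimetres */
static unsigned int ksz_fault_len_cm(unsigned int dt)
{
	return (dt * 400) / 10;		/* 0.4 m per DELTA_TIME unit */
}

static unsigned int lan8814_fault_len_cm(unsigned int dt)
{
	return ((dt - 22) * 800) / 10;	/* 0.8 m per unit, 22-unit offset */
}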
static int ksz886x_cable_test_wait_for_completion(struct phy_device *phydev) { + const struct kszphy_type *type = phydev->drv->driver_data; int val, ret;
- ret = phy_read_poll_timeout(phydev, KSZ8081_LMD, val, + ret = phy_read_poll_timeout(phydev, type->cable_diag_reg, val, !(val & KSZ8081_LMD_ENABLE_TEST), 30000, 100000, true);
return ret < 0 ? ret : 0; }
+static int lan8814_cable_test_one_pair(struct phy_device *phydev, int pair) +{ + static const int ethtool_pair[] = { ETHTOOL_A_CABLE_PAIR_A, + ETHTOOL_A_CABLE_PAIR_B, + ETHTOOL_A_CABLE_PAIR_C, + ETHTOOL_A_CABLE_PAIR_D, + }; + u32 fault_length; + int ret; + int val; + + val = KSZ8081_LMD_ENABLE_TEST; + val = val | (pair << LAN8814_PAIR_BIT_SHIFT); + + ret = phy_write(phydev, LAN8814_CABLE_DIAG, val); + if (ret < 0) + return ret; + + ret = ksz886x_cable_test_wait_for_completion(phydev); + if (ret) + return ret; + + val = phy_read(phydev, LAN8814_CABLE_DIAG); + if (val < 0) + return val; + + if (ksz886x_cable_test_failed(val, LAN8814_CABLE_DIAG_STAT_MASK)) + return -EAGAIN; + + ret = ethnl_cable_test_result(phydev, ethtool_pair[pair], + ksz886x_cable_test_result_trans(val, + LAN8814_CABLE_DIAG_STAT_MASK + )); + if (ret) + return ret; + + if (!ksz886x_cable_test_fault_length_valid(val, LAN8814_CABLE_DIAG_STAT_MASK)) + return 0; + + fault_length = ksz886x_cable_test_fault_length(phydev, val, + LAN8814_CABLE_DIAG_VCT_DATA_MASK); + + return ethnl_cable_test_fault_length(phydev, ethtool_pair[pair], fault_length); +} + static int ksz886x_cable_test_one_pair(struct phy_device *phydev, int pair) { static const int ethtool_pair[] = { @@@ -1951,7 -1872,6 +1951,7 @@@ ETHTOOL_A_CABLE_PAIR_B, }; int ret, val, mdix; + u32 fault_length;
/* There is no way to choose the pair, like we do on the ksz9031. * We can work around this limitation by using the MDI-X functionality. @@@ -1990,27 -1910,25 +1990,27 @@@ if (val < 0) return val;
- if (ksz886x_cable_test_failed(val)) + if (ksz886x_cable_test_failed(val, KSZ8081_LMD_STAT_MASK)) return -EAGAIN;
ret = ethnl_cable_test_result(phydev, ethtool_pair[pair], - ksz886x_cable_test_result_trans(val)); + ksz886x_cable_test_result_trans(val, KSZ8081_LMD_STAT_MASK)); if (ret) return ret;
- if (!ksz886x_cable_test_fault_length_valid(val)) + if (!ksz886x_cable_test_fault_length_valid(val, KSZ8081_LMD_STAT_MASK)) return 0;
- return ethnl_cable_test_fault_length(phydev, ethtool_pair[pair], - ksz886x_cable_test_fault_length(val)); + fault_length = ksz886x_cable_test_fault_length(phydev, val, KSZ8081_LMD_DELTA_TIME_MASK); + + return ethnl_cable_test_fault_length(phydev, ethtool_pair[pair], fault_length); }
static int ksz886x_cable_test_get_status(struct phy_device *phydev, bool *finished) { - unsigned long pair_mask = 0x3; + const struct kszphy_type *type = phydev->drv->driver_data; + unsigned long pair_mask = type->pair_mask; int retries = 20; int pair, ret;
@@@ -2019,10 -1937,7 +2019,10 @@@ /* Try harder if link partner is active */ while (pair_mask && retries--) { for_each_set_bit(pair, &pair_mask, 4) { - ret = ksz886x_cable_test_one_pair(phydev, pair); + if (type->cable_diag_reg == LAN8814_CABLE_DIAG) + ret = lan8814_cable_test_one_pair(phydev, pair); + else + ret = ksz886x_cable_test_one_pair(phydev, pair); if (ret == -EAGAIN) continue; if (ret < 0) @@@ -2761,79 -2676,22 +2761,82 @@@ static int lan8804_config_init(struct p return 0; }
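The dispatch above keys off per-PHY parameters carried in driver_data. A user-space sketch of that idea follows; the field names cable_diag_reg and pair_mask mirror the diff, but the struct shape and the register numbers are illustrative assumptions, not the driver's definitions:

    #include <stdio.h>

    /* Per-PHY parameters let shared cable-test code serve several
     * chips; values below are illustrative. */
    struct kszphy_type_model {
        int cable_diag_reg;      /* register that starts cable diag */
        unsigned long pair_mask; /* bitmask of testable wire pairs */
    };

    static const struct kszphy_type_model ksz886x_model = {
        .cable_diag_reg = 0x1d, .pair_mask = 0x3, /* pairs A/B only */
    };

    static const struct kszphy_type_model lan8814_model = {
        .cable_diag_reg = 0x1e, .pair_mask = 0xf, /* all four pairs */
    };

    static void run_cable_test(const struct kszphy_type_model *t)
    {
        for (int pair = 0; pair < 4; pair++)
            if (t->pair_mask & (1ul << pair))
                printf("testing pair %d via reg 0x%x\n",
                       pair, t->cable_diag_reg);
    }

    int main(void)
    {
        run_cable_test(&ksz886x_model);
        run_cable_test(&lan8814_model);
        return 0;
    }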
+static irqreturn_t lan8804_handle_interrupt(struct phy_device *phydev) +{ + int status; + + status = phy_read(phydev, LAN8814_INTS); + if (status < 0) { + phy_error(phydev); + return IRQ_NONE; + } + + if (status > 0) + phy_trigger_machine(phydev); + + return IRQ_HANDLED; +} + +#define LAN8804_OUTPUT_CONTROL 25 +#define LAN8804_OUTPUT_CONTROL_INTR_BUFFER BIT(14) +#define LAN8804_CONTROL 31 +#define LAN8804_CONTROL_INTR_POLARITY BIT(14) + +static int lan8804_config_intr(struct phy_device *phydev) +{ + int err; + + /* This is an internal PHY of the lan966x, and it is not possible to + * change the interrupt polarity on the GIC found in the lan966x; + * therefore change the polarity of the interrupt in the PHY from + * active low to active high. + */ + phy_write(phydev, LAN8804_CONTROL, LAN8804_CONTROL_INTR_POLARITY); + + /* By default the interrupt buffer is open-drain, in which case the + * interrupt can only be active low. Therefore change the interrupt + * buffer to push-pull so that the interrupt polarity can be changed. + */ + phy_write(phydev, LAN8804_OUTPUT_CONTROL, + LAN8804_OUTPUT_CONTROL_INTR_BUFFER); + + if (phydev->interrupts == PHY_INTERRUPT_ENABLED) { + err = phy_read(phydev, LAN8814_INTS); + if (err < 0) + return err; + + err = phy_write(phydev, LAN8814_INTC, LAN8814_INT_LINK); + if (err) + return err; + } else { + err = phy_write(phydev, LAN8814_INTC, 0); + if (err) + return err; + + err = phy_read(phydev, LAN8814_INTS); + if (err < 0) + return err; + } + + return 0; +} + static irqreturn_t lan8814_handle_interrupt(struct phy_device *phydev) { int irq_status, tsu_irq_status; + int ret = IRQ_NONE;
irq_status = phy_read(phydev, LAN8814_INTS); - if (irq_status > 0 && (irq_status & LAN8814_INT_LINK)) - phy_trigger_machine(phydev); - if (irq_status < 0) { phy_error(phydev); return IRQ_NONE; }
+ if (irq_status & LAN8814_INT_LINK) { + phy_trigger_machine(phydev); + ret = IRQ_HANDLED; + } + while (1) { tsu_irq_status = lanphy_read_page_reg(phydev, 4, LAN8814_INTR_STS_REG); @@@ -2842,12 -2700,15 +2845,15 @@@ (tsu_irq_status & (LAN8814_INTR_STS_REG_1588_TSU0_ | LAN8814_INTR_STS_REG_1588_TSU1_ | LAN8814_INTR_STS_REG_1588_TSU2_ | - LAN8814_INTR_STS_REG_1588_TSU3_))) + LAN8814_INTR_STS_REG_1588_TSU3_))) { lan8814_handle_ptp_interrupt(phydev); - else + ret = IRQ_HANDLED; + } else { break; + } } - return IRQ_HANDLED; + + return ret; }
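The reworked handler now returns IRQ_NONE unless it actually serviced a recognized status bit, which lets the IRQ core detect spurious or stuck interrupts on shared lines. A user-space model of that contract, with an illustrative status bit:

    #include <stdio.h>

    /* Model of the handler contract: report IRQ_HANDLED only when a
     * recognized status bit was actually serviced, IRQ_NONE otherwise,
     * so the core can spot spurious interrupts on the line. */
    enum irqreturn_model { IRQ_NONE_M, IRQ_HANDLED_M };

    #define INT_LINK_M 0x01 /* illustrative status bit */

    static enum irqreturn_model handle_irq(int status)
    {
        enum irqreturn_model ret = IRQ_NONE_M;

        if (status < 0)          /* register read failed: nothing serviced */
            return IRQ_NONE_M;
        if (status & INT_LINK_M)
            ret = IRQ_HANDLED_M; /* link event recognized and handled */
        return ret;
    }

    int main(void)
    {
        printf("%d %d %d\n", handle_irq(-1), handle_irq(0),
               handle_irq(INT_LINK_M));
        return 0;
    }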
static int lan8814_ack_interrupt(struct phy_device *phydev) @@@ -2874,9 -2735,9 +2880,9 @@@ static int lan8814_config_intr(struct p if (err) return err;
- err = phy_write(phydev, LAN8814_INTC, LAN8814_INT_LINK); + err = phy_write(phydev, LAN8814_INTC, LAN8814_INT_LINK); } else { - err = phy_write(phydev, LAN8814_INTC, 0); + err = phy_write(phydev, LAN8814_INTC, 0); if (err) return err;
@@@ -3256,7 -3117,6 +3262,7 @@@ static struct phy_driver ksphy_driver[ .phy_id = PHY_ID_LAN8814, .phy_id_mask = MICREL_PHY_ID_MASK, .name = "Microchip INDY Gigabit Quad PHY", + .flags = PHY_POLL_CABLE_TEST, .config_init = lan8814_config_init, .driver_data = &lan8814_type, .probe = lan8814_probe, @@@ -3269,8 -3129,6 +3275,8 @@@ .resume = kszphy_resume, .config_intr = lan8814_config_intr, .handle_interrupt = lan8814_handle_interrupt, + .cable_test_start = lan8814_cable_test_start, + .cable_test_get_status = ksz886x_cable_test_get_status, }, { .phy_id = PHY_ID_LAN8804, .phy_id_mask = MICREL_PHY_ID_MASK, @@@ -3285,8 -3143,6 +3291,8 @@@ .get_stats = kszphy_get_stats, .suspend = genphy_suspend, .resume = kszphy_resume, + .config_intr = lan8804_config_intr, + .handle_interrupt = lan8804_handle_interrupt, }, { .phy_id = PHY_ID_KSZ9131, .phy_id_mask = MICREL_PHY_ID_MASK, @@@ -3319,7 -3175,6 +3325,7 @@@ .phy_id = PHY_ID_KSZ886X, .phy_id_mask = MICREL_PHY_ID_MASK, .name = "Micrel KSZ8851 Ethernet MAC or KSZ886X Switch", + .driver_data = &ksz886x_type, /* PHY_BASIC_FEATURES */ .flags = PHY_POLL_CABLE_TEST, .config_init = kszphy_config_init, diff --combined drivers/net/team/team.c index ab92416d861f,154a3c0a6dfd..62ade69295a9 --- a/drivers/net/team/team.c +++ b/drivers/net/team/team.c @@@ -1275,10 -1275,12 +1275,12 @@@ static int team_port_add(struct team *t } }
- netif_addr_lock_bh(dev); - dev_uc_sync_multiple(port_dev, dev); - dev_mc_sync_multiple(port_dev, dev); - netif_addr_unlock_bh(dev); + if (dev->flags & IFF_UP) { + netif_addr_lock_bh(dev); + dev_uc_sync_multiple(port_dev, dev); + dev_mc_sync_multiple(port_dev, dev); + netif_addr_unlock_bh(dev); + }
port->index = -1; list_add_tail_rcu(&port->list, &team->port_list); @@@ -1349,8 -1351,10 +1351,10 @@@ static int team_port_del(struct team *t netdev_rx_handler_unregister(port_dev); team_port_disable_netpoll(port); vlan_vids_del_by_dev(port_dev, dev); - dev_uc_unsync(port_dev, dev); - dev_mc_unsync(port_dev, dev); + if (dev->flags & IFF_UP) { + dev_uc_unsync(port_dev, dev); + dev_mc_unsync(port_dev, dev); + } dev_close(port_dev); team_port_leave(team, port);
@@@ -1700,6 -1704,14 +1704,14 @@@ static int team_open(struct net_device
static int team_close(struct net_device *dev) { + struct team *team = netdev_priv(dev); + struct team_port *port; + + list_for_each_entry(port, &team->port_list, list) { + dev_uc_unsync(port->dev, dev); + dev_mc_unsync(port->dev, dev); + } + return 0; }
@@@ -2070,8 -2082,8 +2082,8 @@@ static const struct net_device_ops team static void team_ethtool_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *drvinfo) { - strlcpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver)); - strlcpy(drvinfo->version, UTS_RELEASE, sizeof(drvinfo->version)); + strscpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver)); + strscpy(drvinfo->version, UTS_RELEASE, sizeof(drvinfo->version)); }
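On the strlcpy() to strscpy() conversions seen here and in later hunks: strscpy() always NUL-terminates, returns the number of bytes copied, and reports truncation as -E2BIG, whereas strlcpy() returns the full source length and therefore must walk a possibly unterminated source. A user-space model of those semantics, with E2BIG_M standing in for the kernel's errno value:

    #include <stddef.h>
    #include <stdio.h>

    #define E2BIG_M 7 /* stand-in for the kernel's E2BIG */

    /* Model of strscpy(): copy at most size-1 bytes, always terminate,
     * return bytes copied or -E2BIG on truncation. Unlike strlcpy(),
     * it never walks the whole source just to build a return value. */
    static long strscpy_model(char *dst, const char *src, size_t size)
    {
        size_t i;

        if (size == 0)
            return -E2BIG_M;
        for (i = 0; i < size - 1 && src[i]; i++)
            dst[i] = src[i];
        dst[i] = '\0';
        return src[i] ? -E2BIG_M : (long)i;
    }

    int main(void)
    {
        char buf[8];

        printf("%ld '%s'\n",
               strscpy_model(buf, "team", sizeof(buf)), buf);       /* 4 'team' */
        printf("%ld '%s'\n",
               strscpy_model(buf, "0123456789", sizeof(buf)), buf); /* -7 '0123456' */
        return 0;
    }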
static int team_ethtool_get_link_ksettings(struct net_device *dev, @@@ -2840,7 -2852,6 +2852,7 @@@ static struct genl_family team_nl_famil .module = THIS_MODULE, .small_ops = team_nl_ops, .n_small_ops = ARRAY_SIZE(team_nl_ops), + .resv_start_op = TEAM_CMD_PORT_LIST_GET + 1, .mcgrps = team_nl_mcgrps, .n_mcgrps = ARRAY_SIZE(team_nl_mcgrps), }; diff --combined drivers/net/wireguard/netlink.c index 0c0644e762e5,5c804bcabfe6..43c8c84e7ea8 --- a/drivers/net/wireguard/netlink.c +++ b/drivers/net/wireguard/netlink.c @@@ -436,14 -436,13 +436,13 @@@ static int set_peer(struct wg_device *w if (attrs[WGPEER_A_ENDPOINT]) { struct sockaddr *addr = nla_data(attrs[WGPEER_A_ENDPOINT]); size_t len = nla_len(attrs[WGPEER_A_ENDPOINT]); + struct endpoint endpoint = { { { 0 } } };
- if ((len == sizeof(struct sockaddr_in) && - addr->sa_family == AF_INET) || - (len == sizeof(struct sockaddr_in6) && - addr->sa_family == AF_INET6)) { - struct endpoint endpoint = { { { 0 } } }; - - memcpy(&endpoint.addr, addr, len); + if (len == sizeof(struct sockaddr_in) && addr->sa_family == AF_INET) { + endpoint.addr4 = *(struct sockaddr_in *)addr; + wg_socket_set_peer_endpoint(peer, &endpoint); + } else if (len == sizeof(struct sockaddr_in6) && addr->sa_family == AF_INET6) { + endpoint.addr6 = *(struct sockaddr_in6 *)addr; wg_socket_set_peer_endpoint(peer, &endpoint); } } @@@ -621,7 -620,6 +620,7 @@@ static const struct genl_ops genl_ops[ static struct genl_family genl_family __ro_after_init = { .ops = genl_ops, .n_ops = ARRAY_SIZE(genl_ops), + .resv_start_op = WG_CMD_SET_DEVICE + 1, .name = WG_GENL_NAME, .version = WG_GENL_VERSION, .maxattr = WGDEVICE_A_MAX, diff --combined drivers/pinctrl/pinctrl-ocelot.c index 340ca2373429,c7df8c5fe585..83464e0bf4e6 --- a/drivers/pinctrl/pinctrl-ocelot.c +++ b/drivers/pinctrl/pinctrl-ocelot.c @@@ -10,7 -10,6 +10,7 @@@ #include <linux/gpio/driver.h> #include <linux/interrupt.h> #include <linux/io.h> +#include <linux/mfd/ocelot.h> #include <linux/of_device.h> #include <linux/of_irq.h> #include <linux/of_platform.h> @@@ -332,6 -331,7 +332,7 @@@ struct ocelot_pinctrl const struct ocelot_pincfg_data *pincfg_data; struct ocelot_pmx_func func[FUNC_MAX]; u8 stride; + struct workqueue_struct *wq; };
struct ocelot_match_data { @@@ -339,6 -339,11 +340,11 @@@ struct ocelot_pincfg_data pincfg_data; };
+ struct ocelot_irq_work { + struct work_struct irq_work; + struct irq_desc *irq_desc; + }; + #define LUTON_P(p, f0, f1) \ static struct ocelot_pin_caps luton_pin_##p = { \ .pin = p, \ @@@ -1814,6 -1819,75 +1820,75 @@@ static void ocelot_irq_mask(struct irq_ gpiochip_disable_irq(chip, gpio); }
+ static void ocelot_irq_work(struct work_struct *work) + { + struct ocelot_irq_work *w = container_of(work, struct ocelot_irq_work, irq_work); + struct irq_chip *parent_chip = irq_desc_get_chip(w->irq_desc); + struct gpio_chip *chip = irq_desc_get_chip_data(w->irq_desc); + struct irq_data *data = irq_desc_get_irq_data(w->irq_desc); + unsigned int gpio = irqd_to_hwirq(data); + + local_irq_disable(); + chained_irq_enter(parent_chip, w->irq_desc); + generic_handle_domain_irq(chip->irq.domain, gpio); + chained_irq_exit(parent_chip, w->irq_desc); + local_irq_enable(); + + kfree(w); + } + + static void ocelot_irq_unmask_level(struct irq_data *data) + { + struct gpio_chip *chip = irq_data_get_irq_chip_data(data); + struct ocelot_pinctrl *info = gpiochip_get_data(chip); + struct irq_desc *desc = irq_data_to_desc(data); + unsigned int gpio = irqd_to_hwirq(data); + unsigned int bit = BIT(gpio % 32); + bool ack = false, active = false; + u8 trigger_level; + int val; + + trigger_level = irqd_get_trigger_type(data); + + /* Check if the interrupt line is still active. */ + regmap_read(info->map, REG(OCELOT_GPIO_IN, info, gpio), &val); + if ((!(val & bit) && trigger_level == IRQ_TYPE_LEVEL_LOW) || + (val & bit && trigger_level == IRQ_TYPE_LEVEL_HIGH)) + active = true; + + /* + * Check if the interrupt controller has seen any changes on the + * interrupt line. + */ + regmap_read(info->map, REG(OCELOT_GPIO_INTR, info, gpio), &val); + if (val & bit) + ack = true; + + /* Enable the interrupt now */ + gpiochip_enable_irq(chip, gpio); + regmap_update_bits(info->map, REG(OCELOT_GPIO_INTR_ENA, info, gpio), + bit, bit); + + /* + * If the interrupt line is still active and the interrupt controller + * has not seen any changes on the interrupt line, it means that + * another interrupt happened while the line was active. We missed + * that one, so we need to kick the interrupt handler again. + */ + if (active && !ack) { + struct ocelot_irq_work *work; + + work = kmalloc(sizeof(*work), GFP_ATOMIC); + if (!work) + return; + + work->irq_desc = desc; + INIT_WORK(&work->irq_work, ocelot_irq_work); + queue_work(info->wq, &work->irq_work); + } + } + static void ocelot_irq_unmask(struct irq_data *data) { struct gpio_chip *chip = irq_data_get_irq_chip_data(data); @@@ -1837,13 -1911,12 +1912,12 @@@ static void ocelot_irq_ack(struct irq_d
static int ocelot_irq_set_type(struct irq_data *data, unsigned int type);
- static struct irq_chip ocelot_eoi_irqchip = { + static struct irq_chip ocelot_level_irqchip = { .name = "gpio", .irq_mask = ocelot_irq_mask, - .irq_eoi = ocelot_irq_ack, - .irq_unmask = ocelot_irq_unmask, - .flags = IRQCHIP_EOI_THREADED | IRQCHIP_EOI_IF_HANDLED | - IRQCHIP_IMMUTABLE, + .irq_ack = ocelot_irq_ack, + .irq_unmask = ocelot_irq_unmask_level, + .flags = IRQCHIP_IMMUTABLE, .irq_set_type = ocelot_irq_set_type, GPIOCHIP_IRQ_RESOURCE_HELPERS }; @@@ -1860,14 -1933,9 +1934,9 @@@ static struct irq_chip ocelot_irqchip
static int ocelot_irq_set_type(struct irq_data *data, unsigned int type) { - type &= IRQ_TYPE_SENSE_MASK; - - if (!(type & (IRQ_TYPE_EDGE_BOTH | IRQ_TYPE_LEVEL_HIGH))) - return -EINVAL; - - if (type & IRQ_TYPE_LEVEL_HIGH) - irq_set_chip_handler_name_locked(data, &ocelot_eoi_irqchip, - handle_fasteoi_irq, NULL); + if (type & (IRQ_TYPE_LEVEL_HIGH | IRQ_TYPE_LEVEL_LOW)) + irq_set_chip_handler_name_locked(data, &ocelot_level_irqchip, + handle_level_irq, NULL); if (type & IRQ_TYPE_EDGE_BOTH) irq_set_chip_handler_name_locked(data, &ocelot_irqchip, handle_edge_irq, NULL); @@@ -1976,6 -2044,7 +2045,6 @@@ static int ocelot_pinctrl_probe(struct struct ocelot_pinctrl *info; struct reset_control *reset; struct regmap *pincfg; - void __iomem *base; int ret; struct regmap_config regmap_config = { .reg_bits = 32, @@@ -1996,6 -2065,10 +2065,10 @@@ if (!info->desc) return -ENOMEM;
+ info->wq = alloc_ordered_workqueue("ocelot_ordered", 0); + if (!info->wq) + return -ENOMEM; + info->pincfg_data = &data->pincfg_data;
reset = devm_reset_control_get_optional_shared(dev, "switch"); @@@ -2004,15 -2077,21 +2077,15 @@@ "Failed to get reset\n"); reset_control_reset(reset);
- base = devm_ioremap_resource(dev, - platform_get_resource(pdev, IORESOURCE_MEM, 0)); - if (IS_ERR(base)) - return PTR_ERR(base); - info->stride = 1 + (info->desc->npins - 1) / 32;
regmap_config.max_register = OCELOT_GPIO_SD_MAP * info->stride + 15 * 4;
- info->map = devm_regmap_init_mmio(dev, base, ®map_config); - if (IS_ERR(info->map)) { - dev_err(dev, "Failed to create regmap\n"); - return PTR_ERR(info->map); - } + info->map = ocelot_regmap_from_resource(pdev, 0, ®map_config); + if (IS_ERR(info->map)) + return dev_err_probe(dev, PTR_ERR(info->map), + "Failed to create regmap\n"); - dev_set_drvdata(dev, info->map); + dev_set_drvdata(dev, info); info->dev = dev;
/* Pinconf registers */ @@@ -2037,6 -2116,15 +2110,15 @@@ return 0; }
+ static int ocelot_pinctrl_remove(struct platform_device *pdev) + { + struct ocelot_pinctrl *info = platform_get_drvdata(pdev); + + destroy_workqueue(info->wq); + + return 0; + } + static struct platform_driver ocelot_pinctrl_driver = { .driver = { .name = "pinctrl-ocelot", @@@ -2044,6 -2132,7 +2126,7 @@@ .suppress_bind_attrs = true, }, .probe = ocelot_pinctrl_probe, + .remove = ocelot_pinctrl_remove, }; module_platform_driver(ocelot_pinctrl_driver); MODULE_LICENSE("Dual MIT/GPL"); diff --combined net/bridge/netfilter/ebtables.c index 8f6639e095a0,4f385d52a1c4..ce5dfa3babd2 --- a/net/bridge/netfilter/ebtables.c +++ b/net/bridge/netfilter/ebtables.c @@@ -1040,8 -1040,10 +1040,10 @@@ static int do_replace_finish(struct ne goto free_iterate; }
- if (repl->valid_hooks != t->valid_hooks) + if (repl->valid_hooks != t->valid_hooks) { + ret = -EINVAL; goto free_unlock; + }
if (repl->num_counters && repl->num_counters != t->private->nentries) { ret = -EINVAL; @@@ -1440,7 -1442,7 +1442,7 @@@ static inline int ebt_obj_to_user(char /* ebtables expects 31 bytes long names but xt_match names are 29 bytes * long. Copy 29 bytes and fill remaining bytes with zeroes. */ - strlcpy(name, _name, sizeof(name)); + strscpy(name, _name, sizeof(name)); if (copy_to_user(um, name, EBT_EXTENSION_MAXNAMELEN) || put_user(revision, (u8 __user *)(um + EBT_EXTENSION_MAXNAMELEN)) || put_user(datasize, (int __user *)(um + EBT_EXTENSION_MAXNAMELEN + 1)) || diff --combined net/core/flow_dissector.c index 9671d79dd986,5dc3860e9fc7..25cd35f5922e --- a/net/core/flow_dissector.c +++ b/net/core/flow_dissector.c @@@ -204,30 -204,6 +204,30 @@@ static void __skb_flow_dissect_icmp(con skb_flow_get_icmp_tci(skb, key_icmp, data, thoff, hlen); }
+static void __skb_flow_dissect_l2tpv3(const struct sk_buff *skb, + struct flow_dissector *flow_dissector, + void *target_container, const void *data, + int nhoff, int hlen) +{ + struct flow_dissector_key_l2tpv3 *key_l2tpv3; + struct { + __be32 session_id; + } *hdr, _hdr; + + if (!dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_L2TPV3)) + return; + + hdr = __skb_header_pointer(skb, nhoff, sizeof(_hdr), data, hlen, &_hdr); + if (!hdr) + return; + + key_l2tpv3 = skb_flow_dissector_target(flow_dissector, + FLOW_DISSECTOR_KEY_L2TPV3, + target_container); + + key_l2tpv3->session_id = hdr->session_id; +} + void skb_flow_dissect_meta(const struct sk_buff *skb, struct flow_dissector *flow_dissector, void *target_container) @@@ -890,8 -866,8 +890,8 @@@ static void __skb_flow_bpf_to_target(co } }
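The new L2TPv3 dissector above follows the usual __skb_header_pointer() pattern: copy a fixed-size header out of the packet only if it lies entirely within the available data, and give up on truncated packets. A user-space model of that bounds check (the packet bytes are illustrative):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Model of the __skb_header_pointer() pattern: copy a fixed-size
     * header out of the packet only if it fits fully inside the
     * available data, otherwise refuse to dissect. */
    static const void *header_pointer(const uint8_t *data, size_t len,
                                      size_t off, void *buf, size_t hlen)
    {
        if (hlen > len || off > len - hlen)
            return NULL; /* truncated packet */
        memcpy(buf, data + off, hlen);
        return buf;
    }

    int main(void)
    {
        /* Illustrative packet holding one 4-byte L2TPv3 session id. */
        const uint8_t pkt[] = { 0x00, 0x00, 0x01, 0x02 };
        uint32_t session_id;

        if (header_pointer(pkt, sizeof(pkt), 0, &session_id, sizeof(session_id)))
            printf("dissected %zu-byte header\n", sizeof(session_id));
        if (!header_pointer(pkt, sizeof(pkt), 2, &session_id, sizeof(session_id)))
            printf("short packet rejected\n");
        return 0;
    }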
-bool bpf_flow_dissect(struct bpf_prog *prog, struct bpf_flow_dissector *ctx, - __be16 proto, int nhoff, int hlen, unsigned int flags) +u32 bpf_flow_dissect(struct bpf_prog *prog, struct bpf_flow_dissector *ctx, + __be16 proto, int nhoff, int hlen, unsigned int flags) { struct bpf_flow_keys *flow_keys = ctx->flow_keys; u32 result; @@@ -916,7 -892,7 +916,7 @@@ flow_keys->thoff = clamp_t(u16, flow_keys->thoff, flow_keys->nhoff, hlen);
- return result == BPF_OK; + return result; }
static bool is_pppoe_ses_hdr_valid(const struct pppoe_hdr *hdr) @@@ -1032,7 -1008,6 +1032,7 @@@ bool __skb_flow_dissect(const struct ne }; __be16 n_proto = proto; struct bpf_prog *prog; + u32 result;
if (skb) { ctx.skb = skb; @@@ -1044,16 -1019,13 +1044,16 @@@ }
prog = READ_ONCE(run_array->items[0].prog); - ret = bpf_flow_dissect(prog, &ctx, n_proto, nhoff, - hlen, flags); + result = bpf_flow_dissect(prog, &ctx, n_proto, nhoff, + hlen, flags); + if (result == BPF_FLOW_DISSECTOR_CONTINUE) + goto dissect_continue; __skb_flow_bpf_to_target(&flow_keys, flow_dissector, target_container); rcu_read_unlock(); - return ret; + return result == BPF_OK; } +dissect_continue: rcu_read_unlock(); }
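With bpf_flow_dissect() now returning the raw program verdict instead of a bool, the caller can treat BPF_FLOW_DISSECTOR_CONTINUE as "fall back to the in-kernel dissector" rather than as a failure. A user-space model of that dispatch; the enum values are illustrative, not the real uapi constants:

    #include <stdbool.h>
    #include <stdio.h>

    /* Model of the new verdict handling: a raw code distinguishes
     * "handled, success" and "handled, drop" from "please continue
     * with the kernel dissector". */
    enum verdict_model { BPF_OK_M, BPF_DROP_M, BPF_FLOW_DISSECTOR_CONTINUE_M };

    static bool dissect(enum verdict_model result)
    {
        if (result == BPF_FLOW_DISSECTOR_CONTINUE_M) {
            printf("continuing with the kernel dissector\n");
            return true; /* assume the fallback path succeeds */
        }
        printf("bpf verdict: %s\n", result == BPF_OK_M ? "ok" : "drop");
        return result == BPF_OK_M;
    }

    int main(void)
    {
        dissect(BPF_OK_M);
        dissect(BPF_DROP_M);
        dissect(BPF_FLOW_DISSECTOR_CONTINUE_M);
        return 0;
    }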
@@@ -1201,8 -1173,8 +1201,8 @@@ proto_again nhoff += sizeof(*vlan); }
- if (dissector_uses_key(flow_dissector, - FLOW_DISSECTOR_KEY_NUM_OF_VLANS)) { + if (dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_NUM_OF_VLANS) && + !(key_control->flags & FLOW_DIS_ENCAPSULATION)) { struct flow_dissector_key_num_of_vlans *key_nvs;
key_nvs = skb_flow_dissector_target(flow_dissector, @@@ -1525,10 -1497,6 +1525,10 @@@ ip_proto_again __skb_flow_dissect_icmp(skb, flow_dissector, target_container, data, nhoff, hlen); break; + case IPPROTO_L2TP: + __skb_flow_dissect_l2tpv3(skb, flow_dissector, target_container, + data, nhoff, hlen); + break;
default: break; @@@ -1643,9 -1611,8 +1643,8 @@@ static inline void __flow_hash_consiste
switch (keys->control.addr_type) { case FLOW_DISSECTOR_KEY_IPV4_ADDRS: - addr_diff = (__force u32)keys->addrs.v4addrs.dst - - (__force u32)keys->addrs.v4addrs.src; - if (addr_diff < 0) + if ((__force u32)keys->addrs.v4addrs.dst < + (__force u32)keys->addrs.v4addrs.src) swap(keys->addrs.v4addrs.src, keys->addrs.v4addrs.dst);
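The flow-hash change above fixes a sign bug: the old code computed the u32 address difference, stored it in a signed int, and tested for negative, which mis-sorts pairs whose difference is 2^31 or more, so the two directions of one flow could hash differently. Comparing the unsigned values directly avoids that. A small standalone demonstration (two's-complement int conversion assumed, as on all ABIs Linux supports):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t src = 0x00000001, dst = 0x90000000;

        /* Old test: the u32 difference 0x8fffffff lands in an int and
         * reads as negative, so this pair would wrongly be swapped
         * even though dst > src. */
        int addr_diff = (int)(dst - src);
        printf("old test swaps: %s\n", addr_diff < 0 ? "yes" : "no"); /* yes */

        /* New test: compare the unsigned values directly. */
        printf("new test swaps: %s\n", dst < src ? "yes" : "no");     /* no */
        return 0;
    }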
if ((__force u16)keys->ports.dst < diff --combined net/ipv4/ipmr.c index 95eefbe2e142,e11d6b0b62b7..e04544ac4b45 --- a/net/ipv4/ipmr.c +++ b/net/ipv4/ipmr.c @@@ -1004,7 -1004,9 +1004,9 @@@ static void ipmr_cache_resolve(struct n
rtnl_unicast(skb, net, NETLINK_CB(skb).portid); } else { + rcu_read_lock(); ip_mr_forward(net, mrt, skb->dev, skb, c, 0); + rcu_read_unlock(); } } } @@@ -1546,8 -1548,7 +1548,8 @@@ out }
/* Getsock opt support for the multicast routing system. */ -int ip_mroute_getsockopt(struct sock *sk, int optname, char __user *optval, int __user *optlen) +int ip_mroute_getsockopt(struct sock *sk, int optname, sockptr_t optval, + sockptr_t optlen) { int olr; int val; @@@ -1578,14 -1579,14 +1580,14 @@@ return -ENOPROTOOPT; }
- if (get_user(olr, optlen)) + if (copy_from_sockptr(&olr, optlen, sizeof(int))) return -EFAULT; olr = min_t(unsigned int, olr, sizeof(int)); if (olr < 0) return -EINVAL; - if (put_user(olr, optlen)) + if (copy_to_sockptr(optlen, &olr, sizeof(int))) return -EFAULT; - if (copy_to_user(optval, &val, olr)) + if (copy_to_sockptr(optval, &val, olr)) return -EFAULT; return 0; } diff --combined net/ipv4/tcp.c index 829beee3fa32,e373dde1f46f..5702ca9b952d --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c @@@ -1761,19 -1761,28 +1761,28 @@@ int tcp_read_skb(struct sock *sk, skb_r if (sk->sk_state == TCP_LISTEN) return -ENOTCONN;
- skb = tcp_recv_skb(sk, seq, &offset); - if (!skb) - return 0; + while ((skb = tcp_recv_skb(sk, seq, &offset)) != NULL) { + u8 tcp_flags; + int used;
- __skb_unlink(skb, &sk->sk_receive_queue); - WARN_ON(!skb_set_owner_sk_safe(skb, sk)); - copied = recv_actor(sk, skb); - if (copied >= 0) { - seq += copied; - if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) + __skb_unlink(skb, &sk->sk_receive_queue); + WARN_ON_ONCE(!skb_set_owner_sk_safe(skb, sk)); + tcp_flags = TCP_SKB_CB(skb)->tcp_flags; + used = recv_actor(sk, skb); + consume_skb(skb); + if (used < 0) { + if (!copied) + copied = used; + break; + } + seq += used; + copied += used; + + if (tcp_flags & TCPHDR_FIN) { ++seq; + break; + } } - consume_skb(skb); WRITE_ONCE(tp->copied_seq, seq);
tcp_rcv_space_adjust(sk); @@@ -3199,7 -3208,7 +3208,7 @@@ EXPORT_SYMBOL(tcp_disconnect)
static inline bool tcp_can_repair_sock(const struct sock *sk) { - return ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN) && + return sockopt_ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN) && (sk->sk_state != TCP_LISTEN); }
@@@ -3476,8 -3485,8 +3485,8 @@@ int tcp_set_window_clamp(struct sock *s /* * Socket option code for TCP. */ -static int do_tcp_setsockopt(struct sock *sk, int level, int optname, - sockptr_t optval, unsigned int optlen) +int do_tcp_setsockopt(struct sock *sk, int level, int optname, + sockptr_t optval, unsigned int optlen) { struct tcp_sock *tp = tcp_sk(sk); struct inet_connection_sock *icsk = inet_csk(sk); @@@ -3499,11 -3508,11 +3508,11 @@@ return -EFAULT; name[val] = 0;
- lock_sock(sk); - err = tcp_set_congestion_control(sk, name, true, - ns_capable(sock_net(sk)->user_ns, - CAP_NET_ADMIN)); - release_sock(sk); + sockopt_lock_sock(sk); + err = tcp_set_congestion_control(sk, name, !has_current_bpf_ctx(), + sockopt_ns_capable(sock_net(sk)->user_ns, + CAP_NET_ADMIN)); + sockopt_release_sock(sk); return err; } case TCP_ULP: { @@@ -3519,9 -3528,9 +3528,9 @@@ return -EFAULT; name[val] = 0;
- lock_sock(sk); + sockopt_lock_sock(sk); err = tcp_set_ulp(sk, name); - release_sock(sk); + sockopt_release_sock(sk); return err; } case TCP_FASTOPEN_KEY: { @@@ -3554,7 -3563,7 +3563,7 @@@ if (copy_from_sockptr(&val, optval, sizeof(val))) return -EFAULT;
- lock_sock(sk); + sockopt_lock_sock(sk);
switch (optname) { case TCP_MAXSEG: @@@ -3776,7 -3785,7 +3785,7 @@@ break; }
- release_sock(sk); + sockopt_release_sock(sk); return err; }
@@@ -4040,15 -4049,15 +4049,15 @@@ struct sk_buff *tcp_get_timestamping_op return stats; }
-static int do_tcp_getsockopt(struct sock *sk, int level, - int optname, char __user *optval, int __user *optlen) +int do_tcp_getsockopt(struct sock *sk, int level, + int optname, sockptr_t optval, sockptr_t optlen) { struct inet_connection_sock *icsk = inet_csk(sk); struct tcp_sock *tp = tcp_sk(sk); struct net *net = sock_net(sk); int val, len;
- if (get_user(len, optlen)) + if (copy_from_sockptr(&len, optlen, sizeof(int))) return -EFAULT;
len = min_t(unsigned int, len, sizeof(int)); @@@ -4098,15 -4107,15 +4107,15 @@@ case TCP_INFO: { struct tcp_info info;
- if (get_user(len, optlen)) + if (copy_from_sockptr(&len, optlen, sizeof(int))) return -EFAULT;
tcp_get_info(sk, &info);
len = min_t(unsigned int, len, sizeof(info)); - if (put_user(len, optlen)) + if (copy_to_sockptr(optlen, &len, sizeof(int))) return -EFAULT; - if (copy_to_user(optval, &info, len)) + if (copy_to_sockptr(optval, &info, len)) return -EFAULT; return 0; } @@@ -4116,7 -4125,7 +4125,7 @@@ size_t sz = 0; int attr;
- if (get_user(len, optlen)) + if (copy_from_sockptr(&len, optlen, sizeof(int))) return -EFAULT;
ca_ops = icsk->icsk_ca_ops; @@@ -4124,9 -4133,9 +4133,9 @@@ sz = ca_ops->get_info(sk, ~0U, &attr, &info);
len = min_t(unsigned int, len, sz); - if (put_user(len, optlen)) + if (copy_to_sockptr(optlen, &len, sizeof(int))) return -EFAULT; - if (copy_to_user(optval, &info, len)) + if (copy_to_sockptr(optval, &info, len)) return -EFAULT; return 0; } @@@ -4135,28 -4144,27 +4144,28 @@@ break;
case TCP_CONGESTION: - if (get_user(len, optlen)) + if (copy_from_sockptr(&len, optlen, sizeof(int))) return -EFAULT; len = min_t(unsigned int, len, TCP_CA_NAME_MAX); - if (put_user(len, optlen)) + if (copy_to_sockptr(optlen, &len, sizeof(int))) return -EFAULT; - if (copy_to_user(optval, icsk->icsk_ca_ops->name, len)) + if (copy_to_sockptr(optval, icsk->icsk_ca_ops->name, len)) return -EFAULT; return 0;
case TCP_ULP: - if (get_user(len, optlen)) + if (copy_from_sockptr(&len, optlen, sizeof(int))) return -EFAULT; len = min_t(unsigned int, len, TCP_ULP_NAME_MAX); if (!icsk->icsk_ulp_ops) { - if (put_user(0, optlen)) + len = 0; + if (copy_to_sockptr(optlen, &len, sizeof(int))) return -EFAULT; return 0; } - if (put_user(len, optlen)) + if (copy_to_sockptr(optlen, &len, sizeof(int))) return -EFAULT; - if (copy_to_user(optval, icsk->icsk_ulp_ops->name, len)) + if (copy_to_sockptr(optval, icsk->icsk_ulp_ops->name, len)) return -EFAULT; return 0;
@@@ -4164,15 -4172,15 +4173,15 @@@ u64 key[TCP_FASTOPEN_KEY_BUF_LENGTH / sizeof(u64)]; unsigned int key_len;
- if (get_user(len, optlen)) + if (copy_from_sockptr(&len, optlen, sizeof(int))) return -EFAULT;
key_len = tcp_fastopen_get_cipher(net, icsk, key) * TCP_FASTOPEN_KEY_LENGTH; len = min_t(unsigned int, len, key_len); - if (put_user(len, optlen)) + if (copy_to_sockptr(optlen, &len, sizeof(int))) return -EFAULT; - if (copy_to_user(optval, key, len)) + if (copy_to_sockptr(optval, key, len)) return -EFAULT; return 0; } @@@ -4198,7 -4206,7 +4207,7 @@@ case TCP_REPAIR_WINDOW: { struct tcp_repair_window opt;
- if (get_user(len, optlen)) + if (copy_from_sockptr(&len, optlen, sizeof(int))) return -EFAULT;
if (len != sizeof(opt)) @@@ -4213,7 -4221,7 +4222,7 @@@ opt.rcv_wnd = tp->rcv_wnd; opt.rcv_wup = tp->rcv_wup;
- if (copy_to_user(optval, &opt, len)) + if (copy_to_sockptr(optval, &opt, len)) return -EFAULT; return 0; } @@@ -4259,35 -4267,35 +4268,35 @@@ val = tp->save_syn; break; case TCP_SAVED_SYN: { - if (get_user(len, optlen)) + if (copy_from_sockptr(&len, optlen, sizeof(int))) return -EFAULT;
- lock_sock(sk); + sockopt_lock_sock(sk); if (tp->saved_syn) { if (len < tcp_saved_syn_len(tp->saved_syn)) { - if (put_user(tcp_saved_syn_len(tp->saved_syn), - optlen)) { - release_sock(sk); + len = tcp_saved_syn_len(tp->saved_syn); + if (copy_to_sockptr(optlen, &len, sizeof(int))) { + sockopt_release_sock(sk); return -EFAULT; } - release_sock(sk); + sockopt_release_sock(sk); return -EINVAL; } len = tcp_saved_syn_len(tp->saved_syn); - if (put_user(len, optlen)) { - release_sock(sk); + if (copy_to_sockptr(optlen, &len, sizeof(int))) { + sockopt_release_sock(sk); return -EFAULT; } - if (copy_to_user(optval, tp->saved_syn->data, len)) { - release_sock(sk); + if (copy_to_sockptr(optval, tp->saved_syn->data, len)) { + sockopt_release_sock(sk); return -EFAULT; } tcp_saved_syn_free(tp); - release_sock(sk); + sockopt_release_sock(sk); } else { - release_sock(sk); + sockopt_release_sock(sk); len = 0; - if (put_user(len, optlen)) + if (copy_to_sockptr(optlen, &len, sizeof(int))) return -EFAULT; } return 0; @@@ -4298,31 -4306,31 +4307,31 @@@ struct tcp_zerocopy_receive zc = {}; int err;
- if (get_user(len, optlen)) + if (copy_from_sockptr(&len, optlen, sizeof(int))) return -EFAULT; if (len < 0 || len < offsetofend(struct tcp_zerocopy_receive, length)) return -EINVAL; if (unlikely(len > sizeof(zc))) { - err = check_zeroed_user(optval + sizeof(zc), - len - sizeof(zc)); + err = check_zeroed_sockptr(optval, sizeof(zc), + len - sizeof(zc)); if (err < 1) return err == 0 ? -EINVAL : err; len = sizeof(zc); - if (put_user(len, optlen)) + if (copy_to_sockptr(optlen, &len, sizeof(int))) return -EFAULT; } - if (copy_from_user(&zc, optval, len)) + if (copy_from_sockptr(&zc, optval, len)) return -EFAULT; if (zc.reserved) return -EINVAL; if (zc.msg_flags & ~(TCP_VALID_ZC_MSG_FLAGS)) return -EINVAL; - lock_sock(sk); + sockopt_lock_sock(sk); err = tcp_zerocopy_receive(sk, &zc, &tss); err = BPF_CGROUP_RUN_PROG_GETSOCKOPT_KERN(sk, level, optname, &zc, &len, err); - release_sock(sk); + sockopt_release_sock(sk); if (len >= offsetofend(struct tcp_zerocopy_receive, msg_flags)) goto zerocopy_rcv_cmsg; switch (len) { @@@ -4352,7 -4360,7 +4361,7 @@@ zerocopy_rcv_sk_err zerocopy_rcv_inq: zc.inq = tcp_inq_hint(sk); zerocopy_rcv_out: - if (!err && copy_to_user(optval, &zc, len)) + if (!err && copy_to_sockptr(optval, &zc, len)) err = -EFAULT; return err; } @@@ -4361,9 -4369,9 +4370,9 @@@ return -ENOPROTOOPT; }
- if (put_user(len, optlen)) + if (copy_to_sockptr(optlen, &len, sizeof(int))) return -EFAULT; - if (copy_to_user(optval, &val, len)) + if (copy_to_sockptr(optval, &val, len)) return -EFAULT; return 0; } @@@ -4388,8 -4396,7 +4397,8 @@@ int tcp_getsockopt(struct sock *sk, in if (level != SOL_TCP) return icsk->icsk_af_ops->getsockopt(sk, level, optname, optval, optlen); - return do_tcp_getsockopt(sk, level, optname, optval, optlen); + return do_tcp_getsockopt(sk, level, optname, USER_SOCKPTR(optval), + USER_SOCKPTR(optlen)); } EXPORT_SYMBOL(tcp_getsockopt);
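The get_user()/put_user() to copy_{from,to}_sockptr() conversions throughout do_tcp_getsockopt() let one function body serve both the syscall path (user pointers) and in-kernel callers such as BPF (kernel pointers). A user-space model of the sockptr idea; the struct and helpers below are simplified sketches, not the kernel's definitions:

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdio.h>
    #include <string.h>

    /* Simplified model of sockptr_t: one handle that may carry either
     * a kernel or a user pointer. */
    struct sockptr_model {
        void *ptr;
        bool is_kernel; /* real code picks memcpy vs copy_from_user */
    };

    static int copy_from_sockptr_model(void *dst, struct sockptr_model src,
                                       size_t n)
    {
        memcpy(dst, src.ptr, n); /* user-space model: plain copy */
        return 0;
    }

    static int copy_to_sockptr_model(struct sockptr_model dst, const void *src,
                                     size_t n)
    {
        memcpy(dst.ptr, src, n);
        return 0;
    }

    int main(void)
    {
        int optlen_storage = 64, len;
        struct sockptr_model optlen = { &optlen_storage, true };

        copy_from_sockptr_model(&len, optlen, sizeof(int));
        if (len > (int)sizeof(int))
            len = sizeof(int); /* clamp, as do_tcp_getsockopt() does */
        copy_to_sockptr_model(optlen, &len, sizeof(int));
        printf("optlen clamped to %d\n", optlen_storage); /* 4 */
        return 0;
    }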
@@@ -4435,16 -4442,12 +4444,16 @@@ static void __tcp_alloc_md5sig_pool(voi * to memory. See smp_rmb() in tcp_get_md5sig_pool() */ smp_wmb(); - tcp_md5sig_pool_populated = true; + /* Paired with READ_ONCE() from tcp_alloc_md5sig_pool() + * and tcp_get_md5sig_pool(). + */ + WRITE_ONCE(tcp_md5sig_pool_populated, true); }
bool tcp_alloc_md5sig_pool(void) { - if (unlikely(!tcp_md5sig_pool_populated)) { + /* Paired with WRITE_ONCE() from __tcp_alloc_md5sig_pool() */ + if (unlikely(!READ_ONCE(tcp_md5sig_pool_populated))) { mutex_lock(&tcp_md5sig_mutex);
if (!tcp_md5sig_pool_populated) { @@@ -4455,8 -4458,7 +4464,8 @@@
mutex_unlock(&tcp_md5sig_mutex); } - return tcp_md5sig_pool_populated; + /* Paired with WRITE_ONCE() from __tcp_alloc_md5sig_pool() */ + return READ_ONCE(tcp_md5sig_pool_populated); } EXPORT_SYMBOL(tcp_alloc_md5sig_pool);
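tcp_md5sig_pool_populated is read without a lock, so the annotations above pair a WRITE_ONCE() publish with READ_ONCE() consumers around the existing smp_wmb()/smp_rmb(). In portable C11 the rough analog is relaxed atomics plus release/acquire fences, as in this user-space model:

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    /* Portable model of the publish pattern: a flag read locklessly
     * must be written and read tear-free. The kernel spells this
     * WRITE_ONCE/READ_ONCE plus smp_wmb/smp_rmb; relaxed atomics with
     * explicit fences are a rough C11 analog. */
    static atomic_bool populated;
    static int pool_data; /* stands in for the per-CPU MD5 pool */

    static void publish_pool(void)
    {
        pool_data = 42;                            /* set up the payload */
        atomic_thread_fence(memory_order_release); /* ~ smp_wmb() */
        atomic_store_explicit(&populated, true, memory_order_relaxed);
    }

    static bool try_use_pool(void)
    {
        if (!atomic_load_explicit(&populated, memory_order_relaxed))
            return false;
        atomic_thread_fence(memory_order_acquire); /* ~ smp_rmb() */
        return pool_data == 42; /* payload initialization is visible */
    }

    int main(void)
    {
        publish_pool();
        printf("pool usable: %d\n", try_use_pool());
        return 0;
    }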
@@@ -4472,8 -4474,7 +4481,8 @@@ struct tcp_md5sig_pool *tcp_get_md5sig_ { local_bh_disable();
- if (tcp_md5sig_pool_populated) { + /* Paired with WRITE_ONCE() from __tcp_alloc_md5sig_pool() */ + if (READ_ONCE(tcp_md5sig_pool_populated)) { /* coupled with smp_wmb() in __tcp_alloc_md5sig_pool() */ smp_rmb(); return this_cpu_ptr(&tcp_md5sig_pool); @@@ -4744,12 -4745,6 +4753,12 @@@ void __init tcp_init(void SLAB_HWCACHE_ALIGN | SLAB_PANIC | SLAB_ACCOUNT, NULL); + tcp_hashinfo.bind2_bucket_cachep = + kmem_cache_create("tcp_bind2_bucket", + sizeof(struct inet_bind2_bucket), 0, + SLAB_HWCACHE_ALIGN | SLAB_PANIC | + SLAB_ACCOUNT, + NULL);
/* Size and allocate the main established and bind bucket * hash tables. @@@ -4773,7 -4768,7 +4782,7 @@@ panic("TCP: failed to alloc ehash_locks"); tcp_hashinfo.bhash = alloc_large_system_hash("TCP bind", - sizeof(struct inet_bind_hashbucket), + 2 * sizeof(struct inet_bind_hashbucket), tcp_hashinfo.ehash_mask + 1, 17, /* one slot per 128 KB of memory */ 0, @@@ -4782,15 -4777,11 +4791,15 @@@ 0, 64 * 1024); tcp_hashinfo.bhash_size = 1U << tcp_hashinfo.bhash_size; + tcp_hashinfo.bhash2 = tcp_hashinfo.bhash + tcp_hashinfo.bhash_size; for (i = 0; i < tcp_hashinfo.bhash_size; i++) { spin_lock_init(&tcp_hashinfo.bhash[i].lock); INIT_HLIST_HEAD(&tcp_hashinfo.bhash[i].chain); + spin_lock_init(&tcp_hashinfo.bhash2[i].lock); + INIT_HLIST_HEAD(&tcp_hashinfo.bhash2[i].chain); }
+ tcp_hashinfo.pernet = false;
cnt = tcp_hashinfo.ehash_mask + 1; sysctl_tcp_max_orphans = cnt / 2; diff --combined net/ipv6/af_inet6.c index 19732b5dce23,dbb1430d6cc2..d40b7d60e00e --- a/net/ipv6/af_inet6.c +++ b/net/ipv6/af_inet6.c @@@ -1057,8 -1057,6 +1057,8 @@@ static const struct ipv6_stub ipv6_stub static const struct ipv6_bpf_stub ipv6_bpf_stub_impl = { .inet6_bind = __inet6_bind, .udp6_lib_lookup = __udp6_lib_lookup, + .ipv6_setsockopt = do_ipv6_setsockopt, + .ipv6_getsockopt = do_ipv6_getsockopt, };
static int __init inet6_init(void) @@@ -1072,13 -1070,13 +1072,13 @@@ for (r = &inetsw6[0]; r < &inetsw6[SOCK_MAX]; ++r) INIT_LIST_HEAD(r);
+ raw_hashinfo_init(&raw_v6_hashinfo); + if (disable_ipv6_mod) { pr_info("Loaded, but administratively disabled, reboot required to enable\n"); goto out; }
- raw_hashinfo_init(&raw_v6_hashinfo); - err = proto_register(&tcpv6_prot, 1); if (err) goto out; diff --combined net/ipv6/ip6mr.c index 516e83b52f26,858fd8a28b5b..facdc78a43e5 --- a/net/ipv6/ip6mr.c +++ b/net/ipv6/ip6mr.c @@@ -1028,8 -1028,11 +1028,11 @@@ static void ip6mr_cache_resolve(struct ((struct nlmsgerr *)nlmsg_data(nlh))->error = -EMSGSIZE; } rtnl_unicast(skb, net, NETLINK_CB(skb).portid); - } else + } else { + rcu_read_lock(); ip6_mr_forward(net, mrt, skb->dev, skb, c); + rcu_read_unlock(); + } } }
@@@ -1827,8 -1830,8 +1830,8 @@@ int ip6_mroute_setsockopt(struct sock * * Getsock opt support for the multicast routing system. */
-int ip6_mroute_getsockopt(struct sock *sk, int optname, char __user *optval, - int __user *optlen) +int ip6_mroute_getsockopt(struct sock *sk, int optname, sockptr_t optval, + sockptr_t optlen) { int olr; int val; @@@ -1859,16 -1862,16 +1862,16 @@@ return -ENOPROTOOPT; }
- if (get_user(olr, optlen)) + if (copy_from_sockptr(&olr, optlen, sizeof(int))) return -EFAULT;
olr = min_t(int, olr, sizeof(int)); if (olr < 0) return -EINVAL;
- if (put_user(olr, optlen)) + if (copy_to_sockptr(optlen, &olr, sizeof(int))) return -EFAULT; - if (copy_to_user(optval, &val, olr)) + if (copy_to_sockptr(optval, &val, olr)) return -EFAULT; return 0; } diff --combined net/mptcp/protocol.c index 47931f6cf387,969b33a9dd64..866dfad3cde6 --- a/net/mptcp/protocol.c +++ b/net/mptcp/protocol.c @@@ -150,9 -150,15 +150,15 @@@ static bool mptcp_try_coalesce(struct s MPTCP_SKB_CB(from)->map_seq, MPTCP_SKB_CB(to)->map_seq, to->len, MPTCP_SKB_CB(from)->end_seq); MPTCP_SKB_CB(to)->end_seq = MPTCP_SKB_CB(from)->end_seq; - kfree_skb_partial(from, fragstolen); + + /* Note the fwd memory can reach a negative value after accounting + * for the delta, but a later skb free will restore a non-negative + * one. + */ atomic_add(delta, &sk->sk_rmem_alloc); mptcp_rmem_charge(sk, delta); + kfree_skb_partial(from, fragstolen); + + return true; }
@@@ -1538,9 -1544,8 +1544,9 @@@ void __mptcp_push_pending(struct sock * struct mptcp_sendmsg_info info = { .flags = flags, }; + bool do_check_data_fin = false; struct mptcp_data_frag *dfrag; - int len, copied = 0; + int len;
while ((dfrag = mptcp_send_head(sk))) { info.sent = dfrag->already_sent; @@@ -1575,8 -1580,8 +1581,8 @@@ goto out; }
+ do_check_data_fin = true; info.sent += ret; - copied += ret; len -= ret;
mptcp_update_post_push(msk, dfrag, ret); @@@ -1592,7 -1597,7 +1598,7 @@@ out /* ensure the rtx timer is running */ if (!mptcp_timer_pending(sk)) mptcp_reset_timer(sk); - if (copied) + if (do_check_data_fin) __mptcp_check_send_data_fin(sk); }
@@@ -2358,7 -2363,7 +2364,7 @@@ static void __mptcp_close_subflow(struc
might_sleep();
- list_for_each_entry_safe(subflow, tmp, &msk->conn_list, node) { + mptcp_for_each_subflow_safe(msk, subflow, tmp) { struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
if (inet_sk_state_load(ssk) != TCP_CLOSE) @@@ -2401,7 -2406,7 +2407,7 @@@ static void mptcp_check_fastclose(struc
mptcp_token_destroy(msk);
- list_for_each_entry_safe(subflow, tmp, &msk->conn_list, node) { + mptcp_for_each_subflow_safe(msk, subflow, tmp) { struct sock *tcp_sk = mptcp_subflow_tcp_sock(subflow); bool slow;
@@@ -3048,7 -3053,7 +3054,7 @@@ void mptcp_destroy_common(struct mptcp_ __mptcp_clear_xmit(sk);
/* join list will be eventually flushed (with rst) at sock lock release time */ - list_for_each_entry_safe(subflow, tmp, &msk->conn_list, node) + mptcp_for_each_subflow_safe(msk, subflow, tmp) __mptcp_close_ssk(sk, mptcp_subflow_tcp_sock(subflow), subflow, flags);
/* move to sk_receive_queue, sk_stream_kill_queues will purge it */ diff --combined net/netfilter/nf_tables_api.c index 4de0a30d2009,63c70141b3e5..a0653a8dfa82 --- a/net/netfilter/nf_tables_api.c +++ b/net/netfilter/nf_tables_api.c @@@ -742,7 -742,7 +742,7 @@@ __printf(2, 3) int nft_request_module(s return -ENOMEM;
req->done = false; - strlcpy(req->module, module_name, MODULE_NAME_LEN); + strscpy(req->module, module_name, MODULE_NAME_LEN); list_add_tail(&req->list, &nft_net->module_list);
return -EAGAIN; @@@ -2197,7 -2197,6 +2197,6 @@@ static int nf_tables_addchain(struct nf struct netlink_ext_ack *extack) { const struct nlattr * const *nla = ctx->nla; - struct nft_stats __percpu *stats = NULL; struct nft_table *table = ctx->table; struct nft_base_chain *basechain; struct net *net = ctx->net; @@@ -2212,6 -2211,7 +2211,7 @@@ return -EOVERFLOW;
if (nla[NFTA_CHAIN_HOOK]) { + struct nft_stats __percpu *stats = NULL; struct nft_chain_hook hook;
if (flags & NFT_CHAIN_BINDING) @@@ -2243,8 -2243,11 +2243,11 @@@ if (err < 0) { nft_chain_release_hook(&hook); kfree(basechain); + free_percpu(stats); return err; } + if (stats) + static_branch_inc(&nft_counters_enabled); } else { if (flags & NFT_CHAIN_BASE) return -EINVAL; @@@ -2319,9 -2322,6 +2322,6 @@@ goto err_unregister_hook; }
- if (stats) - static_branch_inc(&nft_counters_enabled); - table->use++;
return 0; diff --combined net/sched/cls_api.c index 5d0d57dc5cb7,51d175f3fbcb..50566db45949 --- a/net/sched/cls_api.c +++ b/net/sched/cls_api.c @@@ -1977,6 -1977,9 +1977,6 @@@ static int tc_new_tfilter(struct sk_buf bool rtnl_held = false; u32 flags;
- if (!netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN)) - return -EPERM; - replay: tp_created = 0;
@@@ -2134,6 -2137,7 +2134,7 @@@ }
if (chain->tmplt_ops && chain->tmplt_ops != tp->ops) { + tfilter_put(tp, fh); NL_SET_ERR_MSG(extack, "Chain template is set to a different filter kind"); err = -EINVAL; goto errout; @@@ -2205,6 -2209,9 +2206,6 @@@ static int tc_del_tfilter(struct sk_buf int err; bool rtnl_held = false;
- if (!netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN)) - return -EPERM; - err = nlmsg_parse_deprecated(n, sizeof(*t), tca, TCA_MAX, rtm_tca_policy, extack); if (err < 0) @@@ -2820,6 -2827,10 +2821,6 @@@ static int tc_ctl_chain(struct sk_buff unsigned long cl; int err;
- if (n->nlmsg_type != RTM_GETCHAIN && - !netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN)) - return -EPERM; - replay: q = NULL; err = nlmsg_parse_deprecated(n, sizeof(*t), tca, TCA_MAX, @@@ -3629,6 -3640,9 +3630,6 @@@ int tcf_qevent_init(struct tcf_qevent * if (err) return err;
- if (!block_index) - return 0; - qe->info.binder_type = binder_type; qe->info.chain_head_change = tcf_chain_head_change_dflt; qe->info.chain_head_change_priv = &qe->filter_chain; diff --combined net/sched/sch_taprio.c index b72c373edea0,86675a79da1e..8ae454052201 --- a/net/sched/sch_taprio.c +++ b/net/sched/sch_taprio.c @@@ -67,6 -67,7 +67,7 @@@ struct taprio_sched u32 flags; enum tk_offsets tk_offset; int clockid; + bool offloaded; atomic64_t picos_per_byte; /* Using picoseconds because for 10Gbps+ * speeds it's sub-nanoseconds per byte */ @@@ -78,6 -79,8 +79,6 @@@ struct sched_gate_list __rcu *admin_sched; struct hrtimer advance_timer; struct list_head taprio_list; - struct sk_buff *(*dequeue)(struct Qdisc *sch); - struct sk_buff *(*peek)(struct Qdisc *sch); u32 txtime_delay; };
@@@ -432,9 -435,6 +433,9 @@@ static int taprio_enqueue_one(struct sk return qdisc_enqueue(skb, child, to_free); }
+/* Will not be called in the full offload case, since the TX queues are + * attached to the Qdisc created using qdisc_create_dflt() + */ static int taprio_enqueue(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free) { @@@ -442,6 -442,11 +443,6 @@@ struct Qdisc *child; int queue;
- if (unlikely(FULL_OFFLOAD_IS_ENABLED(q->flags))) { - WARN_ONCE(1, "Trying to enqueue skb into the root of a taprio qdisc configured with full offload\n"); - return qdisc_drop(skb, sch, to_free); - } - queue = skb_get_queue_mapping(skb);
child = q->qdiscs[queue]; @@@ -450,10 -455,10 +451,10 @@@
/* Large packets might not be transmitted when the transmission duration * exceeds any configured interval. Therefore, segment the skb into - * smaller chunks. Skip it for the full offload case, as the driver - * and/or the hardware is expected to handle this. + * smaller chunks. Drivers with full offload are expected to handle + * this in hardware. */ - if (skb_is_gso(skb) && !FULL_OFFLOAD_IS_ENABLED(q->flags)) { + if (skb_is_gso(skb)) { unsigned int slen = 0, numsegs = 0, len = qdisc_pkt_len(skb); netdev_features_t features = netif_skb_features(skb); struct sk_buff *segs, *nskb; @@@ -487,10 -492,7 +488,10 @@@ return taprio_enqueue_one(skb, sch, child, to_free); }
-static struct sk_buff *taprio_peek_soft(struct Qdisc *sch) +/* Will not be called in the full offload case, since the TX queues are + * attached to the Qdisc created using qdisc_create_dflt() + */ +static struct sk_buff *taprio_peek(struct Qdisc *sch) { struct taprio_sched *q = qdisc_priv(sch); struct net_device *dev = qdisc_dev(sch); @@@ -534,6 -536,20 +535,6 @@@ return NULL; }
-static struct sk_buff *taprio_peek_offload(struct Qdisc *sch) -{ - WARN_ONCE(1, "Trying to peek into the root of a taprio qdisc configured with full offload\n"); - - return NULL; -} - -static struct sk_buff *taprio_peek(struct Qdisc *sch) -{ - struct taprio_sched *q = qdisc_priv(sch); - - return q->peek(sch); -} - static void taprio_set_budget(struct taprio_sched *q, struct sched_entry *entry) { atomic_set(&entry->budget, @@@ -541,10 -557,7 +542,10 @@@ atomic64_read(&q->picos_per_byte))); }
-static struct sk_buff *taprio_dequeue_soft(struct Qdisc *sch) +/* Will not be called in the full offload case, since the TX queues are + * attached to the Qdisc created using qdisc_create_dflt() + */ +static struct sk_buff *taprio_dequeue(struct Qdisc *sch) { struct taprio_sched *q = qdisc_priv(sch); struct net_device *dev = qdisc_dev(sch); @@@ -632,6 -645,20 +633,6 @@@ done return skb; }
-static struct sk_buff *taprio_dequeue_offload(struct Qdisc *sch) -{ - WARN_ONCE(1, "Trying to dequeue from the root of a taprio qdisc configured with full offload\n"); - - return NULL; -} - -static struct sk_buff *taprio_dequeue(struct Qdisc *sch) -{ - struct taprio_sched *q = qdisc_priv(sch); - - return q->dequeue(sch); -} - static bool should_restart_cycle(const struct sched_gate_list *oper, const struct sched_entry *entry) { @@@ -1167,10 -1194,16 +1168,10 @@@ static void taprio_offload_config_chang { struct sched_gate_list *oper, *admin;
- spin_lock(&q->current_entry_lock); - - oper = rcu_dereference_protected(q->oper_sched, - lockdep_is_held(&q->current_entry_lock)); - admin = rcu_dereference_protected(q->admin_sched, - lockdep_is_held(&q->current_entry_lock)); + oper = rtnl_dereference(q->oper_sched); + admin = rtnl_dereference(q->admin_sched);
switch_schedules(q, &admin, &oper); - - spin_unlock(&q->current_entry_lock); }
static u32 tc_map_to_queue_mask(struct net_device *dev, u32 tc_mask) @@@ -1247,6 -1280,8 +1248,8 @@@ static int taprio_enable_offload(struc goto done; }
+ q->offloaded = true; + done: taprio_offload_free(offload);
@@@ -1261,12 -1296,9 +1264,9 @@@ static int taprio_disable_offload(struc struct tc_taprio_qopt_offload *offload; int err;
- if (!FULL_OFFLOAD_IS_ENABLED(q->flags)) + if (!q->offloaded) return 0;
- if (!ops->ndo_setup_tc) - return -EOPNOTSUPP; - offload = taprio_offload_alloc(0); if (!offload) { NL_SET_ERR_MSG(extack, @@@ -1282,6 -1314,8 +1282,8 @@@ goto out; }
+ q->offloaded = false; + out: taprio_offload_free(offload);
@@@ -1458,8 -1492,10 +1460,8 @@@ static int taprio_change(struct Qdisc * } INIT_LIST_HEAD(&new_admin->entries);
- rcu_read_lock(); - oper = rcu_dereference(q->oper_sched); - admin = rcu_dereference(q->admin_sched); - rcu_read_unlock(); + oper = rtnl_dereference(q->oper_sched); + admin = rtnl_dereference(q->admin_sched);
/* no changes - no new mqprio settings */ if (!taprio_mqprio_cmp(dev, mqprio)) @@@ -1529,6 -1565,17 +1531,6 @@@ q->advance_timer.function = advance_sched; }
- if (FULL_OFFLOAD_IS_ENABLED(q->flags)) { - q->dequeue = taprio_dequeue_offload; - q->peek = taprio_peek_offload; - } else { - /* Be sure to always keep the function pointers - * in a consistent state. - */ - q->dequeue = taprio_dequeue_soft; - q->peek = taprio_peek_soft; - } - err = taprio_get_start_time(sch, new_admin, &start); if (err < 0) { NL_SET_ERR_MSG(extack, "Internal error: failed get start time"); @@@ -1591,13 -1638,14 +1593,13 @@@ static void taprio_reset(struct Qdisc * if (q->qdiscs[i]) qdisc_reset(q->qdiscs[i]); } - sch->qstats.backlog = 0; - sch->q.qlen = 0; }
static void taprio_destroy(struct Qdisc *sch) { struct taprio_sched *q = qdisc_priv(sch); struct net_device *dev = qdisc_dev(sch); + struct sched_gate_list *oper, *admin; unsigned int i;
spin_lock(&taprio_list_lock); @@@ -1621,14 -1669,11 +1623,14 @@@
netdev_reset_tc(dev);
- if (q->oper_sched) - call_rcu(&q->oper_sched->rcu, taprio_free_sched_cb); + oper = rtnl_dereference(q->oper_sched); + admin = rtnl_dereference(q->admin_sched); + + if (oper) + call_rcu(&oper->rcu, taprio_free_sched_cb);
- if (q->admin_sched) - call_rcu(&q->admin_sched->rcu, taprio_free_sched_cb); + if (admin) + call_rcu(&admin->rcu, taprio_free_sched_cb); }
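The taprio conversion replaces rcu_read_lock()/rcu_dereference() with rtnl_dereference() in paths that already run under the RTNL mutex: the writer lock alone makes the pointer stable, so no read-side critical section is needed and lockdep can verify the locking claim. A user-space model of the idea, with a pthread mutex standing in for RTNL:

    #include <pthread.h>
    #include <stdio.h>

    /* Model of rtnl_dereference(): when the caller already holds the
     * writer lock, the pointer can be read directly. Real code asserts
     * lock ownership via lockdep; here the rule is only documented. */
    static pthread_mutex_t rtnl_model = PTHREAD_MUTEX_INITIALIZER;
    static int sched_storage = 7;
    static int *oper_sched; /* protected by rtnl_model */

    static int *rtnl_dereference_model(int **pp)
    {
        return *pp; /* caller must hold rtnl_model */
    }

    int main(void)
    {
        pthread_mutex_lock(&rtnl_model);
        oper_sched = &sched_storage;
        printf("oper sched = %d\n", *rtnl_dereference_model(&oper_sched));
        pthread_mutex_unlock(&rtnl_model);
        return 0;
    }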
static int taprio_init(struct Qdisc *sch, struct nlattr *opt, @@@ -1643,6 -1688,9 +1645,6 @@@ hrtimer_init(&q->advance_timer, CLOCK_TAI, HRTIMER_MODE_ABS); q->advance_timer.function = advance_sched;
- q->dequeue = taprio_dequeue_soft; - q->peek = taprio_peek_soft; - q->root = sch;
/* We only support static clockids. Use an invalid value as default @@@ -1655,15 -1703,11 +1657,15 @@@ list_add(&q->taprio_list, &taprio_list); spin_unlock(&taprio_list_lock);
- if (sch->parent != TC_H_ROOT) + if (sch->parent != TC_H_ROOT) { + NL_SET_ERR_MSG_MOD(extack, "Can only be attached as root qdisc"); return -EOPNOTSUPP; + }
- if (!netif_is_multiqueue(dev)) + if (!netif_is_multiqueue(dev)) { + NL_SET_ERR_MSG_MOD(extack, "Multi-queue device is required"); return -EOPNOTSUPP; + }
/* pre-allocate qdisc, attachment can't fail */ q->qdiscs = kcalloc(dev->num_tx_queues, @@@ -1844,8 -1888,9 +1846,8 @@@ static int taprio_dump(struct Qdisc *sc struct nlattr *nest, *sched_nest; unsigned int i;
- rcu_read_lock(); - oper = rcu_dereference(q->oper_sched); - admin = rcu_dereference(q->admin_sched); + oper = rtnl_dereference(q->oper_sched); + admin = rtnl_dereference(q->admin_sched);
opt.num_tc = netdev_get_num_tc(dev); memcpy(opt.prio_tc_map, dev->prio_tc_map, sizeof(opt.prio_tc_map)); @@@ -1889,6 -1934,8 +1891,6 @@@ nla_nest_end(skb, sched_nest);
done: - rcu_read_unlock(); - return nla_nest_end(skb, nest);
admin_error: @@@ -1898,17 -1945,20 +1900,19 @@@ options_error nla_nest_cancel(skb, nest);
start_error: - rcu_read_unlock(); return -ENOSPC; }
static struct Qdisc *taprio_leaf(struct Qdisc *sch, unsigned long cl) { - struct netdev_queue *dev_queue = taprio_queue_get(sch, cl); + struct taprio_sched *q = qdisc_priv(sch); + struct net_device *dev = qdisc_dev(sch); + unsigned int ntx = cl - 1;
- if (!dev_queue) + if (ntx >= dev->num_tx_queues) return NULL;
- return dev_queue->qdisc_sleeping; + return q->qdiscs[ntx]; }
static unsigned long taprio_find(struct Qdisc *sch, u32 classid) diff --combined net/smc/smc_core.c index ea41f224f644,df89c2e08cbf..e6ee797640b4 --- a/net/smc/smc_core.c +++ b/net/smc/smc_core.c @@@ -2239,7 -2239,7 +2239,7 @@@ out static int smcr_buf_map_usable_links(struct smc_link_group *lgr, struct smc_buf_desc *buf_desc, bool is_rmb) { - int i, rc = 0; + int i, rc = 0, cnt = 0;
/* protect against parallel link reconfiguration */ mutex_lock(&lgr->llc_conf_mutex); @@@ -2252,9 -2252,12 +2252,12 @@@ rc = -ENOMEM; goto out; } + cnt++; } out: mutex_unlock(&lgr->llc_conf_mutex); + if (!rc && !cnt) + rc = -EINVAL; return rc; }
@@@ -2307,10 -2310,10 +2310,10 @@@ static int __smc_buf_create(struct smc_
if (is_rmb) /* use socket recv buffer size (w/o overhead) as start value */ - sk_buf_size = smc->sk.sk_rcvbuf / 2; + sk_buf_size = smc->sk.sk_rcvbuf; else /* use socket send buffer size (w/o overhead) as start value */ - sk_buf_size = smc->sk.sk_sndbuf / 2; + sk_buf_size = smc->sk.sk_sndbuf;
for (bufsize_short = smc_compress_bufsize(sk_buf_size, is_smcd, is_rmb); bufsize_short >= 0; bufsize_short--) { @@@ -2369,7 -2372,7 +2372,7 @@@ if (is_rmb) { conn->rmb_desc = buf_desc; conn->rmbe_size_short = bufsize_short; - smc->sk.sk_rcvbuf = bufsize * 2; + smc->sk.sk_rcvbuf = bufsize; atomic_set(&conn->bytes_to_rcv, 0); conn->rmbe_update_limit = smc_rmb_wnd_update_limit(buf_desc->len); @@@ -2377,7 -2380,7 +2380,7 @@@ smc_ism_set_conn(conn); /* map RMB/smcd_dev to conn */ } else { conn->sndbuf_desc = buf_desc; - smc->sk.sk_sndbuf = bufsize * 2; + smc->sk.sk_sndbuf = bufsize; atomic_set(&conn->sndbuf_space, bufsize); } return 0; diff --combined tools/testing/selftests/drivers/net/bonding/Makefile index d209f7a98b6c,1d866658e541..d14846fcf3d1 --- a/tools/testing/selftests/drivers/net/bonding/Makefile +++ b/tools/testing/selftests/drivers/net/bonding/Makefile @@@ -1,7 -1,10 +1,11 @@@ # SPDX-License-Identifier: GPL-2.0 # Makefile for net selftests
- TEST_PROGS := bond-break-lacpdu-tx.sh - TEST_PROGS += bond-lladdr-target.sh -TEST_PROGS := bond-break-lacpdu-tx.sh \ - dev_addr_lists.sh \ - bond-arp-interval-causes-panic.sh ++TEST_PROGS := \ ++ bond-arp-interval-causes-panic.sh \ ++ bond-break-lacpdu-tx.sh \ ++ dev_addr_lists.sh + + TEST_FILES := lag_lib.sh
include ../../../lib.mk