The following commit has been merged in the master branch:

commit b3f7e3f23a763ccaae7b52d88d2c91e66c80d406
Merge: 4ee9e6e027c06eb1dd1cdbe025d461e407ece755 7008ee121089b8193aea918b98850fe87d996508
Author: David S. Miller <davem@davemloft.net>
Date:   Sun Jan 19 22:10:04 2020 +0100
Merge ra.kernel.org:/pub/scm/linux/kernel/git/netdev/net
diff --combined MAINTAINERS
index 2549f10eb0b1,cf6ccca6e61c..702382b89c37
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@@ -720,7 -720,7 +720,7 @@@ F:	Documentation/devicetree/bindings/i2
  F:	drivers/i2c/busses/i2c-altera.c
  
  ALTERA MAILBOX DRIVER
- M:	Ley Foon Tan <lftan@altera.com>
+ M:	Ley Foon Tan <ley.foon.tan@intel.com>
  L:	nios2-dev@lists.rocketboards.org (moderated for non-subscribers)
  S:	Maintained
  F:	drivers/mailbox/mailbox-altera.c
@@@ -1407,7 -1407,7 +1407,7 @@@ T:	git git://git.kernel.org/pub/scm/lin
  ARM/ACTIONS SEMI ARCHITECTURE
  M:	Andreas Färber <afaerber@suse.de>
- R:	Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
+ M:	Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
  L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
  S:	Maintained
  N:	owl
@@@ -3150,7 -3150,7 +3150,7 @@@ S:	Maintaine
  F:	arch/mips/net/
  
  BPF JIT for NFP NICs
- M:	Jakub Kicinski <jakub.kicinski@netronome.com>
+ M:	Jakub Kicinski <kuba@kernel.org>
  L:	netdev@vger.kernel.org
  L:	bpf@vger.kernel.org
  S:	Supported
@@@ -4848,7 -4848,6 +4848,7 @@@ S:	Supporte
  F:	net/core/devlink.c
  F:	include/net/devlink.h
  F:	include/uapi/linux/devlink.h
 +F:	Documentation/networking/devlink
  
  DIALOG SEMICONDUCTOR DRIVERS
  M:	Support Opensource <support.opensource@diasemi.com>
@@@ -9890,7 -9889,7 +9890,7 @@@ S:	Maintaine
  F:	drivers/net/dsa/mv88e6xxx/
  F:	include/linux/platform_data/mv88e6xxx.h
  F:	Documentation/devicetree/bindings/net/dsa/marvell.txt
 -F:	Documentation/networking/devlink-params-mv88e6xxx.txt
 +F:	Documentation/networking/devlink/mv88e6xxx.rst
  
  MARVELL ARMADA DRM SUPPORT
  M:	Russell King <linux@armlinux.org.uk>
@@@ -9961,7 -9960,7 +9961,7 @@@ F:	drivers/net/ethernet/marvell/mvneta.
  MARVELL MWIFIEX WIRELESS DRIVER
  M:	Amitkumar Karwar <amitkarwar@gmail.com>
  M:	Nishant Sarmukadam <nishants@marvell.com>
 -M:	Ganapathi Bhat <gbhat@marvell.com>
 +M:	Ganapathi Bhat <ganapathi.bhat@nxp.com>
  M:	Xinming Hu <huxinming820@gmail.com>
  L:	linux-wireless@vger.kernel.org
  S:	Maintained
@@@ -11432,7 -11431,7 +11432,7 @@@ F:	include/uapi/linux/netrom.
  F:	net/netrom/
  
  NETRONOME ETHERNET DRIVERS
- M:	Jakub Kicinski <jakub.kicinski@netronome.com>
+ M:	Jakub Kicinski <kuba@kernel.org>
  L:	oss-drivers@netronome.com
  S:	Maintained
  F:	drivers/net/ethernet/netronome/
@@@ -11574,16 -11573,6 +11574,16 @@@ F:	net/ipv6/calipso.
  F:	net/netfilter/xt_CONNSECMARK.c
  F:	net/netfilter/xt_SECMARK.c
  
 +NETWORKING [MPTCP]
 +M:	Mat Martineau <mathew.j.martineau@linux.intel.com>
 +M:	Matthieu Baerts <matthieu.baerts@tessares.net>
 +L:	netdev@vger.kernel.org
 +L:	mptcp@lists.01.org
 +W:	https://github.com/multipath-tcp/mptcp_net-next/wiki
 +B:	https://github.com/multipath-tcp/mptcp_net-next/issues
 +S:	Maintained
 +F:	include/net/mptcp.h
 +
  NETWORKING [TCP]
  M:	Eric Dumazet <edumazet@google.com>
  L:	netdev@vger.kernel.org
@@@ -11602,7 -11591,7 +11602,7 @@@ M:	Boris Pismenny <borisp@mellanox.com
  M:	Aviad Yehezkel <aviadye@mellanox.com>
  M:	John Fastabend <john.fastabend@gmail.com>
  M:	Daniel Borkmann <daniel@iogearbox.net>
- M:	Jakub Kicinski <jakub.kicinski@netronome.com>
+ M:	Jakub Kicinski <kuba@kernel.org>
  L:	netdev@vger.kernel.org
  S:	Maintained
  F:	net/tls/*
@@@ -11614,7 -11603,7 +11614,7 @@@ L:	linux-wireless@vger.kernel.or
  Q:	http://patchwork.kernel.org/project/linux-wireless/list/
  
  NETDEVSIM
- M:	Jakub Kicinski <jakub.kicinski@netronome.com>
+ M:	Jakub Kicinski <kuba@kernel.org>
  S:	Maintained
  F:	drivers/net/netdevsim/*
@@@ -11691,7 -11680,7 +11691,7 @@@ F:	Documentation/scsi/NinjaSCSI.tx
  F:	drivers/scsi/nsp32*
  
  NIOS2 ARCHITECTURE
- M:	Ley Foon Tan <lftan@altera.com>
+ M:	Ley Foon Tan <ley.foon.tan@intel.com>
  L:	nios2-dev@lists.rocketboards.org (moderated for non-subscribers)
  T:	git git://git.kernel.org/pub/scm/linux/kernel/git/lftan/nios2.git
  S:	Maintained
@@@ -12575,7 -12564,7 +12575,7 @@@ F:	Documentation/devicetree/bindings/pc
  F:	drivers/pci/controller/pci-aardvark.c
  
  PCI DRIVER FOR ALTERA PCIE IP
- M:	Ley Foon Tan <lftan@altera.com>
+ M:	Ley Foon Tan <ley.foon.tan@intel.com>
  L:	rfi@lists.rocketboards.org (moderated for non-subscribers)
  L:	linux-pci@vger.kernel.org
  S:	Supported
@@@ -12754,7 -12743,7 +12754,7 @@@ S:	Supporte
  F:	Documentation/PCI/pci-error-recovery.rst
  
  PCI MSI DRIVER FOR ALTERA MSI IP
- M:	Ley Foon Tan <lftan@altera.com>
+ M:	Ley Foon Tan <ley.foon.tan@intel.com>
  L:	rfi@lists.rocketboards.org (moderated for non-subscribers)
  L:	linux-pci@vger.kernel.org
  S:	Supported
@@@ -13660,13 -13649,6 +13660,13 @@@ T:	git git://git.kernel.org/pub/scm/lin
  S:	Supported
  F:	drivers/net/wireless/ath/ath10k/
  
 +QUALCOMM ATHEROS ATH11K WIRELESS DRIVER
 +M:	Kalle Valo <kvalo@codeaurora.org>
 +L:	ath11k@lists.infradead.org
 +T:	git git://git.kernel.org/pub/scm/linux/kernel/git/kvalo/ath.git
 +S:	Supported
 +F:	drivers/net/wireless/ath/ath11k/
 +
  QUALCOMM ATHEROS ATH9K WIRELESS DRIVER
  M:	QCA ath9k Development <ath9k-devel@qca.qualcomm.com>
  L:	linux-wireless@vger.kernel.org
@@@ -15786,7 -15768,6 +15786,7 @@@ M:	Jose Abreu <joabreu@synopsys.com
  L:	netdev@vger.kernel.org
  W:	http://www.stlinux.com
  S:	Supported
 +F:	Documentation/networking/device_drivers/stmicro/
  F:	drivers/net/ethernet/stmicro/stmmac/
  
  SUN3/3X
@@@ -17516,7 -17497,6 +17516,7 @@@ F:	net/vmw_vsock/diag.
  F:	net/vmw_vsock/af_vsock_tap.c
  F:	net/vmw_vsock/virtio_transport_common.c
  F:	net/vmw_vsock/virtio_transport.c
 +F:	net/vmw_vsock/vsock_loopback.c
  F:	drivers/net/vsockmon.c
  F:	drivers/vhost/vsock.c
  F:	tools/testing/vsock/
@@@ -17887,14 -17867,6 +17887,14 @@@ L:	linux-gpio@vger.kernel.or
  S:	Maintained
  F:	drivers/gpio/gpio-ws16c48.c
  
 +WIREGUARD SECURE NETWORK TUNNEL
 +M:	Jason A. Donenfeld <Jason@zx2c4.com>
 +S:	Maintained
 +F:	drivers/net/wireguard/
 +F:	tools/testing/selftests/wireguard/
 +L:	wireguard@lists.zx2c4.com
 +L:	netdev@vger.kernel.org
 +
  WISTRON LAPTOP BUTTON DRIVER
  M:	Miloslav Trmac <mitr@volny.cz>
  S:	Maintained
@@@ -18070,7 -18042,7 +18070,7 @@@ XDP (eXpress Data Path
  M:	Alexei Starovoitov <ast@kernel.org>
  M:	Daniel Borkmann <daniel@iogearbox.net>
  M:	David S. Miller <davem@davemloft.net>
- M:	Jakub Kicinski <jakub.kicinski@netronome.com>
+ M:	Jakub Kicinski <kuba@kernel.org>
  M:	Jesper Dangaard Brouer <hawk@kernel.org>
  M:	John Fastabend <john.fastabend@gmail.com>
  L:	netdev@vger.kernel.org
diff --combined arch/arm64/Kconfig
index 29d03459de20,e688dfad0b72..e2c758df08b4
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@@ -69,7 -69,6 +69,7 @@@ config ARM6
  	select ARCH_SUPPORTS_INT128 if CC_HAS_INT128 && (GCC_VERSION >= 50000 || CC_IS_CLANG)
  	select ARCH_SUPPORTS_NUMA_BALANCING
  	select ARCH_WANT_COMPAT_IPC_PARSE_VERSION if COMPAT
 +	select ARCH_WANT_DEFAULT_BPF_JIT
  	select ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT
  	select ARCH_WANT_FRAME_POINTERS
  	select ARCH_WANT_HUGE_PMD_SHARE if ARM64_4K_PAGES || (ARM64_16K_PAGES && !ARM64_VA_BITS_36)
@@@ -139,6 -138,7 +139,7 @@@
  	select HAVE_CMPXCHG_DOUBLE
  	select HAVE_CMPXCHG_LOCAL
  	select HAVE_CONTEXT_TRACKING
+ 	select HAVE_COPY_THREAD_TLS
  	select HAVE_DEBUG_BUGVERBOSE
  	select HAVE_DEBUG_KMEMLEAK
  	select HAVE_DMA_CONTIGUOUS
diff --combined drivers/net/dsa/sja1105/sja1105_main.c
index 784e6b8166a0,bb91f3d17cf2..03ba6d25f7fe
--- a/drivers/net/dsa/sja1105/sja1105_main.c
+++ b/drivers/net/dsa/sja1105/sja1105_main.c
@@@ -426,6 -426,14 +426,6 @@@ static int sja1105_init_general_params(
  		.tpid2 = ETH_P_SJA1105,
  	};
  	struct sja1105_table *table;
 -	int i, k = 0;
 -
 -	for (i = 0; i < SJA1105_NUM_PORTS; i++) {
 -		if (dsa_is_dsa_port(priv->ds, i))
 -			default_general_params.casc_port = i;
 -		else if (dsa_is_user_port(priv->ds, i))
 -			priv->ports[i].mgmt_slot = k++;
 -	}
table = &priv->static_config.tables[BLK_IDX_GENERAL_PARAMS];
@@@ -574,7 -582,7 +574,7 @@@ static int sja1105_parse_ports_node(str struct device *dev = &priv->spidev->dev; struct device_node *child;
- for_each_child_of_node(ports_node, child) { + for_each_available_child_of_node(ports_node, child) { struct device_node *phy_node; phy_interface_t phy_mode; u32 index; @@@ -1534,8 -1542,7 +1534,8 @@@ static int sja1105_setup_8021q_tagging( }
static enum dsa_tag_protocol -sja1105_get_tag_protocol(struct dsa_switch *ds, int port) +sja1105_get_tag_protocol(struct dsa_switch *ds, int port, + enum dsa_tag_protocol mp) { return DSA_TAG_PROTO_SJA1105; } @@@ -1733,16 -1740,6 +1733,16 @@@ static int sja1105_setup(struct dsa_swi static void sja1105_teardown(struct dsa_switch *ds) { struct sja1105_private *priv = ds->priv; + int port; + + for (port = 0; port < SJA1105_NUM_PORTS; port++) { + struct sja1105_port *sp = &priv->ports[port]; + + if (!dsa_is_user_port(ds, port)) + continue; + + kthread_destroy_worker(sp->xmit_worker); + }
sja1105_tas_teardown(ds); sja1105_ptp_clock_unregister(ds); @@@ -1764,18 -1761,6 +1764,18 @@@ static int sja1105_port_enable(struct d return 0; }
+static void sja1105_port_disable(struct dsa_switch *ds, int port) +{ + struct sja1105_private *priv = ds->priv; + struct sja1105_port *sp = &priv->ports[port]; + + if (!dsa_is_user_port(ds, port)) + return; + + kthread_cancel_work_sync(&sp->xmit_work); + skb_queue_purge(&sp->xmit_queue); +} + static int sja1105_mgmt_xmit(struct dsa_switch *ds, int port, int slot, struct sk_buff *skb, bool takets) { @@@ -1834,36 -1819,47 +1834,36 @@@ return NETDEV_TX_OK; }
 +#define work_to_port(work) \
 +		container_of((work), struct sja1105_port, xmit_work)
 +#define tagger_to_sja1105(t) \
 +		container_of((t), struct sja1105_private, tagger_data)
 +
  /* Deferred work is unfortunately necessary because setting up the management
   * route cannot be done from atomic context (SPI transfer takes a sleepable
   * lock on the bus)
   */
 -static netdev_tx_t sja1105_port_deferred_xmit(struct dsa_switch *ds, int port,
 -					      struct sk_buff *skb)
 +static void sja1105_port_deferred_xmit(struct kthread_work *work)
  {
 -	struct sja1105_private *priv = ds->priv;
 -	struct sja1105_port *sp = &priv->ports[port];
 -	int slot = sp->mgmt_slot;
 -	struct sk_buff *clone;
 -
 -	/* The tragic fact about the switch having 4x2 slots for installing
 -	 * management routes is that all of them except one are actually
 -	 * useless.
 -	 * If 2 slots are simultaneously configured for two BPDUs sent to the
 -	 * same (multicast) DMAC but on different egress ports, the switch
 -	 * would confuse them and redirect first frame it receives on the CPU
 -	 * port towards the port configured on the numerically first slot
 -	 * (therefore wrong port), then second received frame on second slot
 -	 * (also wrong port).
 -	 * So for all practical purposes, there needs to be a lock that
 -	 * prevents that from happening. The slot used here is utterly useless
 -	 * (could have simply been 0 just as fine), but we are doing it
 -	 * nonetheless, in case a smarter idea ever comes up in the future.
 -	 */
 -	mutex_lock(&priv->mgmt_lock);
 +	struct sja1105_port *sp = work_to_port(work);
 +	struct sja1105_tagger_data *tagger_data = sp->data;
 +	struct sja1105_private *priv = tagger_to_sja1105(tagger_data);
 +	int port = sp - priv->ports;
 +	struct sk_buff *skb;
 -	/* The clone, if there, was made by dsa_skb_tx_timestamp */
 -	clone = DSA_SKB_CB(skb)->clone;
 +	while ((skb = skb_dequeue(&sp->xmit_queue)) != NULL) {
 +		struct sk_buff *clone = DSA_SKB_CB(skb)->clone;
  
 -	sja1105_mgmt_xmit(ds, port, slot, skb, !!clone);
 +		mutex_lock(&priv->mgmt_lock);
  
 -	if (!clone)
 -		goto out;
 +		sja1105_mgmt_xmit(priv->ds, port, 0, skb, !!clone);
  
 -	sja1105_ptp_txtstamp_skb(ds, port, clone);
 +		/* The clone, if there, was made by dsa_skb_tx_timestamp */
 +		if (clone)
 +			sja1105_ptp_txtstamp_skb(priv->ds, port, clone);
  
 -out:
 -	mutex_unlock(&priv->mgmt_lock);
 -	return NETDEV_TX_OK;
 +		mutex_unlock(&priv->mgmt_lock);
 +	}
  }
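
The rework above moves management-frame transmission onto a per-port kthread worker. For reference, the kthread-worker API it relies on follows this general life cycle (a minimal sketch, not driver code; my_deferred_fn and the variable names are illustrative):

    #include <linux/kthread.h>

    static struct kthread_worker *worker;
    static struct kthread_work work;

    static void my_deferred_fn(struct kthread_work *w)
    {
            /* Runs in the worker thread's process context, so sleeping
             * operations (SPI transfers, mutexes) are allowed here.
             */
    }

    /* Setup: spawn the worker thread and bind the handler. */
    worker = kthread_create_worker(0, "my_xmit");
    if (IS_ERR(worker))
            return PTR_ERR(worker);
    kthread_init_work(&work, my_deferred_fn);

    /* Hot path: queueing is cheap and safe from atomic context. */
    kthread_queue_work(worker, &work);

    /* Teardown: flushes outstanding work, then stops the thread. */
    kthread_destroy_worker(worker);
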
/* The MAXAGE setting belongs to the L2 Forwarding Parameters table, @@@ -1994,7 -1990,6 +1994,7 @@@ static const struct dsa_switch_ops sja1 .get_sset_count = sja1105_get_sset_count, .get_ts_info = sja1105_get_ts_info, .port_enable = sja1105_port_enable, + .port_disable = sja1105_port_disable, .port_fdb_dump = sja1105_fdb_dump, .port_fdb_add = sja1105_fdb_add, .port_fdb_del = sja1105_fdb_del, @@@ -2008,6 -2003,7 +2008,6 @@@ .port_mdb_prepare = sja1105_mdb_prepare, .port_mdb_add = sja1105_mdb_add, .port_mdb_del = sja1105_mdb_del, - .port_deferred_xmit = sja1105_port_deferred_xmit, .port_hwtstamp_get = sja1105_hwtstamp_get, .port_hwtstamp_set = sja1105_hwtstamp_set, .port_rxtstamp = sja1105_port_rxtstamp, @@@ -2059,7 -2055,7 +2059,7 @@@ static int sja1105_probe(struct spi_dev struct device *dev = &spi->dev; struct sja1105_private *priv; struct dsa_switch *ds; - int rc, i; + int rc, port;
if (!dev->of_node) { dev_err(dev, "No DTS bindings for SJA1105 driver\n"); @@@ -2124,42 -2120,15 +2124,42 @@@ return rc;
/* Connections between dsa_port and sja1105_port */ - for (i = 0; i < SJA1105_NUM_PORTS; i++) { - struct sja1105_port *sp = &priv->ports[i]; + for (port = 0; port < SJA1105_NUM_PORTS; port++) { + struct sja1105_port *sp = &priv->ports[port]; + struct dsa_port *dp = dsa_to_port(ds, port); + struct net_device *slave;
- dsa_to_port(ds, i)->priv = sp; - sp->dp = dsa_to_port(ds, i); + if (!dsa_is_user_port(ds, port)) + continue; + + dp->priv = sp; + sp->dp = dp; sp->data = tagger_data; + slave = dp->slave; + kthread_init_work(&sp->xmit_work, sja1105_port_deferred_xmit); + sp->xmit_worker = kthread_create_worker(0, "%s_xmit", + slave->name); + if (IS_ERR(sp->xmit_worker)) { + rc = PTR_ERR(sp->xmit_worker); + dev_err(ds->dev, + "failed to create deferred xmit thread: %d\n", + rc); + goto out; + } + skb_queue_head_init(&sp->xmit_queue); }
return 0; +out: + while (port-- > 0) { + struct sja1105_port *sp = &priv->ports[port]; + + if (!dsa_is_user_port(ds, port)) + continue; + + kthread_destroy_worker(sp->xmit_worker); + } + return rc; }
static int sja1105_remove(struct spi_device *spi) diff --combined drivers/net/ethernet/broadcom/bcmsysport.c index 1907e47fd0af,d6b1a153f9df..f07ac0e0af59 --- a/drivers/net/ethernet/broadcom/bcmsysport.c +++ b/drivers/net/ethernet/broadcom/bcmsysport.c @@@ -1354,7 -1354,7 +1354,7 @@@ out return ret; }
 -static void bcm_sysport_tx_timeout(struct net_device *dev)
 +static void bcm_sysport_tx_timeout(struct net_device *dev, unsigned int txqueue)
  {
  	netdev_warn(dev, "transmit timeout!\n");
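
The added unsigned int txqueue parameter here, and in the matching tx_timeout hunks for bnxt, hns, hns3, e1000e, iavf, ixgbe and ixgbevf further down, comes from the tree-wide change that made the .ndo_tx_timeout callback report which TX queue stalled. A driver can take advantage of it roughly like this (sketch; my_priv and my_reset_queue are made-up names):

    static void my_tx_timeout(struct net_device *dev, unsigned int txqueue)
    {
            struct my_priv *priv = netdev_priv(dev);

            /* Only the queue the stack flagged needs attention, instead
             * of scanning every TX ring as drivers used to do.
             */
            netdev_warn(dev, "TX timeout on queue %u\n", txqueue);
            my_reset_queue(priv, txqueue);
    }
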
@@@ -2323,7 -2323,7 +2323,7 @@@ static int bcm_sysport_map_queues(struc ring->switch_queue = qp; ring->switch_port = port; ring->inspect = true; - priv->ring_map[q + port * num_tx_queues] = ring; + priv->ring_map[qp + port * num_tx_queues] = ring; qp++; }
@@@ -2338,7 -2338,7 +2338,7 @@@ static int bcm_sysport_unmap_queues(str struct net_device *slave_dev; unsigned int num_tx_queues; struct net_device *dev; - unsigned int q, port; + unsigned int q, qp, port;
priv = container_of(nb, struct bcm_sysport_priv, dsa_notifier); if (priv->netdev != info->master) @@@ -2364,7 -2364,8 +2364,8 @@@ continue;
ring->inspect = false; - priv->ring_map[q + port * num_tx_queues] = NULL; + qp = ring->switch_queue; + priv->ring_map[qp + port * num_tx_queues] = NULL; }
return 0; @@@ -2427,14 -2428,6 +2428,14 @@@ static int bcm_sysport_probe(struct pla if (!of_id || !of_id->data) return -EINVAL;
 +	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(40));
 +	if (ret)
 +		ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
 +	if (ret) {
 +		dev_err(&pdev->dev, "unable to set DMA mask: %d\n", ret);
 +		return ret;
 +	}
 +
  	/* Fairly quickly we need to know the type of adapter we have */
  	params = of_id->data;
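
The probe hunk above uses the standard DMA-mask fallback idiom: request the widest mask the hardware supports (40 bits for SYSTEMPORT) and retry with 32 bits before giving up. In generic form (a sketch, assuming 32-bit DMA is always attainable on the platforms the driver runs on):

    #include <linux/dma-mapping.h>

    static int my_set_dma_mask(struct device *dev)
    {
            int ret;

            /* Prefer the device's full addressing capability... */
            ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(40));
            if (ret)
                    /* ...then fall back to the lowest common denominator. */
                    ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
            return ret;
    }
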
diff --combined drivers/net/ethernet/broadcom/bnxt/bnxt.c index 33eb8cd6551e,e6f18f6070ef..198c69dceeef --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@@ -944,7 -944,6 +944,7 @@@ static struct sk_buff *bnxt_rx_page_skb dma_addr -= bp->rx_dma_offset; dma_unmap_page_attrs(&bp->pdev->dev, dma_addr, PAGE_SIZE, bp->rx_dir, DMA_ATTR_WEAK_ORDERING); + page_pool_release_page(rxr->page_pool, page);
if (unlikely(!payload)) payload = eth_get_headlen(bp->dev, data_ptr, len); @@@ -9976,7 -9975,7 +9976,7 @@@ static void bnxt_reset_task(struct bnx } }
 -static void bnxt_tx_timeout(struct net_device *dev)
 +static void bnxt_tx_timeout(struct net_device *dev, unsigned int txqueue)
  {
  	struct bnxt *bp = netdev_priv(dev);
@@@ -10823,7 -10822,6 +10823,7 @@@ static void bnxt_fw_reset_task(struct w smp_mb__before_atomic(); clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state); bnxt_ulp_start(bp, rc); + bnxt_dl_health_recovery_done(bp); bnxt_dl_health_status_update(bp, true); rtnl_unlock(); break; @@@ -11067,11 -11065,23 +11067,23 @@@ static bool bnxt_fltr_match(struct bnxt struct flow_keys *keys1 = &f1->fkeys; struct flow_keys *keys2 = &f2->fkeys;
- if (keys1->addrs.v4addrs.src == keys2->addrs.v4addrs.src && - keys1->addrs.v4addrs.dst == keys2->addrs.v4addrs.dst && - keys1->ports.ports == keys2->ports.ports && - keys1->basic.ip_proto == keys2->basic.ip_proto && - keys1->basic.n_proto == keys2->basic.n_proto && + if (keys1->basic.n_proto != keys2->basic.n_proto || + keys1->basic.ip_proto != keys2->basic.ip_proto) + return false; + + if (keys1->basic.n_proto == htons(ETH_P_IP)) { + if (keys1->addrs.v4addrs.src != keys2->addrs.v4addrs.src || + keys1->addrs.v4addrs.dst != keys2->addrs.v4addrs.dst) + return false; + } else { + if (memcmp(&keys1->addrs.v6addrs.src, &keys2->addrs.v6addrs.src, + sizeof(keys1->addrs.v6addrs.src)) || + memcmp(&keys1->addrs.v6addrs.dst, &keys2->addrs.v6addrs.dst, + sizeof(keys1->addrs.v6addrs.dst))) + return false; + } + + if (keys1->ports.ports == keys2->ports.ports && keys1->control.flags == keys2->control.flags && ether_addr_equal(f1->src_mac_addr, f2->src_mac_addr) && ether_addr_equal(f1->dst_mac_addr, f2->dst_mac_addr)) @@@ -11363,7 -11373,7 +11375,7 @@@ int bnxt_get_port_parent_id(struct net_ return -EOPNOTSUPP;
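
The bnxt_fltr_match() rewrite above dispatches on n_proto before comparing addresses; the old code compared only the IPv4 members of the address union, so two distinct IPv6 flows could be reported as equal. Since struct in6_addr cannot be compared with == in C, the IPv6 arm compares the raw bytes (sketch of the core idiom; k1, k2 and match are illustrative names):

    if (k1->basic.n_proto == htons(ETH_P_IP)) {
            match = k1->addrs.v4addrs.src == k2->addrs.v4addrs.src &&
                    k1->addrs.v4addrs.dst == k2->addrs.v4addrs.dst;
    } else {
            match = !memcmp(&k1->addrs.v6addrs.src, &k2->addrs.v6addrs.src,
                            sizeof(struct in6_addr)) &&
                    !memcmp(&k1->addrs.v6addrs.dst, &k2->addrs.v6addrs.dst,
                            sizeof(struct in6_addr));
    }
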
  	/* The PF and its VF-reps only support the switchdev framework */
- 	if (!BNXT_PF(bp))
+ 	if (!BNXT_PF(bp) || !(bp->flags & BNXT_FLAG_DSN_VALID))
  		return -EOPNOTSUPP;
ppid->id_len = sizeof(bp->switch_id); @@@ -11736,6 -11746,7 +11748,7 @@@ static int bnxt_pcie_dsn_get(struct bnx put_unaligned_le32(dw, &dsn[0]); pci_read_config_dword(pdev, pos + 4, &dw); put_unaligned_le32(dw, &dsn[4]); + bp->flags |= BNXT_FLAG_DSN_VALID; return 0; }
@@@ -11847,9 -11858,7 +11860,7 @@@ static int bnxt_init_one(struct pci_de
if (BNXT_PF(bp)) { /* Read the adapter's DSN to use as the eswitch switch_id */ - rc = bnxt_pcie_dsn_get(bp, bp->switch_id); - if (rc) - goto init_err_pci_clean; + bnxt_pcie_dsn_get(bp, bp->switch_id); }
/* MTU range: 60 - FW defined max */ diff --combined drivers/net/ethernet/cadence/macb_main.c index 1c547ee0d444,f7d87c71aaa9..7a2fe63d1136 --- a/drivers/net/ethernet/cadence/macb_main.c +++ b/drivers/net/ethernet/cadence/macb_main.c @@@ -337,30 -337,11 +337,30 @@@ static int macb_mdio_read(struct mii_bu if (status < 0) goto mdio_read_exit;
- macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_SOF) - | MACB_BF(RW, MACB_MAN_READ) - | MACB_BF(PHYA, mii_id) - | MACB_BF(REGA, regnum) - | MACB_BF(CODE, MACB_MAN_CODE))); + if (regnum & MII_ADDR_C45) { + macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_C45_SOF) + | MACB_BF(RW, MACB_MAN_C45_ADDR) + | MACB_BF(PHYA, mii_id) + | MACB_BF(REGA, (regnum >> 16) & 0x1F) + | MACB_BF(DATA, regnum & 0xFFFF) + | MACB_BF(CODE, MACB_MAN_C45_CODE))); + + status = macb_mdio_wait_for_idle(bp); + if (status < 0) + goto mdio_read_exit; + + macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_C45_SOF) + | MACB_BF(RW, MACB_MAN_C45_READ) + | MACB_BF(PHYA, mii_id) + | MACB_BF(REGA, (regnum >> 16) & 0x1F) + | MACB_BF(CODE, MACB_MAN_C45_CODE))); + } else { + macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_C22_SOF) + | MACB_BF(RW, MACB_MAN_C22_READ) + | MACB_BF(PHYA, mii_id) + | MACB_BF(REGA, regnum) + | MACB_BF(CODE, MACB_MAN_C22_CODE))); + }
status = macb_mdio_wait_for_idle(bp); if (status < 0) @@@ -389,32 -370,12 +389,32 @@@ static int macb_mdio_write(struct mii_b if (status < 0) goto mdio_write_exit;
- macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_SOF) - | MACB_BF(RW, MACB_MAN_WRITE) - | MACB_BF(PHYA, mii_id) - | MACB_BF(REGA, regnum) - | MACB_BF(CODE, MACB_MAN_CODE) - | MACB_BF(DATA, value))); + if (regnum & MII_ADDR_C45) { + macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_C45_SOF) + | MACB_BF(RW, MACB_MAN_C45_ADDR) + | MACB_BF(PHYA, mii_id) + | MACB_BF(REGA, (regnum >> 16) & 0x1F) + | MACB_BF(DATA, regnum & 0xFFFF) + | MACB_BF(CODE, MACB_MAN_C45_CODE))); + + status = macb_mdio_wait_for_idle(bp); + if (status < 0) + goto mdio_write_exit; + + macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_C45_SOF) + | MACB_BF(RW, MACB_MAN_C45_WRITE) + | MACB_BF(PHYA, mii_id) + | MACB_BF(REGA, (regnum >> 16) & 0x1F) + | MACB_BF(CODE, MACB_MAN_C45_CODE) + | MACB_BF(DATA, value))); + } else { + macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_C22_SOF) + | MACB_BF(RW, MACB_MAN_C22_WRITE) + | MACB_BF(PHYA, mii_id) + | MACB_BF(REGA, regnum) + | MACB_BF(CODE, MACB_MAN_C22_CODE) + | MACB_BF(DATA, value))); + }
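
Both macb hunks above add the IEEE 802.3 Clause 45 frame sequence alongside the existing Clause 22 one. A C45 access takes two MDIO frames instead of one: an ADDRESS frame that latches the 16-bit register number, then a READ or WRITE frame that carries the data. The MII_ADDR_C45 flag marks such requests, with the MMD device address in bits 16-20 of regnum. Stripped of the MACB register plumbing, the read path looks schematically like this (pseudo-C, not a real API):

    devad = (regnum >> 16) & 0x1f;          /* MMD device address */
    mdio_frame(SOF_C45, OP_ADDRESS, phy, devad, regnum & 0xffff);
    wait_for_idle();
    mdio_frame(SOF_C45, OP_READ, phy, devad, 0);
    wait_for_idle();
    data = read_data_reg() & 0xffff;
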
status = macb_mdio_wait_for_idle(bp); if (status < 0) @@@ -650,21 -611,24 +650,24 @@@ static const struct phylink_mac_ops mac .mac_link_up = macb_mac_link_up, };
+ static bool macb_phy_handle_exists(struct device_node *dn)
+ {
+ 	dn = of_parse_phandle(dn, "phy-handle", 0);
+ 	of_node_put(dn);
+ 	return dn != NULL;
+ }
+ 
  static int macb_phylink_connect(struct macb *bp)
  {
+ 	struct device_node *dn = bp->pdev->dev.of_node;
  	struct net_device *dev = bp->dev;
  	struct phy_device *phydev;
  	int ret;
  
- 	if (bp->pdev->dev.of_node &&
- 	    of_parse_phandle(bp->pdev->dev.of_node, "phy-handle", 0)) {
- 		ret = phylink_of_phy_connect(bp->phylink, bp->pdev->dev.of_node,
- 					     0);
- 		if (ret) {
- 			netdev_err(dev, "Could not attach PHY (%d)\n", ret);
- 			return ret;
- 		}
- 	} else {
+ 	if (dn)
+ 		ret = phylink_of_phy_connect(bp->phylink, dn, 0);
+ 
+ 	if (!dn || (ret && !macb_phy_handle_exists(dn))) {
  		phydev = phy_find_first(bp->mii_bus);
  		if (!phydev) {
  			netdev_err(dev, "no PHY found\n");
@@@ -673,10 -637,11 +676,11 @@@
  
  		/* attach the mac to the phy */
  		ret = phylink_connect_phy(bp->phylink, phydev);
- 		if (ret) {
- 			netdev_err(dev, "Could not attach to PHY (%d)\n", ret);
- 			return ret;
- 		}
+ 	}
+ 
+ 	if (ret) {
+ 		netdev_err(dev, "Could not attach PHY (%d)\n", ret);
+ 		return ret;
+ 	}
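
macb_phy_handle_exists() above is also a compact example of the phandle-probing idiom: of_parse_phandle() returns the target node with its refcount raised (or NULL), so the reference must be dropped with of_node_put() even when the caller only wants a yes/no answer. of_node_put(NULL) is a safe no-op, and comparing the stale pointer against NULL after the put is still valid. Generic form (sketch):

    static bool node_has_phandle(struct device_node *np, const char *name)
    {
            struct device_node *target = of_parse_phandle(np, name, 0);

            of_node_put(target);    /* drop the reference immediately */
            return target != NULL;  /* only the NULL-ness is consumed */
    }
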
phylink_start(bp->phylink); diff --combined drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c index 1930e39f195e,0dedd3e9c31e..649842a8aa28 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c @@@ -804,26 -804,6 +804,26 @@@ static int setup_ppod_edram(struct adap return 0; }
+static void adap_config_hpfilter(struct adapter *adapter) +{ + u32 param, val = 0; + int ret; + + /* Enable HP filter region. Older fw will fail this request and + * it is fine. + */ + param = FW_PARAM_DEV(HPFILTER_REGION_SUPPORT); + ret = t4_set_params(adapter, adapter->mbox, adapter->pf, 0, + 1, ¶m, &val); + + /* An error means FW doesn't know about HP filter support, + * it's not a problem, don't return an error. + */ + if (ret < 0) + dev_err(adapter->pdev_dev, + "HP filter region isn't supported by FW\n"); +} + /** * cxgb4_write_rss - write the RSS table for a given port * @pi: the port @@@ -1447,8 -1427,8 +1447,8 @@@ static void mk_tid_release(struct sk_bu static void cxgb4_queue_tid_release(struct tid_info *t, unsigned int chan, unsigned int tid) { - void **p = &t->tid_tab[tid]; struct adapter *adap = container_of(t, struct adapter, tids); + void **p = &t->tid_tab[tid - t->tid_base];
spin_lock_bh(&adap->tid_release_lock); *p = adap->tid_release_head; @@@ -1500,13 -1480,13 +1500,13 @@@ static void process_tid_release_list(st void cxgb4_remove_tid(struct tid_info *t, unsigned int chan, unsigned int tid, unsigned short family) { - struct sk_buff *skb; struct adapter *adap = container_of(t, struct adapter, tids); + struct sk_buff *skb;
- WARN_ON(tid >= t->ntids); + WARN_ON(tid_out_of_range(&adap->tids, tid));
- if (t->tid_tab[tid]) { - t->tid_tab[tid] = NULL; + if (t->tid_tab[tid - adap->tids.tid_base]) { + t->tid_tab[tid - adap->tids.tid_base] = NULL; atomic_dec(&t->conns_in_use); if (t->hash_base && (tid >= t->hash_base)) { if (family == AF_INET6) @@@ -1538,7 -1518,6 +1538,7 @@@ static int tid_init(struct tid_info *t struct adapter *adap = container_of(t, struct adapter, tids); unsigned int max_ftids = t->nftids + t->nsftids; unsigned int natids = t->natids; + unsigned int hpftid_bmap_size; unsigned int eotid_bmap_size; unsigned int stid_bmap_size; unsigned int ftid_bmap_size; @@@ -1546,15 -1525,12 +1546,15 @@@
stid_bmap_size = BITS_TO_LONGS(t->nstids + t->nsftids); ftid_bmap_size = BITS_TO_LONGS(t->nftids); + hpftid_bmap_size = BITS_TO_LONGS(t->nhpftids); eotid_bmap_size = BITS_TO_LONGS(t->neotids); size = t->ntids * sizeof(*t->tid_tab) + natids * sizeof(*t->atid_tab) + t->nstids * sizeof(*t->stid_tab) + t->nsftids * sizeof(*t->stid_tab) + stid_bmap_size * sizeof(long) + + t->nhpftids * sizeof(*t->hpftid_tab) + + hpftid_bmap_size * sizeof(long) + max_ftids * sizeof(*t->ftid_tab) + ftid_bmap_size * sizeof(long) + t->neotids * sizeof(*t->eotid_tab) + @@@ -1567,9 -1543,7 +1567,9 @@@ t->atid_tab = (union aopen_entry *)&t->tid_tab[t->ntids]; t->stid_tab = (struct serv_entry *)&t->atid_tab[natids]; t->stid_bmap = (unsigned long *)&t->stid_tab[t->nstids + t->nsftids]; - t->ftid_tab = (struct filter_entry *)&t->stid_bmap[stid_bmap_size]; + t->hpftid_tab = (struct filter_entry *)&t->stid_bmap[stid_bmap_size]; + t->hpftid_bmap = (unsigned long *)&t->hpftid_tab[t->nhpftids]; + t->ftid_tab = (struct filter_entry *)&t->hpftid_bmap[hpftid_bmap_size]; t->ftid_bmap = (unsigned long *)&t->ftid_tab[max_ftids]; t->eotid_tab = (struct eotid_entry *)&t->ftid_bmap[ftid_bmap_size]; t->eotid_bmap = (unsigned long *)&t->eotid_tab[t->neotids]; @@@ -1604,8 -1578,6 +1604,8 @@@ bitmap_zero(t->eotid_bmap, t->neotids); }
+ if (t->nhpftids) + bitmap_zero(t->hpftid_bmap, t->nhpftids); bitmap_zero(t->ftid_bmap, t->nftids); return 0; } @@@ -3163,9 -3135,9 +3163,9 @@@ static int cxgb_set_tx_maxrate(struct n { struct port_info *pi = netdev_priv(dev); struct adapter *adap = pi->adapter; + struct ch_sched_queue qe = { 0 }; + struct ch_sched_params p = { 0 }; struct sched_class *e; - struct ch_sched_params p; - struct ch_sched_queue qe; u32 req_rate; int err = 0;
@@@ -3182,6 -3154,15 +3182,15 @@@ return -EINVAL; }
+ qe.queue = index; + e = cxgb4_sched_queue_lookup(dev, &qe); + if (e && e->info.u.params.level != SCHED_CLASS_LEVEL_CL_RL) { + dev_err(adap->pdev_dev, + "Queue %u already bound to class %u of type: %u\n", + index, e->idx, e->info.u.params.level); + return -EBUSY; + } + /* Convert from Mbps to Kbps */ req_rate = rate * 1000;
@@@ -3211,7 -3192,6 +3220,6 @@@ return 0;
/* Fetch any available unused or matching scheduling class */ - memset(&p, 0, sizeof(p)); p.type = SCHED_CLASS_TYPE_PACKET; p.u.params.level = SCHED_CLASS_LEVEL_CL_RL; p.u.params.mode = SCHED_CLASS_MODE_CLASS; @@@ -4379,7 -4359,6 +4387,7 @@@ static int adap_init0_config(struct ada "HMA configuration failed with error %d\n", ret);
if (is_t6(adapter->params.chip)) { + adap_config_hpfilter(adapter); ret = setup_ppod_edram(adapter); if (!ret) dev_info(adapter->pdev_dev, "Successfully enabled " @@@ -4689,6 -4668,16 +4697,6 @@@ static int adap_init0(struct adapter *a /* * Grab some of our basic fundamental operating parameters. */ -#define FW_PARAM_DEV(param) \ - (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) | \ - FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_##param)) - -#define FW_PARAM_PFVF(param) \ - FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_PFVF) | \ - FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_PFVF_##param)| \ - FW_PARAMS_PARAM_Y_V(0) | \ - FW_PARAMS_PARAM_Z_V(0) - params[0] = FW_PARAM_PFVF(EQ_START); params[1] = FW_PARAM_PFVF(L2T_START); params[2] = FW_PARAM_PFVF(L2T_END); @@@ -4706,16 -4695,6 +4714,16 @@@ adap->sge.ingr_start = val[5];
if (CHELSIO_CHIP_VERSION(adap->params.chip) > CHELSIO_T5) { + params[0] = FW_PARAM_PFVF(HPFILTER_START); + params[1] = FW_PARAM_PFVF(HPFILTER_END); + ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2, + params, val); + if (ret < 0) + goto bye; + + adap->tids.hpftid_base = val[0]; + adap->tids.nhpftids = val[1] - val[0] + 1; + /* Read the raw mps entries. In T6, the last 2 tcam entries * are reserved for raw mac addresses (rawf = 2, one per port). */ @@@ -4727,9 -4706,6 +4735,9 @@@ adap->rawf_start = val[0]; adap->rawf_cnt = val[1] - val[0] + 1; } + + adap->tids.tid_base = + t4_read_reg(adap, LE_DB_ACTIVE_TABLE_START_INDEX_A); }
/* qids (ingress/egress) returned from firmware can be anywhere @@@ -5082,6 -5058,8 +5090,6 @@@ } adap->params.crypto = ntohs(caps_cmd.cryptocaps); } -#undef FW_PARAM_PFVF -#undef FW_PARAM_DEV
/* The MTU/MSS Table is initialized by now, so load their values. If * we're initializing the adapter, then we'll make any modifications diff --combined drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_matchall.c index 24c3c2dc7171,6d485803ddbe..1b7681a4eb32 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_matchall.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_matchall.c @@@ -15,6 -15,8 +15,8 @@@ static int cxgb4_matchall_egress_valida struct flow_action *actions = &cls->rule->action; struct port_info *pi = netdev2pinfo(dev); struct flow_action_entry *entry; + struct ch_sched_queue qe; + struct sched_class *e; u64 max_link_rate; u32 i, speed; int ret; @@@ -60,7 -62,59 +62,59 @@@ } }
+ for (i = 0; i < pi->nqsets; i++) { + memset(&qe, 0, sizeof(qe)); + qe.queue = i; + + e = cxgb4_sched_queue_lookup(dev, &qe); + if (e && e->info.u.params.level != SCHED_CLASS_LEVEL_CH_RL) { + NL_SET_ERR_MSG_MOD(extack, + "Some queues are already bound to different class"); + return -EBUSY; + } + } + + return 0; + } + + static int cxgb4_matchall_tc_bind_queues(struct net_device *dev, u32 tc) + { + struct port_info *pi = netdev2pinfo(dev); + struct ch_sched_queue qe; + int ret; + u32 i; + + for (i = 0; i < pi->nqsets; i++) { + qe.queue = i; + qe.class = tc; + ret = cxgb4_sched_class_bind(dev, &qe, SCHED_QUEUE); + if (ret) + goto out_free; + } + return 0; + + out_free: + while (i--) { + qe.queue = i; + qe.class = SCHED_CLS_NONE; + cxgb4_sched_class_unbind(dev, &qe, SCHED_QUEUE); + } + + return ret; + } + + static void cxgb4_matchall_tc_unbind_queues(struct net_device *dev) + { + struct port_info *pi = netdev2pinfo(dev); + struct ch_sched_queue qe; + u32 i; + + for (i = 0; i < pi->nqsets; i++) { + qe.queue = i; + qe.class = SCHED_CLS_NONE; + cxgb4_sched_class_unbind(dev, &qe, SCHED_QUEUE); + } }
static int cxgb4_matchall_alloc_tc(struct net_device *dev, @@@ -83,6 -137,7 +137,7 @@@ struct adapter *adap = netdev2adap(dev); struct flow_action_entry *entry; struct sched_class *e; + int ret; u32 i;
tc_port_matchall = &adap->tc_matchall->port_matchall[pi->port_id]; @@@ -101,10 -156,21 +156,21 @@@ return -ENOMEM; }
+ ret = cxgb4_matchall_tc_bind_queues(dev, e->idx); + if (ret) { + NL_SET_ERR_MSG_MOD(extack, + "Could not bind queues to traffic class"); + goto out_free; + } + tc_port_matchall->egress.hwtc = e->idx; tc_port_matchall->egress.cookie = cls->cookie; tc_port_matchall->egress.state = CXGB4_MATCHALL_STATE_ENABLED; return 0; + + out_free: + cxgb4_sched_class_free(dev, e->idx); + return ret; }
static void cxgb4_matchall_free_tc(struct net_device *dev) @@@ -114,6 -180,7 +180,7 @@@ struct adapter *adap = netdev2adap(dev);
tc_port_matchall = &adap->tc_matchall->port_matchall[pi->port_id]; + cxgb4_matchall_tc_unbind_queues(dev); cxgb4_sched_class_free(dev, tc_port_matchall->egress.hwtc);
tc_port_matchall->egress.hwtc = SCHED_CLS_NONE; @@@ -137,7 -204,7 +204,7 @@@ static int cxgb4_matchall_alloc_filter( * -1 here. 1 slot is enough to create a wildcard matchall * VIID rule. */ - if (cls->common.prio <= adap->tids.nftids) + if (cls->common.prio <= (adap->tids.nftids + adap->tids.nhpftids)) fidx = cls->common.prio - 1; else fidx = cxgb4_get_free_ftid(dev, PF_INET); @@@ -156,8 -223,6 +223,8 @@@ fs = &tc_port_matchall->ingress.fs; memset(fs, 0, sizeof(*fs));
+ if (fidx < adap->tids.nhpftids) + fs->prio = 1; fs->tc_prio = cls->common.prio; fs->tc_cookie = cls->cookie; fs->hitcnts = 1; diff --combined drivers/net/ethernet/hisilicon/hns/hns_enet.c index e45553ec114a,eb69e5c81a4d..2fbb476584bc --- a/drivers/net/ethernet/hisilicon/hns/hns_enet.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.c @@@ -565,7 -565,6 +565,6 @@@ static int hns_nic_poll_rx_skb(struct h skb = *out_skb = napi_alloc_skb(&ring_data->napi, HNS_RX_HEAD_SIZE); if (unlikely(!skb)) { - netdev_err(ndev, "alloc rx skb fail\n"); ring->stats.sw_err_cnt++; return -ENOMEM; } @@@ -1056,7 -1055,6 +1055,6 @@@ static int hns_nic_common_poll(struct n container_of(napi, struct hns_nic_ring_data, napi); struct hnae_ring *ring = ring_data->ring;
- try_again:
  	clean_complete += ring_data->poll_one(
  				ring_data, budget - clean_complete,
  				ring_data->ex_process);
@@@ -1066,7 -1064,7 +1064,7 @@@
  		napi_complete(napi);
  		ring->q->handle->dev->ops->toggle_ring_irq(ring, 0);
  	} else {
- 		goto try_again;
+ 		return budget;
  	}
  }
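
The hns fix above replaces the in-place goto try_again retry with return budget, which is how a NAPI poll function signals "more work pending": returning the full budget keeps the instance on the poll list so the softirq (or ksoftirqd under load) re-invokes it, instead of spinning until the watchdog reports a soft lockup. The general contract (sketch; the my_* names are illustrative):

    static int my_napi_poll(struct napi_struct *napi, int budget)
    {
            int done = my_clean_rings(napi, budget);

            if (done < budget) {
                    /* Work exhausted: leave polling mode and re-enable
                     * the device interrupt, unless we were rescheduled.
                     */
                    if (napi_complete_done(napi, done))
                            my_enable_irq(napi);
            }
            /* done == budget keeps us scheduled for another pass */
            return done;
    }
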
@@@ -1485,7 -1483,7 +1483,7 @@@ static int hns_nic_net_stop(struct net_
  static void hns_tx_timeout_reset(struct hns_nic_priv *priv);
  #define HNS_TX_TIMEO_LIMIT (40 * HZ)
 -static void hns_nic_net_timeout(struct net_device *ndev)
 +static void hns_nic_net_timeout(struct net_device *ndev, unsigned int txqueue)
  {
  	struct hns_nic_priv *priv = netdev_priv(ndev);
diff --combined drivers/net/ethernet/hisilicon/hns3/hns3_enet.c index e240d99f7ca8,b3deb5e5ce29..914a7630ac48 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c @@@ -24,12 -24,6 +24,12 @@@
#include "hnae3.h" #include "hns3_enet.h" +/* All hns3 tracepoints are defined by the include below, which + * must be included exactly once across the whole kernel with + * CREATE_TRACE_POINTS defined + */ +#define CREATE_TRACE_POINTS +#include "hns3_trace.h"
#define hns3_set_field(origin, shift, val) ((origin) |= ((val) << (shift))) #define hns3_tx_bd_count(S) DIV_ROUND_UP(S, HNS3_MAX_BD_SIZE) @@@ -60,6 -54,8 +60,8 @@@ MODULE_PARM_DESC(debug, " Network inter #define HNS3_INNER_VLAN_TAG 1 #define HNS3_OUTER_VLAN_TAG 2
+ #define HNS3_MIN_TX_LEN 33U + /* hns3_pci_tbl - PCI Device ID Table * * Last entry must be all 0s @@@ -133,21 -129,18 +135,21 @@@ static int hns3_nic_init_irq(struct hns continue;
if (tqp_vectors->tx_group.ring && tqp_vectors->rx_group.ring) { - snprintf(tqp_vectors->name, HNAE3_INT_NAME_LEN - 1, - "%s-%s-%d", priv->netdev->name, "TxRx", - txrx_int_idx++); + snprintf(tqp_vectors->name, HNAE3_INT_NAME_LEN, + "%s-%s-%s-%d", hns3_driver_name, + pci_name(priv->ae_handle->pdev), + "TxRx", txrx_int_idx++); txrx_int_idx++; } else if (tqp_vectors->rx_group.ring) { - snprintf(tqp_vectors->name, HNAE3_INT_NAME_LEN - 1, - "%s-%s-%d", priv->netdev->name, "Rx", - rx_int_idx++); + snprintf(tqp_vectors->name, HNAE3_INT_NAME_LEN, + "%s-%s-%s-%d", hns3_driver_name, + pci_name(priv->ae_handle->pdev), + "Rx", rx_int_idx++); } else if (tqp_vectors->tx_group.ring) { - snprintf(tqp_vectors->name, HNAE3_INT_NAME_LEN - 1, - "%s-%s-%d", priv->netdev->name, "Tx", - tx_int_idx++); + snprintf(tqp_vectors->name, HNAE3_INT_NAME_LEN, + "%s-%s-%s-%d", hns3_driver_name, + pci_name(priv->ae_handle->pdev), + "Tx", tx_int_idx++); } else { /* Skip this unused q_vector */ continue; @@@ -164,8 -157,6 +166,8 @@@ return ret; }
+ disable_irq(tqp_vectors->vector_irq); + irq_set_affinity_hint(tqp_vectors->vector_irq, &tqp_vectors->affinity_mask);
@@@ -184,7 -175,6 +186,7 @@@ static void hns3_mask_vector_irq(struc static void hns3_vector_enable(struct hns3_enet_tqp_vector *tqp_vector) { napi_enable(&tqp_vector->napi); + enable_irq(tqp_vector->vector_irq);
/* enable vector */ hns3_mask_vector_irq(tqp_vector, 1); @@@ -384,6 -374,18 +386,6 @@@ static int hns3_nic_net_up(struct net_d if (ret) return ret;
- /* the device can work without cpu rmap, only aRFS needs it */ - ret = hns3_set_rx_cpu_rmap(netdev); - if (ret) - netdev_warn(netdev, "set rx cpu rmap fail, ret=%d!\n", ret); - - /* get irq resource for all vectors */ - ret = hns3_nic_init_irq(priv); - if (ret) { - netdev_err(netdev, "init irq failed! ret=%d\n", ret); - goto free_rmap; - } - clear_bit(HNS3_NIC_STATE_DOWN, &priv->state);
/* enable the vectors */ @@@ -396,15 -398,22 +398,15 @@@
/* start the ae_dev */ ret = h->ae_algo->ops->start ? h->ae_algo->ops->start(h) : 0; - if (ret) - goto out_start_err; - - return 0; - -out_start_err: - set_bit(HNS3_NIC_STATE_DOWN, &priv->state); - while (j--) - hns3_tqp_disable(h->kinfo.tqp[j]); + if (ret) { + set_bit(HNS3_NIC_STATE_DOWN, &priv->state); + while (j--) + hns3_tqp_disable(h->kinfo.tqp[j]);
- for (j = i - 1; j >= 0; j--) - hns3_vector_disable(&priv->tqp_vector[j]); + for (j = i - 1; j >= 0; j--) + hns3_vector_disable(&priv->tqp_vector[j]); + }
- hns3_nic_uninit_irq(priv); -free_rmap: - hns3_free_rx_cpu_rmap(netdev); return ret; }
@@@ -501,6 -510,11 +503,6 @@@ static void hns3_nic_net_down(struct ne if (ops->stop) ops->stop(priv->ae_handle);
- hns3_free_rx_cpu_rmap(netdev); - - /* free irq resources */ - hns3_nic_uninit_irq(priv); - /* delay ring buffer clearing to hns3_reset_notify_uninit_enet * during reset process, because driver may not be able * to disable the ring through firmware when downing the netdev. @@@ -722,8 -736,6 +724,8 @@@ static int hns3_set_tso(struct sk_buff /* get MSS for TSO */ *mss = skb_shinfo(skb)->gso_size;
+ trace_hns3_tso(skb); + return 0; }
@@@ -1128,7 -1140,6 +1130,7 @@@ static int hns3_fill_desc(struct hns3_e desc->tx.bdtp_fe_sc_vld_ra_ri = cpu_to_le16(BIT(HNS3_TXD_VLD_B));
+ trace_hns3_tx_desc(ring, ring->next_to_use); ring_ptr_move_fw(ring, next_to_use); return HNS3_LIKELY_BD_NUM; } @@@ -1152,7 -1163,6 +1154,7 @@@ desc->tx.bdtp_fe_sc_vld_ra_ri = cpu_to_le16(BIT(HNS3_TXD_VLD_B));
+ trace_hns3_tx_desc(ring, ring->next_to_use); /* move ring pointer to next */ ring_ptr_move_fw(ring, next_to_use);
@@@ -1278,14 -1288,6 +1280,14 @@@ static bool hns3_skb_need_linearized(st return false; }
+void hns3_shinfo_pack(struct skb_shared_info *shinfo, __u32 *size) +{ + int i = 0; + + for (i = 0; i < MAX_SKB_FRAGS; i++) + size[i] = skb_frag_size(&shinfo->frags[i]); +} + static int hns3_nic_maybe_stop_tx(struct hns3_enet_ring *ring, struct net_device *netdev, struct sk_buff *skb) @@@ -1297,10 -1299,8 +1299,10 @@@ bd_num = hns3_tx_bd_num(skb, bd_size); if (unlikely(bd_num > HNS3_MAX_NON_TSO_BD_NUM)) { if (bd_num <= HNS3_MAX_TSO_BD_NUM && skb_is_gso(skb) && - !hns3_skb_need_linearized(skb, bd_size, bd_num)) + !hns3_skb_need_linearized(skb, bd_size, bd_num)) { + trace_hns3_over_8bd(skb); goto out; + }
if (__skb_linearize(skb)) return -ENOMEM; @@@ -1308,10 -1308,8 +1310,10 @@@ bd_num = hns3_tx_bd_count(skb->len); if ((skb_is_gso(skb) && bd_num > HNS3_MAX_TSO_BD_NUM) || (!skb_is_gso(skb) && - bd_num > HNS3_MAX_NON_TSO_BD_NUM)) + bd_num > HNS3_MAX_NON_TSO_BD_NUM)) { + trace_hns3_over_8bd(skb); return -ENOMEM; + }
u64_stats_update_begin(&ring->syncp); ring->stats.tx_copy++; @@@ -1409,6 -1407,10 +1411,10 @@@ netdev_tx_t hns3_nic_net_xmit(struct sk int bd_num = 0; int ret;
+ 	/* Hardware can only handle short frames above 32 bytes */
+ 	if (skb_put_padto(skb, HNS3_MIN_TX_LEN))
+ 		return NETDEV_TX_OK;
+ 
  	/* Prefetch the data used later */
  	prefetch(skb->data);
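
One subtlety in the padding fix above: skb_put_padto() consumes (frees) the skb when it cannot extend it, so the failure path must return NETDEV_TX_OK rather than NETDEV_TX_BUSY; there is no buffer left for the stack to retry. In outline:

    if (skb_put_padto(skb, HNS3_MIN_TX_LEN))
            return NETDEV_TX_OK;    /* skb already freed on failure */
    /* from here on the frame is at least HNS3_MIN_TX_LEN bytes long */
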
@@@ -1452,7 -1454,6 +1458,7 @@@ out (ring->desc_num - 1); ring->desc[pre_ntu].tx.bdtp_fe_sc_vld_ra_ri |= cpu_to_le16(BIT(HNS3_TXD_FE_B)); + trace_hns3_tx_desc(ring, pre_ntu);
/* Complete translate all packets */ dev_queue = netdev_get_tx_queue(netdev, ring->queue_index); @@@ -1561,37 -1562,6 +1567,37 @@@ static int hns3_nic_set_features(struc return 0; }
 +static netdev_features_t hns3_features_check(struct sk_buff *skb,
 +					     struct net_device *dev,
 +					     netdev_features_t features)
 +{
 +#define HNS3_MAX_HDR_LEN 480U
 +#define HNS3_MAX_L4_HDR_LEN 60U
 +
 +	size_t len;
 +
 +	if (skb->ip_summed != CHECKSUM_PARTIAL)
 +		return features;
 +
 +	if (skb->encapsulation)
 +		len = skb_inner_transport_header(skb) - skb->data;
 +	else
 +		len = skb_transport_header(skb) - skb->data;
 +
 +	/* Assume L4 is 60 byte as TCP is the only protocol with a
 +	 * flexible value, and its max len is 60 bytes.
 +	 */
 +	len += HNS3_MAX_L4_HDR_LEN;
 +
 +	/* Hardware only supports checksum on the skb with a max header
 +	 * len of 480 bytes.
 +	 */
 +	if (len > HNS3_MAX_HDR_LEN)
 +		features &= ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
 +
 +	return features;
 +}
 +
  static void hns3_nic_get_stats64(struct net_device *netdev,
  				 struct rtnl_link_stats64 *stats)
  {
-static void hns3_nic_net_timeout(struct net_device *ndev) +static void hns3_nic_net_timeout(struct net_device *ndev, unsigned int txqueue) { struct hns3_nic_priv *priv = netdev_priv(ndev); struct hnae3_handle *h = priv->ae_handle; @@@ -2006,7 -1976,6 +2012,7 @@@ static const struct net_device_ops hns3 .ndo_do_ioctl = hns3_nic_do_ioctl, .ndo_change_mtu = hns3_nic_change_mtu, .ndo_set_features = hns3_nic_set_features, + .ndo_features_check = hns3_features_check, .ndo_get_stats64 = hns3_nic_get_stats64, .ndo_setup_tc = hns3_nic_setup_tc, .ndo_set_rx_mode = hns3_nic_set_rx_mode, @@@ -2705,9 -2674,6 +2711,9 @@@ static int hns3_gro_complete(struct sk_ skb->csum_start = (unsigned char *)th - skb->head; skb->csum_offset = offsetof(struct tcphdr, check); skb->ip_summed = CHECKSUM_PARTIAL; + + trace_hns3_gro(skb); + return 0; }
@@@ -2828,6 -2794,7 +2834,6 @@@ static bool hns3_parse_vlan_tag(struct static int hns3_alloc_skb(struct hns3_enet_ring *ring, unsigned int length, unsigned char *va) { -#define HNS3_NEED_ADD_FRAG 1 struct hns3_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_clean]; struct net_device *netdev = ring_to_netdev(ring); struct sk_buff *skb; @@@ -2844,7 -2811,6 +2850,7 @@@ return -ENOMEM; }
+ trace_hns3_rx_desc(ring); prefetchw(skb->data);
ring->pending_buf = 1; @@@ -2872,19 -2838,33 +2878,19 @@@ desc_cb); ring_ptr_move_fw(ring, next_to_clean);
- return HNS3_NEED_ADD_FRAG; + return 0; }
-static int hns3_add_frag(struct hns3_enet_ring *ring, struct hns3_desc *desc, - bool pending) +static int hns3_add_frag(struct hns3_enet_ring *ring) { struct sk_buff *skb = ring->skb; struct sk_buff *head_skb = skb; struct sk_buff *new_skb; struct hns3_desc_cb *desc_cb; - struct hns3_desc *pre_desc; + struct hns3_desc *desc; u32 bd_base_info; - int pre_bd;
- /* if there is pending bd, the SW param next_to_clean has moved - * to next and the next is NULL - */ - if (pending) { - pre_bd = (ring->next_to_clean - 1 + ring->desc_num) % - ring->desc_num; - pre_desc = &ring->desc[pre_bd]; - bd_base_info = le32_to_cpu(pre_desc->rx.bd_base_info); - } else { - bd_base_info = le32_to_cpu(desc->rx.bd_base_info); - } - - while (!(bd_base_info & BIT(HNS3_RXD_FE_B))) { + do { desc = &ring->desc[ring->next_to_clean]; desc_cb = &ring->desc_cb[ring->next_to_clean]; bd_base_info = le32_to_cpu(desc->rx.bd_base_info); @@@ -2919,10 -2899,9 +2925,10 @@@ }
hns3_nic_reuse_page(skb, ring->frag_num++, ring, 0, desc_cb); + trace_hns3_rx_desc(ring); ring_ptr_move_fw(ring, next_to_clean); ring->pending_buf++; - } + } while (!(bd_base_info & BIT(HNS3_RXD_FE_B)));
return 0; } @@@ -3090,23 -3069,28 +3096,23 @@@ static int hns3_handle_rx_bd(struct hns
if (ret < 0) /* alloc buffer fail */ return ret; - if (ret > 0) { /* need add frag */ - ret = hns3_add_frag(ring, desc, false); + if (!(bd_base_info & BIT(HNS3_RXD_FE_B))) { /* need add frag */ + ret = hns3_add_frag(ring); if (ret) return ret; - - /* As the head data may be changed when GRO enable, copy - * the head data in after other data rx completed - */ - memcpy(skb->data, ring->va, - ALIGN(ring->pull_len, sizeof(long))); } } else { - ret = hns3_add_frag(ring, desc, true); + ret = hns3_add_frag(ring); if (ret) return ret; + }
- /* As the head data may be changed when GRO enable, copy - * the head data in after other data rx completed - */ + /* As the head data may be changed when GRO enable, copy + * the head data in after other data rx completed + */ + if (skb->len > HNS3_RX_HEAD_SIZE) memcpy(skb->data, ring->va, ALIGN(ring->pull_len, sizeof(long))); - }
ret = hns3_handle_bdinfo(ring, skb); if (unlikely(ret)) { @@@ -3612,25 -3596,26 +3618,25 @@@ static void hns3_nic_uninit_vector_data if (!tqp_vector->rx_group.ring && !tqp_vector->tx_group.ring) continue;
- hns3_get_vector_ring_chain(tqp_vector, &vector_ring_chain); + /* Since the mapping can be overwritten, when fail to get the + * chain between vector and ring, we should go on to deal with + * the remaining options. + */ + if (hns3_get_vector_ring_chain(tqp_vector, &vector_ring_chain)) + dev_warn(priv->dev, "failed to get ring chain\n");
h->ae_algo->ops->unmap_ring_from_vector(h, tqp_vector->vector_irq, &vector_ring_chain);
hns3_free_vector_ring_chain(tqp_vector, &vector_ring_chain);
- if (tqp_vector->irq_init_flag == HNS3_VECTOR_INITED) { - irq_set_affinity_hint(tqp_vector->vector_irq, NULL); - free_irq(tqp_vector->vector_irq, tqp_vector); - tqp_vector->irq_init_flag = HNS3_VECTOR_NOT_INITED; - } - hns3_clear_ring_group(&tqp_vector->rx_group); hns3_clear_ring_group(&tqp_vector->tx_group); netif_napi_del(&priv->tqp_vector[i].napi); } }
-static int hns3_nic_dealloc_vector_data(struct hns3_nic_priv *priv) +static void hns3_nic_dealloc_vector_data(struct hns3_nic_priv *priv) { struct hnae3_handle *h = priv->ae_handle; struct pci_dev *pdev = h->pdev; @@@ -3642,10 -3627,11 +3648,10 @@@ tqp_vector = &priv->tqp_vector[i]; ret = h->ae_algo->ops->put_vector(h, tqp_vector->vector_irq); if (ret) - return ret; + return; }
devm_kfree(&pdev->dev, priv->tqp_vector); - return 0; }
static void hns3_ring_get_cfg(struct hnae3_queue *q, struct hns3_nic_priv *priv, @@@ -4044,18 -4030,6 +4050,18 @@@ static int hns3_client_init(struct hnae goto out_reg_netdev_fail; }
+ /* the device can work without cpu rmap, only aRFS needs it */ + ret = hns3_set_rx_cpu_rmap(netdev); + if (ret) + dev_warn(priv->dev, "set rx cpu rmap fail, ret=%d\n", ret); + + ret = hns3_nic_init_irq(priv); + if (ret) { + dev_err(priv->dev, "init irq failed! ret=%d\n", ret); + hns3_free_rx_cpu_rmap(netdev); + goto out_init_irq_fail; + } + ret = hns3_client_start(handle); if (ret) { dev_err(priv->dev, "hns3_client_start fail! ret=%d\n", ret); @@@ -4077,9 -4051,6 +4083,9 @@@ return ret;
out_client_start: + hns3_free_rx_cpu_rmap(netdev); + hns3_nic_uninit_irq(priv); +out_init_irq_fail: unregister_netdev(netdev); out_reg_netdev_fail: hns3_uninit_phy(netdev); @@@ -4117,17 -4088,15 +4123,17 @@@ static void hns3_client_uninit(struct h goto out_netdev_free; }
+ hns3_free_rx_cpu_rmap(netdev); + + hns3_nic_uninit_irq(priv); + hns3_del_all_fd_rules(netdev, true);
hns3_clear_all_ring(handle, true);
hns3_nic_uninit_vector_data(priv);
- ret = hns3_nic_dealloc_vector_data(priv); - if (ret) - netdev_err(netdev, "dealloc vector error\n"); + hns3_nic_dealloc_vector_data(priv);
ret = hns3_uninit_all_ring(priv); if (ret) @@@ -4454,32 -4423,17 +4460,32 @@@ static int hns3_reset_notify_init_enet( if (ret) goto err_uninit_vector;
+ /* the device can work without cpu rmap, only aRFS needs it */ + ret = hns3_set_rx_cpu_rmap(netdev); + if (ret) + dev_warn(priv->dev, "set rx cpu rmap fail, ret=%d\n", ret); + + ret = hns3_nic_init_irq(priv); + if (ret) { + dev_err(priv->dev, "init irq failed! ret=%d\n", ret); + hns3_free_rx_cpu_rmap(netdev); + goto err_init_irq_fail; + } + ret = hns3_client_start(handle); if (ret) { dev_err(priv->dev, "hns3_client_start fail! ret=%d\n", ret); - goto err_uninit_ring; + goto err_client_start_fail; }
set_bit(HNS3_NIC_STATE_INITED, &priv->state);
return ret;
-err_uninit_ring: +err_client_start_fail: + hns3_free_rx_cpu_rmap(netdev); + hns3_nic_uninit_irq(priv); +err_init_irq_fail: hns3_uninit_all_ring(priv); err_uninit_vector: hns3_nic_uninit_vector_data(priv); @@@ -4529,8 -4483,6 +4535,8 @@@ static int hns3_reset_notify_uninit_ene return 0; }
+ hns3_free_rx_cpu_rmap(netdev); + hns3_nic_uninit_irq(priv); hns3_clear_all_ring(handle, true); hns3_reset_tx_queue(priv->ae_handle);
@@@ -4538,7 -4490,9 +4544,7 @@@
hns3_store_coal(priv);
- ret = hns3_nic_dealloc_vector_data(priv); - if (ret) - netdev_err(netdev, "dealloc vector error\n"); + hns3_nic_dealloc_vector_data(priv);
ret = hns3_uninit_all_ring(priv); if (ret) diff --combined drivers/net/ethernet/intel/e1000e/netdev.c index 8797913b2702,7c5b18d87b49..db4ea58bac82 --- a/drivers/net/ethernet/intel/e1000e/netdev.c +++ b/drivers/net/ethernet/intel/e1000e/netdev.c @@@ -1780,8 -1780,7 +1780,7 @@@ static irqreturn_t e1000_intr_msi(int _ } /* guard against interrupt when we're going down */ if (!test_bit(__E1000_DOWN, &adapter->state)) - mod_delayed_work(adapter->e1000_workqueue, - &adapter->watchdog_task, HZ); + mod_timer(&adapter->watchdog_timer, jiffies + 1); }
/* Reset on uncorrectable ECC error */ @@@ -1861,8 -1860,7 +1860,7 @@@ static irqreturn_t e1000_intr(int __alw } /* guard against interrupt when we're going down */ if (!test_bit(__E1000_DOWN, &adapter->state)) - mod_delayed_work(adapter->e1000_workqueue, - &adapter->watchdog_task, HZ); + mod_timer(&adapter->watchdog_timer, jiffies + 1); }
/* Reset on uncorrectable ECC error */ @@@ -1907,8 -1905,7 +1905,7 @@@ static irqreturn_t e1000_msix_other(in hw->mac.get_link_status = true; /* guard against interrupt when we're going down */ if (!test_bit(__E1000_DOWN, &adapter->state)) - mod_delayed_work(adapter->e1000_workqueue, - &adapter->watchdog_task, HZ); + mod_timer(&adapter->watchdog_timer, jiffies + 1); }
if (!test_bit(__E1000_DOWN, &adapter->state)) @@@ -4284,6 -4281,7 +4281,7 @@@ void e1000e_down(struct e1000_adapter *
napi_synchronize(&adapter->napi);
+ del_timer_sync(&adapter->watchdog_timer); del_timer_sync(&adapter->phy_info_timer);
spin_lock(&adapter->stats64_lock); @@@ -4723,7 -4721,7 +4721,7 @@@ int e1000e_close(struct net_device *net e1000_free_irq(adapter);
/* Link status message must follow this format */ - pr_info("%s NIC Link is Down\n", netdev->name); + netdev_info(netdev, "NIC Link is Down\n"); }
napi_disable(&adapter->napi); @@@ -5073,13 -5071,12 +5071,13 @@@ static void e1000_print_link_info(struc u32 ctrl = er32(CTRL);
/* Link status message must follow this format for user tools */ - pr_info("%s NIC Link is Up %d Mbps %s Duplex, Flow Control: %s\n", - adapter->netdev->name, adapter->link_speed, - adapter->link_duplex == FULL_DUPLEX ? "Full" : "Half", - (ctrl & E1000_CTRL_TFCE) && (ctrl & E1000_CTRL_RFCE) ? "Rx/Tx" : - (ctrl & E1000_CTRL_RFCE) ? "Rx" : - (ctrl & E1000_CTRL_TFCE) ? "Tx" : "None"); + netdev_info(adapter->netdev, + "NIC Link is Up %d Mbps %s Duplex, Flow Control: %s\n", + adapter->link_speed, + adapter->link_duplex == FULL_DUPLEX ? "Full" : "Half", + (ctrl & E1000_CTRL_TFCE) && (ctrl & E1000_CTRL_RFCE) ? "Rx/Tx" : + (ctrl & E1000_CTRL_RFCE) ? "Rx" : + (ctrl & E1000_CTRL_TFCE) ? "Tx" : "None"); }
static bool e1000e_has_link(struct e1000_adapter *adapter) @@@ -5156,11 -5153,25 +5154,25 @@@ static void e1000e_check_82574_phy_work } }
+ /**
+  * e1000_watchdog - Timer Call-back
+  * @data: pointer to adapter cast into an unsigned long
+  **/
+ static void e1000_watchdog(struct timer_list *t)
+ {
+ 	struct e1000_adapter *adapter = from_timer(adapter, t, watchdog_timer);
+ 
+ 	/* Do the rest outside of interrupt context */
+ 	schedule_work(&adapter->watchdog_task);
+ 
+ 	/* TODO: make this use queue_delayed_work() */
+ }
+ 
  static void e1000_watchdog_task(struct work_struct *work)
  {
  	struct e1000_adapter *adapter = container_of(work,
  						     struct e1000_adapter,
- 						     watchdog_task.work);
+ 						     watchdog_task);
  	struct net_device *netdev = adapter->netdev;
  	struct e1000_mac_info *mac = &adapter->hw.mac;
  	struct e1000_phy_info *phy = &adapter->hw.phy;
@@@ -5308,7 -5319,7 +5320,7 @@@
  		adapter->link_speed = 0;
  		adapter->link_duplex = 0;
  		/* Link status message must follow this format */
- 		pr_info("%s NIC Link is Down\n", adapter->netdev->name);
+ 		netdev_info(netdev, "NIC Link is Down\n");
  		netif_carrier_off(netdev);
  		netif_stop_queue(netdev);
  		if (!test_bit(__E1000_DOWN, &adapter->state))
  	/* Reset the timer */
  	if (!test_bit(__E1000_DOWN, &adapter->state))
- 		queue_delayed_work(adapter->e1000_workqueue,
- 				   &adapter->watchdog_task,
- 				   round_jiffies(2 * HZ));
+ 		mod_timer(&adapter->watchdog_timer,
+ 			  round_jiffies(jiffies + 2 * HZ));
  }
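
The e1000e hunks in this file revert the watchdog from a delayed-work item back to the classic timer pattern: timer_setup() binds the callback, from_timer() recovers the enclosing structure inside it, the timer body only schedules the heavyweight work, and mod_timer() re-arms. The skeleton (a sketch; my_adapter and my_watchdog are illustrative):

    struct my_adapter {
            struct timer_list watchdog_timer;
            struct work_struct watchdog_task;
    };

    static void my_watchdog(struct timer_list *t)
    {
            struct my_adapter *ad = from_timer(ad, t, watchdog_timer);

            /* Timer callbacks run in atomic context; defer real work. */
            schedule_work(&ad->watchdog_task);
    }

    /* init */
    timer_setup(&ad->watchdog_timer, my_watchdog, 0);
    mod_timer(&ad->watchdog_timer, round_jiffies(jiffies + 2 * HZ));

    /* teardown: del_timer_sync(), then cancel_work_sync() */
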
  #define E1000_TX_FLAGS_CSUM		0x00000001
@@@ -5930,7 -5940,7 +5941,7 @@@ static netdev_tx_t e1000_xmit_frame(str
   * e1000_tx_timeout - Respond to a Tx Hang
   * @netdev: network interface device structure
   **/
 -static void e1000_tx_timeout(struct net_device *netdev)
 +static void e1000_tx_timeout(struct net_device *netdev, unsigned int txqueue)
  {
  	struct e1000_adapter *adapter = netdev_priv(netdev);
@@@ -7450,21 -7460,11 +7461,11 @@@ static int e1000_probe(struct pci_dev * goto err_eeprom; }
- adapter->e1000_workqueue = alloc_workqueue("%s", WQ_MEM_RECLAIM, 0, - e1000e_driver_name); - - if (!adapter->e1000_workqueue) { - err = -ENOMEM; - goto err_workqueue; - } - - INIT_DELAYED_WORK(&adapter->watchdog_task, e1000_watchdog_task); - queue_delayed_work(adapter->e1000_workqueue, &adapter->watchdog_task, - 0); - + timer_setup(&adapter->watchdog_timer, e1000_watchdog, 0); timer_setup(&adapter->phy_info_timer, e1000_update_phy_info, 0);
INIT_WORK(&adapter->reset_task, e1000_reset_task); + INIT_WORK(&adapter->watchdog_task, e1000_watchdog_task); INIT_WORK(&adapter->downshift_task, e1000e_downshift_workaround); INIT_WORK(&adapter->update_phy_task, e1000e_update_phy_task); INIT_WORK(&adapter->print_hang_task, e1000_print_hw_hang); @@@ -7558,9 -7558,6 +7559,6 @@@ return 0;
err_register: - flush_workqueue(adapter->e1000_workqueue); - destroy_workqueue(adapter->e1000_workqueue); - err_workqueue: if (!(adapter->flags & FLAG_HAS_AMT)) e1000e_release_hw_control(adapter); err_eeprom: @@@ -7605,17 -7602,15 +7603,15 @@@ static void e1000_remove(struct pci_de * from being rescheduled. */ set_bit(__E1000_DOWN, &adapter->state); + del_timer_sync(&adapter->watchdog_timer); del_timer_sync(&adapter->phy_info_timer);
cancel_work_sync(&adapter->reset_task); + cancel_work_sync(&adapter->watchdog_task); cancel_work_sync(&adapter->downshift_task); cancel_work_sync(&adapter->update_phy_task); cancel_work_sync(&adapter->print_hang_task);
- 	cancel_delayed_work(&adapter->watchdog_task);
- 	flush_workqueue(adapter->e1000_workqueue);
- 	destroy_workqueue(adapter->e1000_workqueue);
- 
  	if (adapter->flags & FLAG_HAS_HW_TIMESTAMP) {
  		cancel_work_sync(&adapter->tx_hwtstamp_work);
  		if (adapter->tx_hwtstamp_skb) {
diff --combined drivers/net/ethernet/intel/iavf/iavf_main.c
index 0a8824871618,8e16be960e96..62fe56ddcb6e
--- a/drivers/net/ethernet/intel/iavf/iavf_main.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_main.c
@@@ -159,7 -159,7 +159,7 @@@ void iavf_schedule_reset(struct iavf_ad
   * iavf_tx_timeout - Respond to a Tx Hang
   * @netdev: network interface device structure
   **/
 -static void iavf_tx_timeout(struct net_device *netdev)
 +static void iavf_tx_timeout(struct net_device *netdev, unsigned int txqueue)
  {
  	struct iavf_adapter *adapter = netdev_priv(netdev);
@@@ -743,9 -743,8 +743,8 @@@ iavf_mac_filter *iavf_find_filter(struc * * Returns ptr to the filter object or NULL when no memory available. **/ - static struct - iavf_mac_filter *iavf_add_filter(struct iavf_adapter *adapter, - const u8 *macaddr) + struct iavf_mac_filter *iavf_add_filter(struct iavf_adapter *adapter, + const u8 *macaddr) { struct iavf_mac_filter *f;
@@@ -2065,9 -2064,9 +2064,9 @@@ static void iavf_reset_task(struct work struct virtchnl_vf_resource *vfres = adapter->vf_res; struct net_device *netdev = adapter->netdev; struct iavf_hw *hw = &adapter->hw; + struct iavf_mac_filter *f, *ftmp; struct iavf_vlan_filter *vlf; struct iavf_cloud_filter *cf; - struct iavf_mac_filter *f; u32 reg_val; int i = 0, err; bool running; @@@ -2181,6 -2180,16 +2180,16 @@@ continue_reset
spin_lock_bh(&adapter->mac_vlan_list_lock);
+ 	/* Delete filter for the current MAC address, it could have
+ 	 * been changed by the PF via administratively set MAC.
+ 	 * Will be re-added via VIRTCHNL_OP_GET_VF_RESOURCES.
+ 	 */
+ 	list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) {
+ 		if (ether_addr_equal(f->macaddr, adapter->hw.mac.addr)) {
+ 			list_del(&f->list);
+ 			kfree(f);
+ 		}
+ 	}
+ 
  	/* re-add all MAC filters */
  	list_for_each_entry(f, &adapter->mac_filter_list, list) {
  		f->add = true;
diff --combined drivers/net/ethernet/intel/igb/igb_ethtool.c
index 43c438365389,445fbdce3e25..f96ffa83efbe
--- a/drivers/net/ethernet/intel/igb/igb_ethtool.c
+++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c
@@@ -181,7 -181,7 +181,7 @@@ static int igb_get_link_ksettings(struc
  			advertising &= ~ADVERTISED_1000baseKX_Full;
  		}
  	}
- 	if (eth_flags->e100_base_fx) {
+ 	if (eth_flags->e100_base_fx || eth_flags->e100_base_lx) {
  		supported |= SUPPORTED_100baseT_Full;
  		advertising |= ADVERTISED_100baseT_Full;
  	}
@@@ -396,7 -396,6 +396,7 @@@ static int igb_set_pauseparam(struct ne
  	struct igb_adapter *adapter = netdev_priv(netdev);
  	struct e1000_hw *hw = &adapter->hw;
  	int retval = 0;
 +	int i;
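
Back in the iavf hunk above, the deletion loop uses list_for_each_entry_safe(), the iterator variant required whenever the current entry may be unlinked and freed during the walk: the auxiliary cursor (ftmp) caches the next element before the current one disappears. Generic shape (sketch):

    struct my_filter *f, *tmp;

    list_for_each_entry_safe(f, tmp, &head, list) {
            if (should_drop(f)) {
                    list_del(&f->list);     /* safe: tmp already points
                                             * at the next entry */
                    kfree(f);
            }
    }
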
/* 100basefx does not support setting link flow control */ if (hw->dev_spec._82575.eth_flags.e100_base_fx) @@@ -429,13 -428,6 +429,13 @@@
retval = ((hw->phy.media_type == e1000_media_type_copper) ? igb_force_mac_fc(hw) : igb_setup_link(hw)); + + /* Make sure SRRCTL considers new fc settings for each ring */ + for (i = 0; i < adapter->num_rx_queues; i++) { + struct igb_ring *ring = adapter->rx_ring[i]; + + igb_setup_srrctl(adapter, ring); + } }
clear_bit(__IGB_RESETTING, &adapter->state); diff --combined drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index 4c13cca656b2,a2b2ad1f60b1..718931d951bc --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@@ -5239,7 -5239,7 +5239,7 @@@ static void ixgbe_fdir_filter_restore(s struct ixgbe_hw *hw = &adapter->hw; struct hlist_node *node2; struct ixgbe_fdir_filter *filter; - u64 action; + u8 queue;
spin_lock(&adapter->fdir_perfect_lock);
@@@ -5248,17 -5248,34 +5248,34 @@@
hlist_for_each_entry_safe(filter, node2, &adapter->fdir_filter_list, fdir_node) { - action = filter->action; - if (action != IXGBE_FDIR_DROP_QUEUE && action != 0) - action = - (action >> ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF) - 1; + if (filter->action == IXGBE_FDIR_DROP_QUEUE) { + queue = IXGBE_FDIR_DROP_QUEUE; + } else { + u32 ring = ethtool_get_flow_spec_ring(filter->action); + u8 vf = ethtool_get_flow_spec_ring_vf(filter->action); + + if (!vf && (ring >= adapter->num_rx_queues)) { + e_err(drv, "FDIR restore failed without VF, ring: %u\n", + ring); + continue; + } else if (vf && + ((vf > adapter->num_vfs) || + ring >= adapter->num_rx_queues_per_pool)) { + e_err(drv, "FDIR restore failed with VF, vf: %hhu, ring: %u\n", + vf, ring); + continue; + } + + /* Map the ring onto the absolute queue index */ + if (!vf) + queue = adapter->rx_ring[ring]->reg_idx; + else + queue = ((vf - 1) * + adapter->num_rx_queues_per_pool) + ring; + }
ixgbe_fdir_write_perfect_filter_82599(hw, - &filter->filter, - filter->sw_idx, - (action == IXGBE_FDIR_DROP_QUEUE) ? - IXGBE_FDIR_DROP_QUEUE : - adapter->rx_ring[action]->reg_idx); + &filter->filter, filter->sw_idx, queue); }
spin_unlock(&adapter->fdir_perfect_lock); @@@ -6158,7 -6175,7 +6175,7 @@@ static void ixgbe_set_eee_capable(struc * ixgbe_tx_timeout - Respond to a Tx Hang * @netdev: network interface device structure **/ -static void ixgbe_tx_timeout(struct net_device *netdev) +static void ixgbe_tx_timeout(struct net_device *netdev, unsigned int txqueue) { struct ixgbe_adapter *adapter = netdev_priv(netdev);
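The FDIR restore rework above leans on the ring-cookie helpers from include/linux/ethtool.h: ethtool encodes the flow destination in a 64-bit cookie, with the VF index stored biased by one (zero meaning the PF) above ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF. A sketch of the decode; the helpers are the real ones, the surrounding function and printout are illustrative only:

	#include <linux/ethtool.h>

	static void example_decode_ring_cookie(u64 ring_cookie)
	{
		u32 ring = ethtool_get_flow_spec_ring(ring_cookie);
		u32 vf = ethtool_get_flow_spec_ring_vf(ring_cookie);

		/* vf == 0 targets the PF itself; a non-zero value is the VF
		 * number plus one, hence the (vf - 1) arithmetic above.
		 */
		pr_info("fdir dest: vf %u ring %u\n", vf, ring);
	}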
diff --combined drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c index fa286694ac2c,64ec0e7c64b4..4622c4ea2e46 --- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c +++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c @@@ -250,7 -250,7 +250,7 @@@ static void ixgbevf_tx_timeout_reset(st * ixgbevf_tx_timeout - Respond to a Tx Hang * @netdev: network interface device structure **/ -static void ixgbevf_tx_timeout(struct net_device *netdev) +static void ixgbevf_tx_timeout(struct net_device *netdev, unsigned int txqueue) { struct ixgbevf_adapter *adapter = netdev_priv(netdev);
@@@ -2081,11 -2081,6 +2081,6 @@@ static int ixgbevf_write_uc_addr_list(s struct ixgbe_hw *hw = &adapter->hw; int count = 0;
- if ((netdev_uc_count(netdev)) > 10) { - pr_err("Too many unicast filters - No Space\n"); - return -ENOSPC; - } - if (!netdev_uc_empty(netdev)) { struct netdev_hw_addr *ha;
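The mvneta hunks that follow snapshot the buffer length before bpf_prog_run_xdp() runs, presumably because the XDP program may move xdp->data_end (e.g. via bpf_xdp_adjust_tail()) while the page-pool recycle path still needs the original length as the DMA sync size. The pattern, as a hedged sketch under those assumptions:

	static u32 example_run_xdp(struct bpf_prog *prog, struct xdp_buff *xdp,
				   struct page_pool *pool, int rx_offset)
	{
		unsigned int len;
		u32 act;

		/* Snapshot the length first: the program may shrink data_end,
		 * but DMA must sync what the device actually wrote.
		 */
		len = xdp->data_end - xdp->data_hard_start - rx_offset;
		act = bpf_prog_run_xdp(prog, xdp);
		if (act == XDP_DROP)
			__page_pool_put_page(pool, virt_to_head_page(xdp->data),
					     len, true);
		return act;
	}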
diff --combined drivers/net/ethernet/marvell/mvneta.c index 587e35d10dc7,67ad8b8b127d..0449d4b28ade --- a/drivers/net/ethernet/marvell/mvneta.c +++ b/drivers/net/ethernet/marvell/mvneta.c @@@ -2081,7 -2081,11 +2081,11 @@@ static in mvneta_run_xdp(struct mvneta_port *pp, struct mvneta_rx_queue *rxq, struct bpf_prog *prog, struct xdp_buff *xdp) { - u32 ret, act = bpf_prog_run_xdp(prog, xdp); + unsigned int len; + u32 ret, act; + + len = xdp->data_end - xdp->data_hard_start - pp->rx_offset_correction; + act = bpf_prog_run_xdp(prog, xdp);
switch (act) { case XDP_PASS: @@@ -2094,9 -2098,8 +2098,8 @@@ if (err) { ret = MVNETA_XDP_DROPPED; __page_pool_put_page(rxq->page_pool, - virt_to_head_page(xdp->data), - xdp->data_end - xdp->data_hard_start, - true); + virt_to_head_page(xdp->data), + len, true); } else { ret = MVNETA_XDP_REDIR; } @@@ -2106,9 -2109,8 +2109,8 @@@ ret = mvneta_xdp_xmit_back(pp, xdp); if (ret != MVNETA_XDP_TX) __page_pool_put_page(rxq->page_pool, - virt_to_head_page(xdp->data), - xdp->data_end - xdp->data_hard_start, - true); + virt_to_head_page(xdp->data), + len, true); break; default: bpf_warn_invalid_xdp_action(act); @@@ -2119,8 -2121,7 +2121,7 @@@ case XDP_DROP: __page_pool_put_page(rxq->page_pool, virt_to_head_page(xdp->data), - xdp->data_end - xdp->data_hard_start, - true); + len, true); ret = MVNETA_XDP_DROPPED; break; } @@@ -3071,7 -3072,7 +3072,7 @@@ static int mvneta_create_page_pool(stru .order = 0, .flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV, .pool_size = size, - .nid = cpu_to_node(0), + .nid = NUMA_NO_NODE, .dev = pp->dev->dev.parent, .dma_dir = xdp_prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE, .offset = pp->rx_offset_correction, diff --combined drivers/net/ethernet/mellanox/mlxsw/spectrum.c index cc30a69a6df0,8ed15199eb4f..8639f32ec4d5 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c @@@ -45,9 -45,11 +45,9 @@@ #include "spectrum_ptp.h" #include "../mlxfw/mlxfw.h"
-#define MLXSW_SP_FWREV_MINOR_TO_BRANCH(minor) ((minor) / 100) - #define MLXSW_SP1_FWREV_MAJOR 13 #define MLXSW_SP1_FWREV_MINOR 2000 -#define MLXSW_SP1_FWREV_SUBMINOR 2308 +#define MLXSW_SP1_FWREV_SUBMINOR 2714 #define MLXSW_SP1_FWREV_CAN_RESET_MINOR 1702
static const struct mlxsw_fw_rev mlxsw_sp1_fw_rev = { @@@ -64,7 -66,7 +64,7 @@@
#define MLXSW_SP2_FWREV_MAJOR 29 #define MLXSW_SP2_FWREV_MINOR 2000 -#define MLXSW_SP2_FWREV_SUBMINOR 2308 +#define MLXSW_SP2_FWREV_SUBMINOR 2714
static const struct mlxsw_fw_rev mlxsw_sp2_fw_rev = { .major = MLXSW_SP2_FWREV_MAJOR, @@@ -421,12 -423,13 +421,12 @@@ static int mlxsw_sp_fw_rev_validate(str rev->major, req_rev->major); return -EINVAL; } - if (MLXSW_SP_FWREV_MINOR_TO_BRANCH(rev->minor) == - MLXSW_SP_FWREV_MINOR_TO_BRANCH(req_rev->minor) && - mlxsw_core_fw_rev_minor_subminor_validate(rev, req_rev)) + if (mlxsw_core_fw_rev_minor_subminor_validate(rev, req_rev)) return 0;
- dev_info(mlxsw_sp->bus_info->dev, "The firmware version %d.%d.%d is incompatible with the driver\n", - rev->major, rev->minor, rev->subminor); + dev_err(mlxsw_sp->bus_info->dev, "The firmware version %d.%d.%d is incompatible with the driver (required >= %d.%d.%d)\n", + rev->major, rev->minor, rev->subminor, req_rev->major, + req_rev->minor, req_rev->subminor); dev_info(mlxsw_sp->bus_info->dev, "Flashing firmware using file %s\n", fw_filename);
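The transmit-path hunk that follows swaps the open-coded skb_realloc_headroom() dance for skb_cow_head(), which only copies when the skb is shared or actually lacks the requested headroom. The adopted shape, roughly (drop accounting elided):

	static netdev_tx_t example_xmit(struct sk_buff *skb, struct net_device *dev)
	{
		/* Guarantee MLXSW_TXHDR_LEN writable bytes of headroom; on
		 * failure there is nothing to unwind, just drop the skb.
		 */
		if (skb_cow_head(skb, MLXSW_TXHDR_LEN)) {
			dev_kfree_skb_any(skb);
			return NETDEV_TX_OK;
		}
		/* ... build the TX header in the now-writable headroom ... */
		return NETDEV_TX_OK;
	}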
@@@ -857,23 -860,17 +857,17 @@@ static netdev_tx_t mlxsw_sp_port_xmit(s u64 len; int err;
+ if (skb_cow_head(skb, MLXSW_TXHDR_LEN)) { + this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped); + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; + } + memset(skb->cb, 0, sizeof(struct mlxsw_skb_cb));
if (mlxsw_core_skb_transmit_busy(mlxsw_sp->core, &tx_info)) return NETDEV_TX_BUSY;
- if (unlikely(skb_headroom(skb) < MLXSW_TXHDR_LEN)) { - struct sk_buff *skb_orig = skb; - - skb = skb_realloc_headroom(skb, MLXSW_TXHDR_LEN); - if (!skb) { - this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped); - dev_kfree_skb_any(skb_orig); - return NETDEV_TX_OK; - } - dev_consume_skb_any(skb_orig); - } - if (eth_skb_pad(skb)) { this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped); return NETDEV_TX_OK; @@@ -1212,6 -1209,9 +1206,9 @@@ static void update_stats_cache(struct w periodic_hw_stats.update_dw.work);
if (!netif_carrier_ok(mlxsw_sp_port->dev)) + /* Note: mlxsw_sp_port_down_wipe_counters() clears the cache as + * necessary when port goes down. + */ goto out;
mlxsw_sp_port_get_hw_stats(mlxsw_sp_port->dev, @@@ -1793,8 -1793,6 +1790,8 @@@ static int mlxsw_sp_setup_tc(struct net return mlxsw_sp_setup_tc_red(mlxsw_sp_port, type_data); case TC_SETUP_QDISC_PRIO: return mlxsw_sp_setup_tc_prio(mlxsw_sp_port, type_data); + case TC_SETUP_QDISC_ETS: + return mlxsw_sp_setup_tc_ets(mlxsw_sp_port, type_data); default: return -EOPNOTSUPP; } @@@ -3601,25 -3599,26 +3598,25 @@@ static int mlxsw_sp_port_ets_init(struc * one subgroup, which are all member in the same group. */ err = mlxsw_sp_port_ets_set(mlxsw_sp_port, - MLXSW_REG_QEEC_HIERARCY_GROUP, 0, 0, false, - 0); + MLXSW_REG_QEEC_HR_GROUP, 0, 0, false, 0); if (err) return err; for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { err = mlxsw_sp_port_ets_set(mlxsw_sp_port, - MLXSW_REG_QEEC_HIERARCY_SUBGROUP, i, + MLXSW_REG_QEEC_HR_SUBGROUP, i, 0, false, 0); if (err) return err; } for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { err = mlxsw_sp_port_ets_set(mlxsw_sp_port, - MLXSW_REG_QEEC_HIERARCY_TC, i, i, + MLXSW_REG_QEEC_HR_TC, i, i, false, 0); if (err) return err;
err = mlxsw_sp_port_ets_set(mlxsw_sp_port, - MLXSW_REG_QEEC_HIERARCY_TC, + MLXSW_REG_QEEC_HR_TC, i + 8, i, true, 100); if (err) @@@ -3631,13 -3630,13 +3628,13 @@@ * for the initial configuration. */ err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port, - MLXSW_REG_QEEC_HIERARCY_PORT, 0, 0, + MLXSW_REG_QEEC_HR_PORT, 0, 0, MLXSW_REG_QEEC_MAS_DIS); if (err) return err; for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port, - MLXSW_REG_QEEC_HIERARCY_SUBGROUP, + MLXSW_REG_QEEC_HR_SUBGROUP, i, 0, MLXSW_REG_QEEC_MAS_DIS); if (err) @@@ -3645,14 -3644,14 +3642,14 @@@ } for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port, - MLXSW_REG_QEEC_HIERARCY_TC, + MLXSW_REG_QEEC_HR_TC, i, i, MLXSW_REG_QEEC_MAS_DIS); if (err) return err;
err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port, - MLXSW_REG_QEEC_HIERARCY_TC, + MLXSW_REG_QEEC_HR_TC, i + 8, i, MLXSW_REG_QEEC_MAS_DIS); if (err) @@@ -3662,7 -3661,7 +3659,7 @@@ /* Configure the min shaper for multicast TCs. */ for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { err = mlxsw_sp_port_min_bw_set(mlxsw_sp_port, - MLXSW_REG_QEEC_HIERARCY_TC, + MLXSW_REG_QEEC_HR_TC, i + 8, i, MLXSW_REG_QEEC_MIS_MIN); if (err) @@@ -4322,6 -4321,15 +4319,15 @@@ static int mlxsw_sp_port_unsplit(struc return 0; }
+ static void + mlxsw_sp_port_down_wipe_counters(struct mlxsw_sp_port *mlxsw_sp_port) + { + int i; + + for (i = 0; i < TC_MAX_QUEUE; i++) + mlxsw_sp_port->periodic_hw_stats.xstats.backlog[i] = 0; + } + static void mlxsw_sp_pude_event_func(const struct mlxsw_reg_info *reg, char *pude_pl, void *priv) { @@@ -4343,6 -4351,7 +4349,7 @@@ } else { netdev_info(mlxsw_sp_port->dev, "link down\n"); netif_carrier_off(mlxsw_sp_port->dev); + mlxsw_sp_port_down_wipe_counters(mlxsw_sp_port); } }
@@@ -4538,16 -4547,10 +4545,16 @@@ static const struct mlxsw_listener mlxs false), MLXSW_SP_RXL_MARK(ROUTER_ALERT_IPV4, TRAP_TO_CPU, ROUTER_EXP, false), MLXSW_SP_RXL_MARK(ROUTER_ALERT_IPV6, TRAP_TO_CPU, ROUTER_EXP, false), - MLXSW_SP_RXL_MARK(IPIP_DECAP_ERROR, TRAP_TO_CPU, ROUTER_EXP, false), - MLXSW_SP_RXL_MARK(DECAP_ECN0, TRAP_TO_CPU, ROUTER_EXP, false), MLXSW_SP_RXL_MARK(IPV4_VRRP, TRAP_TO_CPU, VRRP, false), MLXSW_SP_RXL_MARK(IPV6_VRRP, TRAP_TO_CPU, VRRP, false), + MLXSW_SP_RXL_NO_MARK(DISCARD_ING_ROUTER_SIP_CLASS_E, FORWARD, + ROUTER_EXP, false), + MLXSW_SP_RXL_NO_MARK(DISCARD_ING_ROUTER_MC_DMAC, FORWARD, + ROUTER_EXP, false), + MLXSW_SP_RXL_NO_MARK(DISCARD_ING_ROUTER_SIP_DIP, FORWARD, + ROUTER_EXP, false), + MLXSW_SP_RXL_NO_MARK(DISCARD_ING_ROUTER_DIP_LINK_LOCAL, FORWARD, + ROUTER_EXP, false), /* PKT Sample trap */ MLXSW_RXL(mlxsw_sp_rx_listener_sample_func, PKT_SAMPLE, MIRROR_TO_CPU, false, SP_IP2ME, DISCARD), @@@ -5136,6 -5139,27 +5143,27 @@@ static int mlxsw_sp2_init(struct mlxsw_ return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info, extack); }
+ static int mlxsw_sp3_init(struct mlxsw_core *mlxsw_core, + const struct mlxsw_bus_info *mlxsw_bus_info, + struct netlink_ext_ack *extack) + { + struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); + + mlxsw_sp->kvdl_ops = &mlxsw_sp2_kvdl_ops; + mlxsw_sp->afa_ops = &mlxsw_sp2_act_afa_ops; + mlxsw_sp->afk_ops = &mlxsw_sp2_afk_ops; + mlxsw_sp->mr_tcam_ops = &mlxsw_sp2_mr_tcam_ops; + mlxsw_sp->acl_tcam_ops = &mlxsw_sp2_acl_tcam_ops; + mlxsw_sp->nve_ops_arr = mlxsw_sp2_nve_ops_arr; + mlxsw_sp->mac_mask = mlxsw_sp2_mac_mask; + mlxsw_sp->rif_ops_arr = mlxsw_sp2_rif_ops_arr; + mlxsw_sp->sb_vals = &mlxsw_sp2_sb_vals; + mlxsw_sp->port_type_speed_ops = &mlxsw_sp2_port_type_speed_ops; + mlxsw_sp->ptp_ops = &mlxsw_sp2_ptp_ops; + + return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info, extack); + } + static void mlxsw_sp_fini(struct mlxsw_core *mlxsw_core) { struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); @@@ -5638,7 -5662,7 +5666,7 @@@ static struct mlxsw_driver mlxsw_sp2_dr static struct mlxsw_driver mlxsw_sp3_driver = { .kind = mlxsw_sp3_driver_name, .priv_size = sizeof(struct mlxsw_sp), - .init = mlxsw_sp2_init, + .init = mlxsw_sp3_init, .fini = mlxsw_sp_fini, .basic_trap_groups_set = mlxsw_sp_basic_trap_groups_set, .port_split = mlxsw_sp_port_split, diff --combined drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c index 54807b4930fe,0124bfe1963b..d57c9b15f45e --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c @@@ -18,7 -18,6 +18,7 @@@ enum mlxsw_sp_qdisc_type MLXSW_SP_QDISC_NO_QDISC, MLXSW_SP_QDISC_RED, MLXSW_SP_QDISC_PRIO, + MLXSW_SP_QDISC_ETS, };
struct mlxsw_sp_qdisc_ops { @@@ -196,6 -195,20 +196,20 @@@ mlxsw_sp_qdisc_get_xstats(struct mlxsw_ return -EOPNOTSUPP; }
+ static u64 + mlxsw_sp_xstats_backlog(struct mlxsw_sp_port_xstats *xstats, int tclass_num) + { + return xstats->backlog[tclass_num] + + xstats->backlog[tclass_num + 8]; + } + + static u64 + mlxsw_sp_xstats_tail_drop(struct mlxsw_sp_port_xstats *xstats, int tclass_num) + { + return xstats->tail_drop[tclass_num] + + xstats->tail_drop[tclass_num + 8]; + } + static void mlxsw_sp_qdisc_bstats_per_priority_get(struct mlxsw_sp_port_xstats *xstats, u8 prio_bitmap, u64 *tx_packets, @@@ -270,7 -283,7 +284,7 @@@ mlxsw_sp_setup_tc_qdisc_red_clean_stats &stats_base->tx_bytes); red_base->prob_mark = xstats->ecn; red_base->prob_drop = xstats->wred_drop[tclass_num]; - red_base->pdrop = xstats->tail_drop[tclass_num]; + red_base->pdrop = mlxsw_sp_xstats_tail_drop(xstats, tclass_num);
stats_base->overlimits = red_base->prob_drop + red_base->prob_mark; stats_base->drops = red_base->prob_drop + red_base->pdrop; @@@ -371,7 -384,8 +385,8 @@@ mlxsw_sp_qdisc_get_red_xstats(struct ml
early_drops = xstats->wred_drop[tclass_num] - xstats_base->prob_drop; marks = xstats->ecn - xstats_base->prob_mark; - pdrops = xstats->tail_drop[tclass_num] - xstats_base->pdrop; + pdrops = mlxsw_sp_xstats_tail_drop(xstats, tclass_num) - + xstats_base->pdrop;
res->pdrop += pdrops; res->prob_drop += early_drops; @@@ -404,9 -418,10 +419,10 @@@ mlxsw_sp_qdisc_get_red_stats(struct mlx
overlimits = xstats->wred_drop[tclass_num] + xstats->ecn - stats_base->overlimits; - drops = xstats->wred_drop[tclass_num] + xstats->tail_drop[tclass_num] - + drops = xstats->wred_drop[tclass_num] + + mlxsw_sp_xstats_tail_drop(xstats, tclass_num) - stats_base->drops; - backlog = xstats->backlog[tclass_num]; + backlog = mlxsw_sp_xstats_backlog(xstats, tclass_num);
_bstats_update(stats_ptr->bstats, tx_bytes, tx_packets); stats_ptr->qstats->overlimits += overlimits; @@@ -472,16 -487,14 +488,16 @@@ int mlxsw_sp_setup_tc_red(struct mlxsw_ }
static int -mlxsw_sp_qdisc_prio_destroy(struct mlxsw_sp_port *mlxsw_sp_port, - struct mlxsw_sp_qdisc *mlxsw_sp_qdisc) +__mlxsw_sp_qdisc_ets_destroy(struct mlxsw_sp_port *mlxsw_sp_port) { int i;
for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { mlxsw_sp_port_prio_tc_set(mlxsw_sp_port, i, MLXSW_SP_PORT_DEFAULT_TCLASS); + mlxsw_sp_port_ets_set(mlxsw_sp_port, + MLXSW_REG_QEEC_HR_SUBGROUP, + i, 0, false, 0); mlxsw_sp_qdisc_destroy(mlxsw_sp_port, &mlxsw_sp_port->tclass_qdiscs[i]); mlxsw_sp_port->tclass_qdiscs[i].prio_bitmap = 0; @@@ -491,58 -504,36 +507,58 @@@ }
static int -mlxsw_sp_qdisc_prio_check_params(struct mlxsw_sp_port *mlxsw_sp_port, - struct mlxsw_sp_qdisc *mlxsw_sp_qdisc, - void *params) +mlxsw_sp_qdisc_prio_destroy(struct mlxsw_sp_port *mlxsw_sp_port, + struct mlxsw_sp_qdisc *mlxsw_sp_qdisc) { - struct tc_prio_qopt_offload_params *p = params; + return __mlxsw_sp_qdisc_ets_destroy(mlxsw_sp_port); +}
- if (p->bands > IEEE_8021QAZ_MAX_TCS) +static int +__mlxsw_sp_qdisc_ets_check_params(unsigned int nbands) +{ + if (nbands > IEEE_8021QAZ_MAX_TCS) return -EOPNOTSUPP;
return 0; }
static int -mlxsw_sp_qdisc_prio_replace(struct mlxsw_sp_port *mlxsw_sp_port, - struct mlxsw_sp_qdisc *mlxsw_sp_qdisc, - void *params) +mlxsw_sp_qdisc_prio_check_params(struct mlxsw_sp_port *mlxsw_sp_port, + struct mlxsw_sp_qdisc *mlxsw_sp_qdisc, + void *params) { struct tc_prio_qopt_offload_params *p = params; + + return __mlxsw_sp_qdisc_ets_check_params(p->bands); +} + +static int +__mlxsw_sp_qdisc_ets_replace(struct mlxsw_sp_port *mlxsw_sp_port, + unsigned int nbands, + const unsigned int *quanta, + const unsigned int *weights, + const u8 *priomap) +{ struct mlxsw_sp_qdisc *child_qdisc; int tclass, i, band, backlog; u8 old_priomap; int err;
- for (band = 0; band < p->bands; band++) { + for (band = 0; band < nbands; band++) { tclass = MLXSW_SP_PRIO_BAND_TO_TCLASS(band); child_qdisc = &mlxsw_sp_port->tclass_qdiscs[tclass]; old_priomap = child_qdisc->prio_bitmap; child_qdisc->prio_bitmap = 0; + + err = mlxsw_sp_port_ets_set(mlxsw_sp_port, + MLXSW_REG_QEEC_HR_SUBGROUP, + tclass, 0, !!quanta[band], + weights[band]); + if (err) + return err; + for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { - if (p->priomap[i] == band) { + if (priomap[i] == band) { child_qdisc->prio_bitmap |= BIT(i); if (BIT(i) & old_priomap) continue; @@@ -565,46 -556,21 +581,46 @@@ child_qdisc = &mlxsw_sp_port->tclass_qdiscs[tclass]; child_qdisc->prio_bitmap = 0; mlxsw_sp_qdisc_destroy(mlxsw_sp_port, child_qdisc); + mlxsw_sp_port_ets_set(mlxsw_sp_port, + MLXSW_REG_QEEC_HR_SUBGROUP, + tclass, 0, false, 0); } return 0; }
+static int +mlxsw_sp_qdisc_prio_replace(struct mlxsw_sp_port *mlxsw_sp_port, + struct mlxsw_sp_qdisc *mlxsw_sp_qdisc, + void *params) +{ + struct tc_prio_qopt_offload_params *p = params; + unsigned int zeroes[TCQ_ETS_MAX_BANDS] = {0}; + + return __mlxsw_sp_qdisc_ets_replace(mlxsw_sp_port, p->bands, + zeroes, zeroes, p->priomap); +} + +static void +__mlxsw_sp_qdisc_ets_unoffload(struct mlxsw_sp_port *mlxsw_sp_port, + struct mlxsw_sp_qdisc *mlxsw_sp_qdisc, + struct gnet_stats_queue *qstats) +{ + u64 backlog; + + backlog = mlxsw_sp_cells_bytes(mlxsw_sp_port->mlxsw_sp, + mlxsw_sp_qdisc->stats_base.backlog); + qstats->backlog -= backlog; +} + static void mlxsw_sp_qdisc_prio_unoffload(struct mlxsw_sp_port *mlxsw_sp_port, struct mlxsw_sp_qdisc *mlxsw_sp_qdisc, void *params) { struct tc_prio_qopt_offload_params *p = params; - u64 backlog;
- backlog = mlxsw_sp_cells_bytes(mlxsw_sp_port->mlxsw_sp, - mlxsw_sp_qdisc->stats_base.backlog); - p->qstats->backlog -= backlog; + __mlxsw_sp_qdisc_ets_unoffload(mlxsw_sp_port, mlxsw_sp_qdisc, + p->qstats); }
static int @@@ -626,9 -592,9 +642,9 @@@ mlxsw_sp_qdisc_get_prio_stats(struct ml tx_packets = stats->tx_packets - stats_base->tx_packets;
for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { - drops += xstats->tail_drop[i]; + drops += mlxsw_sp_xstats_tail_drop(xstats, i); drops += xstats->wred_drop[i]; - backlog += xstats->backlog[i]; + backlog += mlxsw_sp_xstats_backlog(xstats, i); } drops = drops - stats_base->drops;
@@@ -664,7 -630,7 +680,7 @@@ mlxsw_sp_setup_tc_qdisc_prio_clean_stat
stats_base->drops = 0; for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { - stats_base->drops += xstats->tail_drop[i]; + stats_base->drops += mlxsw_sp_xstats_tail_drop(xstats, i); stats_base->drops += xstats->wred_drop[i]; }
@@@ -681,93 -647,27 +697,93 @@@ static struct mlxsw_sp_qdisc_ops mlxsw_ .clean_stats = mlxsw_sp_setup_tc_qdisc_prio_clean_stats, };
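The __mlxsw_sp_qdisc_ets_*() helpers factored out above gain their second consumer in the hunk below, a full ETS offload; PRIO is now just the special case of ETS with all-zero quanta, i.e. every band strict. From user space the new offload would be exercised with the ets qdisc from the same development cycle; an illustrative invocation, not taken from the patch:

 # tc qdisc replace dev swp1 root handle 1: \
 #	ets bands 4 strict 2 quanta 4000 2000 \
 #	priomap 3 3 2 2 1 1 0 0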
-/* Grafting is not supported in mlxsw. It will result in un-offloading of the - * grafted qdisc as well as the qdisc in the qdisc new location. - * (However, if the graft is to the location where the qdisc is already at, it - * will be ignored completely and won't cause un-offloading). +static int +mlxsw_sp_qdisc_ets_check_params(struct mlxsw_sp_port *mlxsw_sp_port, + struct mlxsw_sp_qdisc *mlxsw_sp_qdisc, + void *params) +{ + struct tc_ets_qopt_offload_replace_params *p = params; + + return __mlxsw_sp_qdisc_ets_check_params(p->bands); +} + +static int +mlxsw_sp_qdisc_ets_replace(struct mlxsw_sp_port *mlxsw_sp_port, + struct mlxsw_sp_qdisc *mlxsw_sp_qdisc, + void *params) +{ + struct tc_ets_qopt_offload_replace_params *p = params; + + return __mlxsw_sp_qdisc_ets_replace(mlxsw_sp_port, p->bands, + p->quanta, p->weights, p->priomap); +} + +static void +mlxsw_sp_qdisc_ets_unoffload(struct mlxsw_sp_port *mlxsw_sp_port, + struct mlxsw_sp_qdisc *mlxsw_sp_qdisc, + void *params) +{ + struct tc_ets_qopt_offload_replace_params *p = params; + + __mlxsw_sp_qdisc_ets_unoffload(mlxsw_sp_port, mlxsw_sp_qdisc, + p->qstats); +} + +static int +mlxsw_sp_qdisc_ets_destroy(struct mlxsw_sp_port *mlxsw_sp_port, + struct mlxsw_sp_qdisc *mlxsw_sp_qdisc) +{ + return __mlxsw_sp_qdisc_ets_destroy(mlxsw_sp_port); +} + +static struct mlxsw_sp_qdisc_ops mlxsw_sp_qdisc_ops_ets = { + .type = MLXSW_SP_QDISC_ETS, + .check_params = mlxsw_sp_qdisc_ets_check_params, + .replace = mlxsw_sp_qdisc_ets_replace, + .unoffload = mlxsw_sp_qdisc_ets_unoffload, + .destroy = mlxsw_sp_qdisc_ets_destroy, + .get_stats = mlxsw_sp_qdisc_get_prio_stats, + .clean_stats = mlxsw_sp_setup_tc_qdisc_prio_clean_stats, +}; + +/* Linux allows linking of Qdiscs to arbitrary classes (so long as the resulting + * graph is free of cycles). These operations do not change the parent handle + * though, which means it can be incomplete (if there is more than one class + * where the Qdisc in question is grafted) or outright wrong (if the Qdisc was + * linked to a different class and then removed from the original class). + * + * E.g. consider this sequence of operations: + * + * # tc qdisc add dev swp1 root handle 1: prio + * # tc qdisc add dev swp1 parent 1:3 handle 13: red limit 1000000 avpkt 10000 + * RED: set bandwidth to 10Mbit + * # tc qdisc link dev swp1 handle 13: parent 1:2 + * + * At this point, both 1:2 and 1:3 have the same RED Qdisc instance as their + * child. But RED will still only claim that 1:3 is its parent. If it's removed + * from that band, its only parent will be 1:2, but it will continue to claim + * that it is in fact 1:3. + * + * The notification for child Qdisc replace (e.g. TC_RED_REPLACE) comes before + * the notification for parent graft (e.g. TC_PRIO_GRAFT). We take the replace + * notification to offload the child Qdisc, based on its parent handle, and use + * the graft operation to validate that the class where the child is actually + * grafted corresponds to the parent handle. If the two don't match, we + * unoffload the child. */ static int -mlxsw_sp_qdisc_prio_graft(struct mlxsw_sp_port *mlxsw_sp_port, - struct mlxsw_sp_qdisc *mlxsw_sp_qdisc, - struct tc_prio_qopt_offload_graft_params *p) +__mlxsw_sp_qdisc_ets_graft(struct mlxsw_sp_port *mlxsw_sp_port, + struct mlxsw_sp_qdisc *mlxsw_sp_qdisc, + u8 band, u32 child_handle) { - int tclass_num = MLXSW_SP_PRIO_BAND_TO_TCLASS(p->band); + int tclass_num = MLXSW_SP_PRIO_BAND_TO_TCLASS(band); struct mlxsw_sp_qdisc *old_qdisc;
- /* Check if the grafted qdisc is already in its "new" location. If so - - * nothing needs to be done. - */ - if (p->band < IEEE_8021QAZ_MAX_TCS && - mlxsw_sp_port->tclass_qdiscs[tclass_num].handle == p->child_handle) + if (band < IEEE_8021QAZ_MAX_TCS && + mlxsw_sp_port->tclass_qdiscs[tclass_num].handle == child_handle) return 0;
- if (!p->child_handle) { + if (!child_handle) { /* This is an invisible FIFO replacing the original Qdisc. * Ignore it--the original Qdisc's destroy will follow. */ @@@ -778,7 -678,7 +794,7 @@@ * unoffload it. */ old_qdisc = mlxsw_sp_qdisc_find_by_handle(mlxsw_sp_port, - p->child_handle); + child_handle); if (old_qdisc) mlxsw_sp_qdisc_destroy(mlxsw_sp_port, old_qdisc);
@@@ -787,15 -687,6 +803,15 @@@ return -EOPNOTSUPP; }
+static int +mlxsw_sp_qdisc_prio_graft(struct mlxsw_sp_port *mlxsw_sp_port, + struct mlxsw_sp_qdisc *mlxsw_sp_qdisc, + struct tc_prio_qopt_offload_graft_params *p) +{ + return __mlxsw_sp_qdisc_ets_graft(mlxsw_sp_port, mlxsw_sp_qdisc, + p->band, p->child_handle); +} + int mlxsw_sp_setup_tc_prio(struct mlxsw_sp_port *mlxsw_sp_port, struct tc_prio_qopt_offload *p) { @@@ -829,40 -720,6 +845,40 @@@ } }
+int mlxsw_sp_setup_tc_ets(struct mlxsw_sp_port *mlxsw_sp_port, + struct tc_ets_qopt_offload *p) +{ + struct mlxsw_sp_qdisc *mlxsw_sp_qdisc; + + mlxsw_sp_qdisc = mlxsw_sp_qdisc_find(mlxsw_sp_port, p->parent, true); + if (!mlxsw_sp_qdisc) + return -EOPNOTSUPP; + + if (p->command == TC_ETS_REPLACE) + return mlxsw_sp_qdisc_replace(mlxsw_sp_port, p->handle, + mlxsw_sp_qdisc, + &mlxsw_sp_qdisc_ops_ets, + &p->replace_params); + + if (!mlxsw_sp_qdisc_compare(mlxsw_sp_qdisc, p->handle, + MLXSW_SP_QDISC_ETS)) + return -EOPNOTSUPP; + + switch (p->command) { + case TC_ETS_DESTROY: + return mlxsw_sp_qdisc_destroy(mlxsw_sp_port, mlxsw_sp_qdisc); + case TC_ETS_STATS: + return mlxsw_sp_qdisc_get_stats(mlxsw_sp_port, mlxsw_sp_qdisc, + &p->stats); + case TC_ETS_GRAFT: + return __mlxsw_sp_qdisc_ets_graft(mlxsw_sp_port, mlxsw_sp_qdisc, + p->graft_params.band, + p->graft_params.child_handle); + default: + return -EOPNOTSUPP; + } +} + int mlxsw_sp_tc_qdisc_init(struct mlxsw_sp_port *mlxsw_sp_port) { struct mlxsw_sp_qdisc *mlxsw_sp_qdisc; diff --combined drivers/net/ethernet/renesas/sh_eth.c index cdd8ab2eb910,3591285250e1..c922d7a553f2 --- a/drivers/net/ethernet/renesas/sh_eth.c +++ b/drivers/net/ethernet/renesas/sh_eth.c @@@ -2204,24 -2204,28 +2204,28 @@@ static size_t __sh_eth_get_regs(struct if (cd->tsu) { add_tsu_reg(ARSTR); add_tsu_reg(TSU_CTRST); - add_tsu_reg(TSU_FWEN0); - add_tsu_reg(TSU_FWEN1); - add_tsu_reg(TSU_FCM); - add_tsu_reg(TSU_BSYSL0); - add_tsu_reg(TSU_BSYSL1); - add_tsu_reg(TSU_PRISL0); - add_tsu_reg(TSU_PRISL1); - add_tsu_reg(TSU_FWSL0); - add_tsu_reg(TSU_FWSL1); + if (cd->dual_port) { + add_tsu_reg(TSU_FWEN0); + add_tsu_reg(TSU_FWEN1); + add_tsu_reg(TSU_FCM); + add_tsu_reg(TSU_BSYSL0); + add_tsu_reg(TSU_BSYSL1); + add_tsu_reg(TSU_PRISL0); + add_tsu_reg(TSU_PRISL1); + add_tsu_reg(TSU_FWSL0); + add_tsu_reg(TSU_FWSL1); + } add_tsu_reg(TSU_FWSLC); - add_tsu_reg(TSU_QTAGM0); - add_tsu_reg(TSU_QTAGM1); - add_tsu_reg(TSU_FWSR); - add_tsu_reg(TSU_FWINMK); - add_tsu_reg(TSU_ADQT0); - add_tsu_reg(TSU_ADQT1); - add_tsu_reg(TSU_VTAG0); - add_tsu_reg(TSU_VTAG1); + if (cd->dual_port) { + add_tsu_reg(TSU_QTAGM0); + add_tsu_reg(TSU_QTAGM1); + add_tsu_reg(TSU_FWSR); + add_tsu_reg(TSU_FWINMK); + add_tsu_reg(TSU_ADQT0); + add_tsu_reg(TSU_ADQT1); + add_tsu_reg(TSU_VTAG0); + add_tsu_reg(TSU_VTAG1); + } add_tsu_reg(TSU_ADSBSY); add_tsu_reg(TSU_TEN); add_tsu_reg(TSU_POST1); @@@ -2478,7 -2482,7 +2482,7 @@@ out_napi_off }
/* Timeout function */ -static void sh_eth_tx_timeout(struct net_device *ndev) +static void sh_eth_tx_timeout(struct net_device *ndev, unsigned int txqueue) { struct sh_eth_private *mdp = netdev_priv(ndev); struct sh_eth_rxdesc *rxdesc; diff --combined drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c index 5a3aea628fc2,450d7dac3ea6..2aba2673d6c3 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c @@@ -14,7 -14,6 +14,7 @@@ #include <linux/phy.h> #include <linux/udp.h> #include <net/pkt_cls.h> +#include <net/pkt_sched.h> #include <net/tcp.h> #include <net/udp.h> #include <net/tc_act/tc_gact.h> @@@ -51,7 -50,6 +51,7 @@@ struct stmmac_packet_attrs u8 id; int sarc; u16 queue_mapping; + u64 timestamp; };
static u8 stmmac_test_next_id; @@@ -82,7 -80,7 +82,7 @@@ static struct sk_buff *stmmac_test_get_ if (attr->max_size && (attr->max_size > size)) size = attr->max_size;
- skb = netdev_alloc_skb_ip_align(priv->dev, size); + skb = netdev_alloc_skb(priv->dev, size); if (!skb) return NULL;
@@@ -210,9 -208,6 +210,9 @@@ skb->pkt_type = PACKET_HOST; skb->dev = priv->dev;
+ if (attr->timestamp) + skb->tstamp = ns_to_ktime(attr->timestamp); + return skb; }
@@@ -249,6 -244,8 +249,8 @@@ static int stmmac_test_loopback_validat struct net_device *orig_ndev) { struct stmmac_test_priv *tpriv = pt->af_packet_priv; + unsigned char *src = tpriv->packet->src; + unsigned char *dst = tpriv->packet->dst; struct stmmachdr *shdr; struct ethhdr *ehdr; struct udphdr *uhdr; @@@ -265,15 -262,15 +267,15 @@@ goto out;
ehdr = (struct ethhdr *)skb_mac_header(skb); - if (tpriv->packet->dst) { - if (!ether_addr_equal(ehdr->h_dest, tpriv->packet->dst)) + if (dst) { + if (!ether_addr_equal_unaligned(ehdr->h_dest, dst)) goto out; } if (tpriv->packet->sarc) { - if (!ether_addr_equal(ehdr->h_source, ehdr->h_dest)) + if (!ether_addr_equal_unaligned(ehdr->h_source, ehdr->h_dest)) goto out; - } else if (tpriv->packet->src) { - if (!ether_addr_equal(ehdr->h_source, tpriv->packet->src)) + } else if (src) { + if (!ether_addr_equal_unaligned(ehdr->h_source, src)) goto out; }
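Several validators in this file move from ether_addr_equal() to ether_addr_equal_unaligned(); together with the switch from netdev_alloc_skb_ip_align() to netdev_alloc_skb() above, the Ethernet header of a looped-back test frame is no longer guaranteed 2-byte aligned, and ether_addr_equal() documents a u16-alignment requirement on both operands. A hedged demonstration of the difference:

	static void example_check(const struct net_device *dev)
	{
		u8 buf[ETH_ALEN + 2] __aligned(2);
		u8 *mac = buf + 1;	/* guaranteed 2-byte misaligned */

		memcpy(mac, dev->dev_addr, ETH_ALEN);
		/* ether_addr_equal(mac, ...) would violate the documented
		 * alignment contract here; the _unaligned variant compares
		 * byte by byte and is safe for any pointer.
		 */
		WARN_ON(!ether_addr_equal_unaligned(mac, dev->dev_addr));
	}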
@@@ -344,7 -341,8 +346,7 @@@ static int __stmmac_test_loopback(struc goto cleanup; }
- skb_set_queue_mapping(skb, attr->queue_mapping); - ret = dev_queue_xmit(skb); + ret = dev_direct_xmit(skb, attr->queue_mapping); if (ret) goto cleanup;
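The selftests also drop the skb_set_queue_mapping() + dev_queue_xmit() pair in favour of dev_direct_xmit(), here and in the vlan, dvlan, vlanoff and ARP tests below. dev_direct_xmit() validates the skb and invokes the driver's ndo_start_xmit() on the given queue directly, so a test frame can be neither rescheduled onto another queue nor dropped by the root qdisc. Minimal sketch; the skb is consumed on success and on drop alike:

	static int example_send_on_queue(struct sk_buff *skb, u16 queue)
	{
		/* Bypasses the qdisc layer entirely; returns NET_XMIT_DROP
		 * (with the skb already freed) if the queue is unusable.
		 */
		return dev_direct_xmit(skb, queue);
	}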
@@@ -718,7 -716,7 +720,7 @@@ static int stmmac_test_flowctrl_validat struct ethhdr *ehdr;
ehdr = (struct ethhdr *)skb_mac_header(skb); - if (!ether_addr_equal(ehdr->h_source, orig_ndev->dev_addr)) + if (!ether_addr_equal_unaligned(ehdr->h_source, orig_ndev->dev_addr)) goto out; if (ehdr->h_proto != htons(ETH_P_PAUSE)) goto out; @@@ -855,12 -853,16 +857,16 @@@ static int stmmac_test_vlan_validate(st if (tpriv->vlan_id) { if (skb->vlan_proto != htons(proto)) goto out; - if (skb->vlan_tci != tpriv->vlan_id) + if (skb->vlan_tci != tpriv->vlan_id) { + /* Means filter did not work. */ + tpriv->ok = false; + complete(&tpriv->comp); goto out; + } }
ehdr = (struct ethhdr *)skb_mac_header(skb); - if (!ether_addr_equal(ehdr->h_dest, tpriv->packet->dst)) + if (!ether_addr_equal_unaligned(ehdr->h_dest, tpriv->packet->dst)) goto out;
ihdr = ip_hdr(skb); @@@ -930,7 -932,8 +936,7 @@@ static int __stmmac_test_vlanfilt(struc goto vlan_del; }
- skb_set_queue_mapping(skb, 0); - ret = dev_queue_xmit(skb); + ret = dev_direct_xmit(skb, 0); if (ret) goto vlan_del;
@@@ -968,6 -971,9 +974,9 @@@ static int stmmac_test_vlanfilt_perfect { int ret, prev_cap = priv->dma_cap.vlhash;
+ if (!(priv->dev->features & NETIF_F_HW_VLAN_CTAG_FILTER)) + return -EOPNOTSUPP; + priv->dma_cap.vlhash = 0; ret = __stmmac_test_vlanfilt(priv); priv->dma_cap.vlhash = prev_cap; @@@ -1021,7 -1027,8 +1030,7 @@@ static int __stmmac_test_dvlanfilt(stru goto vlan_del; }
- skb_set_queue_mapping(skb, 0); - ret = dev_queue_xmit(skb); + ret = dev_direct_xmit(skb, 0); if (ret) goto vlan_del;
@@@ -1059,6 -1066,9 +1068,9 @@@ static int stmmac_test_dvlanfilt_perfec { int ret, prev_cap = priv->dma_cap.vlhash;
+ if (!(priv->dev->features & NETIF_F_HW_VLAN_STAG_FILTER)) + return -EOPNOTSUPP; + priv->dma_cap.vlhash = 0; ret = __stmmac_test_dvlanfilt(priv); priv->dma_cap.vlhash = prev_cap; @@@ -1288,7 -1298,8 +1300,7 @@@ static int stmmac_test_vlanoff_common(s __vlan_hwaccel_put_tag(skb, htons(proto), tpriv->vlan_id); skb->protocol = htons(proto);
- skb_set_queue_mapping(skb, 0); - ret = dev_queue_xmit(skb); + ret = dev_direct_xmit(skb, 0); if (ret) goto vlan_del;
@@@ -1324,16 -1335,19 +1336,19 @@@ static int __stmmac_test_l3filt(struct struct stmmac_packet_attrs attr = { }; struct flow_dissector *dissector; struct flow_cls_offload *cls; + int ret, old_enable = 0; struct flow_rule *rule; - int ret;
if (!tc_can_offload(priv->dev)) return -EOPNOTSUPP; if (!priv->dma_cap.l3l4fnum) return -EOPNOTSUPP; - if (priv->rss.enable) + if (priv->rss.enable) { + old_enable = priv->rss.enable; + priv->rss.enable = false; stmmac_rss_configure(priv, priv->hw, NULL, priv->plat->rx_queues_to_use); + }
dissector = kzalloc(sizeof(*dissector), GFP_KERNEL); if (!dissector) { @@@ -1400,7 -1414,8 +1415,8 @@@ cleanup_cls cleanup_dissector: kfree(dissector); cleanup_rss: - if (priv->rss.enable) { + if (old_enable) { + priv->rss.enable = old_enable; stmmac_rss_configure(priv, priv->hw, &priv->rss, priv->plat->rx_queues_to_use); } @@@ -1445,16 -1460,19 +1461,19 @@@ static int __stmmac_test_l4filt(struct struct stmmac_packet_attrs attr = { }; struct flow_dissector *dissector; struct flow_cls_offload *cls; + int ret, old_enable = 0; struct flow_rule *rule; - int ret;
if (!tc_can_offload(priv->dev)) return -EOPNOTSUPP; if (!priv->dma_cap.l3l4fnum) return -EOPNOTSUPP; - if (priv->rss.enable) + if (priv->rss.enable) { + old_enable = priv->rss.enable; + priv->rss.enable = false; stmmac_rss_configure(priv, priv->hw, NULL, priv->plat->rx_queues_to_use); + }
dissector = kzalloc(sizeof(*dissector), GFP_KERNEL); if (!dissector) { @@@ -1526,7 -1544,8 +1545,8 @@@ cleanup_cls cleanup_dissector: kfree(dissector); cleanup_rss: - if (priv->rss.enable) { + if (old_enable) { + priv->rss.enable = old_enable; stmmac_rss_configure(priv, priv->hw, &priv->rss, priv->plat->rx_queues_to_use); } @@@ -1579,7 -1598,7 +1599,7 @@@ static int stmmac_test_arp_validate(str struct arphdr *ahdr;
ehdr = (struct ethhdr *)skb_mac_header(skb); - if (!ether_addr_equal(ehdr->h_dest, tpriv->packet->src)) + if (!ether_addr_equal_unaligned(ehdr->h_dest, tpriv->packet->src)) goto out;
ahdr = arp_hdr(skb); @@@ -1640,7 -1659,8 +1660,7 @@@ static int stmmac_test_arpoffload(struc if (ret) goto cleanup;
- skb_set_queue_mapping(skb, 0); - ret = dev_queue_xmit(skb); + ret = dev_direct_xmit(skb, 0); if (ret) goto cleanup_promisc;
@@@ -1728,68 -1748,6 +1748,68 @@@ static int stmmac_test_sph(struct stmma return 0; }
+static int stmmac_test_tbs(struct stmmac_priv *priv) +{ +#define STMMAC_TBS_LT_OFFSET (500 * 1000 * 1000) /* 500 ms*/ + struct stmmac_packet_attrs attr = { }; + struct tc_etf_qopt_offload qopt; + u64 start_time, curr_time = 0; + unsigned long flags; + int ret, i; + + if (!priv->hwts_tx_en) + return -EOPNOTSUPP; + + /* Find first TBS enabled Queue, if any */ + for (i = 0; i < priv->plat->tx_queues_to_use; i++) + if (priv->tx_queue[i].tbs & STMMAC_TBS_AVAIL) + break; + + if (i >= priv->plat->tx_queues_to_use) + return -EOPNOTSUPP; + + qopt.enable = true; + qopt.queue = i; + + ret = stmmac_tc_setup_etf(priv, priv, &qopt); + if (ret) + return ret; + + spin_lock_irqsave(&priv->ptp_lock, flags); + stmmac_get_systime(priv, priv->ptpaddr, &curr_time); + spin_unlock_irqrestore(&priv->ptp_lock, flags); + + if (!curr_time) { + ret = -EOPNOTSUPP; + goto fail_disable; + } + + start_time = curr_time; + curr_time += STMMAC_TBS_LT_OFFSET; + + attr.dst = priv->dev->dev_addr; + attr.timestamp = curr_time; + attr.timeout = nsecs_to_jiffies(2 * STMMAC_TBS_LT_OFFSET); + attr.queue_mapping = i; + + ret = __stmmac_test_loopback(priv, &attr); + if (ret) + goto fail_disable; + + /* Check if expected time has elapsed */ + spin_lock_irqsave(&priv->ptp_lock, flags); + stmmac_get_systime(priv, priv->ptpaddr, &curr_time); + spin_unlock_irqrestore(&priv->ptp_lock, flags); + + if ((curr_time - start_time) < STMMAC_TBS_LT_OFFSET) + ret = -EINVAL; + +fail_disable: + qopt.enable = false; + stmmac_tc_setup_etf(priv, priv, &qopt); + return ret; +} + #define STMMAC_LOOPBACK_NONE 0 #define STMMAC_LOOPBACK_MAC 1 #define STMMAC_LOOPBACK_PHY 2 @@@ -1923,10 -1881,6 +1943,10 @@@ static const struct stmmac_test .name = "Split Header ", .lb = STMMAC_LOOPBACK_PHY, .fn = stmmac_test_sph, + }, { + .name = "TBS (ETF Scheduler) ", + .lb = STMMAC_LOOPBACK_PHY, + .fn = stmmac_test_tbs, }, };
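stmmac_test_tbs() above drives the hardware's time-based send end to end: it arms the ETF offload on a TBS-capable queue, stamps the frame via the new attr.timestamp field (which stmmac_test_get_udp_skb() copies into skb->tstamp) half a second into the future, and then verifies against the PTP clock that the frame did not leave early. The same datapath is normally reached from user space through the etf qdisc; an illustrative setup whose queue layout and delta are not taken from the patch:

 # tc qdisc replace dev eth0 parent root handle 100 mqprio num_tc 3 \
 #	map 2 2 1 0 2 2 2 2 2 2 2 2 2 2 2 2 queues 1@0 1@1 2@2 hw 0
 # tc qdisc replace dev eth0 parent 100:1 etf \
 #	clockid CLOCK_TAI delta 300000 offload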
@@@ -1935,6 -1889,7 +1955,6 @@@ void stmmac_selftest_run(struct net_dev { struct stmmac_priv *priv = netdev_priv(dev); int count = stmmac_selftest_get_count(priv); - int carrier = netif_carrier_ok(dev); int i, ret;
memset(buf, 0, sizeof(*buf) * count); @@@ -1944,12 -1899,15 +1964,12 @@@ netdev_err(priv->dev, "Only offline tests are supported\n"); etest->flags |= ETH_TEST_FL_FAILED; return; - } else if (!carrier) { + } else if (!netif_carrier_ok(dev)) { netdev_err(priv->dev, "You need valid Link to execute tests\n"); etest->flags |= ETH_TEST_FL_FAILED; return; }
- /* We don't want extra traffic */ - netif_carrier_off(dev); - /* Wait for queues drain */ msleep(200);
@@@ -2004,6 -1962,10 +2024,6 @@@ break; } } - - /* Restart everything */ - if (carrier) - netif_carrier_on(dev); }
void stmmac_selftest_get_strings(struct stmmac_priv *priv, u8 *data) diff --combined drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c index a4ce165af36b,9ffae12a2122..7a01dee2f9a8 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c @@@ -577,6 -577,10 +577,10 @@@ static int tc_setup_cls(struct stmmac_p { int ret = 0;
+ /* When RSS is enabled, the filtering will be bypassed */ + if (priv->rss.enable) + return -EBUSY; + switch (cls->command) { case FLOW_CLS_REPLACE: ret = tc_add_flow(priv, cls); @@@ -591,167 -595,9 +595,167 @@@ return ret; }
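tc_setup_cls() now refuses flower offload outright while RSS is active, since the hardware bypasses the L3/L4 filters in that mode; the selftest changes above, which temporarily clear priv->rss.enable around the filter tests, work around the same limitation. The tc_setup_taprio() handler added next translates a taprio schedule into the GMAC's EST gate control list, sizing entries by the hardware's advertised interval width (estwid) and list depth (estdep) and enabling Frame Preemption when set-and-hold/set-and-release entries appear. A schedule that would reach it, purely illustrative:

 # tc qdisc replace dev eth0 parent root handle 100 taprio \
 #	num_tc 3 map 2 2 1 0 2 2 2 2 2 2 2 2 2 2 2 2 \
 #	queues 1@0 1@1 2@2 base-time 1000000000 \
 #	sched-entry S 01 300000 sched-entry S 02 300000 \
 #	sched-entry S 04 400000 flags 0x2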
+static int tc_setup_taprio(struct stmmac_priv *priv, + struct tc_taprio_qopt_offload *qopt) + { + u32 size, wid = priv->dma_cap.estwid, dep = priv->dma_cap.estdep; + struct plat_stmmacenet_data *plat = priv->plat; + struct timespec64 time; + bool fpe = false; + int i, ret = 0; + u64 ctr; + + if (!priv->dma_cap.estsel) + return -EOPNOTSUPP; + + switch (wid) { + case 0x1: + wid = 16; + break; + case 0x2: + wid = 20; + break; + case 0x3: + wid = 24; + break; + default: + return -EOPNOTSUPP; + } + + switch (dep) { + case 0x1: + dep = 64; + break; + case 0x2: + dep = 128; + break; + case 0x3: + dep = 256; + break; + case 0x4: + dep = 512; + break; + case 0x5: + dep = 1024; + break; + default: + return -EOPNOTSUPP; + } + + if (!qopt->enable) + goto disable; + if (qopt->num_entries >= dep) + return -EINVAL; + if (!qopt->base_time) + return -ERANGE; + if (!qopt->cycle_time) + return -ERANGE; + + if (!plat->est) { + plat->est = devm_kzalloc(priv->device, sizeof(*plat->est), + GFP_KERNEL); + if (!plat->est) + return -ENOMEM; + } else { + memset(plat->est, 0, sizeof(*plat->est)); + } + + size = qopt->num_entries; + + priv->plat->est->gcl_size = size; + priv->plat->est->enable = qopt->enable; + + for (i = 0; i < size; i++) { + s64 delta_ns = qopt->entries[i].interval; + u32 gates = qopt->entries[i].gate_mask; + + if (delta_ns > GENMASK(wid, 0)) + return -ERANGE; + if (gates > GENMASK(31 - wid, 0)) + return -ERANGE; + + switch (qopt->entries[i].command) { + case TC_TAPRIO_CMD_SET_GATES: + if (fpe) + return -EINVAL; + break; + case TC_TAPRIO_CMD_SET_AND_HOLD: + gates |= BIT(0); + fpe = true; + break; + case TC_TAPRIO_CMD_SET_AND_RELEASE: + gates &= ~BIT(0); + fpe = true; + break; + default: + return -EOPNOTSUPP; + } + + priv->plat->est->gcl[i] = delta_ns | (gates << wid); + } + + /* Adjust for real system time */ + time = ktime_to_timespec64(qopt->base_time); + priv->plat->est->btr[0] = (u32)time.tv_nsec; + priv->plat->est->btr[1] = (u32)time.tv_sec; + + ctr = qopt->cycle_time; + priv->plat->est->ctr[0] = do_div(ctr, NSEC_PER_SEC); + priv->plat->est->ctr[1] = (u32)ctr; + + if (fpe && !priv->dma_cap.fpesel) + return -EOPNOTSUPP; + + ret = stmmac_fpe_configure(priv, priv->ioaddr, + priv->plat->tx_queues_to_use, + priv->plat->rx_queues_to_use, fpe); + if (ret && fpe) { + netdev_err(priv->dev, "failed to enable Frame Preemption\n"); + return ret; + } + + ret = stmmac_est_configure(priv, priv->ioaddr, priv->plat->est, + priv->plat->clk_ptp_rate); + if (ret) { + netdev_err(priv->dev, "failed to configure EST\n"); + goto disable; + } + + netdev_info(priv->dev, "configured EST\n"); + return 0; + +disable: + priv->plat->est->enable = false; + stmmac_est_configure(priv, priv->ioaddr, priv->plat->est, + priv->plat->clk_ptp_rate); + return ret; +} + +static int tc_setup_etf(struct stmmac_priv *priv, + struct tc_etf_qopt_offload *qopt) +{ + if (!priv->dma_cap.tbssel) + return -EOPNOTSUPP; + if (qopt->queue >= priv->plat->tx_queues_to_use) + return -EINVAL; + if (!(priv->tx_queue[qopt->queue].tbs & STMMAC_TBS_AVAIL)) + return -EINVAL; + + if (qopt->enable) + priv->tx_queue[qopt->queue].tbs |= STMMAC_TBS_EN; + else + priv->tx_queue[qopt->queue].tbs &= ~STMMAC_TBS_EN; + + netdev_info(priv->dev, "%s ETF for Queue %d\n", + qopt->enable ? "enabled" : "disabled", qopt->queue);
return 0; +} + const struct stmmac_tc_ops dwmac510_tc_ops = { .init = tc_init, .setup_cls_u32 = tc_setup_cls_u32, .setup_cbs = tc_setup_cbs, .setup_cls = tc_setup_cls, + .setup_taprio = tc_setup_taprio, + .setup_etf = tc_setup_etf, }; diff --combined drivers/net/macvlan.c index 67a830dd0d30,c5bf61565726..81aa7adf4801 --- a/drivers/net/macvlan.c +++ b/drivers/net/macvlan.c @@@ -259,7 -259,7 +259,7 @@@ static void macvlan_broadcast(struct sk struct net_device *src, enum macvlan_mode mode) { - const struct ethhdr *eth = skb_eth_hdr(skb); + const struct ethhdr *eth = eth_hdr(skb); const struct macvlan_dev *vlan; struct sk_buff *nskb; unsigned int i; @@@ -513,10 -513,11 +513,11 @@@ static int macvlan_queue_xmit(struct sk const struct macvlan_dev *dest;
if (vlan->mode == MACVLAN_MODE_BRIDGE) { - const struct ethhdr *eth = (void *)skb->data; + const struct ethhdr *eth = skb_eth_hdr(skb);
/* send to other bridge ports directly */ if (is_multicast_ether_addr(eth->h_dest)) { + skb_reset_mac_header(skb); macvlan_broadcast(skb, port, dev, MACVLAN_MODE_BRIDGE); goto xmit_world; } @@@ -1036,8 -1037,8 +1037,8 @@@ static int macvlan_ethtool_get_ts_info( const struct ethtool_ops *ops = real_dev->ethtool_ops; struct phy_device *phydev = real_dev->phydev;
- if (phydev && phydev->drv && phydev->drv->ts_info) { - return phydev->drv->ts_info(phydev, info); + if (phy_has_tsinfo(phydev)) { + return phy_ts_info(phydev, info); } else if (ops->get_ts_info) { return ops->get_ts_info(real_dev, info); } else { diff --combined drivers/net/netdevsim/dev.c index c572960bb54e,4b39aba2e9c4..b53fbc06e104 --- a/drivers/net/netdevsim/dev.c +++ b/drivers/net/netdevsim/dev.c @@@ -53,7 -53,7 +53,7 @@@ static ssize_t nsim_dev_take_snapshot_w
get_random_bytes(dummy_data, NSIM_DEV_DUMMY_REGION_SIZE);
- id = devlink_region_shapshot_id_get(priv_to_devlink(nsim_dev)); + id = devlink_region_snapshot_id_get(priv_to_devlink(nsim_dev)); err = devlink_region_snapshot_create(nsim_dev->dummy_region, dummy_data, id, kfree); if (err) { @@@ -270,7 -270,7 +270,7 @@@ struct nsim_trap_data };
/* All driver-specific traps must be documented in - * Documentation/networking/devlink-trap-netdevsim.rst + * Documentation/networking/devlink/netdevsim.rst */ enum { NSIM_TRAP_ID_BASE = DEVLINK_TRAP_GENERIC_ID_MAX, diff --combined drivers/net/phy/Kconfig index ac82ff959b7c,8dc461f7574b..6b5ee26795a2 --- a/drivers/net/phy/Kconfig +++ b/drivers/net/phy/Kconfig @@@ -324,12 -324,6 +324,12 @@@ config BROADCOM_PH Currently supports the BCM5411, BCM5421, BCM5461, BCM54616S, BCM5464, BCM5481, BCM54810 and BCM5482 PHYs.
+config BCM84881_PHY + bool "Broadcom BCM84881 PHY" + depends on PHYLIB=y + ---help--- + Support the Broadcom BCM84881 PHY. + config CICADA_PHY tristate "Cicada PHYs" ---help--- @@@ -346,14 -340,14 +346,14 @@@ config DAVICOM_PH Currently supports dm9161e and dm9131
config DP83822_PHY - tristate "Texas Instruments DP83822 PHY" + tristate "Texas Instruments DP83822/825 PHYs" ---help--- - Supports the DP83822 PHY. + Supports the DP83822 and DP83825I PHYs.
config DP83TC811_PHY - tristate "Texas Instruments DP83TC822 PHY" + tristate "Texas Instruments DP83TC811 PHY" ---help--- - Supports the DP83TC822 PHY. + Supports the DP83TC811 PHY.
config DP83848_PHY tristate "Texas Instruments DP83848 PHY" @@@ -437,9 -431,6 +437,9 @@@ config MICROCHIP_T1_PH
config MICROSEMI_PHY tristate "Microsemi PHYs" + depends on MACSEC || MACSEC=n + select CRYPTO_AES + select CRYPTO_ECB ---help--- Currently supports VSC8514, VSC8530, VSC8531, VSC8540 and VSC8541 PHYs
diff --combined drivers/net/phy/dp83867.c index adda0d0eab80,01cf71358359..967f57ed0b65 --- a/drivers/net/phy/dp83867.c +++ b/drivers/net/phy/dp83867.c @@@ -93,12 -93,11 +93,13 @@@ #define DP83867_STRAP_STS2_CLK_SKEW_NONE BIT(2)
/* PHY CTRL bits */ -#define DP83867_PHYCR_FIFO_DEPTH_SHIFT 14 +#define DP83867_PHYCR_TX_FIFO_DEPTH_SHIFT 14 +#define DP83867_PHYCR_RX_FIFO_DEPTH_SHIFT 12 #define DP83867_PHYCR_FIFO_DEPTH_MAX 0x03 -#define DP83867_PHYCR_FIFO_DEPTH_MASK GENMASK(15, 14) +#define DP83867_PHYCR_TX_FIFO_DEPTH_MASK GENMASK(15, 14) +#define DP83867_PHYCR_RX_FIFO_DEPTH_MASK GENMASK(13, 12) #define DP83867_PHYCR_RESERVED_MASK BIT(11) + #define DP83867_PHYCR_FORCE_LINK_GOOD BIT(10)
/* RGMIIDCTL bits */ #define DP83867_RGMII_TX_CLK_DELAY_MAX 0xf @@@ -133,8 -132,7 +134,8 @@@ enum struct dp83867_private { u32 rx_id_delay; u32 tx_id_delay; - u32 fifo_depth; + u32 tx_fifo_depth; + u32 rx_fifo_depth; int io_impedance; int port_mirroring; bool rxctrl_strap_quirk; @@@ -411,32 -409,18 +412,32 @@@ static int dp83867_of_init(struct phy_d dp83867->port_mirroring = DP83867_PORT_MIRROING_DIS;
ret = of_property_read_u32(of_node, "ti,fifo-depth", - &dp83867->fifo_depth); + &dp83867->tx_fifo_depth); if (ret) { - phydev_err(phydev, - "ti,fifo-depth property is required\n"); - return ret; + ret = of_property_read_u32(of_node, "tx-fifo-depth", + &dp83867->tx_fifo_depth); + if (ret) + dp83867->tx_fifo_depth = + DP83867_PHYCR_FIFO_DEPTH_4_B_NIB; } - if (dp83867->fifo_depth > DP83867_PHYCR_FIFO_DEPTH_MAX) { - phydev_err(phydev, - "ti,fifo-depth value %u out of range\n", - dp83867->fifo_depth); + + if (dp83867->tx_fifo_depth > DP83867_PHYCR_FIFO_DEPTH_MAX) { + phydev_err(phydev, "tx-fifo-depth value %u out of range\n", + dp83867->tx_fifo_depth); + return -EINVAL; + } + + ret = of_property_read_u32(of_node, "rx-fifo-depth", + &dp83867->rx_fifo_depth); + if (ret) + dp83867->rx_fifo_depth = DP83867_PHYCR_FIFO_DEPTH_4_B_NIB; + + if (dp83867->rx_fifo_depth > DP83867_PHYCR_FIFO_DEPTH_MAX) { + phydev_err(phydev, "rx-fifo-depth value %u out of range\n", + dp83867->rx_fifo_depth); return -EINVAL; } + return 0; } #else @@@ -475,31 -459,12 +476,31 @@@ static int dp83867_config_init(struct p phy_clear_bits_mmd(phydev, DP83867_DEVADDR, DP83867_CFG4, BIT(7));
+ if (phy_interface_is_rgmii(phydev) || + phydev->interface == PHY_INTERFACE_MODE_SGMII) { + val = phy_read(phydev, MII_DP83867_PHYCTRL); + if (val < 0) + return val; + + val &= ~DP83867_PHYCR_TX_FIFO_DEPTH_MASK; + val |= (dp83867->tx_fifo_depth << + DP83867_PHYCR_TX_FIFO_DEPTH_SHIFT); + + if (phydev->interface == PHY_INTERFACE_MODE_SGMII) { + val &= ~DP83867_PHYCR_RX_FIFO_DEPTH_MASK; + val |= (dp83867->rx_fifo_depth << + DP83867_PHYCR_RX_FIFO_DEPTH_SHIFT); + } + + ret = phy_write(phydev, MII_DP83867_PHYCTRL, val); + if (ret) + return ret; + } + if (phy_interface_is_rgmii(phydev)) { val = phy_read(phydev, MII_DP83867_PHYCTRL); if (val < 0) return val; - val &= ~DP83867_PHYCR_FIFO_DEPTH_MASK; - val |= (dp83867->fifo_depth << DP83867_PHYCR_FIFO_DEPTH_SHIFT);
/* The code below checks if "port mirroring" N/A MODE4 has been * enabled during power on bootstrap. @@@ -635,7 -600,12 +636,12 @@@ static int dp83867_phy_reset(struct phy
usleep_range(10, 20);
- return 0; + /* After reset FORCE_LINK_GOOD bit is set. Although the + * default value should be unset. Disable FORCE_LINK_GOOD + * for the phy to work properly. + */ + return phy_modify(phydev, MII_DP83867_PHYCTRL, + DP83867_PHYCR_FORCE_LINK_GOOD, 0); }
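The probe path above now falls back from the legacy ti,fifo-depth property to the per-direction tx-fifo-depth/rx-fifo-depth properties (with the 4-bytes/nibble default when neither is given), and applies the RX depth only for SGMII. An illustrative device-tree fragment; the node label is hypothetical, the encodings are the raw 0-3 PHYCR values from dt-bindings/net/ti-dp83867.h:

	&ethernet_phy0 {
		tx-fifo-depth = <DP83867_PHYCR_FIFO_DEPTH_4_B_NIB>;
		rx-fifo-depth = <DP83867_PHYCR_FIFO_DEPTH_4_B_NIB>;
	};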
static struct phy_driver dp83867_driver[] = { diff --combined drivers/net/usb/lan78xx.c index d3239b49c3bb,75bdfae5f3e2..c391f2521ba1 --- a/drivers/net/usb/lan78xx.c +++ b/drivers/net/usb/lan78xx.c @@@ -3660,7 -3660,7 +3660,7 @@@ static void lan78xx_disconnect(struct u usb_put_dev(udev); }
-static void lan78xx_tx_timeout(struct net_device *net) +static void lan78xx_tx_timeout(struct net_device *net, unsigned int txqueue) { struct lan78xx_net *dev = netdev_priv(net);
@@@ -3750,6 -3750,7 +3750,7 @@@ static int lan78xx_probe(struct usb_int
/* MTU range: 68 - 9000 */ netdev->max_mtu = MAX_SINGLE_PACKET_SIZE; + netif_set_gso_max_size(netdev, MAX_SINGLE_PACKET_SIZE - MAX_HEADER);
dev->ep_blkin = (intf->cur_altsetting)->endpoint + 0; dev->ep_blkout = (intf->cur_altsetting)->endpoint + 1; diff --combined drivers/net/usb/r8152.c index fe22a582373b,031cb8fff909..36051288034a --- a/drivers/net/usb/r8152.c +++ b/drivers/net/usb/r8152.c @@@ -1897,8 -1897,8 +1897,8 @@@ static void r8152_csum_workaround(struc { if (skb_shinfo(skb)->gso_size) { netdev_features_t features = tp->netdev->features; + struct sk_buff *segs, *seg, *next; struct sk_buff_head seg_list; - struct sk_buff *segs, *nskb;
features &= ~(NETIF_F_SG | NETIF_F_IPV6_CSUM | NETIF_F_TSO6); segs = skb_gso_segment(skb, features); @@@ -1907,10 -1907,12 +1907,10 @@@
__skb_queue_head_init(&seg_list);
- do { - nskb = segs; - segs = segs->next; - nskb->next = NULL; - __skb_queue_tail(&seg_list, nskb); - } while (segs); + skb_list_walk_safe(segs, seg, next) { + skb_mark_not_on_list(seg); + __skb_queue_tail(&seg_list, seg); + }
skb_queue_splice(&seg_list, list); dev_kfree_skb(skb); @@@ -2505,7 -2507,7 +2505,7 @@@ static void rtl_drop_queued_tx(struct r } }
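The segmentation fallback above is converted to skb_list_walk_safe(), the helper macro for walking skb->next chains: it latches the next pointer before the loop body runs, so the current segment can be unlinked (skb_mark_not_on_list() clears seg->next) and queued without breaking the walk. A minimal sketch of the same pattern:

	static void example_splice_segments(struct sk_buff *segs,
					    struct sk_buff_head *list)
	{
		struct sk_buff *seg, *next;

		skb_list_walk_safe(segs, seg, next) {
			skb_mark_not_on_list(seg);	/* seg->next = NULL */
			__skb_queue_tail(list, seg);
		}
	}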
-static void rtl8152_tx_timeout(struct net_device *netdev) +static void rtl8152_tx_timeout(struct net_device *netdev, unsigned int txqueue) { struct r8152 *tp = netdev_priv(netdev);
@@@ -6595,6 -6597,9 +6595,9 @@@ static int rtl8152_probe(struct usb_int return -ENODEV; }
+ if (intf->cur_altsetting->desc.bNumEndpoints < 3) + return -ENODEV; + usb_reset_device(udev); netdev = alloc_etherdev(sizeof(struct r8152)); if (!netdev) { diff --combined drivers/net/wan/fsl_ucc_hdlc.c index c28f8409067e,aef7de225783..3998cac49d7f --- a/drivers/net/wan/fsl_ucc_hdlc.c +++ b/drivers/net/wan/fsl_ucc_hdlc.c @@@ -73,7 -73,7 +73,7 @@@ static struct ucc_tdm_info utdm_primary }, };
- static struct ucc_tdm_info utdm_info[MAX_HDLC_NUM]; + static struct ucc_tdm_info utdm_info[UCC_MAX_NUM];
static int uhdlc_init(struct ucc_hdlc_private *priv) { @@@ -635,9 -635,11 +635,9 @@@ static irqreturn_t ucc_hdlc_irq_handler struct ucc_hdlc_private *priv = (struct ucc_hdlc_private *)dev_id; struct net_device *dev = priv->ndev; struct ucc_fast_private *uccf; - struct ucc_tdm_info *ut_info; u32 ucce; u32 uccm;
- ut_info = priv->ut_info; uccf = priv->uccf;
ucce = ioread32be(uccf->p_ucce); @@@ -870,6 -872,7 +870,6 @@@ static void resume_clk_config(struct uc static int uhdlc_suspend(struct device *dev) { struct ucc_hdlc_private *priv = dev_get_drvdata(dev); - struct ucc_tdm_info *ut_info; struct ucc_fast __iomem *uf_regs;
if (!priv) @@@ -881,6 -884,7 +881,6 @@@ netif_device_detach(priv->ndev); napi_disable(&priv->napi);
- ut_info = priv->ut_info; uf_regs = priv->uf_regs;
/* backup gumr guemr*/ @@@ -913,7 -917,7 +913,7 @@@ static int uhdlc_resume(struct device * struct ucc_fast __iomem *uf_regs; struct ucc_fast_private *uccf; struct ucc_fast_info *uf_info; - int ret, i; + int i; u32 cecr_subblock; u16 bd_status;
@@@ -958,16 -962,16 +958,16 @@@
/* Write to QE CECR, UCCx channel to Stop Transmission */ cecr_subblock = ucc_fast_get_qe_cr_subblock(uf_info->ucc_num); - ret = qe_issue_cmd(QE_STOP_TX, cecr_subblock, - (u8)QE_CR_PROTOCOL_UNSPECIFIED, 0); + qe_issue_cmd(QE_STOP_TX, cecr_subblock, + (u8)QE_CR_PROTOCOL_UNSPECIFIED, 0);
/* Set UPSMR normal mode */ iowrite32be(0, &uf_regs->upsmr);
/* init parameter base */ cecr_subblock = ucc_fast_get_qe_cr_subblock(uf_info->ucc_num); - ret = qe_issue_cmd(QE_ASSIGN_PAGE_TO_DEVICE, cecr_subblock, - QE_CR_PROTOCOL_UNSPECIFIED, priv->ucc_pram_offset); + qe_issue_cmd(QE_ASSIGN_PAGE_TO_DEVICE, cecr_subblock, + QE_CR_PROTOCOL_UNSPECIFIED, priv->ucc_pram_offset);
priv->ucc_pram = (struct ucc_hdlc_param __iomem *) qe_muram_addr(priv->ucc_pram_offset); @@@ -1035,7 -1039,7 +1035,7 @@@ static const struct dev_pm_ops uhdlc_pm #define HDLC_PM_OPS NULL
#endif -static void uhdlc_tx_timeout(struct net_device *ndev) +static void uhdlc_tx_timeout(struct net_device *ndev, unsigned int txqueue) { netdev_err(ndev, "%s\n", __func__); } diff --combined drivers/ptp/ptp_clock.c index da97a5bab26e,b84f16bbd6f2..ac1f2bf9e888 --- a/drivers/ptp/ptp_clock.c +++ b/drivers/ptp/ptp_clock.c @@@ -170,6 -170,7 +170,7 @@@ static void ptp_clock_release(struct de { struct ptp_clock *ptp = container_of(dev, struct ptp_clock, dev);
+ ptp_cleanup_pin_groups(ptp); mutex_destroy(&ptp->tsevq_mux); mutex_destroy(&ptp->pincfg_mux); ida_simple_remove(&ptp_clocks_map, ptp->index); @@@ -302,9 -303,8 +303,8 @@@ int ptp_clock_unregister(struct ptp_clo if (ptp->pps_source) pps_unregister_source(ptp->pps_source);
- ptp_cleanup_pin_groups(ptp); - posix_clock_unregister(&ptp->clock); + return 0; } EXPORT_SYMBOL(ptp_clock_unregister); @@@ -368,12 -368,6 +368,12 @@@ int ptp_schedule_worker(struct ptp_cloc } EXPORT_SYMBOL(ptp_schedule_worker);
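With the pin-group cleanup moved into ptp_clock_release(), the new ptp_cancel_worker_sync() export added next gives drivers a synchronous way to stop the auxiliary worker they started with ptp_schedule_worker() before the clock goes away. A hedged sketch of the intended pairing in a driver remove path, with example_priv hypothetical:

	static void example_phc_remove(struct example_priv *priv)
	{
		/* No aux work runs past this point... */
		ptp_cancel_worker_sync(priv->ptp);
		/* ...so unregistering cannot race with it. */
		ptp_clock_unregister(priv->ptp);
	}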
+void ptp_cancel_worker_sync(struct ptp_clock *ptp) +{ + kthread_cancel_delayed_work_sync(&ptp->aux_work); +} +EXPORT_SYMBOL(ptp_cancel_worker_sync); + /* module operations */
static void __exit ptp_exit(void) diff --combined include/net/devlink.h index 2813fd06ee89,38b4acb93f74..5e46c24bb6e6 --- a/include/net/devlink.h +++ b/include/net/devlink.h @@@ -485,8 -485,6 +485,8 @@@ enum devlink_param_generic_id #define DEVLINK_INFO_VERSION_GENERIC_FW_UNDI "fw.undi" /* NCSI support/handler version */ #define DEVLINK_INFO_VERSION_GENERIC_FW_NCSI "fw.ncsi" +/* FW parameter set id */ +#define DEVLINK_INFO_VERSION_GENERIC_FW_PSID "fw.psid"
struct devlink_region; struct devlink_info_req; @@@ -564,7 -562,7 +564,7 @@@ struct devlink_trap };
/* All traps must be documented in - * Documentation/networking/devlink-trap.rst + * Documentation/networking/devlink/devlink-trap.rst */ enum devlink_trap_generic_id { DEVLINK_TRAP_GENERIC_ID_SMAC_MC, @@@ -591,9 -589,6 +591,9 @@@ DEVLINK_TRAP_GENERIC_ID_REJECT_ROUTE, DEVLINK_TRAP_GENERIC_ID_IPV4_LPM_UNICAST_MISS, DEVLINK_TRAP_GENERIC_ID_IPV6_LPM_UNICAST_MISS, + DEVLINK_TRAP_GENERIC_ID_NON_ROUTABLE, + DEVLINK_TRAP_GENERIC_ID_DECAP_ERROR, + DEVLINK_TRAP_GENERIC_ID_OVERLAY_SMAC_MC,
/* Add new generic trap IDs above */ __DEVLINK_TRAP_GENERIC_ID_MAX, @@@ -601,13 -596,12 +601,13 @@@ };
/* All trap groups must be documented in - * Documentation/networking/devlink-trap.rst + * Documentation/networking/devlink/devlink-trap.rst */ enum devlink_trap_group_generic_id { DEVLINK_TRAP_GROUP_GENERIC_ID_L2_DROPS, DEVLINK_TRAP_GROUP_GENERIC_ID_L3_DROPS, DEVLINK_TRAP_GROUP_GENERIC_ID_BUFFER_DROPS, + DEVLINK_TRAP_GROUP_GENERIC_ID_TUNNEL_DROPS,
/* Add new generic trap group IDs above */ __DEVLINK_TRAP_GROUP_GENERIC_ID_MAX, @@@ -663,12 -657,6 +663,12 @@@ "ipv4_lpm_miss" #define DEVLINK_TRAP_GENERIC_NAME_IPV6_LPM_UNICAST_MISS \ "ipv6_lpm_miss" +#define DEVLINK_TRAP_GENERIC_NAME_NON_ROUTABLE \ + "non_routable_packet" +#define DEVLINK_TRAP_GENERIC_NAME_DECAP_ERROR \ + "decap_error" +#define DEVLINK_TRAP_GENERIC_NAME_OVERLAY_SMAC_MC \ + "overlay_smac_is_mc"
#define DEVLINK_TRAP_GROUP_GENERIC_NAME_L2_DROPS \ "l2_drops" @@@ -676,8 -664,6 +676,8 @@@ "l3_drops" #define DEVLINK_TRAP_GROUP_GENERIC_NAME_BUFFER_DROPS \ "buffer_drops" +#define DEVLINK_TRAP_GROUP_GENERIC_NAME_TUNNEL_DROPS \ + "tunnel_drops"
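The three new generic traps and the tunnel_drops group surface through the same devlink trap API as the existing ones; once a driver wires them up (mlxsw registers the matching DISCARD_ING_ROUTER_* listeners above), they can be inspected and toggled with iproute2. Illustrative commands, device name hypothetical:

 # devlink trap show pci/0000:03:00.0 trap decap_error
 # devlink trap set pci/0000:03:00.0 trap decap_error action trap
 # devlink trap group set pci/0000:03:00.0 group tunnel_drops action drop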
#define DEVLINK_TRAP_GENERIC(_type, _init_action, _id, _group, _metadata_cap) \ { \ @@@ -952,7 -938,7 +952,7 @@@ struct devlink_region *devlink_region_c u32 region_max_snapshots, u64 region_size); void devlink_region_destroy(struct devlink_region *region); - u32 devlink_region_shapshot_id_get(struct devlink *devlink); + u32 devlink_region_snapshot_id_get(struct devlink *devlink); int devlink_region_snapshot_create(struct devlink_region *region, u8 *data, u32 snapshot_id, devlink_snapshot_data_dest_t *data_destructor); @@@ -1014,8 -1000,6 +1014,8 @@@ int devlink_health_report(struct devlin void devlink_health_reporter_state_update(struct devlink_health_reporter *reporter, enum devlink_health_reporter_state state); +void +devlink_health_reporter_recovery_done(struct devlink_health_reporter *reporter);
bool devlink_is_reload_failed(const struct devlink *devlink);
diff --combined include/net/tcp.h
index 5e4133d09b9d,e6f48384dc71..2869d28ed5d8
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@@ -39,7 -39,6 +39,7 @@@
 #include <net/tcp_states.h>
 #include <net/inet_ecn.h>
 #include <net/dst.h>
+#include <net/mptcp.h>

 #include <linux/seq_file.h>
 #include <linux/memcontrol.h>
@@@ -183,7 -182,6 +183,7 @@@ void tcp_time_wait(struct sock *sk, in
 #define TCPOPT_SACK		5	/* SACK Block */
 #define TCPOPT_TIMESTAMP	8	/* Better RTT estimations/PAWS */
 #define TCPOPT_MD5SIG		19	/* MD5 Signature (RFC2385) */
+#define TCPOPT_MPTCP		30	/* Multipath TCP (RFC6824) */
 #define TCPOPT_FASTOPEN		34	/* Fast open (RFC7413) */
 #define TCPOPT_EXP		254	/* Experimental */
 /* Magic number to be after the option value for sharing TCP
@@@ -330,9 -328,6 +330,9 @@@ int tcp_sendpage_locked(struct sock *sk
 			size_t size, int flags);
 ssize_t do_tcp_sendpages(struct sock *sk, struct page *page, int offset,
 			 size_t size, int flags);
+int tcp_send_mss(struct sock *sk, int *size_goal, int flags);
+void tcp_push(struct sock *sk, int flags, int mss_now, int nonagle,
+	      int size_goal);
 void tcp_release_cb(struct sock *sk);
 void tcp_wfree(struct sk_buff *skb);
 void tcp_write_timer_handler(struct sock *sk);
@@@ -982,13 -977,6 +982,13 @@@ static inline bool tcp_skb_can_collapse
 	return likely(!TCP_SKB_CB(skb)->eor);
 }

+static inline bool tcp_skb_can_collapse(const struct sk_buff *to,
+					const struct sk_buff *from)
+{
+	return likely(tcp_skb_can_collapse_to(to) &&
+		      mptcp_skb_can_collapse(to, from));
+}
+
 /* Events passed to congestion control interface */
 enum tcp_ca_event {
 	CA_EVENT_TX_START,	/* first transmit when no packets in flight */
@@@ -1544,9 -1532,8 +1544,9 @@@ struct tcp_md5sig_key
 	struct hlist_node	node;
 	u8			keylen;
 	u8			family; /* AF_INET or AF_INET6 */
-	union tcp_md5_addr	addr;
 	u8			prefixlen;
+	union tcp_md5_addr	addr;
+	int			l3index; /* set if key added with L3 scope */
 	u8			key[TCP_MD5SIG_MAXKEYLEN];
 	struct rcu_head		rcu;
 };
@@@ -1590,33 -1577,34 +1590,33 @@@ struct tcp_md5sig_pool
 int tcp_v4_md5_hash_skb(char *md5_hash, const struct tcp_md5sig_key *key,
 			const struct sock *sk, const struct sk_buff *skb);
 int tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr,
-		   int family, u8 prefixlen, const u8 *newkey, u8 newkeylen,
-		   gfp_t gfp);
+		   int family, u8 prefixlen, int l3index,
+		   const u8 *newkey, u8 newkeylen, gfp_t gfp);
 int tcp_md5_do_del(struct sock *sk, const union tcp_md5_addr *addr,
-		   int family, u8 prefixlen);
+		   int family, u8 prefixlen, int l3index);
 struct tcp_md5sig_key *tcp_v4_md5_lookup(const struct sock *sk,
 					 const struct sock *addr_sk);

 #ifdef CONFIG_TCP_MD5SIG
 #include <linux/jump_label.h>
 extern struct static_key_false tcp_md5_needed;
-struct tcp_md5sig_key *__tcp_md5_do_lookup(const struct sock *sk,
+struct tcp_md5sig_key *__tcp_md5_do_lookup(const struct sock *sk, int l3index,
 					   const union tcp_md5_addr *addr,
 					   int family);
 static inline struct tcp_md5sig_key *
-tcp_md5_do_lookup(const struct sock *sk,
-		  const union tcp_md5_addr *addr,
-		  int family)
+tcp_md5_do_lookup(const struct sock *sk, int l3index,
+		  const union tcp_md5_addr *addr, int family)
 {
 	if (!static_branch_unlikely(&tcp_md5_needed))
 		return NULL;
-	return __tcp_md5_do_lookup(sk, addr, family);
+	return __tcp_md5_do_lookup(sk, l3index, addr, family);
 }

 #define tcp_twsk_md5_key(twsk)	((twsk)->tw_md5_key)
 #else
-static inline struct tcp_md5sig_key *tcp_md5_do_lookup(const struct sock *sk,
-					 const union tcp_md5_addr *addr,
-					 int family)
+static inline struct tcp_md5sig_key *
+tcp_md5_do_lookup(const struct sock *sk, int l3index,
+		  const union tcp_md5_addr *addr, int family)
 {
 	return NULL;
 }
@@@ -2014,11 -2002,6 +2014,11 @@@ struct tcp_request_sock_ops
 			   enum tcp_synack_type synack_type);
 };
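The l3index parameter threaded through the MD5 helpers above scopes TCP-MD5 keys to an L3 domain (a VRF master device), rather than matching on address alone. A hedged sketch of a lookup on the IPv4 receive side, under CONFIG_TCP_MD5SIG; the wrapper function is illustrative, only tcp_md5_do_lookup() and its new argument order are from this diff:

    /* Illustrative only: look up an MD5 key for the peer, scoped to an
     * L3 domain. l3index == 0 means "no VRF scope"; otherwise it is the
     * ifindex of the L3 master device the packet arrived through.
     */
    static struct tcp_md5sig_key *example_md5_lookup(const struct sock *sk,
    						 const struct sk_buff *skb,
    						 int l3index)
    {
    	const struct iphdr *iph = ip_hdr(skb);
    	const union tcp_md5_addr *addr =
    		(const union tcp_md5_addr *)&iph->saddr;

    	return tcp_md5_do_lookup(sk, l3index, addr, AF_INET);
    }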
+extern const struct tcp_request_sock_ops tcp_request_sock_ipv4_ops;
+#if IS_ENABLED(CONFIG_IPV6)
+extern const struct tcp_request_sock_ops tcp_request_sock_ipv6_ops;
+#endif
+
 #ifdef CONFIG_SYN_COOKIES
 static inline __u32 cookie_init_sequence(const struct tcp_request_sock_ops *ops,
 					 const struct sock *sk, struct sk_buff *skb,
@@@ -2164,15 -2147,13 +2164,16 @@@ struct tcp_ulp_ops
 	/* initialize ulp */
 	int (*init)(struct sock *sk);
 	/* update ulp */
-	void (*update)(struct sock *sk, struct proto *p);
+	void (*update)(struct sock *sk, struct proto *p,
+		       void (*write_space)(struct sock *sk));
 	/* cleanup ulp */
 	void (*release)(struct sock *sk);
 	/* diagnostic */
 	int (*get_info)(const struct sock *sk, struct sk_buff *skb);
 	size_t (*get_info_size)(const struct sock *sk);
+	/* clone ulp */
+	void (*clone)(const struct request_sock *req, struct sock *newsk,
+		      const gfp_t priority);

 	char		name[TCP_ULP_NAME_MAX];
 	struct module	*owner;
@@@ -2182,7 -2163,8 +2183,8 @@@ void tcp_unregister_ulp(struct tcp_ulp_
 int tcp_set_ulp(struct sock *sk, const char *name);
 void tcp_get_available_ulp(char *buf, size_t len);
 void tcp_cleanup_ulp(struct sock *sk);
- void tcp_update_ulp(struct sock *sk, struct proto *p);
+ void tcp_update_ulp(struct sock *sk, struct proto *p,
+ 		    void (*write_space)(struct sock *sk));

 #define MODULE_ALIAS_TCP_ULP(name)				\
 	__MODULE_INFO(alias, alias_userspace, name);		\
diff --combined net/batman-adv/distributed-arp-table.c
index 906011b60d66,ec7bf5a4a9fc..3d21dd83f8cc
--- a/net/batman-adv/distributed-arp-table.c
+++ b/net/batman-adv/distributed-arp-table.c
@@@ -1,5 -1,5 +1,5 @@@
 // SPDX-License-Identifier: GPL-2.0
-/* Copyright (C) 2011-2019  B.A.T.M.A.N. contributors:
+/* Copyright (C) 2011-2020  B.A.T.M.A.N. contributors:
  *
  * Antonio Quartulli
  */
@@@ -246,7 -246,7 +246,7 @@@ static u8 *batadv_arp_hw_src(struct sk_
  */
 static __be32 batadv_arp_ip_src(struct sk_buff *skb, int hdr_size)
 {
-	return *(__be32 *)(batadv_arp_hw_src(skb, hdr_size) + ETH_ALEN);
+	return *(__force __be32 *)(batadv_arp_hw_src(skb, hdr_size) + ETH_ALEN);
 }

 /**
@@@ -270,9 -270,7 +270,9 @@@ static u8 *batadv_arp_hw_dst(struct sk_
  */
 static __be32 batadv_arp_ip_dst(struct sk_buff *skb, int hdr_size)
 {
-	return *(__be32 *)(batadv_arp_hw_src(skb, hdr_size) + ETH_ALEN * 2 + 4);
+	u8 *dst = batadv_arp_hw_src(skb, hdr_size) + ETH_ALEN * 2 + 4;
+
+	return *(__force __be32 *)dst;
 }

 /**
@@@ -287,16 -285,18 +287,18 @@@ static u32 batadv_hash_dat(const void *
 	u32 hash = 0;
 	const struct batadv_dat_entry *dat = data;
 	const unsigned char *key;
+	__be16 vid;
 	u32 i;

-	key = (const unsigned char *)&dat->ip;
+	key = (__force const unsigned char *)&dat->ip;
 	for (i = 0; i < sizeof(dat->ip); i++) {
 		hash += key[i];
 		hash += (hash << 10);
 		hash ^= (hash >> 6);
 	}

-	key = (const unsigned char *)&dat->vid;
+	vid = htons(dat->vid);
+	key = (__force const unsigned char *)&vid;
 	for (i = 0; i < sizeof(dat->vid); i++) {
 		hash += key[i];
 		hash += (hash << 10);
diff --combined net/core/dev.c
index d99f88c58636,7e885d069707..6368c94c9e0a
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@@ -4932,6 -4932,7 +4932,6 @@@ static bool skb_pfmemalloc_protocol(str
 static inline int nf_ingress(struct sk_buff *skb, struct packet_type **pt_prev,
 			     int *ret, struct net_device *orig_dev)
 {
-#ifdef CONFIG_NETFILTER_INGRESS
 	if (nf_hook_ingress_active(skb)) {
 		int ingress_retval;
@@@ -4945,6 -4946,7 +4945,6 @@@
 		rcu_read_unlock();
 		return ingress_retval;
 	}
-#endif /* CONFIG_NETFILTER_INGRESS */
 	return 0;
 }

@@@ -8540,17 -8542,7 +8540,17 @@@ static int dev_xdp_install(struct net_d
 			   struct netlink_ext_ack *extack, u32 flags,
 			   struct bpf_prog *prog)
 {
+	bool non_hw = !(flags & XDP_FLAGS_HW_MODE);
+	struct bpf_prog *prev_prog = NULL;
 	struct netdev_bpf xdp;
+	int err;
+
+	if (non_hw) {
+		prev_prog = bpf_prog_by_id(__dev_xdp_query(dev, bpf_op,
+							   XDP_QUERY_PROG));
+		if (IS_ERR(prev_prog))
+			prev_prog = NULL;
+	}

 	memset(&xdp, 0, sizeof(xdp));
 	if (flags & XDP_FLAGS_HW_MODE)
@@@ -8561,14 -8553,7 +8561,14 @@@
 	xdp.flags = flags;
 	xdp.prog = prog;

-	return bpf_op(dev, &xdp);
+	err = bpf_op(dev, &xdp);
+	if (!err && non_hw)
+		bpf_prog_change_xdp(prev_prog, prog);
+
+	if (prev_prog)
+		bpf_prog_put(prev_prog);
+
+	return err;
 }

 static void dev_xdp_uninstall(struct net_device *dev)
@@@ -9192,22 -9177,10 +9192,10 @@@ static void netdev_unregister_lockdep_k

 void netdev_update_lockdep_key(struct net_device *dev)
 {
-	struct netdev_queue *queue;
-	int i;
-
-	lockdep_unregister_key(&dev->qdisc_xmit_lock_key);
 	lockdep_unregister_key(&dev->addr_list_lock_key);
-
-	lockdep_register_key(&dev->qdisc_xmit_lock_key);
 	lockdep_register_key(&dev->addr_list_lock_key);

 	lockdep_set_class(&dev->addr_list_lock, &dev->addr_list_lock_key);
-	for (i = 0; i < dev->num_tx_queues; i++) {
-		queue = netdev_get_tx_queue(dev, i);
-
-		lockdep_set_class(&queue->_xmit_lock,
-				  &dev->qdisc_xmit_lock_key);
-	}
 }
 EXPORT_SYMBOL(netdev_update_lockdep_key);
diff --combined net/core/devlink.c
index e5b19bd2cbe2,f76219bf0c21..64367eeb21e6
--- a/net/core/devlink.c
+++ b/net/core/devlink.c
@@@ -4844,12 -4844,21 +4844,12 @@@ devlink_health_reporter_destroy(struct
 EXPORT_SYMBOL_GPL(devlink_health_reporter_destroy);

 void
-devlink_health_reporter_state_update(struct devlink_health_reporter *reporter,
-				     enum devlink_health_reporter_state state)
+devlink_health_reporter_recovery_done(struct devlink_health_reporter *reporter)
 {
-	if (WARN_ON(state != DEVLINK_HEALTH_REPORTER_STATE_HEALTHY &&
-		    state != DEVLINK_HEALTH_REPORTER_STATE_ERROR))
-		return;
-
-	if (reporter->health_state == state)
-		return;
-
-	reporter->health_state = state;
-	trace_devlink_health_reporter_state_update(reporter->devlink,
-						   reporter->ops->name, state);
+	reporter->recovery_count++;
+	reporter->last_recovery_ts = jiffies;
 }
-EXPORT_SYMBOL_GPL(devlink_health_reporter_state_update);
+EXPORT_SYMBOL_GPL(devlink_health_reporter_recovery_done);

 static int
 devlink_health_reporter_recover(struct devlink_health_reporter *reporter,
@@@ -4867,8 -4876,9 +4867,8 @@@
 	if (err)
 		return err;

-	reporter->recovery_count++;
+	devlink_health_reporter_recovery_done(reporter);
 	reporter->health_state = DEVLINK_HEALTH_REPORTER_STATE_HEALTHY;
-	reporter->last_recovery_ts = jiffies;

 	return 0;
 }
@@@ -5080,48 -5090,6 +5080,48 @@@ genlmsg_cancel
 	return -EMSGSIZE;
 }

+static void devlink_recover_notify(struct devlink_health_reporter *reporter,
+				   enum devlink_command cmd)
+{
+	struct sk_buff *msg;
+	int err;
+
+	WARN_ON(cmd != DEVLINK_CMD_HEALTH_REPORTER_RECOVER);
+
+	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+	if (!msg)
+		return;
+
+	err = devlink_nl_health_reporter_fill(msg, reporter->devlink,
+					      reporter, cmd, 0, 0, 0);
+	if (err) {
+		nlmsg_free(msg);
+		return;
+	}
+
+	genlmsg_multicast_netns(&devlink_nl_family,
+				devlink_net(reporter->devlink),
+				msg, 0, DEVLINK_MCGRP_CONFIG, GFP_KERNEL);
+}
+
+void
+devlink_health_reporter_state_update(struct devlink_health_reporter *reporter,
+				     enum devlink_health_reporter_state state)
+{
+	if (WARN_ON(state != DEVLINK_HEALTH_REPORTER_STATE_HEALTHY &&
+		    state != DEVLINK_HEALTH_REPORTER_STATE_ERROR))
+		return;
+
+	if (reporter->health_state == state)
+		return;
+
+	reporter->health_state = state;
+	trace_devlink_health_reporter_state_update(reporter->devlink,
+						   reporter->ops->name, state);
+	devlink_recover_notify(reporter, DEVLINK_CMD_HEALTH_REPORTER_RECOVER);
+}
+EXPORT_SYMBOL_GPL(devlink_health_reporter_state_update);
+
 static int devlink_nl_cmd_health_reporter_get_doit(struct sk_buff *skb,
 						   struct genl_info *info)
 {
@@@ -6438,7 -6406,7 +6438,7 @@@ static bool devlink_port_type_should_wa
 		devlink_port->attrs.flavour != DEVLINK_PORT_FLAVOUR_DSA;
 }

- #define DEVLINK_PORT_TYPE_WARN_TIMEOUT (HZ * 30)
+ #define DEVLINK_PORT_TYPE_WARN_TIMEOUT (HZ * 3600)

 static void devlink_port_type_warn_schedule(struct devlink_port *devlink_port)
 {
@@@ -7595,7 -7563,7 +7595,7 @@@ void devlink_region_destroy(struct devl
 EXPORT_SYMBOL_GPL(devlink_region_destroy);

 /**
- * devlink_region_shapshot_id_get - get snapshot ID
+ * devlink_region_snapshot_id_get - get snapshot ID
  *
  * This callback should be called when adding a new snapshot,
  * Driver should use the same id for multiple snapshots taken
@@@ -7603,7 -7571,7 +7603,7 @@@
  *
  * @devlink: devlink
  */
- u32 devlink_region_shapshot_id_get(struct devlink *devlink)
+ u32 devlink_region_snapshot_id_get(struct devlink *devlink)
 {
 	u32 id;
@@@ -7613,7 -7581,7 +7613,7 @@@
 	return id;
 }
- EXPORT_SYMBOL_GPL(devlink_region_shapshot_id_get);
+ EXPORT_SYMBOL_GPL(devlink_region_snapshot_id_get);

 /**
  * devlink_region_snapshot_create - create a new snapshot
@@@ -7706,9 -7674,6 +7706,9 @@@ static const struct devlink_trap devlin
 	DEVLINK_TRAP(REJECT_ROUTE, EXCEPTION),
 	DEVLINK_TRAP(IPV4_LPM_UNICAST_MISS, EXCEPTION),
 	DEVLINK_TRAP(IPV6_LPM_UNICAST_MISS, EXCEPTION),
+	DEVLINK_TRAP(NON_ROUTABLE, DROP),
+	DEVLINK_TRAP(DECAP_ERROR, EXCEPTION),
+	DEVLINK_TRAP(OVERLAY_SMAC_MC, DROP),
 };

 #define DEVLINK_TRAP_GROUP(_id)						      \
 	{								      \
@@@ -7721,7 -7686,6 +7721,7 @@@ static const struct devlink_trap_group
 	DEVLINK_TRAP_GROUP(L2_DROPS),
 	DEVLINK_TRAP_GROUP(L3_DROPS),
 	DEVLINK_TRAP_GROUP(BUFFER_DROPS),
+	DEVLINK_TRAP_GROUP(TUNNEL_DROPS),
 };
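The core-side tables above only define the generic traps and the new tunnel_drops group; a driver still has to opt in by registering the traps it can report. A rough sketch of what that driver side might look like, modeled on how existing drivers use the macros from include/net/devlink.h; the mydrv_* names and the zero metadata capability are assumptions, not taken from this diff:

    /* Hypothetical driver trap table using the new generic IDs. */
    static const struct devlink_trap mydrv_traps[] = {
    	DEVLINK_TRAP_GENERIC(DROP, DROP, NON_ROUTABLE,
    			     DEVLINK_TRAP_GROUP_GENERIC(L3_DROPS), 0),
    	DEVLINK_TRAP_GENERIC(EXCEPTION, TRAP, DECAP_ERROR,
    			     DEVLINK_TRAP_GROUP_GENERIC(TUNNEL_DROPS), 0),
    	DEVLINK_TRAP_GENERIC(DROP, DROP, OVERLAY_SMAC_MC,
    			     DEVLINK_TRAP_GROUP_GENERIC(TUNNEL_DROPS), 0),
    };

    static int mydrv_traps_init(struct devlink *devlink, void *priv)
    {
    	/* devlink_traps_register() is the existing registration entry
    	 * point; it verifies generic IDs/names against the core tables.
    	 */
    	return devlink_traps_register(devlink, mydrv_traps,
    				      ARRAY_SIZE(mydrv_traps), priv);
    }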
 static int devlink_trap_generic_verify(const struct devlink_trap *trap)
diff --combined net/core/filter.c
index ef01c5599501,538f6a735a19..1012b70a7c84
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@@ -2231,10 -2231,10 +2231,10 @@@ BPF_CALL_4(bpf_msg_pull_data, struct sk
 	/* First find the starting scatterlist element */
 	i = msg->sg.start;
 	do {
+		offset += len;
 		len = sk_msg_elem(msg, i)->length;
 		if (start < offset + len)
 			break;
-		offset += len;
 		sk_msg_iter_var_next(i);
 	} while (i != msg->sg.end);

@@@ -2346,7 -2346,7 +2346,7 @@@ BPF_CALL_4(bpf_msg_push_data, struct sk
 	   u32, len, u64, flags)
 {
 	struct scatterlist sge, nsge, nnsge, rsge = {0}, *psge;
-	u32 new, i = 0, l, space, copy = 0, offset = 0;
+	u32 new, i = 0, l = 0, space, copy = 0, offset = 0;
 	u8 *raw, *to, *from;
 	struct page *page;

@@@ -2356,11 -2356,11 +2356,11 @@@
 	/* First find the starting scatterlist element */
 	i = msg->sg.start;
 	do {
+		offset += l;
 		l = sk_msg_elem(msg, i)->length;

 		if (start < offset + l)
 			break;
-		offset += l;
 		sk_msg_iter_var_next(i);
 	} while (i != msg->sg.end);
@@@ -2415,6 -2415,7 +2415,7 @@@
 		sk_msg_iter_var_next(i);
 		sg_unmark_end(psge);
+		sg_unmark_end(&rsge);
 		sk_msg_iter_next(msg, end);
 	}

@@@ -2506,7 -2507,7 +2507,7 @@@ static void sk_msg_shift_right(struct s
 BPF_CALL_4(bpf_msg_pop_data, struct sk_msg *, msg, u32, start,
 	   u32, len, u64, flags)
 {
-	u32 i = 0, l, space, offset = 0;
+	u32 i = 0, l = 0, space, offset = 0;
 	u64 last = start + len;
 	int pop;

@@@ -2516,11 -2517,11 +2517,11 @@@
 	/* First find the starting scatterlist element */
 	i = msg->sg.start;
 	do {
+		offset += l;
 		l = sk_msg_elem(msg, i)->length;

 		if (start < offset + l)
 			break;
-		offset += l;
 		sk_msg_iter_var_next(i);
 	} while (i != msg->sg.end);

@@@ -3511,16 -3512,36 +3512,16 @@@ err
 }

 static int __bpf_tx_xdp_map(struct net_device *dev_rx, void *fwd,
-			    struct bpf_map *map,
-			    struct xdp_buff *xdp,
-			    u32 index)
+			    struct bpf_map *map, struct xdp_buff *xdp)
 {
-	int err;
-
 	switch (map->map_type) {
 	case BPF_MAP_TYPE_DEVMAP:
-	case BPF_MAP_TYPE_DEVMAP_HASH: {
-		struct bpf_dtab_netdev *dst = fwd;
-
-		err = dev_map_enqueue(dst, xdp, dev_rx);
-		if (unlikely(err))
-			return err;
-		break;
-	}
-	case BPF_MAP_TYPE_CPUMAP: {
-		struct bpf_cpu_map_entry *rcpu = fwd;
-
-		err = cpu_map_enqueue(rcpu, xdp, dev_rx);
-		if (unlikely(err))
-			return err;
-		break;
-	}
-	case BPF_MAP_TYPE_XSKMAP: {
-		struct xdp_sock *xs = fwd;
-
-		err = __xsk_map_redirect(map, xdp, xs);
-		return err;
-	}
+	case BPF_MAP_TYPE_DEVMAP_HASH:
+		return dev_map_enqueue(fwd, xdp, dev_rx);
+	case BPF_MAP_TYPE_CPUMAP:
+		return cpu_map_enqueue(fwd, xdp, dev_rx);
+	case BPF_MAP_TYPE_XSKMAP:
+		return __xsk_map_redirect(fwd, xdp);
 	default:
 		break;
 	}
@@@ -3529,9 -3550,26 +3530,9 @@@

 void xdp_do_flush_map(void)
 {
-	struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);
-	struct bpf_map *map = ri->map_to_flush;
-
-	ri->map_to_flush = NULL;
-	if (map) {
-		switch (map->map_type) {
-		case BPF_MAP_TYPE_DEVMAP:
-		case BPF_MAP_TYPE_DEVMAP_HASH:
-			__dev_map_flush(map);
-			break;
-		case BPF_MAP_TYPE_CPUMAP:
-			__cpu_map_flush(map);
-			break;
-		case BPF_MAP_TYPE_XSKMAP:
-			__xsk_map_flush(map);
-			break;
-		default:
-			break;
-		}
-	}
+	__dev_map_flush();
+	__cpu_map_flush();
+	__xsk_map_flush();
 }
 EXPORT_SYMBOL_GPL(xdp_do_flush_map);
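The rewritten xdp_do_flush_map() above no longer tracks a single map_to_flush per CPU; it unconditionally walks the per-CPU devmap, cpumap and xskmap flush lists, which is what allows one NAPI cycle to redirect into several map types. The driver-side contract is unchanged; a sketch of the usual pattern, where xdp_do_redirect() and xdp_do_flush_map() are the real entry points and all mydrv_* pieces are hypothetical:

    /* Illustrative poll loop: redirect during the loop, flush once at
     * the end so batched frames actually leave the flush lists.
     */
    static int mydrv_napi_poll(struct napi_struct *napi, int budget)
    {
    	struct mydrv_ring *ring = container_of(napi, struct mydrv_ring, napi);
    	bool flush_needed = false;
    	int done = 0;

    	while (done < budget) {
    		struct xdp_buff xdp;

    		if (!mydrv_fetch_frame(ring, &xdp))	/* hypothetical */
    			break;

    		if (mydrv_run_xdp(ring, &xdp) == XDP_REDIRECT &&
    		    !xdp_do_redirect(ring->netdev, &xdp, ring->xdp_prog))
    			flush_needed = true;
    		done++;
    	}

    	/* One call now covers devmap, cpumap and xskmap redirects. */
    	if (flush_needed)
    		xdp_do_flush_map();

    	return done;
    }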
@@@ -3580,10 -3618,14 +3581,10 @@@ static int xdp_do_redirect_map(struct n
 	ri->tgt_value = NULL;
 	WRITE_ONCE(ri->map, NULL);

-	if (ri->map_to_flush && unlikely(ri->map_to_flush != map))
-		xdp_do_flush_map();
-
-	err = __bpf_tx_xdp_map(dev, fwd, map, xdp, index);
+	err = __bpf_tx_xdp_map(dev, fwd, map, xdp);
 	if (unlikely(err))
 		goto err;

-	ri->map_to_flush = map;
 	_trace_xdp_redirect_map(dev, xdp_prog, fwd, map, index);
 	return 0;
 err:
@@@ -5277,8 -5319,7 +5278,7 @@@ __bpf_sk_lookup(struct sk_buff *skb, st
 	if (sk) {
 		sk = sk_to_full_sk(sk);
 		if (!sk_fullsock(sk)) {
-			if (!sock_flag(sk, SOCK_RCU_FREE))
-				sock_gen_put(sk);
+			sock_gen_put(sk);
 			return NULL;
 		}
 	}
@@@ -5315,8 -5356,7 +5315,7 @@@ bpf_sk_lookup(struct sk_buff *skb, stru
 	if (sk) {
 		sk = sk_to_full_sk(sk);
 		if (!sk_fullsock(sk)) {
-			if (!sock_flag(sk, SOCK_RCU_FREE))
-				sock_gen_put(sk);
+			sock_gen_put(sk);
 			return NULL;
 		}
 	}
@@@ -5383,7 -5423,8 +5382,8 @@@ static const struct bpf_func_proto bpf_

 BPF_CALL_1(bpf_sk_release, struct sock *, sk)
 {
-	if (!sock_flag(sk, SOCK_RCU_FREE))
+	/* Only full sockets have sk->sk_flags. */
+	if (!sk_fullsock(sk) || !sock_flag(sk, SOCK_RCU_FREE))
 		sock_gen_put(sk);
 	return 0;
 }
@@@ -7607,21 -7648,21 +7607,21 @@@ u32 bpf_sock_convert_ctx_access(enum bp
 		break;

 	case offsetof(struct bpf_sock, type):
-		BUILD_BUG_ON(HWEIGHT32(SK_FL_TYPE_MASK) != BITS_PER_BYTE * 2);
-		*insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg,
-				      offsetof(struct sock, __sk_flags_offset));
-		*insn++ = BPF_ALU32_IMM(BPF_AND, si->dst_reg, SK_FL_TYPE_MASK);
-		*insn++ = BPF_ALU32_IMM(BPF_RSH, si->dst_reg, SK_FL_TYPE_SHIFT);
-		*target_size = 2;
+		*insn++ = BPF_LDX_MEM(
+			BPF_FIELD_SIZEOF(struct sock, sk_type),
+			si->dst_reg, si->src_reg,
+			bpf_target_off(struct sock, sk_type,
+				       sizeof_field(struct sock, sk_type),
+				       target_size));
 		break;

 	case offsetof(struct bpf_sock, protocol):
-		BUILD_BUG_ON(HWEIGHT32(SK_FL_PROTO_MASK) != BITS_PER_BYTE);
-		*insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg,
-				      offsetof(struct sock, __sk_flags_offset));
-		*insn++ = BPF_ALU32_IMM(BPF_AND, si->dst_reg, SK_FL_PROTO_MASK);
-		*insn++ = BPF_ALU32_IMM(BPF_RSH, si->dst_reg, SK_FL_PROTO_SHIFT);
-		*target_size = 1;
+		*insn++ = BPF_LDX_MEM(
+			BPF_FIELD_SIZEOF(struct sock, sk_protocol),
+			si->dst_reg, si->src_reg,
+			bpf_target_off(struct sock, sk_protocol,
+				       sizeof_field(struct sock, sk_protocol),
+				       target_size));
 		break;

 	case offsetof(struct bpf_sock, src_ip4):
@@@ -7903,13 -7944,20 +7903,13 @@@ static u32 sock_addr_convert_ctx_access
 		break;

 	case offsetof(struct bpf_sock_addr, type):
-		SOCK_ADDR_LOAD_NESTED_FIELD_SIZE_OFF(
-			struct bpf_sock_addr_kern, struct sock, sk,
-			__sk_flags_offset, BPF_W, 0);
-		*insn++ = BPF_ALU32_IMM(BPF_AND, si->dst_reg, SK_FL_TYPE_MASK);
-		*insn++ = BPF_ALU32_IMM(BPF_RSH, si->dst_reg, SK_FL_TYPE_SHIFT);
+		SOCK_ADDR_LOAD_NESTED_FIELD(struct bpf_sock_addr_kern,
+					    struct sock, sk, sk_type);
 		break;

 	case offsetof(struct bpf_sock_addr, protocol):
-		SOCK_ADDR_LOAD_NESTED_FIELD_SIZE_OFF(
-			struct bpf_sock_addr_kern, struct sock, sk,
-			__sk_flags_offset, BPF_W, 0);
-		*insn++ = BPF_ALU32_IMM(BPF_AND, si->dst_reg, SK_FL_PROTO_MASK);
-		*insn++ = BPF_ALU32_IMM(BPF_RSH, si->dst_reg,
-					SK_FL_PROTO_SHIFT);
+		SOCK_ADDR_LOAD_NESTED_FIELD(struct bpf_sock_addr_kern,
+					    struct sock, sk, sk_protocol);
 		break;

 	case offsetof(struct bpf_sock_addr, msg_src_ip4):
@@@ -8828,11 -8876,11 +8828,11 @@@ sk_reuseport_is_valid_access(int off, i
 					     skb,				\
 					     SKB_FIELD)

-#define SK_REUSEPORT_LOAD_SK_FIELD_SIZE_OFF(SK_FIELD, BPF_SIZE, EXTRA_OFF) \
-	SOCK_ADDR_LOAD_NESTED_FIELD_SIZE_OFF(struct sk_reuseport_kern,	\
-					     struct sock,		\
-					     sk,			\
-					     SK_FIELD, BPF_SIZE, EXTRA_OFF)
+#define SK_REUSEPORT_LOAD_SK_FIELD(SK_FIELD)				\
+	SOCK_ADDR_LOAD_NESTED_FIELD(struct sk_reuseport_kern,		\
+				    struct sock,			\
+				    sk,					\
+				    SK_FIELD)

 static u32 sk_reuseport_convert_ctx_access(enum bpf_access_type type,
 					   const struct bpf_insn *si,
@@@ -8856,7 -8904,16 +8856,7 @@@
 		break;

 	case offsetof(struct sk_reuseport_md, ip_protocol):
-		BUILD_BUG_ON(HWEIGHT32(SK_FL_PROTO_MASK) != BITS_PER_BYTE);
-		SK_REUSEPORT_LOAD_SK_FIELD_SIZE_OFF(__sk_flags_offset,
-						    BPF_W, 0);
-		*insn++ = BPF_ALU32_IMM(BPF_AND, si->dst_reg, SK_FL_PROTO_MASK);
-		*insn++ = BPF_ALU32_IMM(BPF_RSH, si->dst_reg,
-					SK_FL_PROTO_SHIFT);
-		/* SK_FL_PROTO_MASK and SK_FL_PROTO_SHIFT are endian
-		 * aware. No further narrowing or masking is needed.
-		 */
-		*target_size = 1;
+		SK_REUSEPORT_LOAD_SK_FIELD(sk_protocol);
 		break;

 	case offsetof(struct sk_reuseport_md, data_end):
@@@ -8884,11 -8941,3 +8884,11 @@@ const struct bpf_verifier_ops sk_reusep
 const struct bpf_prog_ops sk_reuseport_prog_ops = {
 };
 #endif /* CONFIG_INET */
+
+DEFINE_BPF_DISPATCHER(bpf_dispatcher_xdp)
+
+void bpf_prog_change_xdp(struct bpf_prog *prev_prog, struct bpf_prog *prog)
+{
+	bpf_dispatcher_change_prog(BPF_DISPATCHER_PTR(bpf_dispatcher_xdp),
+				   prev_prog, prog);
+}
diff --combined net/ipv4/fib_trie.c
index 6ce1f2bbffd0,195469a13371..ff0c24371e33
--- a/net/ipv4/fib_trie.c
+++ b/net/ipv4/fib_trie.c
@@@ -980,12 -980,9 +980,12 @@@ static struct key_vector *fib_find_node
 /* Return the first fib alias matching TOS with
  * priority less than or equal to PRIO.
+ * If 'find_first' is set, return the first matching
+ * fib alias, regardless of TOS and priority.
  */
 static struct fib_alias *fib_find_alias(struct hlist_head *fah, u8 slen,
-					u8 tos, u32 prio, u32 tb_id)
+					u8 tos, u32 prio, u32 tb_id,
+					bool find_first)
 {
 	struct fib_alias *fa;

@@@ -1001,8 -998,6 +1001,8 @@@
 			continue;
 		if (fa->tb_id != tb_id)
 			break;
+		if (find_first)
+			return fa;
 		if (fa->fa_tos > tos)
 			continue;
 		if (fa->fa_info->fib_priority >= prio || fa->fa_tos < tos)
@@@ -1012,52 -1007,6 +1012,52 @@@
 	return NULL;
 }

+static struct fib_alias *
+fib_find_matching_alias(struct net *net, const struct fib_rt_info *fri)
+{
+	u8 slen = KEYLENGTH - fri->dst_len;
+	struct key_vector *l, *tp;
+	struct fib_table *tb;
+	struct fib_alias *fa;
+	struct trie *t;
+
+	tb = fib_get_table(net, fri->tb_id);
+	if (!tb)
+		return NULL;
+
+	t = (struct trie *)tb->tb_data;
+	l = fib_find_node(t, &tp, be32_to_cpu(fri->dst));
+	if (!l)
+		return NULL;
+
+	hlist_for_each_entry_rcu(fa, &l->leaf, fa_list) {
+		if (fa->fa_slen == slen && fa->tb_id == fri->tb_id &&
+		    fa->fa_tos == fri->tos && fa->fa_info == fri->fi &&
+		    fa->fa_type == fri->type)
+			return fa;
+	}
+
+	return NULL;
+}
+
+void fib_alias_hw_flags_set(struct net *net, const struct fib_rt_info *fri)
+{
+	struct fib_alias *fa_match;
+
+	rcu_read_lock();
+
+	fa_match = fib_find_matching_alias(net, fri);
+	if (!fa_match)
+		goto out;
+
+	fa_match->offload = fri->offload;
+	fa_match->trap = fri->trap;
+
+out:
+	rcu_read_unlock();
+}
+EXPORT_SYMBOL_GPL(fib_alias_hw_flags_set);
+
 static void trie_rebalance(struct trie *t, struct key_vector *tn)
 {
 	while (!IS_TRIE(tn))
@@@ -1114,6 -1063,9 +1114,6 @@@ noleaf
 	return -ENOMEM;
 }
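The exported fib_alias_hw_flags_set() above is the hook a switch driver can use to reflect hardware state back into the FIB; fn_trie_dump_leaf() later in this diff then surfaces the offload/trap bits to user space via struct fib_rt_info. A sketch of a driver marking a route as offloaded after programming it into hardware; the field layout follows the fri.* assignments visible in fn_trie_dump_leaf below, while the surrounding function, the route and the table ID are made up:

    /* Illustrative: flag 198.51.100.0/24 in table tb_id as offloaded. */
    static void mydrv_route_offloaded(struct net *net, u32 tb_id,
    				  struct fib_info *fi)
    {
    	struct fib_rt_info fri = {
    		.fi	 = fi,
    		.tb_id	 = tb_id,
    		.dst	 = htonl(0xC6336400),	/* 198.51.100.0 */
    		.dst_len = 24,
    		.tos	 = 0,
    		.type	 = RTN_UNICAST,
    		.offload = 1,
    		.trap	 = 0,
    	};

    	fib_alias_hw_flags_set(net, &fri);
    }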
-/* fib notifier for ADD is sent before calling fib_insert_alias with
- * the expectation that the only possible failure ENOMEM
- */
 static int fib_insert_alias(struct trie *t, struct key_vector *tp,
 			    struct key_vector *l, struct fib_alias *new,
 			    struct fib_alias *fa, t_key key)
@@@ -1166,13 -1118,11 +1166,13 @@@ static bool fib_valid_key_len(u32 key,
 	return true;
 }

+static void fib_remove_alias(struct trie *t, struct key_vector *tp,
+			     struct key_vector *l, struct fib_alias *old);
+
 /* Caller must hold RTNL. */
 int fib_table_insert(struct net *net, struct fib_table *tb,
 		     struct fib_config *cfg, struct netlink_ext_ack *extack)
 {
-	enum fib_event_type event = FIB_EVENT_ENTRY_ADD;
 	struct trie *t = (struct trie *)tb->tb_data;
 	struct fib_alias *fa, *new_fa;
 	struct key_vector *l, *tp;
@@@ -1199,7 -1149,7 +1199,7 @@@

 	l = fib_find_node(t, &tp, key);
 	fa = l ? fib_find_alias(&l->leaf, slen, tos, fi->fib_priority,
-				tb->tb_id) : NULL;
+				tb->tb_id, false) : NULL;

 	/* Now fa, if non-NULL, points to the first fib alias
 	 * with the same keys [prefix,tos,priority], if such key already
@@@ -1266,29 -1216,19 +1266,29 @@@
 			new_fa->fa_slen = fa->fa_slen;
 			new_fa->tb_id = tb->tb_id;
 			new_fa->fa_default = -1;
+			new_fa->offload = 0;
+			new_fa->trap = 0;

-			err = call_fib_entry_notifiers(net,
-						       FIB_EVENT_ENTRY_REPLACE,
-						       key, plen, new_fa,
-						       extack);
-			if (err)
-				goto out_free_new_fa;
+			hlist_replace_rcu(&fa->fa_list, &new_fa->fa_list);
+
+			if (fib_find_alias(&l->leaf, fa->fa_slen, 0, 0,
+					   tb->tb_id, true) == new_fa) {
+				enum fib_event_type fib_event;
+
+				fib_event = FIB_EVENT_ENTRY_REPLACE;
+				err = call_fib_entry_notifiers(net, fib_event,
+							       key, plen,
+							       new_fa, extack);
+				if (err) {
+					hlist_replace_rcu(&new_fa->fa_list,
+							  &fa->fa_list);
+					goto out_free_new_fa;
+				}
+			}

 			rtmsg_fib(RTM_NEWROUTE, htonl(key), new_fa, plen,
 				  tb->tb_id, &cfg->fc_nlinfo, nlflags);

-			hlist_replace_rcu(&fa->fa_list, &new_fa->fa_list);
-
 			alias_free_mem_rcu(fa);

 			fib_release_info(fi_drop);
@@@ -1304,10 -1244,12 +1304,10 @@@
 			if (fa_match)
 				goto out;

-		if (cfg->fc_nlflags & NLM_F_APPEND) {
-			event = FIB_EVENT_ENTRY_APPEND;
+		if (cfg->fc_nlflags & NLM_F_APPEND)
 			nlflags |= NLM_F_APPEND;
-		} else {
+		else
 			fa = fa_first;
-		}
 	}
 	err = -ENOENT;
 	if (!(cfg->fc_nlflags & NLM_F_CREATE))
@@@ -1326,29 -1268,15 +1326,29 @@@
 	new_fa->fa_slen = slen;
 	new_fa->tb_id = tb->tb_id;
 	new_fa->fa_default = -1;
-
-	err = call_fib_entry_notifiers(net, event, key, plen, new_fa, extack);
-	if (err)
-		goto out_free_new_fa;
+	new_fa->offload = 0;
+	new_fa->trap = 0;
 	/* Insert new entry to the list. */
 	err = fib_insert_alias(t, tp, l, new_fa, fa, key);
 	if (err)
-		goto out_fib_notif;
+		goto out_free_new_fa;
+
+	/* The alias was already inserted, so the node must exist. */
+	l = l ? l : fib_find_node(t, &tp, key);
+	if (WARN_ON_ONCE(!l))
+		goto out_free_new_fa;
+
+	if (fib_find_alias(&l->leaf, new_fa->fa_slen, 0, 0, tb->tb_id, true) ==
+	    new_fa) {
+		enum fib_event_type fib_event;
+
+		fib_event = FIB_EVENT_ENTRY_REPLACE;
+		err = call_fib_entry_notifiers(net, fib_event, key, plen,
+					       new_fa, extack);
+		if (err)
+			goto out_remove_new_fa;
+	}

 	if (!plen)
 		tb->tb_num_default++;
@@@ -1359,8 -1287,14 +1359,8 @@@
 succeeded:
 	return 0;

-out_fib_notif:
-	/* notifier was sent that entry would be added to trie, but
-	 * the add failed and need to recover. Only failure for
-	 * fib_insert_alias is ENOMEM.
-	 */
-	NL_SET_ERR_MSG(extack, "Failed to insert route into trie");
-	call_fib_entry_notifiers(net, FIB_EVENT_ENTRY_DEL, key,
-				 plen, new_fa, NULL);
+out_remove_new_fa:
+	fib_remove_alias(t, tp, l, new_fa);
 out_free_new_fa:
 	kmem_cache_free(fn_alias_kmem, new_fa);
 out:
@@@ -1611,36 -1545,6 +1611,36 @@@ static void fib_remove_alias(struct tri
 	node_pull_suffix(tp, fa->fa_slen);
 }

+static void fib_notify_alias_delete(struct net *net, u32 key,
+				    struct hlist_head *fah,
+				    struct fib_alias *fa_to_delete,
+				    struct netlink_ext_ack *extack)
+{
+	struct fib_alias *fa_next, *fa_to_notify;
+	u32 tb_id = fa_to_delete->tb_id;
+	u8 slen = fa_to_delete->fa_slen;
+	enum fib_event_type fib_event;
+
+	/* Do not notify if we do not care about the route. */
+	if (fib_find_alias(fah, slen, 0, 0, tb_id, true) != fa_to_delete)
+		return;
+
+	/* Determine if the route should be replaced by the next route in the
+	 * list.
+	 */
+	fa_next = hlist_entry_safe(fa_to_delete->fa_list.next,
+				   struct fib_alias, fa_list);
+	if (fa_next && fa_next->fa_slen == slen && fa_next->tb_id == tb_id) {
+		fib_event = FIB_EVENT_ENTRY_REPLACE;
+		fa_to_notify = fa_next;
+	} else {
+		fib_event = FIB_EVENT_ENTRY_DEL;
+		fa_to_notify = fa_to_delete;
+	}
+	call_fib_entry_notifiers(net, fib_event, key, KEYLENGTH - slen,
+				 fa_to_notify, extack);
+}
+
 /* Caller must hold RTNL. */
 int fib_table_delete(struct net *net, struct fib_table *tb,
 		     struct fib_config *cfg, struct netlink_ext_ack *extack)
@@@ -1662,7 -1566,7 +1662,7 @@@
 	if (!l)
 		return -ESRCH;
-	fa = fib_find_alias(&l->leaf, slen, tos, 0, tb->tb_id);
+	fa = fib_find_alias(&l->leaf, slen, tos, 0, tb->tb_id, false);
 	if (!fa)
 		return -ESRCH;

@@@ -1694,7 -1598,8 +1694,7 @@@
 	if (!fa_to_delete)
 		return -ESRCH;

-	call_fib_entry_notifiers(net, FIB_EVENT_ENTRY_DEL, key, plen,
-				 fa_to_delete, extack);
+	fib_notify_alias_delete(net, key, &l->leaf, fa_to_delete, extack);
 	rtmsg_fib(RTM_DELROUTE, htonl(key), fa_to_delete, plen, tb->tb_id,
 		  &cfg->fc_nlinfo, 0);

@@@ -2018,8 -1923,10 +2018,8 @@@ int fib_table_flush(struct net *net, st
 				continue;
 			}

-			call_fib_entry_notifiers(net, FIB_EVENT_ENTRY_DEL,
-						 n->key,
-						 KEYLENGTH - fa->fa_slen, fa,
-						 NULL);
+			fib_notify_alias_delete(net, n->key, &n->leaf, fa,
+						NULL);
 			hlist_del_rcu(&fa->fa_list);
 			fib_release_info(fa->fa_info);
 			alias_free_mem_rcu(fa);
@@@ -2115,7 -2022,6 +2115,7 @@@ static int fib_leaf_notify(struct key_v
 			   struct netlink_ext_ack *extack)
 {
 	struct fib_alias *fa;
+	int last_slen = -1;
 	int err;

 	hlist_for_each_entry_rcu(fa, &l->leaf, fa_list) {
@@@ -2130,12 -2036,8 +2130,12 @@@
 		if (tb->tb_id != fa->tb_id)
 			continue;

-		err = call_fib_entry_notifier(nb, FIB_EVENT_ENTRY_ADD, l->key,
-					      KEYLENGTH - fa->fa_slen,
+		if (fa->fa_slen == last_slen)
+			continue;
+
+		last_slen = fa->fa_slen;
+		err = call_fib_entry_notifier(nb, FIB_EVENT_ENTRY_REPLACE,
+					      l->key, KEYLENGTH - fa->fa_slen,
 					      fa, extack);
 		if (err)
 			return err;
@@@ -2244,20 -2146,14 +2244,20 @@@ static int fn_trie_dump_leaf(struct key

 		if (filter->dump_routes) {
 			if (!s_fa) {
+				struct fib_rt_info fri;
+
+				fri.fi = fi;
+				fri.tb_id = tb->tb_id;
+				fri.dst = xkey;
+				fri.dst_len = KEYLENGTH - fa->fa_slen;
+				fri.tos = fa->fa_tos;
+				fri.type = fa->fa_type;
+				fri.offload = fa->offload;
+				fri.trap = fa->trap;
 				err = fib_dump_info(skb,
 						    NETLINK_CB(cb->skb).portid,
 						    cb->nlh->nlmsg_seq,
-						    RTM_NEWROUTE,
-						    tb->tb_id, fa->fa_type,
-						    xkey,
-						    KEYLENGTH - fa->fa_slen,
-						    fa->fa_tos, fi, flags);
+						    RTM_NEWROUTE, &fri, flags);
 				if (err < 0)
 					goto stop;
 			}
@@@ -2297,6 -2193,12 +2297,12 @@@ int fib_table_dump(struct fib_table *tb
 	int count = cb->args[2];
 	t_key key = cb->args[3];

+	/* First time here, count and key are both always 0.  Count > 0
+	 * and key == 0 means the dump has wrapped around and we are done.
+	 */
+	if (count && !key)
+		return skb->len;
+
 	while ((l = leaf_walk_rcu(&tp, key)) != NULL) {
 		int err;
diff --combined net/ipv4/tcp_input.c
index 2914fdf1d543,5347ab2c9c58..358365598216
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@@ -915,9 -915,10 +915,10 @@@ static void tcp_check_sack_reordering(s
 /* This must be called before lost_out is incremented */
 static void tcp_verify_retransmit_hint(struct tcp_sock *tp, struct sk_buff *skb)
 {
-	if (!tp->retransmit_skb_hint ||
-	    before(TCP_SKB_CB(skb)->seq,
-		   TCP_SKB_CB(tp->retransmit_skb_hint)->seq))
+	if ((!tp->retransmit_skb_hint && tp->retrans_out >= tp->lost_out) ||
+	    (tp->retransmit_skb_hint &&
+	     before(TCP_SKB_CB(skb)->seq,
+		    TCP_SKB_CB(tp->retransmit_skb_hint)->seq)))
 		tp->retransmit_skb_hint = skb;
 }

@@@ -1422,7 -1423,7 +1423,7 @@@ static struct sk_buff *tcp_shift_skb_da
 	if ((TCP_SKB_CB(prev)->sacked & TCPCB_TAGBITS) != TCPCB_SACKED_ACKED)
 		goto fallback;

-	if (!tcp_skb_can_collapse_to(prev))
+	if (!tcp_skb_can_collapse(prev, skb))
 		goto fallback;

 	in_sack = !after(start_seq, TCP_SKB_CB(skb)->seq) &&
@@@ -3553,7 -3554,7 +3554,7 @@@ static void tcp_xmit_recovery(struct so
 	if (rexmit == REXMIT_NONE || sk->sk_state == TCP_SYN_SENT)
 		return;

-	if (unlikely(rexmit == 2)) {
+	if (unlikely(rexmit == REXMIT_NEW)) {
 		__tcp_push_pending_frames(sk, tcp_current_mss(sk),
 					  TCP_NAGLE_OFF);
 		if (after(tp->snd_nxt, tp->high_seq))
@@@ -4423,9 -4424,6 +4424,9 @@@ static bool tcp_try_coalesce(struct soc
 	if (TCP_SKB_CB(from)->seq != TCP_SKB_CB(to)->end_seq)
 		return false;

+	if (!mptcp_skb_can_collapse(to, from))
+		return false;
+
 #ifdef CONFIG_TLS_DEVICE
 	if (from->decrypted != to->decrypted)
 		return false;
@@@ -4935,7 -4933,7 +4936,7 @@@ restart
 		/* The first skb to collapse is:
 		 * - not SYN/FIN and
 		 * - bloated or contains data before "start" or
-		 *   overlaps to the next one.
+		 *   overlaps to the next one and mptcp allow collapsing.
 		 */
 		if (!(TCP_SKB_CB(skb)->tcp_flags & (TCPHDR_SYN | TCPHDR_FIN)) &&
 		    (tcp_win_from_space(sk, skb->truesize) > skb->len ||
@@@ -4944,7 -4942,7 +4945,7 @@@
 			break;
 		}

-		if (n && n != tail &&
+		if (n && n != tail && mptcp_skb_can_collapse(skb, n) &&
 		    TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(n)->seq) {
 			end_of_skbs = false;
 			break;
@@@ -4977,7 -4975,6 +4978,7 @@@
 		else
 			__skb_queue_tail(&tmp, nskb); /* defer rbtree insertion */
 		skb_set_owner_r(nskb, sk);
+		mptcp_skb_ext_move(nskb, skb);

 		/* Copy data, releasing collapsed skbs. */
 		while (copy > 0) {
@@@ -4997,7 -4994,6 +4998,7 @@@
 			skb = tcp_collapse_one(sk, skb, list, root);
 			if (!skb ||
 			    skb == tail ||
+			    !mptcp_skb_can_collapse(nskb, skb) ||
 			    (TCP_SKB_CB(skb)->tcp_flags & (TCPHDR_SYN | TCPHDR_FIN)))
 				goto end;
 #ifdef CONFIG_TLS_DEVICE
diff --combined net/netfilter/nft_tunnel.c
index 23cd163689d5,5284fcf16be7..4c3f2e24c7cb
--- a/net/netfilter/nft_tunnel.c
+++ b/net/netfilter/nft_tunnel.c
@@@ -76,7 -76,7 +76,7 @@@ static int nft_tunnel_get_init(const st
 	struct nft_tunnel *priv = nft_expr_priv(expr);
 	u32 len;
-	if (!tb[NFTA_TUNNEL_KEY] &&
+	if (!tb[NFTA_TUNNEL_KEY] ||
 	    !tb[NFTA_TUNNEL_DREG])
 		return -EINVAL;

@@@ -248,9 -248,8 +248,9 @@@ static int nft_tunnel_obj_vxlan_init(co
 }

 static const struct nla_policy nft_tunnel_opts_erspan_policy[NFTA_TUNNEL_KEY_ERSPAN_MAX + 1] = {
+	[NFTA_TUNNEL_KEY_ERSPAN_VERSION] = { .type = NLA_U32 },
 	[NFTA_TUNNEL_KEY_ERSPAN_V1_INDEX]	= { .type = NLA_U32 },
-	[NFTA_TUNNEL_KEY_ERSPAN_V2_DIR]	= { .type = NLA_U8 },
+	[NFTA_TUNNEL_KEY_ERSPAN_V2_DIR]		= { .type = NLA_U8 },
 	[NFTA_TUNNEL_KEY_ERSPAN_V2_HWID]	= { .type = NLA_U8 },
 };

@@@ -267,6 -266,9 +267,9 @@@ static int nft_tunnel_obj_erspan_init(c
 	if (err < 0)
 		return err;

+	if (!tb[NFTA_TUNNEL_KEY_ERSPAN_VERSION])
+		 return -EINVAL;
+
 	version = ntohl(nla_get_be32(tb[NFTA_TUNNEL_KEY_ERSPAN_VERSION]));
 	switch (version) {
 	case ERSPAN_VERSION:
@@@ -443,15 -445,10 +446,15 @@@ static int nft_tunnel_ip_dump(struct sk
 		if (!nest)
 			return -1;
-		if (nla_put_in6_addr(skb, NFTA_TUNNEL_KEY_IP6_SRC, &info->key.u.ipv6.src) < 0 ||
-		    nla_put_in6_addr(skb, NFTA_TUNNEL_KEY_IP6_DST, &info->key.u.ipv6.dst) < 0 ||
-		    nla_put_be32(skb, NFTA_TUNNEL_KEY_IP6_FLOWLABEL, info->key.label))
+		if (nla_put_in6_addr(skb, NFTA_TUNNEL_KEY_IP6_SRC,
+				     &info->key.u.ipv6.src) < 0 ||
+		    nla_put_in6_addr(skb, NFTA_TUNNEL_KEY_IP6_DST,
+				     &info->key.u.ipv6.dst) < 0 ||
+		    nla_put_be32(skb, NFTA_TUNNEL_KEY_IP6_FLOWLABEL,
+				 info->key.label)) {
+			nla_nest_cancel(skb, nest);
 			return -1;
+		}

 		nla_nest_end(skb, nest);
 	} else {
@@@ -459,13 -456,9 +462,13 @@@
 		if (!nest)
 			return -1;

-		if (nla_put_in_addr(skb, NFTA_TUNNEL_KEY_IP_SRC, info->key.u.ipv4.src) < 0 ||
-		    nla_put_in_addr(skb, NFTA_TUNNEL_KEY_IP_DST, info->key.u.ipv4.dst) < 0)
+		if (nla_put_in_addr(skb, NFTA_TUNNEL_KEY_IP_SRC,
+				    info->key.u.ipv4.src) < 0 ||
+		    nla_put_in_addr(skb, NFTA_TUNNEL_KEY_IP_DST,
+				    info->key.u.ipv4.dst) < 0) {
+			nla_nest_cancel(skb, nest);
 			return -1;
+		}

 		nla_nest_end(skb, nest);
 	}
@@@ -477,58 -470,42 +480,58 @@@ static int nft_tunnel_opts_dump(struct
 				struct nft_tunnel_obj *priv)
 {
 	struct nft_tunnel_opts *opts = &priv->opts;
-	struct nlattr *nest;
+	struct nlattr *nest, *inner;

 	nest = nla_nest_start_noflag(skb, NFTA_TUNNEL_KEY_OPTS);
 	if (!nest)
 		return -1;

 	if (opts->flags & TUNNEL_VXLAN_OPT) {
+		inner = nla_nest_start_noflag(skb, NFTA_TUNNEL_KEY_OPTS_VXLAN);
+		if (!inner)
+			goto failure;
 		if (nla_put_be32(skb, NFTA_TUNNEL_KEY_VXLAN_GBP,
 				 htonl(opts->u.vxlan.gbp)))
-			return -1;
+			goto inner_failure;
+		nla_nest_end(skb, inner);
 	} else if (opts->flags & TUNNEL_ERSPAN_OPT) {
+		inner = nla_nest_start_noflag(skb, NFTA_TUNNEL_KEY_OPTS_ERSPAN);
+		if (!inner)
+			goto failure;
+		if (nla_put_be32(skb, NFTA_TUNNEL_KEY_ERSPAN_VERSION,
+				 htonl(opts->u.erspan.version)))
+			goto inner_failure;
 		switch (opts->u.erspan.version) {
 		case ERSPAN_VERSION:
 			if (nla_put_be32(skb, NFTA_TUNNEL_KEY_ERSPAN_V1_INDEX,
 					 opts->u.erspan.u.index))
-				return -1;
+				goto inner_failure;
 			break;
 		case ERSPAN_VERSION2:
 			if (nla_put_u8(skb, NFTA_TUNNEL_KEY_ERSPAN_V2_HWID,
 				       get_hwid(&opts->u.erspan.u.md2)) ||
 			    nla_put_u8(skb, NFTA_TUNNEL_KEY_ERSPAN_V2_DIR,
 				       opts->u.erspan.u.md2.dir))
-				return -1;
+				goto inner_failure;
 			break;
 		}
+		nla_nest_end(skb, inner);
 	}
 	nla_nest_end(skb, nest);
-	return 0;
+
+	return 0;
+
+inner_failure:
+	nla_nest_cancel(skb, inner);
+failure:
+	nla_nest_cancel(skb, nest);
+	return -1;
 }

 static int nft_tunnel_ports_dump(struct sk_buff *skb,
 				 struct ip_tunnel_info *info)
 {
-	if (nla_put_be16(skb, NFTA_TUNNEL_KEY_SPORT, htons(info->key.tp_src)) < 0 ||
-	    nla_put_be16(skb, NFTA_TUNNEL_KEY_DPORT, htons(info->key.tp_dst)) < 0)
+	if (nla_put_be16(skb, NFTA_TUNNEL_KEY_SPORT, info->key.tp_src) < 0 ||
+	    nla_put_be16(skb, NFTA_TUNNEL_KEY_DPORT, info->key.tp_dst) < 0)
 		return -1;

 	return 0;
diff --combined net/wireless/nl80211.c
index fa3526592c51,1e97ac5435b2..123b8d720a59
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@@ -10843,6 -10843,7 +10843,7 @@@ static int cfg80211_cqm_rssi_update(str
 	if (err)
 		return err;
+	cfg80211_sinfo_release_content(&sinfo);
 	if (sinfo.filled & BIT_ULL(NL80211_STA_INFO_BEACON_SIGNAL_AVG))
 		wdev->cqm_config->last_rssi_event_value =
 			(s8) sinfo.rx_beacon_signal_avg;
@@@ -12900,7 -12901,8 +12901,7 @@@ static int nl80211_vendor_check_policy(
 		return -EINVAL;
 	}

-	return nl80211_validate_nested(attr, vcmd->maxattr, vcmd->policy,
-				       extack);
+	return nla_validate_nested(attr, vcmd->maxattr, vcmd->policy, extack);
 }

 static int nl80211_vendor_cmd(struct sk_buff *skb, struct genl_info *info)
@@@ -13795,6 -13797,8 +13796,8 @@@ static int nl80211_probe_mesh_link(stru
 	if (err)
 		return err;

+	cfg80211_sinfo_release_content(&sinfo);
+
 	return rdev_probe_mesh_link(rdev, dev, dest, buf, len);
 }
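Both nl80211 hunks above fix the same leak pattern: rdev_get_station() fills struct station_info, whose per-TID stats are driver-allocated and must be freed with cfg80211_sinfo_release_content() once the caller has copied what it needs. The rule of thumb, sketched with an illustrative wrapper (only the two rdev/cfg80211 calls are real API):

    static int example_query_station(struct cfg80211_registered_device *rdev,
    				 struct net_device *dev, const u8 *mac)
    {
    	struct station_info sinfo = {};
    	int err;

    	err = rdev_get_station(rdev, dev, mac, &sinfo);
    	if (err)
    		return err;

    	/* ... read the needed sinfo fields by value here ... */

    	cfg80211_sinfo_release_content(&sinfo);	/* frees sinfo.pertid */
    	return 0;
    }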