The following commit has been merged in the master branch:

commit 3be042cf46feeedf664152d063376b5c17026d1d
Merge: b6b614558ed5b2ca50edacc0f2fbf5f52158c86c 1f719a2f3fa67665578c759ac34fd3d3690c1a20
Author: Jakub Kicinski <kuba@kernel.org>
Date:   Thu Feb 8 15:20:37 2024 -0800
Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net
Cross-merge networking fixes after downstream PR.
No conflicts.
Adjacent changes:
drivers/net/ethernet/stmicro/stmmac/common.h
  38cc3c6dcc09 ("net: stmmac: protect updates of 64-bit statistics counters")
  fd5a6a71313e ("net: stmmac: est: Per Tx-queue error count for HLBF")
  c5c3e1bfc9e0 ("net: stmmac: Offload queueMaxSDU from tc-taprio")

drivers/net/wireless/microchip/wilc1000/netdev.c
  c9013880284d ("wifi: fill in MODULE_DESCRIPTION()s for wilc1000")
  328efda22af8 ("wifi: wilc1000: do not realloc workqueue everytime an interface is added")

net/unix/garbage.c
  11498715f266 ("af_unix: Remove io_uring code for GC.")
  1279f9d9dec2 ("af_unix: Call kfree_skb() for dead unix_(sk)->oob_skb in GC.")
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
diff --combined MAINTAINERS
index 3f465fd778b1,3dfe7ea25320..83e20516ebff
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@@ -3799,7 -3799,6 +3799,7 @@@ M: Alexei Starovoitov <ast@kernel.org
  M: Daniel Borkmann <daniel@iogearbox.net>
  M: Andrii Nakryiko <andrii@kernel.org>
  R: Martin KaFai Lau <martin.lau@linux.dev>
 +R: Eduard Zingerman <eddyz87@gmail.com>
  R: Song Liu <song@kernel.org>
  R: Yonghong Song <yonghong.song@linux.dev>
  R: John Fastabend <john.fastabend@gmail.com>
@@@ -3860,7 -3859,6 +3860,7 @@@ F: net/unix/unix_bpf.
  
  BPF [LIBRARY] (libbpf)
  M: Andrii Nakryiko <andrii@kernel.org>
 +M: Eduard Zingerman <eddyz87@gmail.com>
  L: bpf@vger.kernel.org
  S: Maintained
  F: tools/lib/bpf/
@@@ -3918,7 -3916,6 +3918,7 @@@ F: security/bpf
  
  BPF [SELFTESTS] (Test Runners & Infrastructure)
  M: Andrii Nakryiko <andrii@kernel.org>
 +M: Eduard Zingerman <eddyz87@gmail.com>
  R: Mykola Lysenko <mykolal@fb.com>
  L: bpf@vger.kernel.org
  S: Maintained
@@@ -4172,14 -4169,14 +4172,14 @@@ F: drivers/firmware/broadcom/tee_bnxt_f
  F: drivers/net/ethernet/broadcom/bnxt/
  F: include/linux/firmware/broadcom/tee_bnxt_fw.h
  
- BROADCOM BRCM80211 IEEE802.11n WIRELESS DRIVER
- M: Arend van Spriel <aspriel@gmail.com>
- M: Franky Lin <franky.lin@broadcom.com>
- M: Hante Meuleman <hante.meuleman@broadcom.com>
+ BROADCOM BRCM80211 IEEE802.11 WIRELESS DRIVERS
+ M: Arend van Spriel <arend.vanspriel@broadcom.com>
  L: linux-wireless@vger.kernel.org
+ L: brcm80211@lists.linux.dev
  L: brcm80211-dev-list.pdl@broadcom.com
  S: Supported
  F: drivers/net/wireless/broadcom/brcm80211/
+ F: include/linux/platform_data/brcmfmac.h
  
  BROADCOM BRCMSTB GPIO DRIVER
  M: Doug Berger <opendmb@gmail.com>
@@@ -10094,7 -10091,7 +10094,7 @@@ L: linux-i2c@vger.kernel.or
  S: Maintained
  W: https://i2c.wiki.kernel.org/
  Q: https://patchwork.ozlabs.org/project/linux-i2c/list/
- T: git git://git.kernel.org/pub/scm/linux/kernel/git/wsa/linux.git
+ T: git git://git.kernel.org/pub/scm/linux/kernel/git/andi.shyti/linux.git
  F: Documentation/devicetree/bindings/i2c/
  F: drivers/i2c/algos/
  F: drivers/i2c/busses/
@@@ -11130,7 -11127,6 +11130,6 @@@ S: Supporte
  F: drivers/net/wireless/intel/iwlegacy/
  
  INTEL WIRELESS WIFI LINK (iwlwifi)
- M: Gregory Greenman <gregory.greenman@intel.com>
  M: Miri Korenblit <miriam.rachel.korenblit@intel.com>
  L: linux-wireless@vger.kernel.org
  S: Supported
@@@ -15087,7 -15083,6 +15086,7 @@@ NETDEVSI
  M: Jakub Kicinski <kuba@kernel.org>
  S: Maintained
  F: drivers/net/netdevsim/*
 +F: tools/testing/selftests/drivers/net/netdevsim/*
  
  NETEM NETWORK EMULATOR
  M: Stephen Hemminger <stephen@networkplumber.org>
@@@ -16866,9 -16861,8 +16865,8 @@@ F: Documentation/devicetree/bindings/pc
  F: drivers/pci/controller/pcie-xilinx-cpm.c
  
  PCI ENDPOINT SUBSYSTEM
- M: Lorenzo Pieralisi <lpieralisi@kernel.org>
+ M: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
  M: Krzysztof Wilczyński <kw@linux.com>
- R: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
  R: Kishon Vijay Abraham I <kishon@kernel.org>
  L: linux-pci@vger.kernel.org
  S: Supported
@@@ -18019,13 -18013,6 +18017,13 @@@ T: git git://git.kernel.org/pub/scm/lin
  F: Documentation/devicetree/bindings/net/wireless/qca,ath9k.yaml
  F: drivers/net/wireless/ath/ath9k/
  
 +QUALCOMM ATHEROS QCA7K ETHERNET DRIVER
 +M: Stefan Wahren <wahrenst@gmx.net>
 +L: netdev@vger.kernel.org
 +S: Maintained
 +F: Documentation/devicetree/bindings/net/qca,qca7000.txt
 +F: drivers/net/ethernet/qualcomm/qca*
 +
  QUALCOMM BAM-DMUX WWAN NETWORK DRIVER
  M: Stephan Gerhold <stephan@gerhold.net>
  L: netdev@vger.kernel.org
@@@ -18444,7 -18431,7 +18442,7 @@@ S: Supporte
  F: drivers/infiniband/sw/rdmavt
  RDS - RELIABLE DATAGRAM SOCKETS
- M: Santosh Shilimkar <santosh.shilimkar@oracle.com>
+ M: Allison Henderson <allison.henderson@oracle.com>
  L: netdev@vger.kernel.org
  L: linux-rdma@vger.kernel.org
  L: rds-devel@oss.oracle.com (moderated for non-subscribers)
diff --combined drivers/net/ethernet/engleder/tsnep_main.c
index 5bf6840c6701,64eadd320798..4b15af6b7122
--- a/drivers/net/ethernet/engleder/tsnep_main.c
+++ b/drivers/net/ethernet/engleder/tsnep_main.c
@@@ -229,10 -229,8 +229,10 @@@ static int tsnep_phy_loopback(struct ts
  	 * would delay a working loopback anyway, let's ensure that loopback
  	 * is working immediately by setting link mode directly
  	 */
- 	if (!retval && enable)
+ 	if (!retval && enable) {
+ 		netif_carrier_on(adapter->netdev);
  		tsnep_set_link_mode(adapter);
+ 	}
  	return retval;
  }
  
@@@ -240,7 -238,7 +240,7 @@@
  static int tsnep_phy_open(struct tsnep_adapter *adapter)
  {
  	struct phy_device *phydev;
- 	struct ethtool_eee ethtool_eee;
+ 	struct ethtool_keee ethtool_keee;
  	int retval;
  
  	retval = phy_connect_direct(adapter->netdev, adapter->phydev,
@@@ -259,8 -257,8 +259,8 @@@
  	phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_1000baseT_Half_BIT);
  
  	/* disable EEE autoneg, EEE not supported by TSNEP */
- 	memset(&ethtool_eee, 0, sizeof(ethtool_eee));
- 	phy_ethtool_set_eee(adapter->phydev, &ethtool_eee);
+ 	memset(&ethtool_keee, 0, sizeof(ethtool_keee));
+ 	phy_ethtool_set_eee(adapter->phydev, &ethtool_keee);
  
  	adapter->phydev->irq = PHY_MAC_INTERRUPT;
  	phy_start(adapter->phydev);
@@@ -721,17 -719,25 +721,25 @@@ static void tsnep_xdp_xmit_flush(struc
  static bool tsnep_xdp_xmit_back(struct tsnep_adapter *adapter,
  				struct xdp_buff *xdp,
- 				struct netdev_queue *tx_nq, struct tsnep_tx *tx)
+ 				struct netdev_queue *tx_nq, struct tsnep_tx *tx,
+ 				bool zc)
  {
  	struct xdp_frame *xdpf = xdp_convert_buff_to_frame(xdp);
  	bool xmit;
+ 	u32 type;
  
  	if (unlikely(!xdpf))
  		return false;
  
+ 	/* no page pool for zero copy */
+ 	if (zc)
+ 		type = TSNEP_TX_TYPE_XDP_NDO;
+ 	else
+ 		type = TSNEP_TX_TYPE_XDP_TX;
+ 
  	__netif_tx_lock(tx_nq, smp_processor_id());
  
- 	xmit = tsnep_xdp_xmit_frame_ring(xdpf, tx, TSNEP_TX_TYPE_XDP_TX);
+ 	xmit = tsnep_xdp_xmit_frame_ring(xdpf, tx, type);
  
  	/* Avoid transmit queue timeout since we share it with the slow path */
  	if (xmit)
@@@ -1260,14 -1266,6 +1268,14 @@@ static int tsnep_rx_refill_zc(struct ts
  	return desc_refilled;
  }
  
 +static void tsnep_xsk_rx_need_wakeup(struct tsnep_rx *rx, int desc_available)
 +{
 +	if (desc_available)
 +		xsk_set_rx_need_wakeup(rx->xsk_pool);
 +	else
 +		xsk_clear_rx_need_wakeup(rx->xsk_pool);
 +}
 +
  static bool tsnep_xdp_run_prog(struct tsnep_rx *rx, struct bpf_prog *prog,
  			       struct xdp_buff *xdp, int *status,
  			       struct netdev_queue *tx_nq, struct tsnep_tx *tx)
@@@ -1283,7 -1281,7 +1291,7 @@@
  	case XDP_PASS:
  		return false;
  	case XDP_TX:
- 		if (!tsnep_xdp_xmit_back(rx->adapter, xdp, tx_nq, tx))
+ 		if (!tsnep_xdp_xmit_back(rx->adapter, xdp, tx_nq, tx, false))
  			goto out_failure;
  		*status |= TSNEP_XDP_TX;
  		return true;
@@@ -1333,7 -1331,7 +1341,7 @@@ static bool tsnep_xdp_run_prog_zc(struc
  	case XDP_PASS:
  		return false;
  	case XDP_TX:
- 		if (!tsnep_xdp_xmit_back(rx->adapter, xdp, tx_nq, tx))
+ 		if (!tsnep_xdp_xmit_back(rx->adapter, xdp, tx_nq, tx, true))
  			goto out_failure;
  		*status |= TSNEP_XDP_TX;
  		return true;
@@@ -1629,7 -1627,10 +1637,7 @@@ static int tsnep_rx_poll_zc(struct tsne
  	desc_available -= tsnep_rx_refill_zc(rx, desc_available, false);
  
  	if (xsk_uses_need_wakeup(rx->xsk_pool)) {
- 		if (desc_available)
- 			xsk_set_rx_need_wakeup(rx->xsk_pool);
- 		else
- 			xsk_clear_rx_need_wakeup(rx->xsk_pool);
+ 		tsnep_xsk_rx_need_wakeup(rx, desc_available);
  
  		return done;
  	}
@@@ -1774,8 -1775,14 +1782,8 @@@ static void tsnep_rx_reopen_xsk(struct
  	 * first polling would be too late as need wakeup signalisation would
  	 * be delayed for an indefinite time
  	 */
- 	if (xsk_uses_need_wakeup(rx->xsk_pool)) {
- 		int desc_available = tsnep_rx_desc_available(rx);
- 
- 		if (desc_available)
- 			xsk_set_rx_need_wakeup(rx->xsk_pool);
- 		else
- 			xsk_clear_rx_need_wakeup(rx->xsk_pool);
- 	}
+ 	if (xsk_uses_need_wakeup(rx->xsk_pool))
+ 		tsnep_xsk_rx_need_wakeup(rx, tsnep_rx_desc_available(rx));
  }
  
  static bool tsnep_pending(struct tsnep_queue *queue)
@@@ -2563,7 -2570,8 +2571,7 @@@ static int tsnep_probe(struct platform_
  	mutex_init(&adapter->rxnfc_lock);
  	INIT_LIST_HEAD(&adapter->rxnfc_rules);
  
- 	io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- 	adapter->addr = devm_ioremap_resource(&pdev->dev, io);
+ 	adapter->addr = devm_platform_get_and_ioremap_resource(pdev, 0, &io);
  	if (IS_ERR(adapter->addr))
  		return PTR_ERR(adapter->addr);
  	netdev->mem_start = io->start;
diff --combined drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
index da3573b0dfc2,8cfd74ad991c..8190d23a6a49
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
@@@ -417,7 -417,7 +417,7 @@@ static void npc_fixup_vf_rule(struct rv
  	owner = mcam->entry2pfvf_map[index];
  	target_func = (entry->action >> 4) & 0xffff;
  	/* do nothing when target is LBK/PF or owner is not PF */
- 	if (is_pffunc_af(owner) || is_afvf(target_func) ||
+ 	if (is_pffunc_af(owner) || is_lbk_vf(rvu, target_func) ||
  	    (owner & RVU_PFVF_FUNC_MASK) ||
  	    !(target_func & RVU_PFVF_FUNC_MASK))
  		return;
@@@ -626,7 -626,7 +626,7 @@@ void rvu_npc_install_ucast_entry(struc
  	int blkaddr, index;
  
  	/* AF's and SDP VFs work in promiscuous mode */
- 	if (is_afvf(pcifunc) || is_sdp_vf(pcifunc))
+ 	if (is_lbk_vf(rvu, pcifunc) || is_sdp_vf(rvu, pcifunc))
  		return;
  
  	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
@@@ -791,7 -791,7 +791,7 @@@ void rvu_npc_install_bcast_match_entry(
  		return;
  
  	/* Skip LBK VFs */
- 	if (is_afvf(pcifunc))
+ 	if (is_lbk_vf(rvu, pcifunc))
  		return;
  
  	/* If pkt replication is not supported,
@@@ -871,7 -871,7 +871,7 @@@ void rvu_npc_install_allmulti_entry(str
  	u16 vf_func;
  
  	/* Only CGX PF/VF can add allmulticast entry */
- 	if (is_afvf(pcifunc) && is_sdp_vf(pcifunc))
+ 	if (is_lbk_vf(rvu, pcifunc) && is_sdp_vf(rvu, pcifunc))
  		return;
  
  	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
@@@ -1850,8 -1850,8 +1850,8 @@@ void npc_mcam_rsrcs_deinit(struct rvu *
  {
  	struct npc_mcam *mcam = &rvu->hw->mcam;
  
- 	kfree(mcam->bmap);
- 	kfree(mcam->bmap_reverse);
+ 	bitmap_free(mcam->bmap);
+ 	bitmap_free(mcam->bmap_reverse);
  	kfree(mcam->entry2pfvf_map);
  	kfree(mcam->cntr2pfvf_map);
  	kfree(mcam->entry2cntr_map);
@@@ -1904,21 -1904,20 +1904,20 @@@ int npc_mcam_rsrcs_init(struct rvu *rvu
  	mcam->pf_offset = mcam->nixlf_offset + nixlf_count;
  
  	/* Allocate bitmaps for managing MCAM entries */
- 	mcam->bmap = kmalloc_array(BITS_TO_LONGS(mcam->bmap_entries),
- 				   sizeof(long), GFP_KERNEL);
+ 	mcam->bmap = bitmap_zalloc(mcam->bmap_entries, GFP_KERNEL);
  	if (!mcam->bmap)
  		return -ENOMEM;
  
- 	mcam->bmap_reverse = kmalloc_array(BITS_TO_LONGS(mcam->bmap_entries),
- 					   sizeof(long), GFP_KERNEL);
+ 	mcam->bmap_reverse = bitmap_zalloc(mcam->bmap_entries, GFP_KERNEL);
  	if (!mcam->bmap_reverse)
  		goto free_bmap;
  
  	mcam->bmap_fcnt = mcam->bmap_entries;
  
  	/* Alloc memory for saving entry to RVU PFFUNC allocation mapping */
- 	mcam->entry2pfvf_map = kmalloc_array(mcam->bmap_entries,
- 					     sizeof(u16), GFP_KERNEL);
+ 	mcam->entry2pfvf_map = kcalloc(mcam->bmap_entries, sizeof(u16),
+ 				       GFP_KERNEL);
+ 
  	if (!mcam->entry2pfvf_map)
  		goto free_bmap_reverse;

@@@ -1941,21 -1940,21 +1940,21 @@@
  	if (err)
  		goto free_entry_map;
  
- 	mcam->cntr2pfvf_map = kmalloc_array(mcam->counters.max,
- 					    sizeof(u16), GFP_KERNEL);
+ 	mcam->cntr2pfvf_map = kcalloc(mcam->counters.max, sizeof(u16),
+ 				      GFP_KERNEL);
  	if (!mcam->cntr2pfvf_map)
  		goto free_cntr_bmap;
  
  	/* Alloc memory for MCAM entry to counter mapping and for tracking
  	 * counter's reference count.
  	 */
- 	mcam->entry2cntr_map = kmalloc_array(mcam->bmap_entries,
- 					     sizeof(u16), GFP_KERNEL);
+ 	mcam->entry2cntr_map = kcalloc(mcam->bmap_entries, sizeof(u16),
+ 				       GFP_KERNEL);
  	if (!mcam->entry2cntr_map)
  		goto free_cntr_map;
  
- 	mcam->cntr_refcnt = kmalloc_array(mcam->counters.max,
- 					  sizeof(u16), GFP_KERNEL);
+ 	mcam->cntr_refcnt = kcalloc(mcam->counters.max, sizeof(u16),
+ 				    GFP_KERNEL);
  	if (!mcam->cntr_refcnt)
  		goto free_entry_cntr_map;

@@@ -1988,9 -1987,9 +1987,9 @@@ free_cntr_bmap
  free_entry_map:
  	kfree(mcam->entry2pfvf_map);
  free_bmap_reverse:
- 	kfree(mcam->bmap_reverse);
+ 	bitmap_free(mcam->bmap_reverse);
  free_bmap:
- 	kfree(mcam->bmap);
+ 	bitmap_free(mcam->bmap);
  
  	return -ENOMEM;
  }
diff --combined drivers/net/ethernet/stmicro/stmmac/common.h
index 70bef7811c91,5ba606a596e7..07fe852001a0
--- a/drivers/net/ethernet/stmicro/stmmac/common.h
+++ b/drivers/net/ethernet/stmicro/stmmac/common.h
@@@ -59,28 -59,51 +59,51 @@@
  #undef FRAME_FILTER_DEBUG
  /* #define FRAME_FILTER_DEBUG */
  
+ struct stmmac_q_tx_stats {
+ 	u64_stats_t tx_bytes;
+ 	u64_stats_t tx_set_ic_bit;
+ 	u64_stats_t tx_tso_frames;
+ 	u64_stats_t tx_tso_nfrags;
+ };
+ 
+ struct stmmac_napi_tx_stats {
+ 	u64_stats_t tx_packets;
+ 	u64_stats_t tx_pkt_n;
+ 	u64_stats_t poll;
+ 	u64_stats_t tx_clean;
+ 	u64_stats_t tx_set_ic_bit;
+ };
+ 
  struct stmmac_txq_stats {
- 	u64 tx_bytes;
- 	u64 tx_packets;
- 	u64 tx_pkt_n;
- 	u64 tx_normal_irq_n;
- 	u64 napi_poll;
- 	u64 tx_clean;
- 	u64 tx_set_ic_bit;
- 	u64 tx_tso_frames;
- 	u64 tx_tso_nfrags;
- 	struct u64_stats_sync syncp;
+ 	/* Updates protected by tx queue lock. */
+ 	struct u64_stats_sync q_syncp;
+ 	struct stmmac_q_tx_stats q;
+ 
+ 	/* Updates protected by NAPI poll logic. */
+ 	struct u64_stats_sync napi_syncp;
+ 	struct stmmac_napi_tx_stats napi;
  } ____cacheline_aligned_in_smp;
  
+ struct stmmac_napi_rx_stats {
+ 	u64_stats_t rx_bytes;
+ 	u64_stats_t rx_packets;
+ 	u64_stats_t rx_pkt_n;
+ 	u64_stats_t poll;
+ };
+ 
  struct stmmac_rxq_stats {
- 	u64 rx_bytes;
- 	u64 rx_packets;
- 	u64 rx_pkt_n;
- 	u64 rx_normal_irq_n;
- 	u64 napi_poll;
- 	struct u64_stats_sync syncp;
+ 	/* Updates protected by NAPI poll logic. */
+ 	struct u64_stats_sync napi_syncp;
+ 	struct stmmac_napi_rx_stats napi;
  } ____cacheline_aligned_in_smp;
  
+ /* Updates on each CPU protected by not allowing nested irqs. */
+ struct stmmac_pcpu_stats {
+ 	struct u64_stats_sync syncp;
+ 	u64_stats_t rx_normal_irq_n[MTL_MAX_TX_QUEUES];
+ 	u64_stats_t tx_normal_irq_n[MTL_MAX_RX_QUEUES];
+ };
+ 
  /* Extra statistic and debug information exposed by ethtool */
  struct stmmac_extra_stats {
  	/* Transmit errors */
@@@ -202,11 -225,10 +225,12 @@@
  	unsigned long mtl_est_hlbf;
  	unsigned long mtl_est_btre;
  	unsigned long mtl_est_btrlm;
+ 	unsigned long max_sdu_txq_drop[MTL_MAX_TX_QUEUES];
+ 	unsigned long mtl_est_txq_hlbf[MTL_MAX_TX_QUEUES];
  	/* per queue statistics */
  	struct stmmac_txq_stats txq_stats[MTL_MAX_TX_QUEUES];
  	struct stmmac_rxq_stats rxq_stats[MTL_MAX_RX_QUEUES];
+ 	struct stmmac_pcpu_stats __percpu *pcpu_stats;
  	unsigned long rx_dropped;
  	unsigned long rx_errors;
  	unsigned long tx_dropped;
@@@ -218,6 -240,7 +242,7 @@@ struct stmmac_safety_stats
  	unsigned long mac_errors[32];
  	unsigned long mtl_errors[32];
  	unsigned long dma_errors[32];
+ 	unsigned long dma_dpp_errors[32];
  };
  
  /* Number of fields in Safety Stats */
diff --combined drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
index 411c3ac8cb17,ec44becf0e2d..0e44b84fb7e7
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
@@@ -549,44 -549,79 +549,79 @@@ stmmac_set_pauseparam(struct net_devic
  	}
  }
  
+ static u64 stmmac_get_rx_normal_irq_n(struct stmmac_priv *priv, int q)
+ {
+ 	u64 total;
+ 	int cpu;
+ 
+ 	total = 0;
+ 	for_each_possible_cpu(cpu) {
+ 		struct stmmac_pcpu_stats *pcpu;
+ 		unsigned int start;
+ 		u64 irq_n;
+ 
+ 		pcpu = per_cpu_ptr(priv->xstats.pcpu_stats, cpu);
+ 		do {
+ 			start = u64_stats_fetch_begin(&pcpu->syncp);
+ 			irq_n = u64_stats_read(&pcpu->rx_normal_irq_n[q]);
+ 		} while (u64_stats_fetch_retry(&pcpu->syncp, start));
+ 		total += irq_n;
+ 	}
+ 	return total;
+ }
+ 
+ static u64 stmmac_get_tx_normal_irq_n(struct stmmac_priv *priv, int q)
+ {
+ 	u64 total;
+ 	int cpu;
+ 
+ 	total = 0;
+ 	for_each_possible_cpu(cpu) {
+ 		struct stmmac_pcpu_stats *pcpu;
+ 		unsigned int start;
+ 		u64 irq_n;
+ 
+ 		pcpu = per_cpu_ptr(priv->xstats.pcpu_stats, cpu);
+ 		do {
+ 			start = u64_stats_fetch_begin(&pcpu->syncp);
+ 			irq_n = u64_stats_read(&pcpu->tx_normal_irq_n[q]);
+ 		} while (u64_stats_fetch_retry(&pcpu->syncp, start));
+ 		total += irq_n;
+ 	}
+ 	return total;
+ }
+ 
  static void stmmac_get_per_qstats(struct stmmac_priv *priv, u64 *data)
  {
  	u32 tx_cnt = priv->plat->tx_queues_to_use;
  	u32 rx_cnt = priv->plat->rx_queues_to_use;
  	unsigned int start;
- 	int q, stat;
- 	char *p;
+ 	int q;
  
  	for (q = 0; q < tx_cnt; q++) {
  		struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[q];
- 		struct stmmac_txq_stats snapshot;
+ 		u64 pkt_n;
  
  		do {
- 			start = u64_stats_fetch_begin(&txq_stats->syncp);
- 			snapshot = *txq_stats;
- 		} while (u64_stats_fetch_retry(&txq_stats->syncp, start));
- 
- 		p = (char *)&snapshot + offsetof(struct stmmac_txq_stats, tx_pkt_n);
- 		for (stat = 0; stat < STMMAC_TXQ_STATS; stat++) {
- 			*data++ = (*(u64 *)p);
- 			p += sizeof(u64);
- 		}
+ 			start = u64_stats_fetch_begin(&txq_stats->napi_syncp);
+ 			pkt_n = u64_stats_read(&txq_stats->napi.tx_pkt_n);
+ 		} while (u64_stats_fetch_retry(&txq_stats->napi_syncp, start));
+ 
+ 		*data++ = pkt_n;
+ 		*data++ = stmmac_get_tx_normal_irq_n(priv, q);
  	}
  
  	for (q = 0; q < rx_cnt; q++) {
  		struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[q];
- 		struct stmmac_rxq_stats snapshot;
+ 		u64 pkt_n;
  
  		do {
- 			start = u64_stats_fetch_begin(&rxq_stats->syncp);
- 			snapshot = *rxq_stats;
- 		} while (u64_stats_fetch_retry(&rxq_stats->syncp, start));
- 
- 		p = (char *)&snapshot + offsetof(struct stmmac_rxq_stats, rx_pkt_n);
- 		for (stat = 0; stat < STMMAC_RXQ_STATS; stat++) {
- 			*data++ = (*(u64 *)p);
- 			p += sizeof(u64);
- 		}
+ 			start = u64_stats_fetch_begin(&rxq_stats->napi_syncp);
+ 			pkt_n = u64_stats_read(&rxq_stats->napi.rx_pkt_n);
+ 		} while (u64_stats_fetch_retry(&rxq_stats->napi_syncp, start));
+ 
+ 		*data++ = pkt_n;
+ 		*data++ = stmmac_get_rx_normal_irq_n(priv, q);
  	}
  }
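
(The stmmac rework above follows the standard kernel recipe for tearing-free
64-bit counters: one u64_stats_sync per writer context, and readers that loop
on fetch_begin/fetch_retry until they get a consistent snapshot. A minimal
illustrative sketch of that recipe — all names below are invented and are not
part of this diff:

    #include <linux/u64_stats_sync.h>

    struct demo_stats {
    	struct u64_stats_sync syncp;	/* guards the counters below */
    	u64_stats_t packets;
    	u64_stats_t bytes;
    };

    /* Writer: already serialised by its context, e.g. NAPI poll. */
    static void demo_update(struct demo_stats *s, unsigned int len)
    {
    	u64_stats_update_begin(&s->syncp);
    	u64_stats_inc(&s->packets);
    	u64_stats_add(&s->bytes, len);
    	u64_stats_update_end(&s->syncp);
    }

    /* Reader: the retry loop yields a torn-free value on 32-bit too. */
    static u64 demo_read_bytes(struct demo_stats *s)
    {
    	unsigned int start;
    	u64 bytes;

    	do {
    		start = u64_stats_fetch_begin(&s->syncp);
    		bytes = u64_stats_read(&s->bytes);
    	} while (u64_stats_fetch_retry(&s->syncp, start));

    	return bytes;
    }

On 64-bit kernels these helpers compile down to plain loads and stores; the
seqcount only exists on configurations where 64-bit reads can tear.)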
@@@ -645,39 -680,49 +680,49 @@@ static void stmmac_get_ethtool_stats(st
  	pos = j;
  	for (i = 0; i < rx_queues_count; i++) {
  		struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[i];
- 		struct stmmac_rxq_stats snapshot;
+ 		struct stmmac_napi_rx_stats snapshot;
+ 		u64 n_irq;
  
  		j = pos;
  		do {
- 			start = u64_stats_fetch_begin(&rxq_stats->syncp);
- 			snapshot = *rxq_stats;
- 		} while (u64_stats_fetch_retry(&rxq_stats->syncp, start));
- 
- 		data[j++] += snapshot.rx_pkt_n;
- 		data[j++] += snapshot.rx_normal_irq_n;
- 		normal_irq_n += snapshot.rx_normal_irq_n;
- 		napi_poll += snapshot.napi_poll;
+ 			start = u64_stats_fetch_begin(&rxq_stats->napi_syncp);
+ 			snapshot = rxq_stats->napi;
+ 		} while (u64_stats_fetch_retry(&rxq_stats->napi_syncp, start));
+ 
+ 		data[j++] += u64_stats_read(&snapshot.rx_pkt_n);
+ 		n_irq = stmmac_get_rx_normal_irq_n(priv, i);
+ 		data[j++] += n_irq;
+ 		normal_irq_n += n_irq;
+ 		napi_poll += u64_stats_read(&snapshot.poll);
  	}
  
  	pos = j;
  	for (i = 0; i < tx_queues_count; i++) {
  		struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[i];
- 		struct stmmac_txq_stats snapshot;
+ 		struct stmmac_napi_tx_stats napi_snapshot;
+ 		struct stmmac_q_tx_stats q_snapshot;
+ 		u64 n_irq;
  
  		j = pos;
  		do {
- 			start = u64_stats_fetch_begin(&txq_stats->syncp);
- 			snapshot = *txq_stats;
- 		} while (u64_stats_fetch_retry(&txq_stats->syncp, start));
- 
- 		data[j++] += snapshot.tx_pkt_n;
- 		data[j++] += snapshot.tx_normal_irq_n;
- 		normal_irq_n += snapshot.tx_normal_irq_n;
- 		data[j++] += snapshot.tx_clean;
- 		data[j++] += snapshot.tx_set_ic_bit;
- 		data[j++] += snapshot.tx_tso_frames;
- 		data[j++] += snapshot.tx_tso_nfrags;
- 		napi_poll += snapshot.napi_poll;
+ 			start = u64_stats_fetch_begin(&txq_stats->q_syncp);
+ 			q_snapshot = txq_stats->q;
+ 		} while (u64_stats_fetch_retry(&txq_stats->q_syncp, start));
+ 		do {
+ 			start = u64_stats_fetch_begin(&txq_stats->napi_syncp);
+ 			napi_snapshot = txq_stats->napi;
+ 		} while (u64_stats_fetch_retry(&txq_stats->napi_syncp, start));
+ 
+ 		data[j++] += u64_stats_read(&napi_snapshot.tx_pkt_n);
+ 		n_irq = stmmac_get_tx_normal_irq_n(priv, i);
+ 		data[j++] += n_irq;
+ 		normal_irq_n += n_irq;
+ 		data[j++] += u64_stats_read(&napi_snapshot.tx_clean);
+ 		data[j++] += u64_stats_read(&q_snapshot.tx_set_ic_bit) +
+ 			u64_stats_read(&napi_snapshot.tx_set_ic_bit);
+ 		data[j++] += u64_stats_read(&q_snapshot.tx_tso_frames);
+ 		data[j++] += u64_stats_read(&q_snapshot.tx_tso_nfrags);
+ 		napi_poll += u64_stats_read(&napi_snapshot.poll);
  	}
  	normal_irq_n += priv->xstats.rx_early_irq;
  	data[j++] = normal_irq_n;
@@@ -852,13 -897,15 +897,13 @@@ static int stmmac_set_wol(struct net_de
  	}
  
  static int stmmac_ethtool_op_get_eee(struct net_device *dev,
- 				     struct ethtool_eee *edata)
+ 				     struct ethtool_keee *edata)
  {
  	struct stmmac_priv *priv = netdev_priv(dev);
  
  	if (!priv->dma_cap.eee)
  		return -EOPNOTSUPP;
  
- 	edata->eee_enabled = priv->eee_enabled;
- 	edata->eee_active = priv->eee_active;
  	edata->tx_lpi_timer = priv->tx_lpi_timer;
  	edata->tx_lpi_enabled = priv->tx_lpi_enabled;

@@@ -866,7 -913,7 +911,7 @@@
  }
  
  static int stmmac_ethtool_op_set_eee(struct net_device *dev,
- 				     struct ethtool_eee *edata)
+ 				     struct ethtool_keee *edata)
  {
  	struct stmmac_priv *priv = netdev_priv(dev);
  	int ret;
diff --combined drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 04d817dc5899,75d029704503..5236956cc7e4
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@@ -2482,7 -2482,6 +2482,6 @@@ static bool stmmac_xdp_xmit_zc(struct s
  	struct xdp_desc xdp_desc;
  	bool work_done = true;
  	u32 tx_set_ic_bit = 0;
- 	unsigned long flags;
  
  	/* Avoids TX time-out as we are sharing with slow path */
  	txq_trans_cond_update(nq);
@@@ -2507,13 -2506,6 +2506,13 @@@
  		if (!xsk_tx_peek_desc(pool, &xdp_desc))
  			break;
  
+ 		if (priv->plat->est && priv->plat->est->enable &&
+ 		    priv->plat->est->max_sdu[queue] &&
+ 		    xdp_desc.len > priv->plat->est->max_sdu[queue]) {
+ 			priv->xstats.max_sdu_txq_drop[queue]++;
+ 			continue;
+ 		}
+ 
  		if (likely(priv->extend_desc))
  			tx_desc = (struct dma_desc *)(tx_q->dma_etx + entry);
  		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
@@@ -2573,9 -2565,9 +2572,9 @@@
  		tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size);
  		entry = tx_q->cur_tx;
  	}
- 	flags = u64_stats_update_begin_irqsave(&txq_stats->syncp);
- 	txq_stats->tx_set_ic_bit += tx_set_ic_bit;
- 	u64_stats_update_end_irqrestore(&txq_stats->syncp, flags);
+ 	u64_stats_update_begin(&txq_stats->napi_syncp);
+ 	u64_stats_add(&txq_stats->napi.tx_set_ic_bit, tx_set_ic_bit);
+ 	u64_stats_update_end(&txq_stats->napi_syncp);
  
  	if (tx_desc) {
  		stmmac_flush_tx_descriptors(priv, queue);
@@@ -2623,7 -2615,6 +2622,6 @@@ static int stmmac_tx_clean(struct stmma
  	unsigned int bytes_compl = 0, pkts_compl = 0;
  	unsigned int entry, xmits = 0, count = 0;
  	u32 tx_packets = 0, tx_errors = 0;
- 	unsigned long flags;
  
  	__netif_tx_lock_bh(netdev_get_tx_queue(priv->dev, queue));

@@@ -2789,11 -2780,11 +2787,11 @@@
  	if (tx_q->dirty_tx != tx_q->cur_tx)
  		*pending_packets = true;
  
- 	flags = u64_stats_update_begin_irqsave(&txq_stats->syncp);
- 	txq_stats->tx_packets += tx_packets;
- 	txq_stats->tx_pkt_n += tx_packets;
- 	txq_stats->tx_clean++;
- 	u64_stats_update_end_irqrestore(&txq_stats->syncp, flags);
+ 	u64_stats_update_begin(&txq_stats->napi_syncp);
+ 	u64_stats_add(&txq_stats->napi.tx_packets, tx_packets);
+ 	u64_stats_add(&txq_stats->napi.tx_pkt_n, tx_packets);
+ 	u64_stats_inc(&txq_stats->napi.tx_clean);
+ 	u64_stats_update_end(&txq_stats->napi_syncp);
  
  	priv->xstats.tx_errors += tx_errors;

@@@ -4220,7 -4211,6 +4218,6 @@@ static netdev_tx_t stmmac_tso_xmit(stru
  	struct stmmac_tx_queue *tx_q;
  	bool has_vlan, set_ic;
  	u8 proto_hdr_len, hdr;
- 	unsigned long flags;
  	u32 pay_len, mss;
  	dma_addr_t des;
  	int i;
@@@ -4385,13 -4375,13 +4382,13 @@@
  		netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
  	}
  
- 	flags = u64_stats_update_begin_irqsave(&txq_stats->syncp);
- 	txq_stats->tx_bytes += skb->len;
- 	txq_stats->tx_tso_frames++;
- 	txq_stats->tx_tso_nfrags += nfrags;
+ 	u64_stats_update_begin(&txq_stats->q_syncp);
+ 	u64_stats_add(&txq_stats->q.tx_bytes, skb->len);
+ 	u64_stats_inc(&txq_stats->q.tx_tso_frames);
+ 	u64_stats_add(&txq_stats->q.tx_tso_nfrags, nfrags);
  	if (set_ic)
- 		txq_stats->tx_set_ic_bit++;
- 	u64_stats_update_end_irqrestore(&txq_stats->syncp, flags);
+ 		u64_stats_inc(&txq_stats->q.tx_set_ic_bit);
+ 	u64_stats_update_end(&txq_stats->q_syncp);
  
  	if (priv->sarc_type)
  		stmmac_set_desc_sarc(priv, first, priv->sarc_type);
@@@ -4490,7 -4480,6 +4487,6 @@@ static netdev_tx_t stmmac_xmit(struct s
  	struct stmmac_tx_queue *tx_q;
  	bool has_vlan, set_ic;
  	int entry, first_tx;
- 	unsigned long flags;
  	dma_addr_t des;
  
  	tx_q = &priv->dma_conf.tx_queue[queue];
@@@ -4508,13 -4497,6 +4504,13 @@@
  		return stmmac_tso_xmit(skb, dev);
  	}
  
+ 	if (priv->plat->est && priv->plat->est->enable &&
+ 	    priv->plat->est->max_sdu[queue] &&
+ 	    skb->len > priv->plat->est->max_sdu[queue]){
+ 		priv->xstats.max_sdu_txq_drop[queue]++;
+ 		goto max_sdu_err;
+ 	}
+ 
  	if (unlikely(stmmac_tx_avail(priv, queue) < nfrags + 1)) {
  		if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
  			netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
@@@ -4667,11 -4649,11 +4663,11 @@@
  		netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
  	}
  
- 	flags = u64_stats_update_begin_irqsave(&txq_stats->syncp);
- 	txq_stats->tx_bytes += skb->len;
+ 	u64_stats_update_begin(&txq_stats->q_syncp);
+ 	u64_stats_add(&txq_stats->q.tx_bytes, skb->len);
  	if (set_ic)
- 		txq_stats->tx_set_ic_bit++;
- 	u64_stats_update_end_irqrestore(&txq_stats->syncp, flags);
+ 		u64_stats_inc(&txq_stats->q.tx_set_ic_bit);
+ 	u64_stats_update_end(&txq_stats->q_syncp);
  
  	if (priv->sarc_type)
  		stmmac_set_desc_sarc(priv, first, priv->sarc_type);
@@@ -4732,7 -4714,6 +4728,7 @@@
  
  dma_map_err:
  	netdev_err(priv->dev, "Tx DMA map failed\n");
 +max_sdu_err:
  	dev_kfree_skb(skb);
  	priv->xstats.tx_dropped++;
  	return NETDEV_TX_OK;
@@@ -4889,13 -4870,6 +4885,13 @@@ static int stmmac_xdp_xmit_xdpf(struct
  	if (stmmac_tx_avail(priv, queue) < STMMAC_TX_THRESH(priv))
  		return STMMAC_XDP_CONSUMED;
  
+ 	if (priv->plat->est && priv->plat->est->enable &&
+ 	    priv->plat->est->max_sdu[queue] &&
+ 	    xdpf->len > priv->plat->est->max_sdu[queue]) {
+ 		priv->xstats.max_sdu_txq_drop[queue]++;
+ 		return STMMAC_XDP_CONSUMED;
+ 	}
+ 
  	if (likely(priv->extend_desc))
  		tx_desc = (struct dma_desc *)(tx_q->dma_etx + entry);
  	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
@@@ -4943,12 -4917,11 +4939,11 @@@
  		set_ic = false;
  
  	if (set_ic) {
- 		unsigned long flags;
  		tx_q->tx_count_frames = 0;
  		stmmac_set_tx_ic(priv, tx_desc);
- 		flags = u64_stats_update_begin_irqsave(&txq_stats->syncp);
- 		txq_stats->tx_set_ic_bit++;
- 		u64_stats_update_end_irqrestore(&txq_stats->syncp, flags);
+ 		u64_stats_update_begin(&txq_stats->q_syncp);
+ 		u64_stats_inc(&txq_stats->q.tx_set_ic_bit);
+ 		u64_stats_update_end(&txq_stats->q_syncp);
  	}
  
  	stmmac_enable_dma_transmission(priv, priv->ioaddr);
@@@ -5098,7 -5071,6 +5093,6 @@@ static void stmmac_dispatch_skb_zc(stru
  	unsigned int len = xdp->data_end - xdp->data;
  	enum pkt_hash_types hash_type;
  	int coe = priv->hw->rx_csum;
- 	unsigned long flags;
  	struct sk_buff *skb;
  	u32 hash;

@@@ -5128,10 -5100,10 +5122,10 @@@
  	skb_record_rx_queue(skb, queue);
  	napi_gro_receive(&ch->rxtx_napi, skb);
  
- 	flags = u64_stats_update_begin_irqsave(&rxq_stats->syncp);
- 	rxq_stats->rx_pkt_n++;
- 	rxq_stats->rx_bytes += len;
- 	u64_stats_update_end_irqrestore(&rxq_stats->syncp, flags);
+ 	u64_stats_update_begin(&rxq_stats->napi_syncp);
+ 	u64_stats_inc(&rxq_stats->napi.rx_pkt_n);
+ 	u64_stats_add(&rxq_stats->napi.rx_bytes, len);
+ 	u64_stats_update_end(&rxq_stats->napi_syncp);
  }
  
  static bool stmmac_rx_refill_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
@@@ -5213,7 -5185,6 +5207,6 @@@ static int stmmac_rx_zc(struct stmmac_p
  	unsigned int desc_size;
  	struct bpf_prog *prog;
  	bool failure = false;
- 	unsigned long flags;
  	int xdp_status = 0;
  	int status = 0;

@@@ -5368,9 -5339,9 +5361,9 @@@ read_again
  
  	stmmac_finalize_xdp_rx(priv, xdp_status);
  
- 	flags = u64_stats_update_begin_irqsave(&rxq_stats->syncp);
- 	rxq_stats->rx_pkt_n += count;
- 	u64_stats_update_end_irqrestore(&rxq_stats->syncp, flags);
+ 	u64_stats_update_begin(&rxq_stats->napi_syncp);
+ 	u64_stats_add(&rxq_stats->napi.rx_pkt_n, count);
+ 	u64_stats_update_end(&rxq_stats->napi_syncp);
  
  	priv->xstats.rx_dropped += rx_dropped;
  	priv->xstats.rx_errors += rx_errors;
@@@ -5408,7 -5379,6 +5401,6 @@@ static int stmmac_rx(struct stmmac_pri
  	unsigned int desc_size;
  	struct sk_buff *skb = NULL;
  	struct stmmac_xdp_buff ctx;
- 	unsigned long flags;
  	int xdp_status = 0;
  	int buf_sz;

@@@ -5668,11 -5638,11 +5660,11 @@@ drain_data
  
  	stmmac_rx_refill(priv, queue);
  
- 	flags = u64_stats_update_begin_irqsave(&rxq_stats->syncp);
- 	rxq_stats->rx_packets += rx_packets;
- 	rxq_stats->rx_bytes += rx_bytes;
- 	rxq_stats->rx_pkt_n += count;
- 	u64_stats_update_end_irqrestore(&rxq_stats->syncp, flags);
+ 	u64_stats_update_begin(&rxq_stats->napi_syncp);
+ 	u64_stats_add(&rxq_stats->napi.rx_packets, rx_packets);
+ 	u64_stats_add(&rxq_stats->napi.rx_bytes, rx_bytes);
+ 	u64_stats_add(&rxq_stats->napi.rx_pkt_n, count);
+ 	u64_stats_update_end(&rxq_stats->napi_syncp);
  
  	priv->xstats.rx_dropped += rx_dropped;
  	priv->xstats.rx_errors += rx_errors;
@@@ -5687,13 -5657,12 +5679,12 @@@ static int stmmac_napi_poll_rx(struct n
  	struct stmmac_priv *priv = ch->priv_data;
  	struct stmmac_rxq_stats *rxq_stats;
  	u32 chan = ch->index;
- 	unsigned long flags;
  	int work_done;
  
  	rxq_stats = &priv->xstats.rxq_stats[chan];
- 	flags = u64_stats_update_begin_irqsave(&rxq_stats->syncp);
- 	rxq_stats->napi_poll++;
- 	u64_stats_update_end_irqrestore(&rxq_stats->syncp, flags);
+ 	u64_stats_update_begin(&rxq_stats->napi_syncp);
+ 	u64_stats_inc(&rxq_stats->napi.poll);
+ 	u64_stats_update_end(&rxq_stats->napi_syncp);
  
  	work_done = stmmac_rx(priv, budget, chan);
  	if (work_done < budget && napi_complete_done(napi, work_done)) {
@@@ -5715,13 -5684,12 +5706,12 @@@ static int stmmac_napi_poll_tx(struct n
  	struct stmmac_txq_stats *txq_stats;
  	bool pending_packets = false;
  	u32 chan = ch->index;
- 	unsigned long flags;
  	int work_done;
  
  	txq_stats = &priv->xstats.txq_stats[chan];
- 	flags = u64_stats_update_begin_irqsave(&txq_stats->syncp);
- 	txq_stats->napi_poll++;
- 	u64_stats_update_end_irqrestore(&txq_stats->syncp, flags);
+ 	u64_stats_update_begin(&txq_stats->napi_syncp);
+ 	u64_stats_inc(&txq_stats->napi.poll);
+ 	u64_stats_update_end(&txq_stats->napi_syncp);
  
  	work_done = stmmac_tx_clean(priv, budget, chan, &pending_packets);
  	work_done = min(work_done, budget);
@@@ -5751,17 -5719,16 +5741,16 @@@ static int stmmac_napi_poll_rxtx(struc
  	struct stmmac_rxq_stats *rxq_stats;
  	struct stmmac_txq_stats *txq_stats;
  	u32 chan = ch->index;
- 	unsigned long flags;
  
  	rxq_stats = &priv->xstats.rxq_stats[chan];
- 	flags = u64_stats_update_begin_irqsave(&rxq_stats->syncp);
- 	rxq_stats->napi_poll++;
- 	u64_stats_update_end_irqrestore(&rxq_stats->syncp, flags);
+ 	u64_stats_update_begin(&rxq_stats->napi_syncp);
+ 	u64_stats_inc(&rxq_stats->napi.poll);
+ 	u64_stats_update_end(&rxq_stats->napi_syncp);
  
  	txq_stats = &priv->xstats.txq_stats[chan];
- 	flags = u64_stats_update_begin_irqsave(&txq_stats->syncp);
- 	txq_stats->napi_poll++;
- 	u64_stats_update_end_irqrestore(&txq_stats->syncp, flags);
+ 	u64_stats_update_begin(&txq_stats->napi_syncp);
+ 	u64_stats_inc(&txq_stats->napi.poll);
+ 	u64_stats_update_end(&txq_stats->napi_syncp);
  
  	tx_done = stmmac_tx_clean(priv, budget, chan, &tx_pending_packets);
  	tx_done = min(tx_done, budget);
@@@ -7087,10 -7054,13 +7076,13 @@@ static void stmmac_get_stats64(struct n
  		u64 tx_bytes;
  
  		do {
- 			start = u64_stats_fetch_begin(&txq_stats->syncp);
- 			tx_packets = txq_stats->tx_packets;
- 			tx_bytes = txq_stats->tx_bytes;
- 		} while (u64_stats_fetch_retry(&txq_stats->syncp, start));
+ 			start = u64_stats_fetch_begin(&txq_stats->q_syncp);
+ 			tx_bytes = u64_stats_read(&txq_stats->q.tx_bytes);
+ 		} while (u64_stats_fetch_retry(&txq_stats->q_syncp, start));
+ 		do {
+ 			start = u64_stats_fetch_begin(&txq_stats->napi_syncp);
+ 			tx_packets = u64_stats_read(&txq_stats->napi.tx_packets);
+ 		} while (u64_stats_fetch_retry(&txq_stats->napi_syncp, start));
  
  		stats->tx_packets += tx_packets;
  		stats->tx_bytes += tx_bytes;
@@@ -7102,10 -7072,10 +7094,10 @@@
  		u64 rx_bytes;
  
  		do {
- 			start = u64_stats_fetch_begin(&rxq_stats->syncp);
- 			rx_packets = rxq_stats->rx_packets;
- 			rx_bytes = rxq_stats->rx_bytes;
- 		} while (u64_stats_fetch_retry(&rxq_stats->syncp, start));
+ 			start = u64_stats_fetch_begin(&rxq_stats->napi_syncp);
+ 			rx_packets = u64_stats_read(&rxq_stats->napi.rx_packets);
+ 			rx_bytes = u64_stats_read(&rxq_stats->napi.rx_bytes);
+ 		} while (u64_stats_fetch_retry(&rxq_stats->napi_syncp, start));
  
  		stats->rx_packets += rx_packets;
  		stats->rx_bytes += rx_bytes;
@@@ -7499,9 -7469,16 +7491,16 @@@ int stmmac_dvr_probe(struct device *dev
  	priv->dev = ndev;
  
  	for (i = 0; i < MTL_MAX_RX_QUEUES; i++)
- 		u64_stats_init(&priv->xstats.rxq_stats[i].syncp);
- 	for (i = 0; i < MTL_MAX_TX_QUEUES; i++)
- 		u64_stats_init(&priv->xstats.txq_stats[i].syncp);
+ 		u64_stats_init(&priv->xstats.rxq_stats[i].napi_syncp);
+ 	for (i = 0; i < MTL_MAX_TX_QUEUES; i++) {
+ 		u64_stats_init(&priv->xstats.txq_stats[i].q_syncp);
+ 		u64_stats_init(&priv->xstats.txq_stats[i].napi_syncp);
+ 	}
+ 
+ 	priv->xstats.pcpu_stats =
+ 		devm_netdev_alloc_pcpu_stats(device, struct stmmac_pcpu_stats);
+ 	if (!priv->xstats.pcpu_stats)
+ 		return -ENOMEM;
  
  	stmmac_set_ethtool_ops(ndev);
  	priv->pause = pause;
diff --combined drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
index 736b2ada6f59,28d6a30cc010..d0cb39278cb9
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
@@@ -32,7 -32,6 +32,7 @@@
  #include "vendor.h"
  #include "bus.h"
  #include "common.h"
 +#include "fwvid.h"
  
  #define BRCMF_SCAN_IE_LEN_MAX	2048

@@@ -1180,7 -1179,8 +1180,7 @@@ s32 brcmf_notify_escan_complete(struct
  	scan_request = cfg->scan_request;
  	cfg->scan_request = NULL;
  
- 	if (timer_pending(&cfg->escan_timeout))
- 		del_timer_sync(&cfg->escan_timeout);
+ 	timer_delete_sync(&cfg->escan_timeout);
  
  	if (fw_abort) {
  		/* Do a scan abort to stop the driver's scan engine */
@@@ -1687,39 -1687,52 +1687,39 @@@ static u16 brcmf_map_fw_linkdown_reason
  	return reason;
  }
  
- static int brcmf_set_pmk(struct brcmf_if *ifp, const u8 *pmk_data, u16 pmk_len)
+ int brcmf_set_wsec(struct brcmf_if *ifp, const u8 *key, u16 key_len, u16 flags)
  {
  	struct brcmf_pub *drvr = ifp->drvr;
  	struct brcmf_wsec_pmk_le pmk;
  	int err;
  
+ 	if (key_len > sizeof(pmk.key)) {
+ 		bphy_err(drvr, "key must be less than %zu bytes\n",
+ 			 sizeof(pmk.key));
+ 		return -EINVAL;
+ 	}
+ 
  	memset(&pmk, 0, sizeof(pmk));
  
- 	/* pass pmk directly */
- 	pmk.key_len = cpu_to_le16(pmk_len);
- 	pmk.flags = cpu_to_le16(0);
- 	memcpy(pmk.key, pmk_data, pmk_len);
+ 	/* pass key material directly */
+ 	pmk.key_len = cpu_to_le16(key_len);
+ 	pmk.flags = cpu_to_le16(flags);
+ 	memcpy(pmk.key, key, key_len);
  
- 	/* store psk in firmware */
+ 	/* store key material in firmware */
  	err = brcmf_fil_cmd_data_set(ifp, BRCMF_C_SET_WSEC_PMK,
  				     &pmk, sizeof(pmk));
  	if (err < 0)
  		bphy_err(drvr, "failed to change PSK in firmware (len=%u)\n",
- 			 pmk_len);
+ 			 key_len);
  
  	return err;
  }
+ BRCMF_EXPORT_SYMBOL_GPL(brcmf_set_wsec);
  
- static int brcmf_set_sae_password(struct brcmf_if *ifp, const u8 *pwd_data,
- 				  u16 pwd_len)
+ static int brcmf_set_pmk(struct brcmf_if *ifp, const u8 *pmk_data, u16 pmk_len)
  {
- 	struct brcmf_pub *drvr = ifp->drvr;
- 	struct brcmf_wsec_sae_pwd_le sae_pwd;
- 	int err;
- 
- 	if (pwd_len > BRCMF_WSEC_MAX_SAE_PASSWORD_LEN) {
- 		bphy_err(drvr, "sae_password must be less than %d\n",
- 			 BRCMF_WSEC_MAX_SAE_PASSWORD_LEN);
- 		return -EINVAL;
- 	}
- 
- 	sae_pwd.key_len = cpu_to_le16(pwd_len);
- 	memcpy(sae_pwd.key, pwd_data, pwd_len);
- 
- 	err = brcmf_fil_iovar_data_set(ifp, "sae_password", &sae_pwd,
- 				       sizeof(sae_pwd));
- 	if (err < 0)
- 		bphy_err(drvr, "failed to set SAE password in firmware (len=%u)\n",
- 			 pwd_len);
- 
- 	return err;
+ 	return brcmf_set_wsec(ifp, pmk_data, pmk_len, 0);
  }
  
  static void brcmf_link_down(struct brcmf_cfg80211_vif *vif, u16 reason,
@@@ -2490,7 -2503,8 +2490,7 @@@ brcmf_cfg80211_connect(struct wiphy *wi
  		bphy_err(drvr, "failed to clean up user-space RSNE\n");
  		goto done;
  	}
- 	err = brcmf_set_sae_password(ifp, sme->crypto.sae_pwd,
- 				     sme->crypto.sae_pwd_len);
+ 	err = brcmf_fwvid_set_sae_password(ifp, &sme->crypto);
  	if (!err && sme->crypto.psk)
  		err = brcmf_set_pmk(ifp, sme->crypto.psk,
  				    BRCMF_WSEC_MAX_PSK_LEN);
@@@ -3067,7 -3081,7 +3067,7 @@@ brcmf_cfg80211_get_station_ibss(struct
  	struct brcmf_scb_val_le scbval;
  	struct brcmf_pktcnt_le pktcnt;
  	s32 err;
- 	u32 rate;
+ 	u32 rate = 0;
  	u32 rssi;
  
  	/* Get the current tx rate */
@@@ -3765,8 -3779,10 +3765,10 @@@ static int brcmf_internal_escan_add_inf
  		if (req->channels[i] == chan)
  			break;
  	}
- 	if (i == req->n_channels)
- 		req->channels[req->n_channels++] = chan;
+ 	if (i == req->n_channels) {
+ 		req->n_channels++;
+ 		req->channels[i] = chan;
+ 	}
  
  	for (i = 0; i < req->n_ssids; i++) {
  		if (req->ssids[i].ssid_len == ssid_len &&
@@@ -5238,7 -5254,8 +5240,7 @@@ brcmf_cfg80211_start_ap(struct wiphy *w
  	if (crypto->sae_pwd) {
  		brcmf_dbg(INFO, "using SAE offload\n");
  		profile->use_fwauth |= BIT(BRCMF_PROFILE_FWAUTH_SAE);
- 		err = brcmf_set_sae_password(ifp, crypto->sae_pwd,
- 					     crypto->sae_pwd_len);
+ 		err = brcmf_fwvid_set_sae_password(ifp, crypto);
  		if (err < 0)
  			goto exit;
  	}
@@@ -5345,12 -5362,10 +5347,12 @@@ static int brcmf_cfg80211_stop_ap(struc
  	msleep(400);
  
  	if (profile->use_fwauth != BIT(BRCMF_PROFILE_FWAUTH_NONE)) {
+ 		struct cfg80211_crypto_settings crypto = {};
+ 
  		if (profile->use_fwauth & BIT(BRCMF_PROFILE_FWAUTH_PSK))
  			brcmf_set_pmk(ifp, NULL, 0);
  		if (profile->use_fwauth & BIT(BRCMF_PROFILE_FWAUTH_SAE))
- 			brcmf_set_sae_password(ifp, NULL, 0);
+ 			brcmf_fwvid_set_sae_password(ifp, &crypto);
  		profile->use_fwauth = BIT(BRCMF_PROFILE_FWAUTH_NONE);
  	}

@@@ -7256,7 -7271,7 +7258,7 @@@ static int brcmf_setup_wiphybands(struc
  	u32 nmode = 0;
  	u32 vhtmode = 0;
  	u32 bw_cap[2] = { WLC_BW_20MHZ_BIT, WLC_BW_20MHZ_BIT };
- 	u32 rxchain;
+ 	u32 rxchain = 0;
  	u32 nchain;
  	int err;
  	s32 i;
@@@ -8422,7 -8437,6 +8424,7 @@@ void brcmf_cfg80211_detach(struct brcmf
  	brcmf_btcoex_detach(cfg);
  	wiphy_unregister(cfg->wiphy);
  	wl_deinit_priv(cfg);
 +	cancel_work_sync(&cfg->escan_timeout_work);
  	brcmf_free_wiphy(cfg->wiphy);
  	kfree(cfg);
  }
diff --combined drivers/net/wireless/microchip/wilc1000/netdev.c
index 3be3c1754770,81e8f25863f5..ef22bf6bf86a
--- a/drivers/net/wireless/microchip/wilc1000/netdev.c
+++ b/drivers/net/wireless/microchip/wilc1000/netdev.c
@@@ -416,7 -416,7 +416,7 @@@ static int wilc_init_fw_config(struct n
  
  	b = 1;
  	if (!wilc_wlan_cfg_set(vif, 0, WID_11N_IMMEDIATE_BA_ENABLED, &b, 1,
- 			       1, 1))
+ 			       1, 0))
  		goto fail;
  
  	return 0;
@@@ -989,6 -989,13 +989,6 @@@ struct wilc_vif *wilc_netdev_ifc_init(s
  		goto error;
  	}
  
- 	wl->hif_workqueue = alloc_ordered_workqueue("%s-wq", WQ_MEM_RECLAIM,
- 						    ndev->name);
- 	if (!wl->hif_workqueue) {
- 		ret = -ENOMEM;
- 		goto unregister_netdev;
- 	}
- 
  	ndev->needs_free_netdev = true;
  	vif->iftype = vif_type;
  	vif->idx = wilc_get_available_idx(wl);
@@@ -1001,14 -1008,16 +1001,15 @@@
  
  	return vif;
  
 -unregister_netdev:
 +error:
  	if (rtnl_locked)
  		cfg80211_unregister_netdevice(ndev);
  	else
  		unregister_netdev(ndev);
- error:
  	free_netdev(ndev);
  	return ERR_PTR(ret);
  }
  
+ MODULE_DESCRIPTION("Atmel WILC1000 core wireless driver");
  MODULE_LICENSE("GPL");
  MODULE_FIRMWARE(WILC1000_FW(WILC1000_API_VER));
diff --combined include/net/netfilter/nf_tables.h
index f3a7c4b1dd63,510244cc0f8f..e27c28b612e4
--- a/include/net/netfilter/nf_tables.h
+++ b/include/net/netfilter/nf_tables.h
@@@ -808,10 -808,16 +808,16 @@@ static inline struct nft_set_elem_expr
  	return nft_set_ext(ext, NFT_SET_EXT_EXPRESSIONS);
  }
  
- static inline bool nft_set_elem_expired(const struct nft_set_ext *ext)
+ static inline bool __nft_set_elem_expired(const struct nft_set_ext *ext,
+ 					  u64 tstamp)
  {
  	return nft_set_ext_exists(ext, NFT_SET_EXT_EXPIRATION) &&
- 	       time_is_before_eq_jiffies64(*nft_set_ext_expiration(ext));
+ 	       time_after_eq64(tstamp, *nft_set_ext_expiration(ext));
+ }
+ 
+ static inline bool nft_set_elem_expired(const struct nft_set_ext *ext)
+ {
+ 	return __nft_set_elem_expired(ext, get_jiffies_64());
  }
  
  static inline struct nft_set_ext *nft_set_elem_ext(const struct nft_set *set,
@@@ -1271,12 -1277,6 +1277,12 @@@ static inline bool nft_table_has_owner(
  	return table->flags & NFT_TABLE_F_OWNER;
  }
  
 +static inline bool nft_table_is_orphan(const struct nft_table *table)
 +{
 +	return (table->flags & (NFT_TABLE_F_OWNER | NFT_TABLE_F_PERSIST)) ==
 +			NFT_TABLE_F_PERSIST;
 +}
 +
  static inline bool nft_base_chain_netdev(int family, u32 hooknum)
  {
  	return family == NFPROTO_NETDEV ||
@@@ -1785,6 -1785,7 +1791,7 @@@ struct nftables_pernet
  	struct list_head	notify_list;
  	struct mutex		commit_mutex;
  	u64			table_handle;
+ 	u64			tstamp;
  	unsigned int		base_seq;
  	unsigned int		gc_seq;
  	u8			validate_state;
@@@ -1797,6 -1798,11 +1804,11 @@@ static inline struct nftables_pernet *n
  	return net_generic(net, nf_tables_net_id);
  }
  
+ static inline u64 nft_net_tstamp(const struct net *net)
+ {
+ 	return nft_pernet(net)->tstamp;
+ }
+ 
  #define __NFT_REDUCE_READONLY	1UL
  #define NFT_REDUCE_READONLY	(void *)__NFT_REDUCE_READONLY
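
(The tstamp field gives every expiry check in a transaction the same notion of
"now": nf_tables_valid_genid() in nf_tables_api.c further below samples
get_jiffies_64() once under commit_mutex, and __nft_set_elem_expired() then
compares against that value instead of re-reading jiffies per element. A
minimal illustrative caller — the helper name is hypothetical, not part of
this diff:

    static bool demo_elem_expired(const struct net *net,
    			      const struct nft_set_ext *ext)
    {
    	/* one "now" per transaction, not a fresh jiffies read per element,
    	 * so all elements handled in the same batch agree on what expired
    	 */
    	return __nft_set_elem_expired(ext, nft_net_tstamp(net));
    }
)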
diff --combined include/uapi/linux/netfilter/nf_tables.h
index 3fee994721cd,117c6a9b845b..aa4094ca2444
--- a/include/uapi/linux/netfilter/nf_tables.h
+++ b/include/uapi/linux/netfilter/nf_tables.h
@@@ -179,17 -179,13 +179,17 @@@ enum nft_hook_attributes
   * enum nft_table_flags - nf_tables table flags
   *
   * @NFT_TABLE_F_DORMANT: this table is not active
 + * @NFT_TABLE_F_OWNER: this table is owned by a process
 + * @NFT_TABLE_F_PERSIST: this table shall outlive its owner
   */
  enum nft_table_flags {
  	NFT_TABLE_F_DORMANT	= 0x1,
  	NFT_TABLE_F_OWNER	= 0x2,
 +	NFT_TABLE_F_PERSIST	= 0x4,
  };
  #define NFT_TABLE_F_MASK	(NFT_TABLE_F_DORMANT | \
- 				 NFT_TABLE_F_OWNER)
+ 				 NFT_TABLE_F_OWNER | \
+ 				 NFT_TABLE_F_PERSIST)
  
  /**
   * enum nft_table_attributes - nf_tables table netlink attributes
@@@ -289,9 -285,11 +289,11 @@@ enum nft_rule_attributes
  /**
   * enum nft_rule_compat_flags - nf_tables rule compat flags
   *
+  * @NFT_RULE_COMPAT_F_UNUSED: unused
   * @NFT_RULE_COMPAT_F_INV: invert the check result
   */
  enum nft_rule_compat_flags {
+ 	NFT_RULE_COMPAT_F_UNUSED	= (1 << 0),
  	NFT_RULE_COMPAT_F_INV		= (1 << 1),
  	NFT_RULE_COMPAT_F_MASK		= NFT_RULE_COMPAT_F_INV,
  };
diff --combined net/netfilter/nf_tables_api.c
index b68d1e59c786,f8e3f70c35bd..cb6f49a3d809
--- a/net/netfilter/nf_tables_api.c
+++ b/net/netfilter/nf_tables_api.c
@@@ -1194,10 -1194,8 +1194,10 @@@ static void nf_tables_table_disable(str
  #define __NFT_TABLE_F_INTERNAL		(NFT_TABLE_F_MASK + 1)
  #define __NFT_TABLE_F_WAS_DORMANT	(__NFT_TABLE_F_INTERNAL << 0)
  #define __NFT_TABLE_F_WAS_AWAKEN	(__NFT_TABLE_F_INTERNAL << 1)
 +#define __NFT_TABLE_F_WAS_ORPHAN	(__NFT_TABLE_F_INTERNAL << 2)
  #define __NFT_TABLE_F_UPDATE		(__NFT_TABLE_F_WAS_DORMANT | \
- 					 __NFT_TABLE_F_WAS_AWAKEN)
+ 					 __NFT_TABLE_F_WAS_AWAKEN | \
+ 					 __NFT_TABLE_F_WAS_ORPHAN)
  
  static int nf_tables_updtable(struct nft_ctx *ctx)
  {
@@@ -1217,11 -1215,8 +1217,11 @@@
  
  	if ((nft_table_has_owner(ctx->table) &&
  	     !(flags & NFT_TABLE_F_OWNER)) ||
- 	    (!nft_table_has_owner(ctx->table) &&
- 	     flags & NFT_TABLE_F_OWNER))
+ 	    (flags & NFT_TABLE_F_OWNER &&
+ 	     !nft_table_is_orphan(ctx->table)))
+ 		return -EOPNOTSUPP;
+ 
+ 	if ((flags ^ ctx->table->flags) & NFT_TABLE_F_PERSIST)
  		return -EOPNOTSUPP;
  
  	/* No dormant off/on/off/on games in single transaction */
@@@ -1250,13 -1245,6 +1250,13 @@@
  		}
  	}
  
+ 	if ((flags & NFT_TABLE_F_OWNER) &&
+ 	    !nft_table_has_owner(ctx->table)) {
+ 		ctx->table->nlpid = ctx->portid;
+ 		ctx->table->flags |= NFT_TABLE_F_OWNER |
+ 				     __NFT_TABLE_F_WAS_ORPHAN;
+ 	}
+ 
  	nft_trans_table_update(trans) = true;
  	nft_trans_commit_list_add_tail(ctx->net, trans);

@@@ -4247,18 -4235,23 +4247,18 @@@ static bool nft_set_ops_candidate(cons
   * given, in that case the amount of memory per element is used.
   */
  static const struct nft_set_ops *
- nft_select_set_ops(const struct nft_ctx *ctx,
- 		   const struct nlattr * const nla[],
+ nft_select_set_ops(const struct nft_ctx *ctx, u32 flags,
  		   const struct nft_set_desc *desc)
  {
  	struct nftables_pernet *nft_net = nft_pernet(ctx->net);
  	const struct nft_set_ops *ops, *bops;
  	struct nft_set_estimate est, best;
  	const struct nft_set_type *type;
- 	u32 flags = 0;
  	int i;
  
  	lockdep_assert_held(&nft_net->commit_mutex);
  	lockdep_nfnl_nft_mutex_not_held();
  
- 	if (nla[NFTA_SET_FLAGS] != NULL)
- 		flags = ntohl(nla_get_be32(nla[NFTA_SET_FLAGS]));
- 
  	bops = NULL;
  	best.size = ~0;
  	best.lookup = ~0;
@@@ -5144,7 -5137,7 +5144,7 @@@ static int nf_tables_newset(struct sk_b
  		if (!(info->nlh->nlmsg_flags & NLM_F_CREATE))
  			return -ENOENT;
  
- 	ops = nft_select_set_ops(&ctx, nla, &desc);
+ 	ops = nft_select_set_ops(&ctx, flags, &desc);
  	if (IS_ERR(ops))
  		return PTR_ERR(ops);

@@@ -9834,6 -9827,7 +9834,7 @@@ dead_elem
  struct nft_trans_gc *nft_trans_gc_catchall_sync(struct nft_trans_gc *gc)
  {
  	struct nft_set_elem_catchall *catchall, *next;
+ 	u64 tstamp = nft_net_tstamp(gc->net);
  	const struct nft_set *set = gc->set;
  	struct nft_elem_priv *elem_priv;
  	struct nft_set_ext *ext;
@@@ -9843,7 -9837,7 +9844,7 @@@
  	list_for_each_entry_safe(catchall, next, &set->catchall_list, list) {
  		ext = nft_set_elem_ext(set, catchall->elem);
  
- 		if (!nft_set_elem_expired(ext))
+ 		if (!__nft_set_elem_expired(ext, tstamp))
  			continue;
  
  		gc = nft_trans_gc_queue_sync(gc, GFP_KERNEL);
@@@ -10431,10 -10425,6 +10432,10 @@@ static int __nf_tables_abort(struct ne
  			} else if (trans->ctx.table->flags & __NFT_TABLE_F_WAS_AWAKEN) {
  				trans->ctx.table->flags &= ~NFT_TABLE_F_DORMANT;
  			}
+ 			if (trans->ctx.table->flags & __NFT_TABLE_F_WAS_ORPHAN) {
+ 				trans->ctx.table->flags &= ~NFT_TABLE_F_OWNER;
+ 				trans->ctx.table->nlpid = 0;
+ 			}
  			trans->ctx.table->flags &= ~__NFT_TABLE_F_UPDATE;
  			nft_trans_destroy(trans);
  		} else {
@@@ -10633,6 -10623,7 +10634,7 @@@ static bool nf_tables_valid_genid(struc
mutex_lock(&nft_net->commit_mutex); + nft_net->tstamp = get_jiffies_64();
genid_ok = genid == 0 || nft_net->base_seq == genid; if (!genid_ok) @@@ -11360,10 -11351,6 +11362,10 @@@ again list_for_each_entry(table, &nft_net->tables, list) { if (nft_table_has_owner(table) && n->portid == table->nlpid) { + if (table->flags & NFT_TABLE_F_PERSIST) { + table->flags &= ~NFT_TABLE_F_OWNER; + continue; + } __nft_release_hook(net, table); list_del_rcu(&table->list); to_delete[deleted++] = table; diff --combined net/unix/garbage.c index 9b8473dd79a4,8f63f0b4bf01..3e4b986de94b --- a/net/unix/garbage.c +++ b/net/unix/garbage.c @@@ -81,80 -81,12 +81,80 @@@ #include <net/scm.h> #include <net/tcp_states.h>
-#include "scm.h" +struct unix_sock *unix_get_socket(struct file *filp) +{ + struct inode *inode = file_inode(filp); + + /* Socket ? */ + if (S_ISSOCK(inode->i_mode) && !(filp->f_mode & FMODE_PATH)) { + struct socket *sock = SOCKET_I(inode); + const struct proto_ops *ops; + struct sock *sk = sock->sk; + + ops = READ_ONCE(sock->ops);
-/* Internal data structures and random procedures: */ + /* PF_UNIX ? */ + if (sk && ops && ops->family == PF_UNIX) + return unix_sk(sk); + } + + return NULL; +}
+DEFINE_SPINLOCK(unix_gc_lock); +unsigned int unix_tot_inflight; static LIST_HEAD(gc_candidates); -static DECLARE_WAIT_QUEUE_HEAD(unix_gc_wait); +static LIST_HEAD(gc_inflight_list); + +/* Keep the number of times in flight count for the file + * descriptor if it is for an AF_UNIX socket. + */ +void unix_inflight(struct user_struct *user, struct file *filp) +{ + struct unix_sock *u = unix_get_socket(filp); + + spin_lock(&unix_gc_lock); + + if (u) { + if (!u->inflight) { + WARN_ON_ONCE(!list_empty(&u->link)); + list_add_tail(&u->link, &gc_inflight_list); + } else { + WARN_ON_ONCE(list_empty(&u->link)); + } + u->inflight++; + + /* Paired with READ_ONCE() in wait_for_unix_gc() */ + WRITE_ONCE(unix_tot_inflight, unix_tot_inflight + 1); + } + + WRITE_ONCE(user->unix_inflight, user->unix_inflight + 1); + + spin_unlock(&unix_gc_lock); +} + +void unix_notinflight(struct user_struct *user, struct file *filp) +{ + struct unix_sock *u = unix_get_socket(filp); + + spin_lock(&unix_gc_lock); + + if (u) { + WARN_ON_ONCE(!u->inflight); + WARN_ON_ONCE(list_empty(&u->link)); + + u->inflight--; + if (!u->inflight) + list_del_init(&u->link); + + /* Paired with READ_ONCE() in wait_for_unix_gc() */ + WRITE_ONCE(unix_tot_inflight, unix_tot_inflight - 1); + } + + WRITE_ONCE(user->unix_inflight, user->unix_inflight - 1); + + spin_unlock(&unix_gc_lock); +}
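
(The two helpers above maintain the invariant the collector relies on:
u->inflight counts how many references to the socket's file currently sit
inside queued SCM_RIGHTS messages. Once every file reference is such an
in-flight reference, user space can no longer reach the socket — which is
exactly the candidate test __unix_gc() applies further below. A standalone
restatement of that test, with a hypothetical helper name not found in the
diff:

    /* Candidate test used by the collector: if all file references are
     * in-flight ones, no user-space fd can reach this socket any more.
     */
    static bool demo_is_gc_candidate(const struct unix_sock *u)
    {
    	long total_refs = file_count(u->sk.sk_socket->file);

    	return total_refs == u->inflight;
    }
)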
  
  static void scan_inflight(struct sock *x, void (*func)(struct unix_sock *),
  			  struct sk_buff_head *hitlist)
@@@ -173,15 -105,20 +173,15 @@@
  			while (nfd--) {
  				/* Get the socket the fd matches if it indeed does so */
- 				struct sock *sk = unix_get_socket(*fp++);
- 
- 				if (sk) {
- 					struct unix_sock *u = unix_sk(sk);
+ 				struct unix_sock *u = unix_get_socket(*fp++);
  
- 					/* Ignore non-candidates, they could
- 					 * have been added to the queues after
- 					 * starting the garbage collection
- 					 */
- 					if (test_bit(UNIX_GC_CANDIDATE, &u->gc_flags)) {
- 						hit = true;
+ 				/* Ignore non-candidates, they could have been added
+ 				 * to the queues after starting the garbage collection
+ 				 */
+ 				if (u && test_bit(UNIX_GC_CANDIDATE, &u->gc_flags)) {
+ 					hit = true;
  
- 						func(u);
- 					}
+ 					func(u);
  				}
  			}
  			if (hit && hitlist != NULL) {
@@@ -214,7 -151,7 +214,7 @@@ static void scan_children(struct sock *
  			/* An embryo cannot be in-flight, so it's safe
  			 * to use the list link.
  			 */
- 			BUG_ON(!list_empty(&u->link));
+ 			WARN_ON_ONCE(!list_empty(&u->link));
  			list_add_tail(&u->link, &embryos);
  		}
  		spin_unlock(&x->sk_receive_queue.lock);
@@@ -229,18 -166,17 +229,18 @@@
  
  static void dec_inflight(struct unix_sock *usk)
  {
- 	atomic_long_dec(&usk->inflight);
+ 	usk->inflight--;
  }
  
  static void inc_inflight(struct unix_sock *usk)
  {
- 	atomic_long_inc(&usk->inflight);
+ 	usk->inflight++;
  }
  
  static void inc_inflight_move_tail(struct unix_sock *u)
  {
- 	atomic_long_inc(&u->inflight);
+ 	u->inflight++;
+ 
  	/* If this still might be part of a cycle, move it to the end
  	 * of the list, so that it's checked even if it was already
  	 * passed over
@@@ -250,16 -186,40 +250,16 @@@
  }
  
  static bool gc_in_progress;
 -#define UNIX_INFLIGHT_TRIGGER_GC 16000
 -
 -void wait_for_unix_gc(void)
 -{
 -	/* If number of inflight sockets is insane,
 -	 * force a garbage collect right now.
 -	 * Paired with the WRITE_ONCE() in unix_inflight(),
 -	 * unix_notinflight() and gc_in_progress().
 -	 */
 -	if (READ_ONCE(unix_tot_inflight) > UNIX_INFLIGHT_TRIGGER_GC &&
 -	    !READ_ONCE(gc_in_progress))
 -		unix_gc();
 -
 -	wait_event(unix_gc_wait, gc_in_progress == false);
 -}
 
 -/* The external entry point: unix_gc() */
 -void unix_gc(void)
 +static void __unix_gc(struct work_struct *work)
  {
- 	struct sk_buff *next_skb, *skb;
- 	struct unix_sock *u;
- 	struct unix_sock *next;
  	struct sk_buff_head hitlist;
- 	struct list_head cursor;
+ 	struct unix_sock *u, *next;
  	LIST_HEAD(not_cycle_list);
+ 	struct list_head cursor;
  
  	spin_lock(&unix_gc_lock);
  
- 	/* Avoid a recursive GC. */
- 	if (gc_in_progress)
- 		goto out;
- 
- 	/* Paired with READ_ONCE() in wait_for_unix_gc(). */
- 	WRITE_ONCE(gc_in_progress, true);
- 
  	/* First, select candidates for garbage collection.  Only
  	 * in-flight sockets are considered, and from those only ones
  	 * which don't have any external reference.
@@@ -277,12 -237,14 +277,12 @@@
  	 */
  	list_for_each_entry_safe(u, next, &gc_inflight_list, link) {
  		long total_refs;
- 		long inflight_refs;
  
  		total_refs = file_count(u->sk.sk_socket->file);
- 		inflight_refs = atomic_long_read(&u->inflight);
  
- 		BUG_ON(inflight_refs < 1);
- 		BUG_ON(total_refs < inflight_refs);
- 		if (total_refs == inflight_refs) {
+ 		WARN_ON_ONCE(!u->inflight);
+ 		WARN_ON_ONCE(total_refs < u->inflight);
+ 		if (total_refs == u->inflight) {
  			list_move_tail(&u->link, &gc_candidates);
  			__set_bit(UNIX_GC_CANDIDATE, &u->gc_flags);
  			__set_bit(UNIX_GC_MAYBE_CYCLE, &u->gc_flags);
@@@ -309,7 -271,7 +309,7 @@@
  		/* Move cursor to after the current position. */
  		list_move(&cursor, &u->link);
  
- 		if (atomic_long_read(&u->inflight) > 0) {
+ 		if (u->inflight) {
  			list_move_tail(&u->link, &not_cycle_list);
  			__clear_bit(UNIX_GC_MAYBE_CYCLE, &u->gc_flags);
  			scan_children(&u->sk, inc_inflight_move_tail, NULL);
@@@ -336,50 -298,49 +336,61 @@@
  
  	spin_unlock(&unix_gc_lock);
  
- 	/* We need io_uring to clean its registered files, ignore all io_uring
- 	 * originated skbs. It's fine as io_uring doesn't keep references to
- 	 * other io_uring instances and so killing all other files in the cycle
- 	 * will put all io_uring references forcing it to go through normal
- 	 * release.path eventually putting registered files.
- 	 */
- 	skb_queue_walk_safe(&hitlist, skb, next_skb) {
- 		if (skb->destructor == io_uring_destruct_scm) {
- 			__skb_unlink(skb, &hitlist);
- 			skb_queue_tail(&skb->sk->sk_receive_queue, skb);
- 		}
- 	}
- 
  	/* Here we are. Hitlist is filled. Die. */
  	__skb_queue_purge(&hitlist);
  
+ #if IS_ENABLED(CONFIG_AF_UNIX_OOB)
+ 	list_for_each_entry_safe(u, next, &gc_candidates, link) {
+ 		struct sk_buff *skb = u->oob_skb;
+ 
+ 		if (skb) {
+ 			u->oob_skb = NULL;
+ 			kfree_skb(skb);
+ 		}
+ 	}
+ #endif
+ 
  	spin_lock(&unix_gc_lock);
  
- 	/* There could be io_uring registered files, just push them back to
- 	 * the inflight list
- 	 */
- 	list_for_each_entry_safe(u, next, &gc_candidates, link)
- 		list_move_tail(&u->link, &gc_inflight_list);
- 
  	/* All candidates should have been detached by now. */
- 	BUG_ON(!list_empty(&gc_candidates));
+ 	WARN_ON_ONCE(!list_empty(&gc_candidates));
  
  	/* Paired with READ_ONCE() in wait_for_unix_gc(). */
  	WRITE_ONCE(gc_in_progress, false);
  
- 	wake_up(&unix_gc_wait);
- 
- out:
  	spin_unlock(&unix_gc_lock);
  }
+ 
+ static DECLARE_WORK(unix_gc_work, __unix_gc);
+ 
+ void unix_gc(void)
+ {
+ 	WRITE_ONCE(gc_in_progress, true);
+ 	queue_work(system_unbound_wq, &unix_gc_work);
+ }
+ 
+ #define UNIX_INFLIGHT_TRIGGER_GC 16000
+ #define UNIX_INFLIGHT_SANE_USER (SCM_MAX_FD * 8)
+ 
+ void wait_for_unix_gc(struct scm_fp_list *fpl)
+ {
+ 	/* If number of inflight sockets is insane,
+ 	 * force a garbage collect right now.
+ 	 *
+ 	 * Paired with the WRITE_ONCE() in unix_inflight(),
+ 	 * unix_notinflight(), and __unix_gc().
+ 	 */
+ 	if (READ_ONCE(unix_tot_inflight) > UNIX_INFLIGHT_TRIGGER_GC &&
+ 	    !READ_ONCE(gc_in_progress))
+ 		unix_gc();
+ 
+ 	/* Penalise users who want to send AF_UNIX sockets
+ 	 * but whose sockets have not been received yet.
+ 	 */
+ 	if (!fpl || !fpl->count_unix ||
+ 	    READ_ONCE(fpl->user->unix_inflight) < UNIX_INFLIGHT_SANE_USER)
+ 		return;
+ 
+ 	if (READ_ONCE(gc_in_progress))
+ 		flush_work(&unix_gc_work);
+ }
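
(For scale: SCM_MAX_FD is 253, so the per-user penalty above kicks in once a
single user has more than 253 * 8 = 2024 AF_UNIX sockets in flight, while the
global async-GC trigger stays at 16000 total in-flight sockets. A tiny
illustrative check of that arithmetic — the static_assert is mine, not part of
the diff:

    #include <net/scm.h>

    /* UNIX_INFLIGHT_SANE_USER == SCM_MAX_FD * 8 == 253 * 8 */
    static_assert(SCM_MAX_FD * 8 == 2024);
)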