The following commit has been merged in the master branch:
commit 9b19e57a3c78f1f7c08a48bafb7d84caf6e80b68
Merge: b33177f1d62bea7dd8a7dd6775116958ea71dc6b f3f19f939c11925dadd3f4776f99f8c278a7017b
Author: Jakub Kicinski <kuba@kernel.org>
Date: Thu May 12 15:39:02 2022 -0700
Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net
No conflicts.
Build issue in drivers/net/ethernet/sfc/ptp.c
54fccfdd7c66 ("sfc: efx_default_channel_type APIs can be static")
49e6123c65da ("net: sfc: fix memory leak due to ptp channel")
https://lore.kernel.org/all/20220510130556.52598fe2@canb.auug.org.au/
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
diff --combined MAINTAINERS
index de6a26a01166,b7b1dfba707c..69b597aa4bc7
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@@ -3571,8 -3571,9 +3571,9 @@@ M: Andy Gospodarek <andy@greyhouse.net>
L: netdev@vger.kernel.org
S: Supported
W: http://sourceforge.net/projects/bonding/
+ F: Documentation/networking/bonding.rst
F: drivers/net/bonding/
- F: include/net/bonding.h
+ F: include/net/bond*
F: include/uapi/linux/if_bonding.h
BOSCH SENSORTEC BMA400 ACCELEROMETER IIO DRIVER
@@@ -5050,6 -5051,12 +5051,6 @@@ S: Maintained
F: Documentation/hwmon/corsair-psu.rst
F: drivers/hwmon/corsair-psu.c
-COSA/SRP SYNC SERIAL DRIVER
-M: Jan "Yenya" Kasprzak <kas@fi.muni.cz>
-S: Maintained
-W: http://www.fi.muni.cz/~kas/cosa/
-F: drivers/net/wan/cosa*
-
COUNTER SUBSYSTEM
M: William Breathitt Gray <vilhelm.gray@gmail.com>
L: linux-iio@vger.kernel.org
@@@ -5231,14 -5238,6 +5232,14 @@@ T: git git://linuxtv.org/media_tree.git
F: Documentation/devicetree/bindings/media/allwinner,sun6i-a31-csi.yaml
F: drivers/media/platform/sunxi/sun6i-csi/
+CTU CAN FD DRIVER
+M: Pavel Pisa <pisa@cmp.felk.cvut.cz>
+M: Ondrej Ille <ondrej.ille@gmail.com>
+L: linux-can@vger.kernel.org
+S: Maintained
+F: Documentation/devicetree/bindings/net/can/ctu,ctucanfd.yaml
+F: drivers/net/can/ctucanfd/
+
CW1200 WLAN driver
M: Solomon Peachy <pizza@shaftnet.org>
S: Maintained
@@@ -5919,7 -5918,7 +5920,7 @@@ R: Benjamin Gaignard <benjamin.gaignard
R: Liam Mark <lmark@codeaurora.org>
R: Laura Abbott <labbott@redhat.com>
R: Brian Starkey <Brian.Starkey@arm.com>
- R: John Stultz <john.stultz@linaro.org>
+ R: John Stultz <jstultz@google.com>
L: linux-media@vger.kernel.org
L: dri-devel@lists.freedesktop.org
L: linaro-mm-sig@lists.linaro.org (moderated for non-subscribers)
@@@ -6589,7 -6588,7 +6590,7 @@@ F: drivers/gpu/drm/gma500
DRM DRIVERS FOR HISILICON
M: Xinliang Liu <xinliang.liu@linaro.org>
M: Tian Tao <tiantao6@hisilicon.com>
- R: John Stultz <john.stultz@linaro.org>
+ R: John Stultz <jstultz@google.com>
R: Xinwei Kong <kong.kongxinwei@hisilicon.com>
R: Chen Feng <puck.chen@hisilicon.com>
L: dri-devel@lists.freedesktop.org
@@@ -7501,7 -7500,7 +7502,7 @@@ F: Documentation/hwmon/f71805f.rst
F: drivers/hwmon/f71805f.c
FADDR2LINE
- M: Josh Poimboeuf <jpoimboe@redhat.com>
+ M: Josh Poimboeuf <jpoimboe@kernel.org>
S: Maintained
F: scripts/faddr2line
@@@ -8114,7 -8113,7 +8115,7 @@@ M: Ingo Molnar <mingo@redhat.com>
R: Peter Zijlstra <peterz@infradead.org>
R: Darren Hart <dvhart@infradead.org>
R: Davidlohr Bueso <dave@stgolabs.net>
- R: André Almeida <andrealmeid@collabora.com>
+ R: André Almeida <andrealmeid@igalia.com>
L: linux-kernel@vger.kernel.org
S: Maintained
T: git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git locking/core
@@@ -8387,7 -8386,7 +8388,7 @@@ M: Linus Walleij <linus.walleij@linaro.org>
M: Bartosz Golaszewski <brgl@bgdev.pl>
L: linux-gpio@vger.kernel.org
S: Maintained
- T: git git://git.kernel.org/pub/scm/linux/kernel/git/linusw/linux-gpio.git
+ T: git git://git.kernel.org/pub/scm/linux/kernel/git/brgl/linux.git
F: Documentation/ABI/obsolete/sysfs-gpio
F: Documentation/ABI/testing/gpio-cdev
F: Documentation/admin-guide/gpio/
@@@ -8769,6 -8768,7 +8770,6 @@@ F: kernel/time/timer_*.c
HIGH-SPEED SCC DRIVER FOR AX.25
L: linux-hams@vger.kernel.org
S: Orphan
-F: drivers/net/hamradio/dmascc.c
F: drivers/net/hamradio/scc.c
HIGHPOINT ROCKETRAID 3xxx RAID DRIVER
@@@ -8849,7 -8849,7 +8850,7 @@@ F: Documentation/devicetree/bindings/ne
F: drivers/net/ethernet/hisilicon/
HIKEY960 ONBOARD USB GPIO HUB DRIVER
- M: John Stultz <john.stultz@linaro.org>
+ M: John Stultz <jstultz@google.com>
L: linux-kernel@vger.kernel.org
S: Maintained
F: drivers/misc/hisi_hikey_usb.c
@@@ -10132,7 -10132,7 +10133,7 @@@ S: Supported
F: drivers/net/wireless/intel/iwlegacy/
INTEL WIRELESS WIFI LINK (iwlwifi)
- M: Luca Coelho <luciano.coelho@intel.com>
+ M: Gregory Greenman <gregory.greenman@intel.com>
L: linux-wireless@vger.kernel.org
S: Supported
W: https://wireless.wiki.kernel.org/en/users/drivers/iwlwifi
@@@ -11349,7 -11349,7 +11350,7 @@@ F: drivers/mmc/host/litex_mmc.c
N: litex
LIVE PATCHING
- M: Josh Poimboeuf <jpoimboe@redhat.com>
+ M: Josh Poimboeuf <jpoimboe@kernel.org>
M: Jiri Kosina <jikos@kernel.org>
M: Miroslav Benes <mbenes@suse.cz>
M: Petr Mladek <pmladek@suse.com>
@@@ -11832,13 -11832,6 +11833,13 @@@ S: Supported
F: Documentation/devicetree/bindings/mmc/marvell,xenon-sdhci.txt
F: drivers/mmc/host/sdhci-xenon*
+MARVELL OCTEON ENDPOINT DRIVER
+M: Veerasenareddy Burru <vburru@marvell.com>
+M: Abhijit Ayarekar <aayarekar@marvell.com>
+L: netdev@vger.kernel.org
+S: Supported
+F: drivers/net/ethernet/marvell/octeon_ep
+
MATROX FRAMEBUFFER DRIVER
L: linux-fbdev(a)vger.kernel.org
S: Orphan
@@@ -12488,17 -12481,6 +12489,17 @@@ S: Maintained
F: drivers/net/dsa/mt7530.*
F: net/dsa/tag_mtk.c
+MEDIATEK T7XX 5G WWAN MODEM DRIVER
+M: Chandrashekar Devegowda <chandrashekar.devegowda@intel.com>
+M: Intel Corporation <linuxwwan@intel.com>
+R: Chiranjeevi Rapolu <chiranjeevi.rapolu@linux.intel.com>
+R: Liu Haijun <haijun.liu@mediatek.com>
+R: M Chetan Kumar <m.chetan.kumar@linux.intel.com>
+R: Ricardo Martinez <ricardo.martinez@linux.intel.com>
+L: netdev@vger.kernel.org
+S: Supported
+F: drivers/net/wwan/t7xx/
+
MEDIATEK USB3 DRD IP DRIVER
M: Chunfeng Yun <chunfeng.yun@mediatek.com>
L: linux-usb@vger.kernel.org
@@@ -12928,13 -12910,6 +12929,13 @@@ F: drivers/net/dsa/microchip/
F: include/linux/platform_data/microchip-ksz.h
F: net/dsa/tag_ksz.c
+MICROCHIP LAN87xx/LAN937x T1 PHY DRIVER
+M: Arun Ramadoss <arun.ramadoss@microchip.com>
+R: UNGLinuxDriver@microchip.com
+L: netdev@vger.kernel.org
+S: Maintained
+F: drivers/net/phy/microchip_t1.c
+
MICROCHIP LAN743X ETHERNET DRIVER
M: Bryan Whitehead <bryan.whitehead@microchip.com>
M: UNGLinuxDriver@microchip.com
@@@ -14250,7 -14225,7 +14251,7 @@@ F: lib/objagg.c
F: lib/test_objagg.c
OBJTOOL
- M: Josh Poimboeuf <jpoimboe@redhat.com>
+ M: Josh Poimboeuf <jpoimboe@kernel.org>
+ M: Peter Zijlstra <peterz@infradead.org>
S: Supported
F: tools/objtool/
@@@ -16005,12 -15980,6 +16006,12 @@@ T: git git://linuxtv.org/media_tree.git
F: Documentation/admin-guide/media/pulse8-cec.rst
F: drivers/media/cec/usb/pulse8/
+PURELIFI PLFXLC DRIVER
+M: Srinivasan Raju <srini.raju@purelifi.com>
+L: linux-wireless@vger.kernel.org
+S: Supported
+F: drivers/net/wireless/purelifi/plfxlc/
+
PVRUSB2 VIDEO4LINUX DRIVER
M: Mike Isely <isely@pobox.com>
L: pvrusb2@isely.net (subscribers-only)
@@@ -18021,8 -17990,8 +18022,8 @@@ F: drivers/platform/x86/touchscreen_dmi.c
SILICON LABS WIRELESS DRIVERS (for WFxxx series)
M: Jérôme Pouiller <jerome.pouiller@silabs.com>
S: Supported
-F: Documentation/devicetree/bindings/staging/net/wireless/silabs,wfx.yaml
-F: drivers/staging/wfx/
+F: Documentation/devicetree/bindings/net/wireless/silabs,wfx.yaml
+F: drivers/net/wireless/silabs/wfx/
SILICON MOTION SM712 FRAME BUFFER DRIVER
M: Sudip Mukherjee <sudipm.mukherjee@gmail.com>
@@@ -18824,7 -18793,7 +18825,7 @@@ F: include/dt-bindings/reset/starfive-jh7100.h
STATIC BRANCH/CALL
M: Peter Zijlstra <peterz@infradead.org>
- M: Josh Poimboeuf <jpoimboe@redhat.com>
+ M: Josh Poimboeuf <jpoimboe@kernel.org>
M: Jason Baron <jbaron@akamai.com>
R: Steven Rostedt <rostedt@goodmis.org>
R: Ard Biesheuvel <ardb@kernel.org>
@@@ -18908,14 -18877,6 +18909,14 @@@ L: netdev@vger.kernel.org
S: Maintained
F: drivers/net/ethernet/dlink/sundance.c
+SUNPLUS ETHERNET DRIVER
+M: Wells Lu <wellslutw@gmail.com>
+L: netdev@vger.kernel.org
+S: Maintained
+W: https://sunplus.atlassian.net/wiki/spaces/doc/overview
+F: Documentation/devicetree/bindings/net/sunplus,sp7021-emac.yaml
+F: drivers/net/ethernet/sunplus/
+
SUNPLUS OCOTP DRIVER
M: Vincent Shih <vincent.sunplus@gmail.com>
S: Maintained
@@@ -19833,7 -19794,7 +19834,7 @@@ F: drivers/net/wireless/ti
F: include/linux/wl12xx.h
TIMEKEEPING, CLOCKSOURCE CORE, NTP, ALARMTIMER
- M: John Stultz <john.stultz@linaro.org>
+ M: John Stultz <jstultz@google.com>
M: Thomas Gleixner <tglx@linutronix.de>
R: Stephen Boyd <sboyd@kernel.org>
L: linux-kernel@vger.kernel.org
@@@ -21484,7 -21445,7 +21485,7 @@@ F: arch/x86/kernel/apic/x2apic_uv_x.c
F: arch/x86/platform/uv/
X86 STACK UNWINDING
- M: Josh Poimboeuf <jpoimboe@redhat.com>
+ M: Josh Poimboeuf <jpoimboe@kernel.org>
M: Peter Zijlstra <peterz@infradead.org>
S: Supported
F: arch/x86/include/asm/unwind*.h
@@@ -21683,7 -21644,7 +21684,7 @@@ M: Appana Durga Kedareswara rao <appana
R: Naga Sureshkumar Relli <naga.sureshkumar.relli@xilinx.com>
L: linux-can@vger.kernel.org
S: Maintained
-F: Documentation/devicetree/bindings/net/can/xilinx_can.txt
+F: Documentation/devicetree/bindings/net/can/xilinx,can.yaml
F: drivers/net/can/xilinx_can.c
XILINX GPIO DRIVER
diff --combined drivers/net/dsa/ocelot/felix.c
index f5c9d695408a,faccfb3f0158..a23781d9a15c
--- a/drivers/net/dsa/ocelot/felix.c
+++ b/drivers/net/dsa/ocelot/felix.c
@@@ -42,6 -42,44 +42,6 @@@ static struct net_device *felix_classif
}
}
-/* We are called before felix_npi_port_init(), so ocelot->npi is -1. */
-static int felix_migrate_fdbs_to_npi_port(struct dsa_switch *ds, int port,
- const unsigned char *addr, u16 vid,
- struct dsa_db db)
-{
- struct net_device *bridge_dev = felix_classify_db(db);
- struct ocelot *ocelot = ds->priv;
- int cpu = ocelot->num_phys_ports;
- int err;
-
- err = ocelot_fdb_del(ocelot, port, addr, vid, bridge_dev);
- if (err)
- return err;
-
- return ocelot_fdb_add(ocelot, cpu, addr, vid, bridge_dev);
-}
-
-static int felix_migrate_mdbs_to_npi_port(struct dsa_switch *ds, int port,
- const unsigned char *addr, u16 vid,
- struct dsa_db db)
-{
- struct net_device *bridge_dev = felix_classify_db(db);
- struct switchdev_obj_port_mdb mdb;
- struct ocelot *ocelot = ds->priv;
- int cpu = ocelot->num_phys_ports;
- int err;
-
- memset(&mdb, 0, sizeof(mdb));
- ether_addr_copy(mdb.addr, addr);
- mdb.vid = vid;
-
- err = ocelot_port_mdb_del(ocelot, port, &mdb, bridge_dev);
- if (err)
- return err;
-
- return ocelot_port_mdb_add(ocelot, cpu, &mdb, bridge_dev);
-}
-
static void felix_migrate_pgid_bit(struct dsa_switch *ds, int from, int to,
int pgid)
{
@@@ -79,6 -117,49 +79,6 @@@ felix_migrate_flood_to_tag_8021q_port(s
felix_migrate_pgid_bit(ds, ocelot->num_phys_ports, port, PGID_BC);
}
-/* ocelot->npi was already set to -1 by felix_npi_port_deinit, so
- * ocelot_fdb_add() will not redirect FDB entries towards the
- * CPU port module here, which is what we want.
- */
-static int
-felix_migrate_fdbs_to_tag_8021q_port(struct dsa_switch *ds, int port,
- const unsigned char *addr, u16 vid,
- struct dsa_db db)
-{
- struct net_device *bridge_dev = felix_classify_db(db);
- struct ocelot *ocelot = ds->priv;
- int cpu = ocelot->num_phys_ports;
- int err;
-
- err = ocelot_fdb_del(ocelot, cpu, addr, vid, bridge_dev);
- if (err)
- return err;
-
- return ocelot_fdb_add(ocelot, port, addr, vid, bridge_dev);
-}
-
-static int
-felix_migrate_mdbs_to_tag_8021q_port(struct dsa_switch *ds, int port,
- const unsigned char *addr, u16 vid,
- struct dsa_db db)
-{
- struct net_device *bridge_dev = felix_classify_db(db);
- struct switchdev_obj_port_mdb mdb;
- struct ocelot *ocelot = ds->priv;
- int cpu = ocelot->num_phys_ports;
- int err;
-
- memset(&mdb, 0, sizeof(mdb));
- ether_addr_copy(mdb.addr, addr);
- mdb.vid = vid;
-
- err = ocelot_port_mdb_del(ocelot, cpu, &mdb, bridge_dev);
- if (err)
- return err;
-
- return ocelot_port_mdb_add(ocelot, port, &mdb, bridge_dev);
-}
-
/* Set up VCAP ES0 rules for pushing a tag_8021q VLAN towards the CPU such that
* the tagger can perform RX source port identification.
*/
@@@ -322,6 -403,7 +322,7 @@@ static int felix_update_trapping_destin
{
struct ocelot *ocelot = ds->priv;
struct felix *felix = ocelot_to_felix(ocelot);
+ struct ocelot_vcap_block *block_vcap_is2;
struct ocelot_vcap_filter *trap;
enum ocelot_mask_mode mask_mode;
unsigned long port_mask;
@@@ -341,9 -423,13 +342,13 @@@
/* We are sure that "cpu" was found, otherwise
* dsa_tree_setup_default_cpu() would have failed earlier.
*/
+ block_vcap_is2 = &ocelot->block[VCAP_IS2];
/* Make sure all traps are set up for that destination */
- list_for_each_entry(trap, &ocelot->traps, trap_list) {
+ list_for_each_entry(trap, &block_vcap_is2->rules, list) {
+ if (!trap->is_trap)
+ continue;
+
/* Figure out the current trapping destination */
if (using_tag_8021q) {
/* Redirect to the tag_8021q CPU port. If timestamps
@@@ -407,11 -493,14 +412,11 @@@ static int felix_setup_tag_8021q(struc
if (err)
return err;
- err = dsa_port_walk_fdbs(ds, cpu, felix_migrate_fdbs_to_tag_8021q_port);
+ err = ocelot_migrate_mdbs(ocelot, BIT(ocelot->num_phys_ports),
+ BIT(cpu));
if (err)
goto out_tag_8021q_unregister;
- err = dsa_port_walk_mdbs(ds, cpu, felix_migrate_mdbs_to_tag_8021q_port);
- if (err)
- goto out_migrate_fdbs;
-
felix_migrate_flood_to_tag_8021q_port(ds, cpu);
err = felix_update_trapping_destinations(ds, true);
@@@ -431,7 -520,9 +436,7 @@@
out_migrate_flood:
felix_migrate_flood_to_npi_port(ds, cpu);
- dsa_port_walk_mdbs(ds, cpu, felix_migrate_mdbs_to_npi_port);
-out_migrate_fdbs:
- dsa_port_walk_fdbs(ds, cpu, felix_migrate_fdbs_to_npi_port);
+ ocelot_migrate_mdbs(ocelot, BIT(cpu), BIT(ocelot->num_phys_ports));
out_tag_8021q_unregister:
dsa_tag_8021q_unregister(ds);
return err;
@@@ -511,16 -602,24 +516,16 @@@ static int felix_setup_tag_npi(struct d
struct ocelot *ocelot = ds->priv;
int err;
- err = dsa_port_walk_fdbs(ds, cpu, felix_migrate_fdbs_to_npi_port);
+ err = ocelot_migrate_mdbs(ocelot, BIT(cpu),
+ BIT(ocelot->num_phys_ports));
if (err)
return err;
- err = dsa_port_walk_mdbs(ds, cpu, felix_migrate_mdbs_to_npi_port);
- if (err)
- goto out_migrate_fdbs;
-
felix_migrate_flood_to_npi_port(ds, cpu);
felix_npi_port_init(ocelot, cpu);
return 0;
-
-out_migrate_fdbs:
- dsa_port_walk_fdbs(ds, cpu, felix_migrate_fdbs_to_tag_8021q_port);
-
- return err;
}
static void felix_teardown_tag_npi(struct dsa_switch *ds, int cpu)
@@@ -1008,7 -1107,6 +1013,7 @@@ static const u32 felix_phy_match_table[
[PHY_INTERFACE_MODE_SGMII] = OCELOT_PORT_MODE_SGMII,
[PHY_INTERFACE_MODE_QSGMII] = OCELOT_PORT_MODE_QSGMII,
[PHY_INTERFACE_MODE_USXGMII] = OCELOT_PORT_MODE_USXGMII,
+ [PHY_INTERFACE_MODE_1000BASEX] = OCELOT_PORT_MODE_1000BASEX,
[PHY_INTERFACE_MODE_2500BASEX] = OCELOT_PORT_MODE_2500BASEX,
};
@@@ -1104,6 -1202,7 +1109,6 @@@ static int felix_init_structs(struct fe
ocelot->map = felix->info->map;
ocelot->stats_layout = felix->info->stats_layout;
- ocelot->num_stats = felix->info->num_stats;
ocelot->num_mact_rows = felix->info->num_mact_rows;
ocelot->vcap = felix->info->vcap;
ocelot->vcap_pol.base = felix->info->vcap_pol_base;
diff --combined drivers/net/ethernet/aquantia/atlantic/aq_ring.c
index ea740210803f,8201ce7adb77..25129e723b57
--- a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
@@@ -7,37 -7,15 +7,37 @@@
/* File aq_ring.c: Definition of functions for Rx/Tx rings. */
-#include "aq_ring.h"
#include "aq_nic.h"
#include "aq_hw.h"
#include "aq_hw_utils.h"
#include "aq_ptp.h"
+#include "aq_vec.h"
+#include "aq_main.h"
+#include <net/xdp.h>
+#include <linux/filter.h>
+#include <linux/bpf_trace.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
+static void aq_get_rxpages_xdp(struct aq_ring_buff_s *buff,
+ struct xdp_buff *xdp)
+{
+ struct skb_shared_info *sinfo;
+ int i;
+
+ if (xdp_buff_has_frags(xdp)) {
+ sinfo = xdp_get_shared_info_from_buff(xdp);
+
+ for (i = 0; i < sinfo->nr_frags; i++) {
+ skb_frag_t *frag = &sinfo->frags[i];
+
+ page_ref_inc(skb_frag_page(frag));
+ }
+ }
+ page_ref_inc(buff->rxdata.page);
+}
+
static inline void aq_free_rxpage(struct aq_rxpage *rxpage, struct device *dev)
{
unsigned int len = PAGE_SIZE << rxpage->order;
@@@ -49,10 -27,9 +49,10 @@@
rxpage->page = NULL;
}
-static int aq_get_rxpage(struct aq_rxpage *rxpage, unsigned int order,
- struct device *dev)
+static int aq_alloc_rxpages(struct aq_rxpage *rxpage, struct aq_ring_s *rx_ring)
{
+ struct device *dev = aq_nic_get_dev(rx_ring->aq_nic);
+ unsigned int order = rx_ring->page_order;
struct page *page;
int ret = -ENOMEM;
dma_addr_t daddr;
@@@ -70,7 -47,7 +70,7 @@@
rxpage->page = page;
rxpage->daddr = daddr;
rxpage->order = order;
- rxpage->pg_off = 0;
+ rxpage->pg_off = rx_ring->page_offset;
return 0;
@@@ -81,26 -58,21 +81,26 @@@ err_exit
return ret;
}
-static int aq_get_rxpages(struct aq_ring_s *self, struct aq_ring_buff_s *rxbuf,
- int order)
+static int aq_get_rxpages(struct aq_ring_s *self, struct aq_ring_buff_s *rxbuf)
{
+ unsigned int order = self->page_order;
+ u16 page_offset = self->page_offset;
+ u16 frame_max = self->frame_max;
+ u16 tail_size = self->tail_size;
int ret;
if (rxbuf->rxdata.page) {
/* One means ring is the only user and can reuse */
if (page_ref_count(rxbuf->rxdata.page) > 1) {
/* Try reuse buffer */
- rxbuf->rxdata.pg_off += AQ_CFG_RX_FRAME_MAX;
- if (rxbuf->rxdata.pg_off + AQ_CFG_RX_FRAME_MAX <=
- (PAGE_SIZE << order)) {
+ rxbuf->rxdata.pg_off += frame_max + page_offset +
+ tail_size;
+ if (rxbuf->rxdata.pg_off + frame_max + tail_size <=
+ (PAGE_SIZE << order)) {
u64_stats_update_begin(&self->stats.rx.syncp);
self->stats.rx.pg_flips++;
u64_stats_update_end(&self->stats.rx.syncp);
+
} else {
/* Buffer exhausted. We have other users and
* should release this page and realloc
@@@ -112,7 -84,7 +112,7 @@@
u64_stats_update_end(&self->stats.rx.syncp);
}
} else {
- rxbuf->rxdata.pg_off = 0;
+ rxbuf->rxdata.pg_off = page_offset;
u64_stats_update_begin(&self->stats.rx.syncp);
self->stats.rx.pg_reuses++;
u64_stats_update_end(&self->stats.rx.syncp);
@@@ -120,7 -92,8 +120,7 @@@
}
if (!rxbuf->rxdata.page) {
- ret = aq_get_rxpage(&rxbuf->rxdata, order,
- aq_nic_get_dev(self->aq_nic));
+ ret = aq_alloc_rxpages(&rxbuf->rxdata, self);
if (ret) {
u64_stats_update_begin(&self->stats.rx.syncp);
self->stats.rx.alloc_fails++;
@@@ -144,7 -117,6 +144,7 @@@ static struct aq_ring_s *aq_ring_alloc(
err = -ENOMEM;
goto err_exit;
}
+
self->dx_ring = dma_alloc_coherent(aq_nic_get_dev(aq_nic),
self->size * self->dx_size,
&self->dx_ring_pa, GFP_KERNEL);
@@@ -200,22 -172,11 +200,22 @@@ struct aq_ring_s *aq_ring_rx_alloc(stru
self->idx = idx;
self->size = aq_nic_cfg->rxds;
self->dx_size = aq_nic_cfg->aq_hw_caps->rxd_size;
- self->page_order = fls(AQ_CFG_RX_FRAME_MAX / PAGE_SIZE +
- (AQ_CFG_RX_FRAME_MAX % PAGE_SIZE ? 1 : 0)) - 1;
-
- if (aq_nic_cfg->rxpageorder > self->page_order)
- self->page_order = aq_nic_cfg->rxpageorder;
+ self->xdp_prog = aq_nic->xdp_prog;
+ self->frame_max = AQ_CFG_RX_FRAME_MAX;
+
+ /* Only order-2 is allowed if XDP is enabled */
+ if (READ_ONCE(self->xdp_prog)) {
+ self->page_offset = AQ_XDP_HEADROOM;
+ self->page_order = AQ_CFG_XDP_PAGEORDER;
+ self->tail_size = AQ_XDP_TAILROOM;
+ } else {
+ self->page_offset = 0;
+ self->page_order = fls(self->frame_max / PAGE_SIZE +
+ (self->frame_max % PAGE_SIZE ? 1 : 0)) - 1;
+ if (aq_nic_cfg->rxpageorder > self->page_order)
+ self->page_order = aq_nic_cfg->rxpageorder;
+ self->tail_size = 0;
+ }
self = aq_ring_alloc(self, aq_nic);
if (!self) {
@@@ -337,26 -298,15 +337,26 @@@ bool aq_ring_tx_clean(struct aq_ring_s
}
}
- if (unlikely(buff->is_eop && buff->skb)) {
+ if (likely(!buff->is_eop))
+ goto out;
+
+ if (buff->skb) {
u64_stats_update_begin(&self->stats.tx.syncp);
++self->stats.tx.packets;
self->stats.tx.bytes += buff->skb->len;
u64_stats_update_end(&self->stats.tx.syncp);
-
dev_kfree_skb_any(buff->skb);
- buff->skb = NULL;
+ } else if (buff->xdpf) {
+ u64_stats_update_begin(&self->stats.tx.syncp);
+ ++self->stats.tx.packets;
+ self->stats.tx.bytes += xdp_get_frame_len(buff->xdpf);
+ u64_stats_update_end(&self->stats.tx.syncp);
+ xdp_return_frame_rx_napi(buff->xdpf);
}
+
+out:
+ buff->skb = NULL;
+ buff->xdpf = NULL;
buff->pa = 0U;
buff->eop_index = 0xffffU;
self->sw_head = aq_ring_next_dx(self, self->sw_head);
@@@ -389,162 -339,13 +389,161 @@@ static void aq_rx_checksum(struct aq_ri
__skb_incr_checksum_unnecessary(skb);
}
-#define AQ_SKB_ALIGN SKB_DATA_ALIGN(sizeof(struct skb_shared_info))
-int aq_ring_rx_clean(struct aq_ring_s *self,
- struct napi_struct *napi,
- int *work_done,
- int budget)
+int aq_xdp_xmit(struct net_device *dev, int num_frames,
+ struct xdp_frame **frames, u32 flags)
+{
+ struct aq_nic_s *aq_nic = netdev_priv(dev);
+ unsigned int vec, i, drop = 0;
+ int cpu = smp_processor_id();
+ struct aq_nic_cfg_s *aq_cfg;
+ struct aq_ring_s *ring;
+
+ aq_cfg = aq_nic_get_cfg(aq_nic);
+ vec = cpu % aq_cfg->vecs;
+ ring = aq_nic->aq_ring_tx[AQ_NIC_CFG_TCVEC2RING(aq_cfg, 0, vec)];
+
+ for (i = 0; i < num_frames; i++) {
+ struct xdp_frame *xdpf = frames[i];
+
+ if (aq_nic_xmit_xdpf(aq_nic, ring, xdpf) == NETDEV_TX_BUSY)
+ drop++;
+ }
+
+ return num_frames - drop;
+}
+
+static struct sk_buff *aq_xdp_run_prog(struct aq_nic_s *aq_nic,
+ struct xdp_buff *xdp,
+ struct aq_ring_s *rx_ring,
+ struct aq_ring_buff_s *buff)
+{
+ int result = NETDEV_TX_BUSY;
+ struct aq_ring_s *tx_ring;
+ struct xdp_frame *xdpf;
+ struct bpf_prog *prog;
+ u32 act = XDP_ABORTED;
+ struct sk_buff *skb;
+
+ u64_stats_update_begin(&rx_ring->stats.rx.syncp);
+ ++rx_ring->stats.rx.packets;
+ rx_ring->stats.rx.bytes += xdp_get_buff_len(xdp);
+ u64_stats_update_end(&rx_ring->stats.rx.syncp);
+
+ prog = READ_ONCE(rx_ring->xdp_prog);
+ if (!prog)
+ goto pass;
+
+ prefetchw(xdp->data_hard_start); /* xdp_frame write */
+
+ /* single buffer XDP program, but packet is multi buffer, aborted */
+ if (xdp_buff_has_frags(xdp) && !prog->aux->xdp_has_frags)
+ goto out_aborted;
+
+ act = bpf_prog_run_xdp(prog, xdp);
+ switch (act) {
+ case XDP_PASS:
+pass:
+ xdpf = xdp_convert_buff_to_frame(xdp);
+ if (unlikely(!xdpf))
+ goto out_aborted;
+ skb = xdp_build_skb_from_frame(xdpf, aq_nic->ndev);
+ if (!skb)
+ goto out_aborted;
+ u64_stats_update_begin(&rx_ring->stats.rx.syncp);
+ ++rx_ring->stats.rx.xdp_pass;
+ u64_stats_update_end(&rx_ring->stats.rx.syncp);
+ aq_get_rxpages_xdp(buff, xdp);
+ return skb;
+ case XDP_TX:
+ xdpf = xdp_convert_buff_to_frame(xdp);
+ if (unlikely(!xdpf))
+ goto out_aborted;
+ tx_ring = aq_nic->aq_ring_tx[rx_ring->idx];
+ result = aq_nic_xmit_xdpf(aq_nic, tx_ring, xdpf);
+ if (result == NETDEV_TX_BUSY)
+ goto out_aborted;
+ u64_stats_update_begin(&rx_ring->stats.rx.syncp);
+ ++rx_ring->stats.rx.xdp_tx;
+ u64_stats_update_end(&rx_ring->stats.rx.syncp);
+ aq_get_rxpages_xdp(buff, xdp);
+ break;
+ case XDP_REDIRECT:
+ if (xdp_do_redirect(aq_nic->ndev, xdp, prog) < 0)
+ goto out_aborted;
+ xdp_do_flush();
+ u64_stats_update_begin(&rx_ring->stats.rx.syncp);
+ ++rx_ring->stats.rx.xdp_redirect;
+ u64_stats_update_end(&rx_ring->stats.rx.syncp);
+ aq_get_rxpages_xdp(buff, xdp);
+ break;
+ default:
+ fallthrough;
+ case XDP_ABORTED:
+out_aborted:
+ u64_stats_update_begin(&rx_ring->stats.rx.syncp);
+ ++rx_ring->stats.rx.xdp_aborted;
+ u64_stats_update_end(&rx_ring->stats.rx.syncp);
+ trace_xdp_exception(aq_nic->ndev, prog, act);
+ bpf_warn_invalid_xdp_action(aq_nic->ndev, prog, act);
+ break;
+ case XDP_DROP:
+ u64_stats_update_begin(&rx_ring->stats.rx.syncp);
+ ++rx_ring->stats.rx.xdp_drop;
+ u64_stats_update_end(&rx_ring->stats.rx.syncp);
+ break;
+ }
+
+ return ERR_PTR(-result);
+}
+
+static bool aq_add_rx_fragment(struct device *dev,
+ struct aq_ring_s *ring,
+ struct aq_ring_buff_s *buff,
+ struct xdp_buff *xdp)
+{
+ struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp);
+ struct aq_ring_buff_s *buff_ = buff;
+
+ memset(sinfo, 0, sizeof(*sinfo));
+ do {
+ skb_frag_t *frag;
+
+ if (unlikely(sinfo->nr_frags >= MAX_SKB_FRAGS))
+ return true;
+
+ frag = &sinfo->frags[sinfo->nr_frags++];
+ buff_ = &ring->buff_ring[buff_->next];
+ dma_sync_single_range_for_cpu(dev,
+ buff_->rxdata.daddr,
+ buff_->rxdata.pg_off,
+ buff_->len,
+ DMA_FROM_DEVICE);
+ skb_frag_off_set(frag, buff_->rxdata.pg_off);
+ skb_frag_size_set(frag, buff_->len);
+ sinfo->xdp_frags_size += buff_->len;
+ __skb_frag_set_page(frag, buff_->rxdata.page);
+
+ buff_->is_cleaned = 1;
+
+ buff->is_ip_cso &= buff_->is_ip_cso;
+ buff->is_udp_cso &= buff_->is_udp_cso;
+ buff->is_tcp_cso &= buff_->is_tcp_cso;
+ buff->is_cso_err |= buff_->is_cso_err;
+
+ if (page_is_pfmemalloc(buff_->rxdata.page))
+ xdp_buff_set_frag_pfmemalloc(xdp);
+
+ } while (!buff_->is_eop);
+
+ xdp_buff_set_frags_flag(xdp);
+
+ return false;
+}
+
+static int __aq_ring_rx_clean(struct aq_ring_s *self, struct napi_struct *napi,
+ int *work_done, int budget)
{
struct net_device *ndev = aq_nic_get_ndev(self->aq_nic);
- bool is_rsc_completed = true;
int err = 0;
for (; (self->sw_head != self->hw_head) && budget;
@@@ -562,12 -363,17 +561,17 @@@
continue;
if (!buff->is_eop) {
+ unsigned int frag_cnt = 0U;
buff_ = buff;
do {
+ bool is_rsc_completed = true;
+
if (buff_->next >= self->size) {
err = -EIO;
goto err_exit;
}
+
+ frag_cnt++;
next_ = buff_->next,
buff_ = &self->buff_ring[next_];
is_rsc_completed =
@@@ -575,18 -381,17 +579,17 @@@
next_,
self->hw_head);
- if (unlikely(!is_rsc_completed))
- break;
+ if (unlikely(!is_rsc_completed) ||
+ frag_cnt > MAX_SKB_FRAGS) {
+ err = 0;
+ goto err_exit;
+ }
buff->is_error |= buff_->is_error;
buff->is_cso_err |= buff_->is_cso_err;
} while (!buff_->is_eop);
- if (!is_rsc_completed) {
- err = 0;
- goto err_exit;
- }
if (buff->is_error ||
(buff->is_lro && buff->is_cso_err)) {
buff_ = buff;
@@@ -644,16 -449,15 +647,15 @@@
ALIGN(hdr_len, sizeof(long)));
if (buff->len - hdr_len > 0) {
- skb_add_rx_frag(skb, 0, buff->rxdata.page,
+ skb_add_rx_frag(skb, i++, buff->rxdata.page,
buff->rxdata.pg_off + hdr_len,
buff->len - hdr_len,
- AQ_CFG_RX_FRAME_MAX);
+ self->frame_max);
page_ref_inc(buff->rxdata.page);
}
if (!buff->is_eop) {
buff_ = buff;
- i = 1U;
do {
next_ = buff_->next;
buff_ = &self->buff_ring[next_];
@@@ -667,7 -471,7 +669,7 @@@
buff_->rxdata.page,
buff_->rxdata.pg_off,
buff_->len,
- AQ_CFG_RX_FRAME_MAX);
+ self->frame_max);
page_ref_inc(buff_->rxdata.page);
buff_->is_cleaned = 1;
@@@ -708,149 -512,6 +710,149 @@@ err_exit
return err;
}
+static int __aq_ring_xdp_clean(struct aq_ring_s *rx_ring,
+ struct napi_struct *napi, int *work_done,
+ int budget)
+{
+ int frame_sz = rx_ring->page_offset + rx_ring->frame_max +
+ rx_ring->tail_size;
+ struct aq_nic_s *aq_nic = rx_ring->aq_nic;
+ bool is_rsc_completed = true;
+ struct device *dev;
+ int err = 0;
+
+ dev = aq_nic_get_dev(aq_nic);
+ for (; (rx_ring->sw_head != rx_ring->hw_head) && budget;
+ rx_ring->sw_head = aq_ring_next_dx(rx_ring, rx_ring->sw_head),
+ --budget, ++(*work_done)) {
+ struct aq_ring_buff_s *buff = &rx_ring->buff_ring[rx_ring->sw_head];
+ bool is_ptp_ring = aq_ptp_ring(rx_ring->aq_nic, rx_ring);
+ struct aq_ring_buff_s *buff_ = NULL;
+ struct sk_buff *skb = NULL;
+ unsigned int next_ = 0U;
+ struct xdp_buff xdp;
+ void *hard_start;
+
+ if (buff->is_cleaned)
+ continue;
+
+ if (!buff->is_eop) {
+ buff_ = buff;
+ do {
+ if (buff_->next >= rx_ring->size) {
+ err = -EIO;
+ goto err_exit;
+ }
+ next_ = buff_->next;
+ buff_ = &rx_ring->buff_ring[next_];
+ is_rsc_completed =
+ aq_ring_dx_in_range(rx_ring->sw_head,
+ next_,
+ rx_ring->hw_head);
+
+ if (unlikely(!is_rsc_completed))
+ break;
+
+ buff->is_error |= buff_->is_error;
+ buff->is_cso_err |= buff_->is_cso_err;
+ } while (!buff_->is_eop);
+
+ if (!is_rsc_completed) {
+ err = 0;
+ goto err_exit;
+ }
+ if (buff->is_error ||
+ (buff->is_lro && buff->is_cso_err)) {
+ buff_ = buff;
+ do {
+ if (buff_->next >= rx_ring->size) {
+ err = -EIO;
+ goto err_exit;
+ }
+ next_ = buff_->next;
+ buff_ = &rx_ring->buff_ring[next_];
+
+ buff_->is_cleaned = true;
+ } while (!buff_->is_eop);
+
+ u64_stats_update_begin(&rx_ring->stats.rx.syncp);
+ ++rx_ring->stats.rx.errors;
+ u64_stats_update_end(&rx_ring->stats.rx.syncp);
+ continue;
+ }
+ }
+
+ if (buff->is_error) {
+ u64_stats_update_begin(&rx_ring->stats.rx.syncp);
+ ++rx_ring->stats.rx.errors;
+ u64_stats_update_end(&rx_ring->stats.rx.syncp);
+ continue;
+ }
+
+ dma_sync_single_range_for_cpu(dev,
+ buff->rxdata.daddr,
+ buff->rxdata.pg_off,
+ buff->len, DMA_FROM_DEVICE);
+ hard_start = page_address(buff->rxdata.page) +
+ buff->rxdata.pg_off - rx_ring->page_offset;
+
+ if (is_ptp_ring)
+ buff->len -=
+ aq_ptp_extract_ts(rx_ring->aq_nic, skb,
+ aq_buf_vaddr(&buff->rxdata),
+ buff->len);
+
+ xdp_init_buff(&xdp, frame_sz, &rx_ring->xdp_rxq);
+ xdp_prepare_buff(&xdp, hard_start, rx_ring->page_offset,
+ buff->len, false);
+ if (!buff->is_eop) {
+ if (aq_add_rx_fragment(dev, rx_ring, buff, &xdp)) {
+ u64_stats_update_begin(&rx_ring->stats.rx.syncp);
+ ++rx_ring->stats.rx.packets;
+ rx_ring->stats.rx.bytes += xdp_get_buff_len(&xdp);
+ ++rx_ring->stats.rx.xdp_aborted;
+ u64_stats_update_end(&rx_ring->stats.rx.syncp);
+ continue;
+ }
+ }
+
+ skb = aq_xdp_run_prog(aq_nic, &xdp, rx_ring, buff);
+ if (IS_ERR(skb) || !skb)
+ continue;
+
+ if (buff->is_vlan)
+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
+ buff->vlan_rx_tag);
+
+ aq_rx_checksum(rx_ring, buff, skb);
+
+ skb_set_hash(skb, buff->rss_hash,
+ buff->is_hash_l4 ? PKT_HASH_TYPE_L4 :
+ PKT_HASH_TYPE_NONE);
+ /* Send all PTP traffic to 0 queue */
+ skb_record_rx_queue(skb,
+ is_ptp_ring ? 0
+ : AQ_NIC_RING2QMAP(rx_ring->aq_nic,
+ rx_ring->idx));
+
+ napi_gro_receive(napi, skb);
+ }
+
+err_exit:
+ return err;
+}
+
+int aq_ring_rx_clean(struct aq_ring_s *self,
+ struct napi_struct *napi,
+ int *work_done,
+ int budget)
+{
+ if (static_branch_unlikely(&aq_xdp_locking_key))
+ return __aq_ring_xdp_clean(self, napi, work_done, budget);
+ else
+ return __aq_ring_rx_clean(self, napi, work_done, budget);
+}
+
void aq_ring_hwts_rx_clean(struct aq_ring_s *self, struct aq_nic_s *aq_nic)
{
#if IS_REACHABLE(CONFIG_PTP_1588_CLOCK)
@@@ -870,6 -531,7 +872,6 @@@
int aq_ring_rx_fill(struct aq_ring_s *self)
{
- unsigned int page_order = self->page_order;
struct aq_ring_buff_s *buff = NULL;
int err = 0;
int i = 0;
@@@ -883,9 -545,9 +885,9 @@@
buff = &self->buff_ring[self->sw_tail];
buff->flags = 0U;
- buff->len = AQ_CFG_RX_FRAME_MAX;
+ buff->len = self->frame_max;
- err = aq_get_rxpages(self, buff, page_order);
+ err = aq_get_rxpages(self, buff);
if (err)
goto err_exit;
@@@ -940,15 -602,6 +942,15 @@@ unsigned int aq_ring_fill_stats_data(st
data[++count] = self->stats.rx.alloc_fails;
data[++count] = self->stats.rx.skb_alloc_fails;
data[++count] = self->stats.rx.polls;
+ data[++count] = self->stats.rx.pg_flips;
+ data[++count] = self->stats.rx.pg_reuses;
+ data[++count] = self->stats.rx.pg_losts;
+ data[++count] = self->stats.rx.xdp_aborted;
+ data[++count] = self->stats.rx.xdp_drop;
+ data[++count] = self->stats.rx.xdp_pass;
+ data[++count] = self->stats.rx.xdp_tx;
+ data[++count] = self->stats.rx.xdp_invalid;
+ data[++count] = self->stats.rx.xdp_redirect;
} while (u64_stats_fetch_retry_irq(&self->stats.rx.syncp, start));
} else {
/* This data should mimic aq_ethtool_queue_tx_stat_names structure */
diff --combined drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
index 878a53abec33,15ede7285fb5..54e70f07b573
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
@@@ -766,7 -766,7 +766,7 @@@ int hw_atl_b0_hw_ring_rx_init(struct aq
hw_atl_rdm_rx_desc_len_set(self, aq_ring->size / 8U, aq_ring->idx);
hw_atl_rdm_rx_desc_data_buff_size_set(self,
- AQ_CFG_RX_FRAME_MAX / 1024U,
+ aq_ring->frame_max / 1024U,
aq_ring->idx);
hw_atl_rdm_rx_desc_head_buff_size_set(self, 0U, aq_ring->idx);
@@@ -889,6 -889,13 +889,13 @@@ int hw_atl_b0_hw_ring_tx_head_update(st
err = -ENXIO;
goto err_exit;
}
+
+ /* Validate that the new hw_head_ is reasonable. */
+ if (hw_head_ >= ring->size) {
+ err = -ENXIO;
+ goto err_exit;
+ }
+
ring->hw_head = hw_head_;
err = aq_hw_err_from_flags(self);
@@@ -969,15 -976,15 +976,15 @@@ int hw_atl_b0_hw_ring_rx_receive(struc
rxd_wb->status);
if (HW_ATL_B0_RXD_WB_STAT2_EOP & rxd_wb->status) {
buff->len = rxd_wb->pkt_len %
- AQ_CFG_RX_FRAME_MAX;
+ ring->frame_max;
buff->len = buff->len ?
- buff->len : AQ_CFG_RX_FRAME_MAX;
+ buff->len : ring->frame_max;
buff->next = 0U;
buff->is_eop = 1U;
} else {
buff->len =
- rxd_wb->pkt_len > AQ_CFG_RX_FRAME_MAX ?
- AQ_CFG_RX_FRAME_MAX : rxd_wb->pkt_len;
+ rxd_wb->pkt_len > ring->frame_max ?
+ ring->frame_max : rxd_wb->pkt_len;
if (buff->is_lro) {
/* LRO */
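The hw_atl_b0 hunk above hardens hw_atl_b0_hw_ring_tx_head_update() by rejecting a hardware-reported head index that falls outside the ring before it is stored. As a minimal standalone C sketch of that bounds-check pattern (the struct and names below are illustrative, not the atlantic driver's):

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative ring: only the fields the check needs. */
struct ring {
	uint32_t size;    /* number of descriptors */
	uint32_t hw_head; /* last head index accepted from hardware */
};

/* Never trust an index read back from hardware: validate it against the
 * ring bounds before using it to address the software descriptor array.
 */
static int ring_update_head(struct ring *ring, uint32_t hw_head)
{
	if (hw_head >= ring->size)
		return -ENXIO; /* corrupt or malicious device state */
	ring->hw_head = hw_head;
	return 0;
}

int main(void)
{
	struct ring r = { .size = 512, .hw_head = 0 };

	printf("in range:     %d\n", ring_update_head(&r, 100)); /* 0 */
	printf("out of range: %d\n", ring_update_head(&r, 512)); /* -ENXIO */
	return 0;
}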
diff --combined drivers/net/ethernet/broadcom/genet/bcmgenet.c
index 65606351634e,e87e46c47387..8309fb993cdb
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
@@@ -2671,7 -2671,8 +2671,7 @@@ static void bcmgenet_init_tx_ring(struc
DMA_END_ADDR);
/* Initialize Tx NAPI */
- netif_tx_napi_add(priv->dev, &ring->napi, bcmgenet_tx_poll,
- NAPI_POLL_WEIGHT);
+ netif_napi_add_tx(priv->dev, &ring->napi, bcmgenet_tx_poll);
}
/* Initialize a RDMA ring */
@@@ -3998,6 -3999,10 +3998,10 @@@ static int bcmgenet_probe(struct platfo
goto err;
}
priv->wol_irq = platform_get_irq_optional(pdev, 2);
+ if (priv->wol_irq == -EPROBE_DEFER) {
+ err = priv->wol_irq;
+ goto err;
+ }
priv->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(priv->base)) {
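The bcmgenet hunk above stops treating -EPROBE_DEFER from platform_get_irq_optional() like an absent optional IRQ and fails the probe so it can be retried. A hedged standalone sketch of that distinction (helper names and the 517 constant mirror the kernel's convention for illustration only, not genet code):

#include <stdio.h>

#define EPROBE_DEFER 517 /* illustrative stand-in for the kernel's value */

static int get_optional_irq(int present, int provider_ready)
{
	if (!present)
		return -1;            /* absent: caller may ignore */
	if (!provider_ready)
		return -EPROBE_DEFER; /* not ready: caller must propagate */
	return 42;                    /* a valid IRQ number */
}

static int probe(int present, int ready)
{
	int irq = get_optional_irq(present, ready);

	if (irq == -EPROBE_DEFER)
		return irq; /* retry the probe later instead of limping on */
	/* any other negative value: continue without the optional IRQ */
	return 0;
}

int main(void)
{
	printf("absent:   %d\n", probe(0, 1)); /* 0: probe succeeds */
	printf("deferred: %d\n", probe(1, 0)); /* -517: probe retried */
	return 0;
}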
diff --combined drivers/net/ethernet/dec/tulip/tulip_core.c
index 434d8bf0e8f9,0040dcaab945..825e81f5fd22
--- a/drivers/net/ethernet/dec/tulip/tulip_core.c
+++ b/drivers/net/ethernet/dec/tulip/tulip_core.c
@@@ -1399,8 -1399,10 +1399,10 @@@ static int tulip_init_one(struct pci_de
/* alloc_etherdev ensures aligned and zeroed private structures */
dev = alloc_etherdev (sizeof (*tp));
- if (!dev)
+ if (!dev) {
+ pci_disable_device(pdev);
return -ENOMEM;
+ }
SET_NETDEV_DEV(dev, &pdev->dev);
if (pci_resource_len (pdev, 0) < tulip_tbl[chip_idx].io_size) {
@@@ -1689,7 -1691,7 +1691,7 @@@
dev->netdev_ops = &tulip_netdev_ops;
dev->watchdog_timeo = TX_TIMEOUT;
#ifdef CONFIG_TULIP_NAPI
- netif_napi_add(dev, &tp->napi, tulip_poll, 16);
+ netif_napi_add_weight(dev, &tp->napi, tulip_poll, 16);
#endif
dev->ethtool_ops = &ops;
@@@ -1785,6 -1787,7 +1787,7 @@@ err_out_free_res
err_out_free_netdev:
free_netdev (dev);
+ pci_disable_device(pdev);
return -ENODEV;
}
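Both tulip_core.c hunks above pair the earlier pci_enable_device() with pci_disable_device() on the two probe failure paths that previously leaked the enabled state. A minimal sketch of that balanced-unwind pattern (all names illustrative):

#include <stdio.h>

static int enable_device(void)   { puts("enable");  return 0; }
static void disable_device(void) { puts("disable"); }

/* Stand-in for an allocation that can fail mid-probe. */
static int alloc_resources(int fail) { return fail ? -1 : 0; }

static int probe(int fail_alloc)
{
	int err = enable_device();
	if (err)
		return err;

	err = alloc_resources(fail_alloc);
	if (err)
		goto err_disable; /* the fix: undo the enable on failure */

	return 0;

err_disable:
	disable_device();
	return err;
}

int main(void)
{
	probe(1); /* failure path prints "enable" then "disable" */
	return 0;
}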
diff --combined drivers/net/ethernet/intel/i40e/i40e_main.c
index 358c2edc118d,98871f014994..332a608dbaa6
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@@ -77,7 -77,6 +77,7 @@@ static const struct pci_device_id i40e_
{PCI_VDEVICE(INTEL, I40E_DEV_ID_1G_BASE_T_X722), 0},
{PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T_X722), 0},
{PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_I_X722), 0},
+ {PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_X722_A), 0},
{PCI_VDEVICE(INTEL, I40E_DEV_ID_20G_KR2), 0},
{PCI_VDEVICE(INTEL, I40E_DEV_ID_20G_KR2_A), 0},
{PCI_VDEVICE(INTEL, I40E_DEV_ID_X710_N3000), 0},
@@@ -786,7 -785,6 +786,7 @@@ static void i40e_update_vsi_stats(struc
unsigned int start;
u64 tx_linearize;
u64 tx_force_wb;
+ u64 tx_stopped;
u64 rx_p, rx_b;
u64 tx_p, tx_b;
u16 q;
@@@ -806,7 -804,6 +806,7 @@@
rx_b = rx_p = 0;
tx_b = tx_p = 0;
tx_restart = tx_busy = tx_linearize = tx_force_wb = 0;
+ tx_stopped = 0;
rx_page = 0;
rx_buf = 0;
rx_reuse = 0;
@@@ -831,7 -828,6 +831,7 @@@
tx_busy += p->tx_stats.tx_busy;
tx_linearize += p->tx_stats.tx_linearize;
tx_force_wb += p->tx_stats.tx_force_wb;
+ tx_stopped += p->tx_stats.tx_stopped;
/* locate Rx ring */
p = READ_ONCE(vsi->rx_rings[q]);
@@@ -876,7 -872,6 +876,7 @@@
vsi->tx_busy = tx_busy;
vsi->tx_linearize = tx_linearize;
vsi->tx_force_wb = tx_force_wb;
+ vsi->tx_stopped = tx_stopped;
vsi->rx_page_failed = rx_page;
vsi->rx_buf_failed = rx_buf;
vsi->rx_page_reuse = rx_reuse;
@@@ -7554,42 -7549,43 +7554,43 @@@ static void i40e_free_macvlan_channels(
static int i40e_fwd_ring_up(struct i40e_vsi *vsi, struct net_device *vdev,
struct i40e_fwd_adapter *fwd)
{
+ struct i40e_channel *ch = NULL, *ch_tmp, *iter;
int ret = 0, num_tc = 1, i, aq_err;
- struct i40e_channel *ch, *ch_tmp;
struct i40e_pf *pf = vsi->back;
struct i40e_hw *hw = &pf->hw;
- if (list_empty(&vsi->macvlan_list))
- return -EINVAL;
-
/* Go through the list and find an available channel */
- list_for_each_entry_safe(ch, ch_tmp, &vsi->macvlan_list, list) {
- if (!i40e_is_channel_macvlan(ch)) {
- ch->fwd = fwd;
+ list_for_each_entry_safe(iter, ch_tmp, &vsi->macvlan_list, list) {
+ if (!i40e_is_channel_macvlan(iter)) {
+ iter->fwd = fwd;
/* record configuration for macvlan interface in vdev */
for (i = 0; i < num_tc; i++)
netdev_bind_sb_channel_queue(vsi->netdev, vdev,
i,
- ch->num_queue_pairs,
- ch->base_queue);
- for (i = 0; i < ch->num_queue_pairs; i++) {
+ iter->num_queue_pairs,
+ iter->base_queue);
+ for (i = 0; i < iter->num_queue_pairs; i++) {
struct i40e_ring *tx_ring, *rx_ring;
u16 pf_q;
- pf_q = ch->base_queue + i;
+ pf_q = iter->base_queue + i;
/* Get to TX ring ptr */
tx_ring = vsi->tx_rings[pf_q];
- tx_ring->ch = ch;
+ tx_ring->ch = iter;
/* Get the RX ring ptr */
rx_ring = vsi->rx_rings[pf_q];
- rx_ring->ch = ch;
+ rx_ring->ch = iter;
}
+ ch = iter;
break;
}
}
+ if (!ch)
+ return -EINVAL;
+
/* Guarantee all rings are updated before we update the
* MAC address filter.
*/
@@@ -13441,7 -13437,8 +13442,7 @@@ static int i40e_config_netdev(struct i4
np->vsi = vsi;
hw_enc_features = NETIF_F_SG |
- NETIF_F_IP_CSUM |
- NETIF_F_IPV6_CSUM |
+ NETIF_F_HW_CSUM |
NETIF_F_HIGHDMA |
NETIF_F_SOFT_FEATURES |
NETIF_F_TSO |
@@@ -13472,23 -13469,6 +13473,23 @@@
/* record features VLANs can make use of */
netdev->vlan_features |= hw_enc_features | NETIF_F_TSO_MANGLEID;
+#define I40E_GSO_PARTIAL_FEATURES (NETIF_F_GSO_GRE | \
+ NETIF_F_GSO_GRE_CSUM | \
+ NETIF_F_GSO_IPXIP4 | \
+ NETIF_F_GSO_IPXIP6 | \
+ NETIF_F_GSO_UDP_TUNNEL | \
+ NETIF_F_GSO_UDP_TUNNEL_CSUM)
+
+ netdev->gso_partial_features = I40E_GSO_PARTIAL_FEATURES;
+ netdev->features |= NETIF_F_GSO_PARTIAL |
+ I40E_GSO_PARTIAL_FEATURES;
+
+ netdev->mpls_features |= NETIF_F_SG;
+ netdev->mpls_features |= NETIF_F_HW_CSUM;
+ netdev->mpls_features |= NETIF_F_TSO;
+ netdev->mpls_features |= NETIF_F_TSO6;
+ netdev->mpls_features |= I40E_GSO_PARTIAL_FEATURES;
+
/* enable macvlan offloads */
netdev->hw_features |= NETIF_F_HW_L2FW_DOFFLOAD;
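The i40e_fwd_ring_up() hunk above switches the search loop to a dedicated iter cursor and only assigns the ch result pointer on a match, because after a list_for_each_entry_safe() walk the cursor no longer points at a valid entry when nothing matched. A standalone sketch of that found-pointer pattern (a plain C list stands in for the kernel's list_head; names illustrative):

#include <stddef.h>
#include <stdio.h>

struct channel {
	int in_use;
	struct channel *next;
};

static struct channel *find_free(struct channel *head)
{
	struct channel *found = NULL; /* result pointer, NULL if no match */
	struct channel *iter;

	for (iter = head; iter; iter = iter->next) {
		if (!iter->in_use) {
			found = iter; /* record the match explicitly */
			break;
		}
	}
	return found; /* caller checks for NULL instead of trusting iter */
}

int main(void)
{
	struct channel b = { .in_use = 1, .next = NULL };
	struct channel a = { .in_use = 1, .next = &b };

	/* prints a null pointer: no free channel, and no bogus cursor used */
	printf("free channel: %p\n", (void *)find_free(&a));
	return 0;
}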
diff --combined drivers/net/ethernet/intel/ice/ice.h
index 2fbb3d737dfd,a895e3a8e988..60453b3b8d23
--- a/drivers/net/ethernet/intel/ice/ice.h
+++ b/drivers/net/ethernet/intel/ice/ice.h
@@@ -540,6 -540,7 +540,7 @@@ struct ice_pf
struct mutex avail_q_mutex; /* protects access to avail_[rx|tx]qs */
struct mutex sw_mutex; /* lock for protecting VSI alloc flow */
struct mutex tc_mutex; /* lock to protect TC changes */
+ struct mutex adev_mutex; /* lock to protect aux device access */
u32 msg_enable;
struct ice_ptp ptp;
struct tty_driver *ice_gnss_tty_driver;
@@@ -757,21 -758,6 +758,21 @@@ static inline struct ice_vsi *ice_get_c
return pf->vsi[pf->ctrl_vsi_idx];
}
+/**
+ * ice_find_vsi - Find the VSI from VSI ID
+ * @pf: The PF pointer to search in
+ * @vsi_num: The VSI ID to search for
+ */
+static inline struct ice_vsi *ice_find_vsi(struct ice_pf *pf, u16 vsi_num)
+{
+ int i;
+
+ ice_for_each_vsi(pf, i)
+ if (pf->vsi[i] && pf->vsi[i]->vsi_num == vsi_num)
+ return pf->vsi[i];
+ return NULL;
+}
+
/**
* ice_is_switchdev_running - check if switchdev is configured
* @pf: pointer to PF structure
diff --combined drivers/net/ethernet/intel/ice/ice_idc.c
index 7e941264ae21,3e3b2ed4cd5d..895c32bcc8b5
--- a/drivers/net/ethernet/intel/ice/ice_idc.c
+++ b/drivers/net/ethernet/intel/ice/ice_idc.c
@@@ -37,16 -37,34 +37,19 @@@ void ice_send_event_to_aux(struct ice_p
if (WARN_ON_ONCE(!in_task()))
return;
+ mutex_lock(&pf->adev_mutex);
if (!pf->adev)
- return;
+ goto finish;
device_lock(&pf->adev->dev);
iadrv = ice_get_auxiliary_drv(pf);
if (iadrv && iadrv->event_handler)
iadrv->event_handler(pf, event);
device_unlock(&pf->adev->dev);
+ finish:
+ mutex_unlock(&pf->adev_mutex);
}
-/**
- * ice_find_vsi - Find the VSI from VSI ID
- * @pf: The PF pointer to search in
- * @vsi_num: The VSI ID to search for
- */
-static struct ice_vsi *ice_find_vsi(struct ice_pf *pf, u16 vsi_num)
-{
- int i;
-
- ice_for_each_vsi(pf, i)
- if (pf->vsi[i] && pf->vsi[i]->vsi_num == vsi_num)
- return pf->vsi[i];
- return NULL;
-}
-
/**
* ice_add_rdma_qset - Add Leaf Node for RDMA Qset
* @pf: PF struct
@@@ -275,7 -293,6 +278,6 @@@ int ice_plug_aux_dev(struct ice_pf *pf
return -ENOMEM;
adev = &iadev->adev;
- pf->adev = adev;
iadev->pf = pf;
adev->id = pf->aux_idx;
@@@ -285,18 -302,20 +287,20 @@@
ret = auxiliary_device_init(adev);
if (ret) {
- pf->adev = NULL;
kfree(iadev);
return ret;
}
ret = auxiliary_device_add(adev);
if (ret) {
- pf->adev = NULL;
auxiliary_device_uninit(adev);
return ret;
}
+ mutex_lock(&pf->adev_mutex);
+ pf->adev = adev;
+ mutex_unlock(&pf->adev_mutex);
+
return 0;
}
@@@ -305,12 -324,17 +309,17 @@@
*/
void ice_unplug_aux_dev(struct ice_pf *pf)
{
- if (!pf->adev)
- return;
+ struct auxiliary_device *adev;
- auxiliary_device_delete(pf->adev);
- auxiliary_device_uninit(pf->adev);
+ mutex_lock(&pf->adev_mutex);
+ adev = pf->adev;
pf->adev = NULL;
+ mutex_unlock(&pf->adev_mutex);
+
+ if (adev) {
+ auxiliary_device_delete(adev);
+ auxiliary_device_uninit(adev);
+ }
}
/**
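The ice_idc.c hunks above make the pf->adev handoff race-free: the pointer is published under adev_mutex only after registration succeeds, and teardown fetches and clears it under the same lock before destroying the device. A hedged pthreads sketch of that publish/fetch-and-clear pattern (all names illustrative, not the ice API):

#include <pthread.h>
#include <stdlib.h>

struct dev { int id; };

static pthread_mutex_t adev_lock = PTHREAD_MUTEX_INITIALIZER;
static struct dev *adev; /* readers take adev_lock before dereferencing */

static int plug(void)
{
	struct dev *d = calloc(1, sizeof(*d));

	if (!d)
		return -1;
	d->id = 1; /* fully initialize before anyone can see it */

	pthread_mutex_lock(&adev_lock);
	adev = d; /* publish last */
	pthread_mutex_unlock(&adev_lock);
	return 0;
}

static void unplug(void)
{
	struct dev *d;

	pthread_mutex_lock(&adev_lock);
	d = adev;    /* fetch-and-clear so no new user can grab it */
	adev = NULL;
	pthread_mutex_unlock(&adev_lock);

	free(d);     /* destroy outside the lock; free(NULL) is a no-op */
}

int main(void)
{
	if (plug() == 0)
		unplug();
	return 0;
}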
diff --combined drivers/net/ethernet/intel/ice/ice_main.c
index 867908f94661,949669fed7d6..4a5d4d971161
--- a/drivers/net/ethernet/intel/ice/ice_main.c
+++ b/drivers/net/ethernet/intel/ice/ice_main.c
@@@ -296,20 -296,6 +296,20 @@@ static int ice_clear_promisc(struct ice
return status;
}
+/**
+ * ice_get_devlink_port - Get devlink port from netdev
+ * @netdev: the netdevice structure
+ */
+static struct devlink_port *ice_get_devlink_port(struct net_device *netdev)
+{
+ struct ice_pf *pf = ice_netdev_to_pf(netdev);
+
+ if (!ice_is_switchdev_running(pf))
+ return NULL;
+
+ return &pf->devlink_port;
+}
+
/**
* ice_vsi_sync_fltr - Update the VSI filter list to the HW
* @vsi: ptr to the VSI
@@@ -3350,9 -3336,7 +3350,9 @@@ static void ice_set_netdev_features(str
vlano_features | tso_features;
/* add support for HW_CSUM on packets with MPLS header */
- netdev->mpls_features = NETIF_F_HW_CSUM;
+ netdev->mpls_features = NETIF_F_HW_CSUM |
+ NETIF_F_TSO |
+ NETIF_F_TSO6;
/* enable features */
netdev->features |= netdev->hw_features;
@@@ -3785,6 -3769,7 +3785,7 @@@ u16 ice_get_avail_rxq_count(struct ice_
static void ice_deinit_pf(struct ice_pf *pf)
{
ice_service_task_stop(pf);
+ mutex_destroy(&pf->adev_mutex);
mutex_destroy(&pf->sw_mutex);
mutex_destroy(&pf->tc_mutex);
mutex_destroy(&pf->avail_q_mutex);
@@@ -3863,6 -3848,7 +3864,7 @@@ static int ice_init_pf(struct ice_pf *p
mutex_init(&pf->sw_mutex);
mutex_init(&pf->tc_mutex);
+ mutex_init(&pf->adev_mutex);
INIT_HLIST_HEAD(&pf->aq_wait_list);
spin_lock_init(&pf->aq_wait_lock);
@@@ -5688,12 -5674,11 +5690,12 @@@ ice_fdb_add(struct ndmsg *ndm, struct n
* @dev: the net device pointer
* @addr: the MAC address entry being added
* @vid: VLAN ID
+ * @extack: netlink extended ack
*/
static int
ice_fdb_del(struct ndmsg *ndm, __always_unused struct nlattr *tb[],
struct net_device *dev, const unsigned char *addr,
- __always_unused u16 vid)
+ __always_unused u16 vid, struct netlink_ext_ack *extack)
{
int err;
@@@ -8941,5 -8926,4 +8943,5 @@@ static const struct net_device_ops ice_
.ndo_bpf = ice_xdp,
.ndo_xdp_xmit = ice_xdp_xmit,
.ndo_xsk_wakeup = ice_xsk_wakeup,
+ .ndo_get_devlink_port = ice_get_devlink_port,
};
diff --combined drivers/net/ethernet/intel/ice/ice_virtchnl.c
index b47577a2841a,2889e050a4c9..1d9b84c3937a
--- a/drivers/net/ethernet/intel/ice/ice_virtchnl.c
+++ b/drivers/net/ethernet/intel/ice/ice_virtchnl.c
@@@ -514,6 -514,24 +514,6 @@@ static void ice_vc_reset_vf_msg(struct
ice_reset_vf(vf, 0);
}
-/**
- * ice_find_vsi_from_id
- * @pf: the PF structure to search for the VSI
- * @id: ID of the VSI it is searching for
- *
- * searches for the VSI with the given ID
- */
-static struct ice_vsi *ice_find_vsi_from_id(struct ice_pf *pf, u16 id)
-{
- int i;
-
- ice_for_each_vsi(pf, i)
- if (pf->vsi[i] && pf->vsi[i]->vsi_num == id)
- return pf->vsi[i];
-
- return NULL;
-}
-
/**
* ice_vc_isvalid_vsi_id
* @vf: pointer to the VF info
@@@ -526,7 -544,7 +526,7 @@@ bool ice_vc_isvalid_vsi_id(struct ice_v
struct ice_pf *pf = vf->pf;
struct ice_vsi *vsi;
- vsi = ice_find_vsi_from_id(pf, vsi_id);
+ vsi = ice_find_vsi(pf, vsi_id);
return (vsi && (vsi->vf == vf));
}
@@@ -541,7 -559,7 +541,7 @@@
*/
static bool ice_vc_isvalid_q_id(struct ice_vf *vf, u16 vsi_id, u8 qid)
{
- struct ice_vsi *vsi = ice_find_vsi_from_id(vf->pf, vsi_id);
+ struct ice_vsi *vsi = ice_find_vsi(vf->pf, vsi_id);
/* allocated Tx and Rx queues should be always equal for VF VSI */
return (vsi && (qid < vsi->alloc_txq));
}
@@@ -1289,13 -1307,52 +1289,52 @@@ error_param
NULL, 0);
}
+ /**
+ * ice_vf_vsi_dis_single_txq - disable a single Tx queue
+ * @vf: VF to disable queue for
+ * @vsi: VSI for the VF
+ * @q_id: VF relative (0-based) queue ID
+ *
+ * Attempt to disable the Tx queue passed in. If the Tx queue was successfully
+ * disabled then clear q_id bit in the enabled queues bitmap and return
+ * success. Otherwise return error.
+ */
+ static int
+ ice_vf_vsi_dis_single_txq(struct ice_vf *vf, struct ice_vsi *vsi, u16 q_id)
+ {
+ struct ice_txq_meta txq_meta = { 0 };
+ struct ice_tx_ring *ring;
+ int err;
+
+ if (!test_bit(q_id, vf->txq_ena))
+ dev_dbg(ice_pf_to_dev(vsi->back), "Queue %u on VSI %u is not enabled, but stopping it anyway\n",
+ q_id, vsi->vsi_num);
+
+ ring = vsi->tx_rings[q_id];
+ if (!ring)
+ return -EINVAL;
+
+ ice_fill_txq_meta(vsi, ring, &txq_meta);
+
+ err = ice_vsi_stop_tx_ring(vsi, ICE_NO_RESET, vf->vf_id, ring, &txq_meta);
+ if (err) {
+ dev_err(ice_pf_to_dev(vsi->back), "Failed to stop Tx ring %d on VSI %d\n",
+ q_id, vsi->vsi_num);
+ return err;
+ }
+
+ /* Clear enabled queues flag */
+ clear_bit(q_id, vf->txq_ena);
+
+ return 0;
+ }
+
/**
* ice_vc_dis_qs_msg
* @vf: pointer to the VF info
* @msg: pointer to the msg buffer
*
- * called from the VF to disable all or specific
- * queue(s)
+ * called from the VF to disable all or specific queue(s)
*/
static int ice_vc_dis_qs_msg(struct ice_vf *vf, u8 *msg)
{
@@@ -1332,30 -1389,15 +1371,15 @@@
q_map = vqs->tx_queues;
for_each_set_bit(vf_q_id, &q_map, ICE_MAX_RSS_QS_PER_VF) {
- struct ice_tx_ring *ring = vsi->tx_rings[vf_q_id];
- struct ice_txq_meta txq_meta = { 0 };
-
if (!ice_vc_isvalid_q_id(vf, vqs->vsi_id, vf_q_id)) {
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
}
- if (!test_bit(vf_q_id, vf->txq_ena))
- dev_dbg(ice_pf_to_dev(vsi->back), "Queue %u on VSI %u is not enabled, but stopping it anyway\n",
- vf_q_id, vsi->vsi_num);
-
- ice_fill_txq_meta(vsi, ring, &txq_meta);
-
- if (ice_vsi_stop_tx_ring(vsi, ICE_NO_RESET, vf->vf_id,
- ring, &txq_meta)) {
- dev_err(ice_pf_to_dev(vsi->back), "Failed to stop Tx ring %d on VSI %d\n",
- vf_q_id, vsi->vsi_num);
+ if (ice_vf_vsi_dis_single_txq(vf, vsi, vf_q_id)) {
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
}
-
- /* Clear enabled queues flag */
- clear_bit(vf_q_id, vf->txq_ena);
}
}
@@@ -1604,6 -1646,14 +1628,14 @@@ static int ice_vc_cfg_qs_msg(struct ice
if (qpi->txq.ring_len > 0) {
vsi->tx_rings[i]->dma = qpi->txq.dma_ring_addr;
vsi->tx_rings[i]->count = qpi->txq.ring_len;
+
+ /* Disable any existing queue first */
+ if (ice_vf_vsi_dis_single_txq(vf, vsi, q_idx)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ /* Configure a queue with the requested settings */
if (ice_vsi_cfg_single_txq(vsi, vsi->tx_rings, q_idx)) {
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
@@@ -2342,11 -2392,6 +2374,11 @@@ static int ice_vc_ena_vlan_stripping(st
}
vsi = ice_get_vf_vsi(vf);
+ if (!vsi) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
if (vsi->inner_vlan_ops.ena_stripping(vsi, ETH_P_8021Q))
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
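The ice_vc_cfg_qs_msg() hunk above reuses the new ice_vf_vsi_dis_single_txq() helper to stop any already-running Tx queue before applying VF-requested ring settings. A minimal sketch of that stop-before-reprogram rule (illustrative state machine, not the driver's code):

#include <stdbool.h>
#include <stdio.h>

struct queue {
	bool enabled;
	int ring_len;
};

static int queue_disable(struct queue *q)
{
	q->enabled = false; /* safe to call even if already stopped */
	return 0;
}

static int queue_configure(struct queue *q, int ring_len)
{
	if (queue_disable(q)) /* always stop before reprogramming */
		return -1;
	q->ring_len = ring_len;
	q->enabled = true;
	return 0;
}

int main(void)
{
	struct queue q = { .enabled = true, .ring_len = 512 };

	queue_configure(&q, 1024); /* reconfigure an already-enabled queue */
	printf("ring_len=%d enabled=%d\n", q.ring_len, q.enabled);
	return 0;
}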
diff --combined drivers/net/ethernet/mediatek/mtk_ppe.c
index 683f89f8e3b2,66298e2235c9..dab8f3f771f8
--- a/drivers/net/ethernet/mediatek/mtk_ppe.c
+++ b/drivers/net/ethernet/mediatek/mtk_ppe.c
@@@ -6,22 -6,9 +6,22 @@@
#include <linux/iopoll.h>
#include <linux/etherdevice.h>
#include <linux/platform_device.h>
+#include <linux/if_ether.h>
+#include <linux/if_vlan.h>
+#include <net/dsa.h>
+#include "mtk_eth_soc.h"
#include "mtk_ppe.h"
#include "mtk_ppe_regs.h"
+static DEFINE_SPINLOCK(ppe_lock);
+
+static const struct rhashtable_params mtk_flow_l2_ht_params = {
+ .head_offset = offsetof(struct mtk_flow_entry, l2_node),
+ .key_offset = offsetof(struct mtk_flow_entry, data.bridge),
+ .key_len = offsetof(struct mtk_foe_bridge, key_end),
+ .automatic_shrinking = true,
+};
+
static void ppe_w32(struct mtk_ppe *ppe, u32 reg, u32 val)
{
writel(val, ppe->base + reg);
@@@ -54,11 -41,6 +54,11 @@@ static u32 ppe_clear(struct mtk_ppe *pp
return ppe_m32(ppe, reg, val, 0);
}
+static u32 mtk_eth_timestamp(struct mtk_eth *eth)
+{
+ return mtk_r32(eth, 0x0010) & MTK_FOE_IB1_BIND_TIMESTAMP;
+}
+
static int mtk_ppe_wait_busy(struct mtk_ppe *ppe)
{
int ret;
@@@ -94,6 -76,13 +94,6 @@@ static u32 mtk_ppe_hash_entry(struct mt
u32 hash;
switch (FIELD_GET(MTK_FOE_IB1_PACKET_TYPE, e->ib1)) {
- case MTK_PPE_PKT_TYPE_BRIDGE:
- hv1 = e->bridge.src_mac_lo;
- hv1 ^= ((e->bridge.src_mac_hi & 0xffff) << 16);
- hv2 = e->bridge.src_mac_hi >> 16;
- hv2 ^= e->bridge.dest_mac_lo;
- hv3 = e->bridge.dest_mac_hi;
- break;
case MTK_PPE_PKT_TYPE_IPV4_ROUTE:
case MTK_PPE_PKT_TYPE_IPV4_HNAPT:
hv1 = e->ipv4.orig.ports;
@@@ -133,9 -122,6 +133,9 @@@ mtk_foe_entry_l2(struct mtk_foe_entry *
{
int type = FIELD_GET(MTK_FOE_IB1_PACKET_TYPE, entry->ib1);
+ if (type == MTK_PPE_PKT_TYPE_BRIDGE)
+ return &entry->bridge.l2;
+
if (type >= MTK_PPE_PKT_TYPE_IPV4_DSLITE)
return &entry->ipv6.l2;
@@@ -147,9 -133,6 +147,9 @@@ mtk_foe_entry_ib2(struct mtk_foe_entry
{
int type = FIELD_GET(MTK_FOE_IB1_PACKET_TYPE, entry->ib1);
+ if (type == MTK_PPE_PKT_TYPE_BRIDGE)
+ return &entry->bridge.ib2;
+
if (type >= MTK_PPE_PKT_TYPE_IPV4_DSLITE)
return &entry->ipv6.ib2;
@@@ -184,12 -167,7 +184,12 @@@ int mtk_foe_entry_prepare(struct mtk_fo
if (type == MTK_PPE_PKT_TYPE_IPV6_ROUTE_3T)
entry->ipv6.ports = ports_pad;
- if (type >= MTK_PPE_PKT_TYPE_IPV4_DSLITE) {
+ if (type == MTK_PPE_PKT_TYPE_BRIDGE) {
+ ether_addr_copy(entry->bridge.src_mac, src_mac);
+ ether_addr_copy(entry->bridge.dest_mac, dest_mac);
+ entry->bridge.ib2 = val;
+ l2 = &entry->bridge.l2;
+ } else if (type >= MTK_PPE_PKT_TYPE_IPV4_DSLITE) {
entry->ipv6.ib2 = val;
l2 = &entry->ipv6.l2;
} else {
@@@ -351,167 -329,32 +351,167 @@@ int mtk_foe_entry_set_pppoe(struct mtk_
return 0;
}
+int mtk_foe_entry_set_wdma(struct mtk_foe_entry *entry, int wdma_idx, int txq,
+ int bss, int wcid)
+{
+ struct mtk_foe_mac_info *l2 = mtk_foe_entry_l2(entry);
+ u32 *ib2 = mtk_foe_entry_ib2(entry);
+
+ *ib2 &= ~MTK_FOE_IB2_PORT_MG;
+ *ib2 |= MTK_FOE_IB2_WDMA_WINFO;
+ if (wdma_idx)
+ *ib2 |= MTK_FOE_IB2_WDMA_DEVIDX;
+
+ l2->vlan2 = FIELD_PREP(MTK_FOE_VLAN2_WINFO_BSS, bss) |
+ FIELD_PREP(MTK_FOE_VLAN2_WINFO_WCID, wcid) |
+ FIELD_PREP(MTK_FOE_VLAN2_WINFO_RING, txq);
+
+ return 0;
+}
+
static inline bool mtk_foe_entry_usable(struct mtk_foe_entry *entry)
{
return !(entry->ib1 & MTK_FOE_IB1_STATIC) &&
FIELD_GET(MTK_FOE_IB1_STATE, entry->ib1) != MTK_FOE_STATE_BIND;
}
-int mtk_foe_entry_commit(struct mtk_ppe *ppe, struct mtk_foe_entry *entry,
- u16 timestamp)
+static bool
+mtk_flow_entry_match(struct mtk_flow_entry *entry, struct mtk_foe_entry *data)
+{
+ int type, len;
+
+ if ((data->ib1 ^ entry->data.ib1) & MTK_FOE_IB1_UDP)
+ return false;
+
+ type = FIELD_GET(MTK_FOE_IB1_PACKET_TYPE, entry->data.ib1);
+ if (type > MTK_PPE_PKT_TYPE_IPV4_DSLITE)
+ len = offsetof(struct mtk_foe_entry, ipv6._rsv);
+ else
+ len = offsetof(struct mtk_foe_entry, ipv4.ib2);
+
+ return !memcmp(&entry->data.data, &data->data, len - 4);
+}
+
+static void
+__mtk_foe_entry_clear(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
+{
+ struct hlist_head *head;
+ struct hlist_node *tmp;
+
+ if (entry->type == MTK_FLOW_TYPE_L2) {
+ rhashtable_remove_fast(&ppe->l2_flows, &entry->l2_node,
+ mtk_flow_l2_ht_params);
+
+ head = &entry->l2_flows;
+ hlist_for_each_entry_safe(entry, tmp, head, l2_data.list)
+ __mtk_foe_entry_clear(ppe, entry);
+ return;
+ }
+
+ hlist_del_init(&entry->list);
+ if (entry->hash != 0xffff) {
+ ppe->foe_table[entry->hash].ib1 &= ~MTK_FOE_IB1_STATE;
+ ppe->foe_table[entry->hash].ib1 |= FIELD_PREP(MTK_FOE_IB1_STATE,
+ MTK_FOE_STATE_BIND);
+ dma_wmb();
+ }
+ entry->hash = 0xffff;
+
+ if (entry->type != MTK_FLOW_TYPE_L2_SUBFLOW)
+ return;
+
+ hlist_del_init(&entry->l2_data.list);
+ kfree(entry);
+}
+
+static int __mtk_foe_entry_idle_time(struct mtk_ppe *ppe, u32 ib1)
+{
+ u16 timestamp;
+ u16 now;
+
+ now = mtk_eth_timestamp(ppe->eth) & MTK_FOE_IB1_BIND_TIMESTAMP;
+ timestamp = ib1 & MTK_FOE_IB1_BIND_TIMESTAMP;
+
+ if (timestamp > now)
+ return MTK_FOE_IB1_BIND_TIMESTAMP + 1 - timestamp + now;
+ else
+ return now - timestamp;
+}
+
+static void
+mtk_flow_entry_update_l2(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
{
+ struct mtk_flow_entry *cur;
struct mtk_foe_entry *hwe;
- u32 hash;
+ struct hlist_node *tmp;
+ int idle;
+
+ idle = __mtk_foe_entry_idle_time(ppe, entry->data.ib1);
+ hlist_for_each_entry_safe(cur, tmp, &entry->l2_flows, l2_data.list) {
+ int cur_idle;
+ u32 ib1;
+
+ hwe = &ppe->foe_table[cur->hash];
+ ib1 = READ_ONCE(hwe->ib1);
+
+ if (FIELD_GET(MTK_FOE_IB1_STATE, ib1) != MTK_FOE_STATE_BIND) {
+ cur->hash = 0xffff;
+ __mtk_foe_entry_clear(ppe, cur);
+ continue;
+ }
+
+ cur_idle = __mtk_foe_entry_idle_time(ppe, ib1);
+ if (cur_idle >= idle)
+ continue;
+
+ idle = cur_idle;
+ entry->data.ib1 &= ~MTK_FOE_IB1_BIND_TIMESTAMP;
+ entry->data.ib1 |= hwe->ib1 & MTK_FOE_IB1_BIND_TIMESTAMP;
+ }
+}
+
+static void
+mtk_flow_entry_update(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
+{
+ struct mtk_foe_entry *hwe;
+ struct mtk_foe_entry foe;
+
+ spin_lock_bh(&ppe_lock);
+
+ if (entry->type == MTK_FLOW_TYPE_L2) {
+ mtk_flow_entry_update_l2(ppe, entry);
+ goto out;
+ }
+
+ if (entry->hash == 0xffff)
+ goto out;
+
+ hwe = &ppe->foe_table[entry->hash];
+ memcpy(&foe, hwe, sizeof(foe));
+ if (!mtk_flow_entry_match(entry, &foe)) {
+ entry->hash = 0xffff;
+ goto out;
+ }
+
+ entry->data.ib1 = foe.ib1;
+
+out:
+ spin_unlock_bh(&ppe_lock);
+}
+
+static void
+__mtk_foe_entry_commit(struct mtk_ppe *ppe, struct mtk_foe_entry *entry,
+ u16 hash)
+{
+ struct mtk_foe_entry *hwe;
+ u16 timestamp;
+ timestamp = mtk_eth_timestamp(ppe->eth);
timestamp &= MTK_FOE_IB1_BIND_TIMESTAMP;
entry->ib1 &= ~MTK_FOE_IB1_BIND_TIMESTAMP;
entry->ib1 |= FIELD_PREP(MTK_FOE_IB1_BIND_TIMESTAMP, timestamp);
- hash = mtk_ppe_hash_entry(entry);
hwe = &ppe->foe_table[hash];
- if (!mtk_foe_entry_usable(hwe)) {
- hwe++;
- hash++;
-
- if (!mtk_foe_entry_usable(hwe))
- return -ENOSPC;
- }
-
memcpy(&hwe->data, &entry->data, sizeof(hwe->data));
wmb();
hwe->ib1 = entry->ib1;
@@@ -519,195 -362,32 +519,195 @@@
dma_wmb();
mtk_ppe_cache_clear(ppe);
+}
- return hash;
+void mtk_foe_entry_clear(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
+{
+ spin_lock_bh(&ppe_lock);
+ __mtk_foe_entry_clear(ppe, entry);
+ spin_unlock_bh(&ppe_lock);
+}
+
+static int
+mtk_foe_entry_commit_l2(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
+{
+ entry->type = MTK_FLOW_TYPE_L2;
+
+ return rhashtable_insert_fast(&ppe->l2_flows, &entry->l2_node,
+ mtk_flow_l2_ht_params);
+}
+
+int mtk_foe_entry_commit(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
+{
+ int type = FIELD_GET(MTK_FOE_IB1_PACKET_TYPE, entry->data.ib1);
+ u32 hash;
+
+ if (type == MTK_PPE_PKT_TYPE_BRIDGE)
+ return mtk_foe_entry_commit_l2(ppe, entry);
+
+ hash = mtk_ppe_hash_entry(&entry->data);
+ entry->hash = 0xffff;
+ spin_lock_bh(&ppe_lock);
+ hlist_add_head(&entry->list, &ppe->foe_flow[hash / 2]);
+ spin_unlock_bh(&ppe_lock);
+
+ return 0;
+}
+
+static void
+mtk_foe_entry_commit_subflow(struct mtk_ppe *ppe, struct mtk_flow_entry *entry,
+ u16 hash)
+{
+ struct mtk_flow_entry *flow_info;
+ struct mtk_foe_entry foe, *hwe;
+ struct mtk_foe_mac_info *l2;
+ u32 ib1_mask = MTK_FOE_IB1_PACKET_TYPE | MTK_FOE_IB1_UDP;
+ int type;
+
+ flow_info = kzalloc(offsetof(struct mtk_flow_entry, l2_data.end),
+ GFP_ATOMIC);
+ if (!flow_info)
+ return;
+
+ flow_info->l2_data.base_flow = entry;
+ flow_info->type = MTK_FLOW_TYPE_L2_SUBFLOW;
+ flow_info->hash = hash;
+ hlist_add_head(&flow_info->list, &ppe->foe_flow[hash / 2]);
+ hlist_add_head(&flow_info->l2_data.list, &entry->l2_flows);
+
+ hwe = &ppe->foe_table[hash];
+ memcpy(&foe, hwe, sizeof(foe));
+ foe.ib1 &= ib1_mask;
+ foe.ib1 |= entry->data.ib1 & ~ib1_mask;
+
+ l2 = mtk_foe_entry_l2(&foe);
+ memcpy(l2, &entry->data.bridge.l2, sizeof(*l2));
+
+ type = FIELD_GET(MTK_FOE_IB1_PACKET_TYPE, foe.ib1);
+ if (type == MTK_PPE_PKT_TYPE_IPV4_HNAPT)
+ memcpy(&foe.ipv4.new, &foe.ipv4.orig, sizeof(foe.ipv4.new));
+	else if (type >= MTK_PPE_PKT_TYPE_IPV6_ROUTE_3T && l2->etype == ETH_P_IP)
+ l2->etype = ETH_P_IPV6;
+
+ *mtk_foe_entry_ib2(&foe) = entry->data.bridge.ib2;
+
+ __mtk_foe_entry_commit(ppe, &foe, hash);
}
-int mtk_ppe_init(struct mtk_ppe *ppe, struct device *dev, void __iomem *base,
+void __mtk_ppe_check_skb(struct mtk_ppe *ppe, struct sk_buff *skb, u16 hash)
+{
+ struct hlist_head *head = &ppe->foe_flow[hash / 2];
+ struct mtk_foe_entry *hwe = &ppe->foe_table[hash];
+ struct mtk_flow_entry *entry;
+ struct mtk_foe_bridge key = {};
+ struct hlist_node *n;
+ struct ethhdr *eh;
+ bool found = false;
+ u8 *tag;
+
+ spin_lock_bh(&ppe_lock);
+
+ if (FIELD_GET(MTK_FOE_IB1_STATE, hwe->ib1) == MTK_FOE_STATE_BIND)
+ goto out;
+
+ hlist_for_each_entry_safe(entry, n, head, list) {
+ if (entry->type == MTK_FLOW_TYPE_L2_SUBFLOW) {
+ if (unlikely(FIELD_GET(MTK_FOE_IB1_STATE, hwe->ib1) ==
+ MTK_FOE_STATE_BIND))
+ continue;
+
+ entry->hash = 0xffff;
+ __mtk_foe_entry_clear(ppe, entry);
+ continue;
+ }
+
+ if (found || !mtk_flow_entry_match(entry, hwe)) {
+ if (entry->hash != 0xffff)
+ entry->hash = 0xffff;
+ continue;
+ }
+
+ entry->hash = hash;
+ __mtk_foe_entry_commit(ppe, &entry->data, hash);
+ found = true;
+ }
+
+ if (found)
+ goto out;
+
+ eh = eth_hdr(skb);
+ ether_addr_copy(key.dest_mac, eh->h_dest);
+ ether_addr_copy(key.src_mac, eh->h_source);
+ tag = skb->data - 2;
+ key.vlan = 0;
+ switch (skb->protocol) {
+#if IS_ENABLED(CONFIG_NET_DSA)
+ case htons(ETH_P_XDSA):
+ if (!netdev_uses_dsa(skb->dev) ||
+ skb->dev->dsa_ptr->tag_ops->proto != DSA_TAG_PROTO_MTK)
+ goto out;
+
+ tag += 4;
+ if (get_unaligned_be16(tag) != ETH_P_8021Q)
+ break;
+
+ fallthrough;
+#endif
+ case htons(ETH_P_8021Q):
+ key.vlan = get_unaligned_be16(tag + 2) & VLAN_VID_MASK;
+ break;
+ default:
+ break;
+ }
+
+	entry = rhashtable_lookup_fast(&ppe->l2_flows, &key,
+				       mtk_flow_l2_ht_params);
+ if (!entry)
+ goto out;
+
+ mtk_foe_entry_commit_subflow(ppe, entry, hash);
+
+out:
+ spin_unlock_bh(&ppe_lock);
+}
+
+int mtk_foe_entry_idle_time(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
+{
+ mtk_flow_entry_update(ppe, entry);
+
+ return __mtk_foe_entry_idle_time(ppe, entry->data.ib1);
+}
+
+struct mtk_ppe *mtk_ppe_init(struct mtk_eth *eth, void __iomem *base,
int version)
{
+ struct device *dev = eth->dev;
struct mtk_foe_entry *foe;
+ struct mtk_ppe *ppe;
+
+ ppe = devm_kzalloc(dev, sizeof(*ppe), GFP_KERNEL);
+ if (!ppe)
+ return NULL;
+
+ rhashtable_init(&ppe->l2_flows, &mtk_flow_l2_ht_params);
	/* need to allocate a separate device, since the PPE DMA access is
	 * not coherent.
*/
ppe->base = base;
+ ppe->eth = eth;
ppe->dev = dev;
ppe->version = version;
foe = dmam_alloc_coherent(ppe->dev, MTK_PPE_ENTRIES * sizeof(*foe),
&ppe->foe_phys, GFP_KERNEL);
if (!foe)
- return -ENOMEM;
+ return NULL;
ppe->foe_table = foe;
mtk_ppe_debugfs_init(ppe);
- return 0;
+ return ppe;
}
static void mtk_ppe_init_foe_table(struct mtk_ppe *ppe)
@@@ -715,7 -395,7 +715,7 @@@
static const u8 skip[] = { 12, 25, 38, 51, 76, 89, 102 };
int i, k;
- memset(ppe->foe_table, 0, MTK_PPE_ENTRIES * sizeof(ppe->foe_table));
+ memset(ppe->foe_table, 0, MTK_PPE_ENTRIES * sizeof(*ppe->foe_table));
if (!IS_ENABLED(CONFIG_SOC_MT7621))
return;
@@@ -763,6 -443,7 +763,6 @@@ int mtk_ppe_start(struct mtk_ppe *ppe
MTK_PPE_FLOW_CFG_IP4_NAT |
MTK_PPE_FLOW_CFG_IP4_NAPT |
MTK_PPE_FLOW_CFG_IP4_DSLITE |
- MTK_PPE_FLOW_CFG_L2_BRIDGE |
MTK_PPE_FLOW_CFG_IP4_NAT_FRAG;
ppe_w32(ppe, MTK_PPE_FLOW_CFG, val);
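
Editor's note: the L2 flow handling above keeps one software entry per bridged flow and folds the freshest hardware state of its subflows back into entry->data.ib1. The underlying idle-time arithmetic is a wrapped difference between the PPE's current timestamp and the truncated bind timestamp stored in ib1. A minimal sketch of that idea, assuming the timestamp field is a contiguous low-bit mask as in the diff (illustrative helper, not the driver's exact __mtk_foe_entry_idle_time()):

	/* Illustrative only: idle time from ib1, with counter wrap-around
	 * handled by masking the unsigned difference.
	 * @now: current PPE timestamp, already masked to the field width.
	 */
	static int example_foe_idle_time(u16 now, u32 ib1)
	{
		u16 ts = ib1 & MTK_FOE_IB1_BIND_TIMESTAMP;

		return (now - ts) & MTK_FOE_IB1_BIND_TIMESTAMP;
	}
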
diff --combined drivers/net/ethernet/mscc/ocelot.c
index 9336f3b00c6e,20ceac81a2c2..5f81938c58a9
--- a/drivers/net/ethernet/mscc/ocelot.c
+++ b/drivers/net/ethernet/mscc/ocelot.c
@@@ -1622,7 -1622,7 +1622,7 @@@ int ocelot_trap_add(struct ocelot *ocel
trap->action.mask_mode = OCELOT_MASK_MODE_PERMIT_DENY;
trap->action.port_mask = 0;
trap->take_ts = take_ts;
- list_add_tail(&trap->trap_list, &ocelot->traps);
+ trap->is_trap = true;
new = true;
}
@@@ -1634,10 -1634,8 +1634,8 @@@
err = ocelot_vcap_filter_replace(ocelot, trap);
if (err) {
trap->ingress_port_mask &= ~BIT(port);
- if (!trap->ingress_port_mask) {
- list_del(&trap->trap_list);
+ if (!trap->ingress_port_mask)
kfree(trap);
- }
return err;
}
@@@ -1657,11 -1655,8 +1655,8 @@@ int ocelot_trap_del(struct ocelot *ocel
return 0;
trap->ingress_port_mask &= ~BIT(port);
- if (!trap->ingress_port_mask) {
- list_del(&trap->trap_list);
-
+ if (!trap->ingress_port_mask)
return ocelot_vcap_filter_del(ocelot, trap);
- }
return ocelot_vcap_filter_replace(ocelot, trap);
}
@@@ -2610,67 -2605,6 +2605,67 @@@ static void ocelot_setup_logical_port_i
}
}
+static int ocelot_migrate_mc(struct ocelot *ocelot, struct ocelot_multicast *mc,
+ unsigned long from_mask, unsigned long to_mask)
+{
+ unsigned char addr[ETH_ALEN];
+ struct ocelot_pgid *pgid;
+ u16 vid = mc->vid;
+
+ dev_dbg(ocelot->dev,
+ "Migrating multicast %pM vid %d from port mask 0x%lx to 0x%lx\n",
+ mc->addr, mc->vid, from_mask, to_mask);
+
+ /* First clean up the current port mask from hardware, because
+ * we'll be modifying it.
+ */
+ ocelot_pgid_free(ocelot, mc->pgid);
+ ocelot_encode_ports_to_mdb(addr, mc);
+ ocelot_mact_forget(ocelot, addr, vid);
+
+ mc->ports &= ~from_mask;
+ mc->ports |= to_mask;
+
+ pgid = ocelot_mdb_get_pgid(ocelot, mc);
+ if (IS_ERR(pgid)) {
+ dev_err(ocelot->dev,
+ "Cannot allocate PGID for mdb %pM vid %d\n",
+ mc->addr, mc->vid);
+ devm_kfree(ocelot->dev, mc);
+ return PTR_ERR(pgid);
+ }
+ mc->pgid = pgid;
+
+ ocelot_encode_ports_to_mdb(addr, mc);
+
+ if (mc->entry_type != ENTRYTYPE_MACv4 &&
+ mc->entry_type != ENTRYTYPE_MACv6)
+ ocelot_write_rix(ocelot, pgid->ports, ANA_PGID_PGID,
+ pgid->index);
+
+ return ocelot_mact_learn(ocelot, pgid->index, addr, vid,
+ mc->entry_type);
+}
+
+int ocelot_migrate_mdbs(struct ocelot *ocelot, unsigned long from_mask,
+ unsigned long to_mask)
+{
+ struct ocelot_multicast *mc;
+ int err;
+
+ list_for_each_entry(mc, &ocelot->multicast, list) {
+ if (!(mc->ports & from_mask))
+ continue;
+
+ err = ocelot_migrate_mc(ocelot, mc, from_mask, to_mask);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ocelot_migrate_mdbs);
+
/* Documentation for PORTID_VAL says:
* Logical port number for front port. If port is not a member of a LLAG,
* then PORTID must be set to the physical port number.
@@@ -3289,7 -3223,6 +3284,7 @@@ static void ocelot_detect_features(stru
int ocelot_init(struct ocelot *ocelot)
{
+ const struct ocelot_stat_layout *stat;
char queue_name[32];
int i, ret;
u32 port;
@@@ -3302,10 -3235,6 +3297,10 @@@
}
}
+ ocelot->num_stats = 0;
+ for_each_stat(ocelot, stat)
+ ocelot->num_stats++;
+
ocelot->stats = devm_kcalloc(ocelot->dev,
ocelot->num_phys_ports * ocelot->num_stats,
sizeof(u64), GFP_KERNEL);
diff --combined drivers/net/ethernet/mscc/ocelot_vcap.c
index cdbe29f2ddc7,eeb4cc07dd16..73cdec5ca6a3
--- a/drivers/net/ethernet/mscc/ocelot_vcap.c
+++ b/drivers/net/ethernet/mscc/ocelot_vcap.c
@@@ -374,7 -374,6 +374,6 @@@ static void is2_entry_set(struct ocelo
OCELOT_VCAP_BIT_0);
vcap_key_set(vcap, &data, VCAP_IS2_HK_IGR_PORT_MASK, 0,
~filter->ingress_port_mask);
- vcap_key_bit_set(vcap, &data, VCAP_IS2_HK_FIRST, OCELOT_VCAP_BIT_ANY);
vcap_key_bit_set(vcap, &data, VCAP_IS2_HK_HOST_MATCH,
OCELOT_VCAP_BIT_ANY);
vcap_key_bit_set(vcap, &data, VCAP_IS2_HK_L2_MC, filter->dmac_mc);
@@@ -672,10 -671,12 +671,10 @@@ static void is1_entry_set(struct ocelo
{
const struct vcap_props *vcap = &ocelot->vcap[VCAP_IS1];
struct ocelot_vcap_key_vlan *tag = &filter->vlan;
- struct ocelot_vcap_u64 payload;
struct vcap_data data;
int row = ix / 2;
u32 type;
- memset(&payload, 0, sizeof(payload));
memset(&data, 0, sizeof(data));
/* Read row */
@@@ -811,9 -812,11 +810,9 @@@ static void es0_entry_set(struct ocelo
{
const struct vcap_props *vcap = &ocelot->vcap[VCAP_ES0];
struct ocelot_vcap_key_vlan *tag = &filter->vlan;
- struct ocelot_vcap_u64 payload;
struct vcap_data data;
int row = ix;
- memset(&payload, 0, sizeof(payload));
memset(&data, 0, sizeof(data));
/* Read row */
@@@ -914,7 -917,7 +913,7 @@@ int ocelot_vcap_policer_add(struct ocel
if (!tmp)
return -ENOMEM;
- ret = qos_policer_conf_set(ocelot, 0, pol_ix, &pp);
+ ret = qos_policer_conf_set(ocelot, pol_ix, &pp);
if (ret) {
kfree(tmp);
return ret;
@@@ -945,7 -948,7 +944,7 @@@ int ocelot_vcap_policer_del(struct ocel
if (z) {
pp.mode = MSCC_QOS_RATE_MODE_DISABLED;
- return qos_policer_conf_set(ocelot, 0, pol_ix, &pp);
+ return qos_policer_conf_set(ocelot, pol_ix, &pp);
}
return 0;
@@@ -993,8 -996,8 +992,8 @@@ static int ocelot_vcap_filter_add_to_bl
struct ocelot_vcap_filter *filter,
struct netlink_ext_ack *extack)
{
+ struct list_head *pos = &block->rules;
struct ocelot_vcap_filter *tmp;
- struct list_head *pos, *n;
int ret;
ret = ocelot_vcap_filter_add_aux_resources(ocelot, filter, extack);
@@@ -1003,13 -1006,17 +1002,13 @@@
block->count++;
- if (list_empty(&block->rules)) {
- list_add(&filter->list, &block->rules);
- return 0;
- }
-
- list_for_each_safe(pos, n, &block->rules) {
- tmp = list_entry(pos, struct ocelot_vcap_filter, list);
- if (filter->prio < tmp->prio)
+ list_for_each_entry(tmp, &block->rules, list) {
+ if (filter->prio < tmp->prio) {
+ pos = &tmp->list;
break;
+ }
}
- list_add(&filter->list, pos->prev);
+ list_add_tail(&filter->list, pos);
return 0;
}
@@@ -1209,6 -1216,8 +1208,8 @@@ int ocelot_vcap_filter_add(struct ocelo
struct ocelot_vcap_filter *tmp;
tmp = ocelot_vcap_block_find_filter_by_index(block, i);
+ /* Read back the filter's counters before moving it */
+ vcap_entry_get(ocelot, i - 1, tmp);
vcap_entry_set(ocelot, i, tmp);
}
@@@ -1242,7 -1251,11 +1243,11 @@@ int ocelot_vcap_filter_del(struct ocelo
struct ocelot_vcap_filter del_filter;
int i, index;
+ /* Need to inherit the block_id so that vcap_entry_set()
+ * does not get confused and knows where to install it.
+ */
memset(&del_filter, 0, sizeof(del_filter));
+ del_filter.block_id = filter->block_id;
/* Gets index of the filter */
index = ocelot_vcap_block_get_filter_index(block, filter);
@@@ -1257,6 -1270,8 +1262,8 @@@
struct ocelot_vcap_filter *tmp;
tmp = ocelot_vcap_block_find_filter_by_index(block, i);
+ /* Read back the filter's counters before moving it */
+ vcap_entry_get(ocelot, i + 1, tmp);
vcap_entry_set(ocelot, i, tmp);
}
@@@ -1394,18 -1409,22 +1401,18 @@@ static void ocelot_vcap_detect_constant
int ocelot_vcap_init(struct ocelot *ocelot)
{
- int i;
+ struct qos_policer_conf cpu_drop = {
+ .mode = MSCC_QOS_RATE_MODE_DATA,
+ };
+ int ret, i;
/* Create a policer that will drop the frames for the cpu.
* This policer will be used as action in the acl rules to drop
* frames.
*/
- ocelot_write_gix(ocelot, 0x299, ANA_POL_MODE_CFG,
- OCELOT_POLICER_DISCARD);
- ocelot_write_gix(ocelot, 0x1, ANA_POL_PIR_CFG,
- OCELOT_POLICER_DISCARD);
- ocelot_write_gix(ocelot, 0x3fffff, ANA_POL_PIR_STATE,
- OCELOT_POLICER_DISCARD);
- ocelot_write_gix(ocelot, 0x0, ANA_POL_CIR_CFG,
- OCELOT_POLICER_DISCARD);
- ocelot_write_gix(ocelot, 0x3fffff, ANA_POL_CIR_STATE,
- OCELOT_POLICER_DISCARD);
+ ret = qos_policer_conf_set(ocelot, OCELOT_POLICER_DISCARD, &cpu_drop);
+ if (ret)
+ return ret;
for (i = 0; i < OCELOT_NUM_VCAP_BLOCKS; i++) {
struct ocelot_vcap_block *block = &ocelot->block[i];
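
Editor's note: the rewritten insertion in ocelot_vcap_filter_add_to_block() above is a standard sorted insert: default the insertion point to the list head (which appends at the tail) and move it to the first rule with a higher prio. Because list_add_tail(e, pos) links e immediately before pos, the old empty-list special case disappears. A generic sketch of the same pattern (types are illustrative, not the driver's):

	struct rule {
		struct list_head list;
		int prio;
	};

	/* Keep @rules sorted ascending by prio: insert before the first
	 * element that compares greater, else append at the tail.
	 */
	static void rule_insert_sorted(struct list_head *rules, struct rule *new)
	{
		struct list_head *pos = rules;	/* default: append at tail */
		struct rule *tmp;

		list_for_each_entry(tmp, rules, list) {
			if (new->prio < tmp->prio) {
				pos = &tmp->list;
				break;
			}
		}
		/* list_add_tail(e, pos) links e immediately before pos */
		list_add_tail(&new->list, pos);
	}
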
diff --combined drivers/net/ethernet/sfc/ef10.c
index c9ee5011803f,f8edb3f1b73a..186cb28c03bd
--- a/drivers/net/ethernet/sfc/ef10.c
+++ b/drivers/net/ethernet/sfc/ef10.c
@@@ -2256,7 -2256,7 +2256,7 @@@ int efx_ef10_tx_tso_desc(struct efx_tx_
* guaranteed to satisfy the second as we only attempt TSO if
* inner_network_header <= 208.
*/
- ip_tot_len = -EFX_TSO2_MAX_HDRLEN;
+ ip_tot_len = 0x10000 - EFX_TSO2_MAX_HDRLEN;
EFX_WARN_ON_ONCE_PARANOID(mss + EFX_TSO2_MAX_HDRLEN +
(tcp->doff << 2u) > ip_tot_len);
@@@ -3579,6 -3579,11 +3579,11 @@@ static int efx_ef10_mtd_probe(struct ef
n_parts++;
}
+ if (!n_parts) {
+ kfree(parts);
+ return 0;
+ }
+
rc = efx_mtd_add(efx, &parts[0].common, n_parts, sizeof(*parts));
fail:
if (rc)
diff --combined drivers/net/ethernet/sfc/efx_channels.c
index 3f28f9861dfa,40df910aa140..79df636d6df8
--- a/drivers/net/ethernet/sfc/efx_channels.c
+++ b/drivers/net/ethernet/sfc/efx_channels.c
@@@ -51,7 -51,28 +51,7 @@@ MODULE_PARM_DESC(irq_adapt_high_thresh
*/
static int napi_weight = 64;
-/***************
- * Housekeeping
- ***************/
-
-int efx_channel_dummy_op_int(struct efx_channel *channel)
-{
- return 0;
-}
-
-void efx_channel_dummy_op_void(struct efx_channel *channel)
-{
-}
-
-static const struct efx_channel_type efx_default_channel_type = {
- .pre_probe = efx_channel_dummy_op_int,
- .post_remove = efx_channel_dummy_op_void,
- .get_name = efx_get_channel_name,
- .copy = efx_copy_channel,
- .want_txqs = efx_default_channel_want_txqs,
- .keep_eventq = false,
- .want_pio = true,
-};
+static const struct efx_channel_type efx_default_channel_type;
/*************
* INTERRUPTS
@@@ -598,7 -619,6 +598,6 @@@ void efx_fini_channels(struct efx_nic *
/* Allocate and initialise a channel structure, copying parameters
* (but not resources) from an old channel structure.
*/
- static
struct efx_channel *efx_copy_channel(const struct efx_channel *old_channel)
{
struct efx_rx_queue *rx_queue;
@@@ -676,8 -696,7 +675,8 @@@ fail
return rc;
}
-void efx_get_channel_name(struct efx_channel *channel, char *buf, size_t len)
+static void efx_get_channel_name(struct efx_channel *channel, char *buf,
+ size_t len)
{
struct efx_nic *efx = channel->efx;
const char *type;
@@@ -848,7 -867,9 +847,9 @@@ static void efx_set_xdp_channels(struc
int efx_realloc_channels(struct efx_nic *efx, u32 rxq_entries, u32 txq_entries)
{
- struct efx_channel *other_channel[EFX_MAX_CHANNELS], *channel;
+ struct efx_channel *other_channel[EFX_MAX_CHANNELS], *channel,
+ *ptp_channel = efx_ptp_channel(efx);
+ struct efx_ptp_data *ptp_data = efx->ptp_data;
unsigned int i, next_buffer_table = 0;
u32 old_rxq_entries, old_txq_entries;
int rc, rc2;
@@@ -919,6 -940,7 +920,7 @@@
efx_set_xdp_channels(efx);
out:
+ efx->ptp_data = NULL;
/* Destroy unused channel structures */
for (i = 0; i < efx->n_channels; i++) {
channel = other_channel[i];
@@@ -929,6 -951,7 +931,7 @@@
}
}
+ efx->ptp_data = ptp_data;
rc2 = efx_soft_enable_interrupts(efx);
if (rc2) {
rc = rc ? rc : rc2;
@@@ -947,6 -970,7 +950,7 @@@ rollback
efx->txq_entries = old_txq_entries;
for (i = 0; i < efx->n_channels; i++)
swap(efx->channel[i], other_channel[i]);
+ efx_ptp_update_channel(efx, ptp_channel);
goto out;
}
@@@ -985,7 -1009,7 +989,7 @@@ int efx_set_channels(struct efx_nic *ef
return netif_set_real_num_rx_queues(efx->net_dev, efx->n_rx_channels);
}
-bool efx_default_channel_want_txqs(struct efx_channel *channel)
+static bool efx_default_channel_want_txqs(struct efx_channel *channel)
{
return channel->channel - channel->efx->tx_channel_offset <
channel->efx->n_tx_channels;
@@@ -1316,8 -1340,8 +1320,8 @@@ void efx_init_napi_channel(struct efx_c
struct efx_nic *efx = channel->efx;
channel->napi_dev = efx->net_dev;
- netif_napi_add(channel->napi_dev, &channel->napi_str,
- efx_poll, napi_weight);
+ netif_napi_add_weight(channel->napi_dev, &channel->napi_str, efx_poll,
+ napi_weight);
}
void efx_init_napi(struct efx_nic *efx)
@@@ -1343,26 -1367,3 +1347,26 @@@ void efx_fini_napi(struct efx_nic *efx
efx_for_each_channel(channel, efx)
efx_fini_napi_channel(channel);
}
+
+/***************
+ * Housekeeping
+ ***************/
+
+static int efx_channel_dummy_op_int(struct efx_channel *channel)
+{
+ return 0;
+}
+
+void efx_channel_dummy_op_void(struct efx_channel *channel)
+{
+}
+
+static const struct efx_channel_type efx_default_channel_type = {
+ .pre_probe = efx_channel_dummy_op_int,
+ .post_remove = efx_channel_dummy_op_void,
+ .get_name = efx_get_channel_name,
+ .copy = efx_copy_channel,
+ .want_txqs = efx_default_channel_want_txqs,
+ .keep_eventq = false,
+ .want_pio = true,
+};
diff --combined drivers/net/ethernet/sfc/efx_channels.h
index 64abb99a56b8,d77ec1f77fb1..46b702648721
--- a/drivers/net/ethernet/sfc/efx_channels.h
+++ b/drivers/net/ethernet/sfc/efx_channels.h
@@@ -32,13 -32,16 +32,14 @@@ void efx_fini_eventq(struct efx_channe
void efx_remove_eventq(struct efx_channel *channel);
int efx_realloc_channels(struct efx_nic *efx, u32 rxq_entries, u32 txq_entries);
-void efx_get_channel_name(struct efx_channel *channel, char *buf, size_t len);
void efx_set_channel_names(struct efx_nic *efx);
int efx_init_channels(struct efx_nic *efx);
int efx_probe_channels(struct efx_nic *efx);
int efx_set_channels(struct efx_nic *efx);
-bool efx_default_channel_want_txqs(struct efx_channel *channel);
void efx_remove_channel(struct efx_channel *channel);
void efx_remove_channels(struct efx_nic *efx);
void efx_fini_channels(struct efx_nic *efx);
+ struct efx_channel *efx_copy_channel(const struct efx_channel *old_channel);
void efx_start_channels(struct efx_nic *efx);
void efx_stop_channels(struct efx_nic *efx);
@@@ -47,6 -50,7 +48,6 @@@ void efx_init_napi(struct efx_nic *efx)
void efx_fini_napi_channel(struct efx_channel *channel);
void efx_fini_napi(struct efx_nic *efx);
-int efx_channel_dummy_op_int(struct efx_channel *channel);
void efx_channel_dummy_op_void(struct efx_channel *channel);
#endif
diff --combined drivers/net/phy/micrel.c
index 685a0ab5453c,cd9aa353b653..c34a93403d1e
--- a/drivers/net/phy/micrel.c
+++ b/drivers/net/phy/micrel.c
@@@ -32,7 -32,6 +32,7 @@@
#include <linux/ptp_clock.h>
#include <linux/ptp_classify.h>
#include <linux/net_tstamp.h>
+#include <linux/gpio/consumer.h>
/* Operation Mode Strap Override */
#define MII_KSZPHY_OMSO 0x16
@@@ -71,27 -70,6 +71,27 @@@
#define KSZ8081_LMD_SHORT_INDICATOR BIT(12)
#define KSZ8081_LMD_DELTA_TIME_MASK GENMASK(8, 0)
+#define KSZ9x31_LMD 0x12
+#define KSZ9x31_LMD_VCT_EN BIT(15)
+#define KSZ9x31_LMD_VCT_DIS_TX BIT(14)
+#define KSZ9x31_LMD_VCT_PAIR(n) (((n) & 0x3) << 12)
+#define KSZ9x31_LMD_VCT_SEL_RESULT 0
+#define KSZ9x31_LMD_VCT_SEL_THRES_HI BIT(10)
+#define KSZ9x31_LMD_VCT_SEL_THRES_LO BIT(11)
+#define KSZ9x31_LMD_VCT_SEL_MASK GENMASK(11, 10)
+#define KSZ9x31_LMD_VCT_ST_NORMAL 0
+#define KSZ9x31_LMD_VCT_ST_OPEN 1
+#define KSZ9x31_LMD_VCT_ST_SHORT 2
+#define KSZ9x31_LMD_VCT_ST_FAIL 3
+#define KSZ9x31_LMD_VCT_ST_MASK GENMASK(9, 8)
+#define KSZ9x31_LMD_VCT_DATA_REFLECTED_INVALID BIT(7)
+#define KSZ9x31_LMD_VCT_DATA_SIG_WAIT_TOO_LONG BIT(6)
+#define KSZ9x31_LMD_VCT_DATA_MASK100 BIT(5)
+#define KSZ9x31_LMD_VCT_DATA_NLP_FLP BIT(4)
+#define KSZ9x31_LMD_VCT_DATA_LO_PULSE_MASK GENMASK(3, 2)
+#define KSZ9x31_LMD_VCT_DATA_HI_PULSE_MASK GENMASK(1, 0)
+#define KSZ9x31_LMD_VCT_DATA_MASK GENMASK(7, 0)
+
/* Lan8814 general Interrupt control/status reg in GPHY specific block. */
#define LAN8814_INTC 0x18
#define LAN8814_INTS 0x1B
@@@ -302,7 -280,6 +302,7 @@@ struct kszphy_priv
struct kszphy_ptp_priv ptp_priv;
const struct kszphy_type *type;
int led_mode;
+ u16 vct_ctrl1000;
bool rmii_ref_clk_sel;
bool rmii_ref_clk_sel_val;
u64 stats[ARRAY_SIZE(kszphy_hw_stats)];
@@@ -1349,199 -1326,6 +1349,199 @@@ static int ksz9031_read_status(struct p
return 0;
}
+static int ksz9x31_cable_test_start(struct phy_device *phydev)
+{
+ struct kszphy_priv *priv = phydev->priv;
+ int ret;
+
+ /* KSZ9131RNX, DS00002841B-page 38, 4.14 LinkMD (R) Cable Diagnostic
+ * Prior to running the cable diagnostics, Auto-negotiation should
+ * be disabled, full duplex set and the link speed set to 1000Mbps
+ * via the Basic Control Register.
+ */
+ ret = phy_modify(phydev, MII_BMCR,
+ BMCR_SPEED1000 | BMCR_FULLDPLX |
+ BMCR_ANENABLE | BMCR_SPEED100,
+ BMCR_SPEED1000 | BMCR_FULLDPLX);
+ if (ret)
+ return ret;
+
+ /* KSZ9131RNX, DS00002841B-page 38, 4.14 LinkMD (R) Cable Diagnostic
+ * The Master-Slave configuration should be set to Slave by writing
+ * a value of 0x1000 to the Auto-Negotiation Master Slave Control
+ * Register.
+ */
+ ret = phy_read(phydev, MII_CTRL1000);
+ if (ret < 0)
+ return ret;
+
+ /* Cache these bits, they need to be restored once LinkMD finishes. */
+ priv->vct_ctrl1000 = ret & (CTL1000_ENABLE_MASTER | CTL1000_AS_MASTER);
+ ret &= ~(CTL1000_ENABLE_MASTER | CTL1000_AS_MASTER);
+ ret |= CTL1000_ENABLE_MASTER;
+
+ return phy_write(phydev, MII_CTRL1000, ret);
+}
+
+static int ksz9x31_cable_test_result_trans(u16 status)
+{
+ switch (FIELD_GET(KSZ9x31_LMD_VCT_ST_MASK, status)) {
+ case KSZ9x31_LMD_VCT_ST_NORMAL:
+ return ETHTOOL_A_CABLE_RESULT_CODE_OK;
+ case KSZ9x31_LMD_VCT_ST_OPEN:
+ return ETHTOOL_A_CABLE_RESULT_CODE_OPEN;
+ case KSZ9x31_LMD_VCT_ST_SHORT:
+ return ETHTOOL_A_CABLE_RESULT_CODE_SAME_SHORT;
+ case KSZ9x31_LMD_VCT_ST_FAIL:
+ fallthrough;
+ default:
+ return ETHTOOL_A_CABLE_RESULT_CODE_UNSPEC;
+ }
+}
+
+static bool ksz9x31_cable_test_failed(u16 status)
+{
+ int stat = FIELD_GET(KSZ9x31_LMD_VCT_ST_MASK, status);
+
+ return stat == KSZ9x31_LMD_VCT_ST_FAIL;
+}
+
+static bool ksz9x31_cable_test_fault_length_valid(u16 status)
+{
+ switch (FIELD_GET(KSZ9x31_LMD_VCT_ST_MASK, status)) {
+ case KSZ9x31_LMD_VCT_ST_OPEN:
+ fallthrough;
+ case KSZ9x31_LMD_VCT_ST_SHORT:
+ return true;
+ }
+ return false;
+}
+
+static int ksz9x31_cable_test_fault_length(struct phy_device *phydev, u16 stat)
+{
+ int dt = FIELD_GET(KSZ9x31_LMD_VCT_DATA_MASK, stat);
+
+ /* KSZ9131RNX, DS00002841B-page 38, 4.14 LinkMD (R) Cable Diagnostic
+ *
+ * distance to fault = (VCT_DATA - 22) * 4 / cable propagation velocity
+ */
+ if ((phydev->phy_id & MICREL_PHY_ID_MASK) == PHY_ID_KSZ9131)
+ dt = clamp(dt - 22, 0, 255);
+
+ return (dt * 400) / 10;
+}
+
+static int ksz9x31_cable_test_wait_for_completion(struct phy_device *phydev)
+{
+ int val, ret;
+
+ ret = phy_read_poll_timeout(phydev, KSZ9x31_LMD, val,
+ !(val & KSZ9x31_LMD_VCT_EN),
+ 30000, 100000, true);
+
+ return ret < 0 ? ret : 0;
+}
+
+static int ksz9x31_cable_test_get_pair(int pair)
+{
+ static const int ethtool_pair[] = {
+ ETHTOOL_A_CABLE_PAIR_A,
+ ETHTOOL_A_CABLE_PAIR_B,
+ ETHTOOL_A_CABLE_PAIR_C,
+ ETHTOOL_A_CABLE_PAIR_D,
+ };
+
+ return ethtool_pair[pair];
+}
+
+static int ksz9x31_cable_test_one_pair(struct phy_device *phydev, int pair)
+{
+ int ret, val;
+
+ /* KSZ9131RNX, DS00002841B-page 38, 4.14 LinkMD (R) Cable Diagnostic
+ * To test each individual cable pair, set the cable pair in the Cable
+ * Diagnostics Test Pair (VCT_PAIR[1:0]) field of the LinkMD Cable
+ * Diagnostic Register, along with setting the Cable Diagnostics Test
+ * Enable (VCT_EN) bit. The Cable Diagnostics Test Enable (VCT_EN) bit
+ * will self clear when the test is concluded.
+ */
+ ret = phy_write(phydev, KSZ9x31_LMD,
+ KSZ9x31_LMD_VCT_EN | KSZ9x31_LMD_VCT_PAIR(pair));
+ if (ret)
+ return ret;
+
+ ret = ksz9x31_cable_test_wait_for_completion(phydev);
+ if (ret)
+ return ret;
+
+ val = phy_read(phydev, KSZ9x31_LMD);
+ if (val < 0)
+ return val;
+
+ if (ksz9x31_cable_test_failed(val))
+ return -EAGAIN;
+
+ ret = ethnl_cable_test_result(phydev,
+ ksz9x31_cable_test_get_pair(pair),
+ ksz9x31_cable_test_result_trans(val));
+ if (ret)
+ return ret;
+
+ if (!ksz9x31_cable_test_fault_length_valid(val))
+ return 0;
+
+ return ethnl_cable_test_fault_length(phydev,
+ ksz9x31_cable_test_get_pair(pair),
+ ksz9x31_cable_test_fault_length(phydev, val));
+}
+
+static int ksz9x31_cable_test_get_status(struct phy_device *phydev,
+ bool *finished)
+{
+ struct kszphy_priv *priv = phydev->priv;
+ unsigned long pair_mask = 0xf;
+ int retries = 20;
+ int pair, ret, rv;
+
+ *finished = false;
+
+ /* Try harder if link partner is active */
+ while (pair_mask && retries--) {
+ for_each_set_bit(pair, &pair_mask, 4) {
+ ret = ksz9x31_cable_test_one_pair(phydev, pair);
+ if (ret == -EAGAIN)
+ continue;
+ if (ret < 0)
+ return ret;
+ clear_bit(pair, &pair_mask);
+ }
+ /* If link partner is in autonegotiation mode it will send 2ms
+ * of FLPs with at least 6ms of silence.
+ * Add 2ms sleep to have better chances to hit this silence.
+ */
+ if (pair_mask)
+ usleep_range(2000, 3000);
+ }
+
+ /* Report remaining unfinished pair result as unknown. */
+ for_each_set_bit(pair, &pair_mask, 4) {
+ ret = ethnl_cable_test_result(phydev,
+ ksz9x31_cable_test_get_pair(pair),
+ ETHTOOL_A_CABLE_RESULT_CODE_UNSPEC);
+ }
+
+ *finished = true;
+
+ /* Restore cached bits from before LinkMD got started. */
+ rv = phy_modify(phydev, MII_CTRL1000,
+ CTL1000_ENABLE_MASTER | CTL1000_AS_MASTER,
+ priv->vct_ctrl1000);
+ if (rv)
+ return rv;
+
+ return ret;
+}
+
static int ksz8873mll_config_aneg(struct phy_device *phydev)
{
return 0;
@@@ -1959,7 -1743,7 +1959,7 @@@ static int ksz886x_cable_test_get_statu
static int lanphy_read_page_reg(struct phy_device *phydev, int page, u32 addr)
{
- u32 data;
+ int data;
phy_lock_mdio_bus(phydev);
__phy_write(phydev, LAN_EXT_PAGE_ACCESS_CONTROL, page);
@@@ -2660,8 -2444,7 +2660,7 @@@ static int lan8804_config_init(struct p
static irqreturn_t lan8814_handle_interrupt(struct phy_device *phydev)
{
- u16 tsu_irq_status;
- int irq_status;
+ int irq_status, tsu_irq_status;
irq_status = phy_read(phydev, LAN8814_INTS);
if (irq_status > 0 && (irq_status & LAN8814_INT_LINK))
@@@ -2730,10 -2513,6 +2729,10 @@@ static void lan8814_ptp_init(struct phy
struct kszphy_ptp_priv *ptp_priv = &priv->ptp_priv;
u32 temp;
+ if (!IS_ENABLED(CONFIG_PTP_1588_CLOCK) ||
+ !IS_ENABLED(CONFIG_NETWORK_PHY_TIMESTAMPING))
+ return;
+
lanphy_write_page_reg(phydev, 5, TSU_HARD_RESET, TSU_HARD_RESET_);
temp = lanphy_read_page_reg(phydev, 5, PTP_TX_MOD);
@@@ -2772,10 -2551,6 +2771,10 @@@ static int lan8814_ptp_probe_once(struc
{
struct lan8814_shared_priv *shared = phydev->shared->priv;
+ if (!IS_ENABLED(CONFIG_PTP_1588_CLOCK) ||
+ !IS_ENABLED(CONFIG_NETWORK_PHY_TIMESTAMPING))
+ return 0;
+
	/* Initialise shared lock for clock */
mutex_init(&shared->shared_lock);
@@@ -2838,21 -2613,6 +2837,21 @@@ static int lan8814_config_init(struct p
return 0;
}
+static int lan8814_release_coma_mode(struct phy_device *phydev)
+{
+ struct gpio_desc *gpiod;
+
+ gpiod = devm_gpiod_get_optional(&phydev->mdio.dev, "coma-mode",
+ GPIOD_OUT_HIGH_OPEN_DRAIN);
+ if (IS_ERR(gpiod))
+ return PTR_ERR(gpiod);
+
+ gpiod_set_consumer_name(gpiod, "LAN8814 coma mode");
+ gpiod_set_value_cansleep(gpiod, 0);
+
+ return 0;
+}
+
static int lan8814_probe(struct phy_device *phydev)
{
struct kszphy_priv *priv;
@@@ -2867,6 -2627,10 +2866,6 @@@
phydev->priv = priv;
- if (!IS_ENABLED(CONFIG_PTP_1588_CLOCK) ||
- !IS_ENABLED(CONFIG_NETWORK_PHY_TIMESTAMPING))
- return 0;
-
/* Strap-in value for PHY address, below register read gives starting
* phy address value
*/
@@@ -2875,10 -2639,6 +2874,10 @@@
addr, sizeof(struct lan8814_shared_priv));
if (phy_package_init_once(phydev)) {
+ err = lan8814_release_coma_mode(phydev);
+ if (err)
+ return err;
+
err = lan8814_ptp_probe_once(phydev);
if (err)
return err;
@@@ -2896,6 -2656,7 +2895,7 @@@ static struct phy_driver ksphy_driver[
.name = "Micrel KS8737",
/* PHY_BASIC_FEATURES */
.driver_data = &ks8737_type,
+ .probe = kszphy_probe,
.config_init = kszphy_config_init,
.config_intr = kszphy_config_intr,
.handle_interrupt = kszphy_handle_interrupt,
@@@ -3021,8 -2782,8 +3021,8 @@@
.config_init = ksz8061_config_init,
.config_intr = kszphy_config_intr,
.handle_interrupt = kszphy_handle_interrupt,
- .suspend = kszphy_suspend,
- .resume = kszphy_resume,
+ .suspend = genphy_suspend,
+ .resume = genphy_resume,
}, {
.phy_id = PHY_ID_KSZ9021,
.phy_id_mask = 0x000ffffe,
@@@ -3045,7 -2806,6 +3045,7 @@@
.phy_id = PHY_ID_KSZ9031,
.phy_id_mask = MICREL_PHY_ID_MASK,
.name = "Micrel KSZ9031 Gigabit PHY",
+ .flags = PHY_POLL_CABLE_TEST,
.driver_data = &ksz9021_type,
.probe = kszphy_probe,
.get_features = ksz9031_get_features,
@@@ -3059,8 -2819,6 +3059,8 @@@
.get_stats = kszphy_get_stats,
.suspend = kszphy_suspend,
.resume = kszphy_resume,
+ .cable_test_start = ksz9x31_cable_test_start,
+ .cable_test_get_status = ksz9x31_cable_test_get_status,
}, {
.phy_id = PHY_ID_LAN8814,
.phy_id_mask = MICREL_PHY_ID_MASK,
@@@ -3095,7 -2853,6 +3095,7 @@@
.phy_id_mask = MICREL_PHY_ID_MASK,
.name = "Microchip KSZ9131 Gigabit PHY",
/* PHY_GBIT_FEATURES */
+ .flags = PHY_POLL_CABLE_TEST,
.driver_data = &ksz9021_type,
.probe = kszphy_probe,
.config_init = ksz9131_config_init,
@@@ -3106,8 -2863,6 +3106,8 @@@
.get_stats = kszphy_get_stats,
.suspend = kszphy_suspend,
.resume = kszphy_resume,
+ .cable_test_start = ksz9x31_cable_test_start,
+ .cable_test_get_status = ksz9x31_cable_test_get_status,
}, {
.phy_id = PHY_ID_KSZ8873MLL,
.phy_id_mask = MICREL_PHY_ID_MASK,
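
Editor's note: to make the LinkMD conversion above concrete — per the datasheet comment, distance to fault is (VCT_DATA - 22) * 4 / cable propagation velocity, and ksz9x31_cable_test_fault_length() returns centimetres, which is what ethnl_cable_test_fault_length() expects. A worked example with a hypothetical reading:

	/* Hypothetical reading: VCT_DATA = 47 on a KSZ9131 */
	int dt = clamp(47 - 22, 0, 255);	/* = 25 */
	int cm = (dt * 400) / 10;		/* = 1000 cm, ~10 m to the fault */
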
diff --combined drivers/net/phy/phy.c
index 9034c6a8e18f,f122026c4682..ef62f357b76d
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@@ -295,20 -295,20 +295,20 @@@ int phy_mii_ioctl(struct phy_device *ph
if (mdio_phy_id_is_c45(mii_data->phy_id)) {
prtad = mdio_phy_id_prtad(mii_data->phy_id);
devad = mdio_phy_id_devad(mii_data->phy_id);
- devad = mdiobus_c45_addr(devad, mii_data->reg_num);
+ mii_data->val_out = mdiobus_c45_read(
+ phydev->mdio.bus, prtad, devad,
+ mii_data->reg_num);
} else {
- prtad = mii_data->phy_id;
- devad = mii_data->reg_num;
+ mii_data->val_out = mdiobus_read(
+ phydev->mdio.bus, mii_data->phy_id,
+ mii_data->reg_num);
}
- mii_data->val_out = mdiobus_read(phydev->mdio.bus, prtad,
- devad);
return 0;
case SIOCSMIIREG:
if (mdio_phy_id_is_c45(mii_data->phy_id)) {
prtad = mdio_phy_id_prtad(mii_data->phy_id);
devad = mdio_phy_id_devad(mii_data->phy_id);
- devad = mdiobus_c45_addr(devad, mii_data->reg_num);
} else {
prtad = mii_data->phy_id;
devad = mii_data->reg_num;
@@@ -351,11 -351,7 +351,11 @@@
}
}
- mdiobus_write(phydev->mdio.bus, prtad, devad, val);
+ if (mdio_phy_id_is_c45(mii_data->phy_id))
+ mdiobus_c45_write(phydev->mdio.bus, prtad, devad,
+ mii_data->reg_num, val);
+ else
+ mdiobus_write(phydev->mdio.bus, prtad, devad, val);
if (prtad == phydev->mdio.addr &&
devad == MII_BMCR &&
@@@ -974,8 -970,13 +974,13 @@@ static irqreturn_t phy_interrupt(int ir
{
struct phy_device *phydev = phy_dat;
struct phy_driver *drv = phydev->drv;
+ irqreturn_t ret;
- return drv->handle_interrupt(phydev);
+ mutex_lock(&phydev->lock);
+ ret = drv->handle_interrupt(phydev);
+ mutex_unlock(&phydev->lock);
+
+ return ret;
}
/**
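
Editor's note: the SIOCGMIIREG/SIOCSMIIREG rework above stops encoding Clause 45 accesses by folding the device address and register number into one synthetic register address, and instead calls accessors that take them separately. For reference, the legacy packing this replaces looked roughly like the following (shape from include/linux/mdio.h of that era; treat as paraphrase, not verbatim):

	static inline u32 mdiobus_c45_addr(int devad, u16 regnum)
	{
		return MII_ADDR_C45 | devad << MII_DEVADDR_C45_SHIFT | regnum;
	}
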
diff --combined drivers/net/wireless/mac80211_hwsim.c
index afdf48550588,e9ec63e0e395..2f746eb64507
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@@ -2189,7 -2189,7 +2189,7 @@@ mac80211_hwsim_sta_rc_update(struct iee
u32 bw = U32_MAX;
enum nl80211_chan_width confbw = NL80211_CHAN_WIDTH_20_NOHT;
- switch (sta->bandwidth) {
+ switch (sta->deflink.bandwidth) {
#define C(_bw) case IEEE80211_STA_RX_BW_##_bw: bw = _bw; break
C(20);
C(40);
@@@ -2202,16 -2202,19 +2202,19 @@@
if (!data->use_chanctx) {
confbw = data->bw;
} else {
- struct ieee80211_chanctx_conf *chanctx_conf =
- rcu_dereference(vif->chanctx_conf);
+ struct ieee80211_chanctx_conf *chanctx_conf;
+
+ rcu_read_lock();
+ chanctx_conf = rcu_dereference(vif->chanctx_conf);
if (!WARN_ON(!chanctx_conf))
confbw = chanctx_conf->def.width;
+ rcu_read_unlock();
}
WARN(bw > hwsim_get_chanwidth(confbw),
"intf %pM: bad STA %pM bandwidth %d MHz (%d) > channel config %d MHz
(%d)\n",
- vif->addr, sta->addr, bw, sta->bandwidth,
+ vif->addr, sta->addr, bw, sta->deflink.bandwidth,
hwsim_get_chanwidth(data->bw), data->bw);
}
@@@ -2475,11 -2478,13 +2478,13 @@@ static void hw_scan_work(struct work_st
if (req->ie_len)
skb_put_data(probe, req->ie, req->ie_len);
+ rcu_read_lock();
if (!ieee80211_tx_prepare_skb(hwsim->hw,
hwsim->hw_scan_vif,
probe,
hwsim->tmp_chan->band,
NULL)) {
+ rcu_read_unlock();
kfree_skb(probe);
continue;
}
@@@ -2487,6 -2492,7 +2492,7 @@@
local_bh_disable();
mac80211_hwsim_tx_frame(hwsim->hw, probe,
hwsim->tmp_chan);
+ rcu_read_unlock();
local_bh_enable();
}
}
diff --combined net/core/skbuff.c
index 15f7b6f99a8f,c90c74de90d5..bd16e158b366
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@@ -80,6 -80,7 +80,6 @@@
#include <linux/user_namespace.h>
#include <linux/indirect_call_wrapper.h>
-#include "datagram.h"
#include "sock_destructor.h"
struct kmem_cache *skbuff_head_cache __ro_after_init;
@@@ -203,7 -204,7 +203,7 @@@ static void __build_skb_around(struct s
skb_set_end_offset(skb, size);
skb->mac_header = (typeof(skb->mac_header))~0U;
skb->transport_header = (typeof(skb->transport_header))~0U;
-
+ skb->alloc_cpu = raw_smp_processor_id();
/* make sure we initialize shinfo sequentially */
shinfo = skb_shinfo(skb);
memset(shinfo, 0, offsetof(struct skb_shared_info, dataref));
@@@ -1036,7 -1037,6 +1036,7 @@@ static void __copy_skb_header(struct sk
#ifdef CONFIG_NET_RX_BUSY_POLL
CHECK_SKB_FIELD(napi_id);
#endif
+ CHECK_SKB_FIELD(alloc_cpu);
#ifdef CONFIG_XPS
CHECK_SKB_FIELD(sender_cpu);
#endif
@@@ -1165,7 -1165,7 +1165,7 @@@ void mm_unaccount_pinned_pages(struct m
}
EXPORT_SYMBOL_GPL(mm_unaccount_pinned_pages);
-struct ubuf_info *msg_zerocopy_alloc(struct sock *sk, size_t size)
+static struct ubuf_info *msg_zerocopy_alloc(struct sock *sk, size_t size)
{
struct ubuf_info *uarg;
struct sk_buff *skb;
@@@ -1196,6 -1196,7 +1196,6 @@@
return uarg;
}
-EXPORT_SYMBOL_GPL(msg_zerocopy_alloc);
static inline struct sk_buff *skb_from_uarg(struct ubuf_info *uarg)
{
@@@ -1338,11 -1339,18 +1338,11 @@@ void msg_zerocopy_put_abort(struct ubuf
}
EXPORT_SYMBOL_GPL(msg_zerocopy_put_abort);
-int skb_zerocopy_iter_dgram(struct sk_buff *skb, struct msghdr *msg, int len)
-{
- return __zerocopy_sg_from_iter(skb->sk, skb, &msg->msg_iter, len);
-}
-EXPORT_SYMBOL_GPL(skb_zerocopy_iter_dgram);
-
int skb_zerocopy_iter_stream(struct sock *sk, struct sk_buff *skb,
struct msghdr *msg, int len,
struct ubuf_info *uarg)
{
struct ubuf_info *orig_uarg = skb_zcopy(skb);
- struct iov_iter orig_iter = msg->msg_iter;
int err, orig_len = skb->len;
/* An skb can only point to one uarg. This edge case happens when
@@@ -1356,7 -1364,7 +1356,7 @@@
struct sock *save_sk = skb->sk;
/* Streams do not free skb on error. Reset to prev state. */
- msg->msg_iter = orig_iter;
+ iov_iter_revert(&msg->msg_iter, skb->len - orig_len);
skb->sk = sk;
___pskb_trim(skb, orig_len);
skb->sk = save_sk;
@@@ -3889,7 -3897,7 +3889,7 @@@ struct sk_buff *skb_segment_list(struc
unsigned int delta_len = 0;
struct sk_buff *tail = NULL;
struct sk_buff *nskb, *tmp;
- int err;
+ int len_diff, err;
skb_push(skb, -skb_network_offset(skb) + offset);
@@@ -3929,9 -3937,11 +3929,11 @@@
skb_push(nskb, -skb_network_offset(nskb) + offset);
skb_release_head_state(nskb);
+ len_diff = skb_network_header_len(nskb) - skb_network_header_len(skb);
__copy_skb_header(nskb, skb);
skb_headers_offset_update(nskb, skb_headroom(nskb) - skb_headroom(skb));
+ nskb->transport_header += len_diff;
skb_copy_from_linear_data_offset(skb, -tnl_hlen,
nskb->data - tnl_hlen,
offset + tnl_hlen);
@@@ -5593,7 -5603,7 +5595,7 @@@ err_free
}
EXPORT_SYMBOL(skb_vlan_untag);
-int skb_ensure_writable(struct sk_buff *skb, int write_len)
+int skb_ensure_writable(struct sk_buff *skb, unsigned int write_len)
{
if (!pskb_may_pull(skb, write_len))
return -ENOMEM;
@@@ -6478,51 -6488,3 +6480,51 @@@ free_now
}
EXPORT_SYMBOL(__skb_ext_put);
#endif /* CONFIG_SKB_EXTENSIONS */
+
+/**
+ * skb_attempt_defer_free - queue skb for remote freeing
+ * @skb: buffer
+ *
+ * Put @skb in a per-cpu list, using the cpu which
+ * allocated the skb/pages to reduce false sharing
+ * and memory zone spinlock contention.
+ */
+void skb_attempt_defer_free(struct sk_buff *skb)
+{
+ int cpu = skb->alloc_cpu;
+ struct softnet_data *sd;
+ unsigned long flags;
+ bool kick;
+
+ if (WARN_ON_ONCE(cpu >= nr_cpu_ids) ||
+ !cpu_online(cpu) ||
+ cpu == raw_smp_processor_id()) {
+ __kfree_skb(skb);
+ return;
+ }
+
+ sd = &per_cpu(softnet_data, cpu);
+ /* We do not send an IPI or any signal.
+ * Remote cpu will eventually call skb_defer_free_flush()
+ */
+ spin_lock_irqsave(&sd->defer_lock, flags);
+ skb->next = sd->defer_list;
+ /* Paired with READ_ONCE() in skb_defer_free_flush() */
+ WRITE_ONCE(sd->defer_list, skb);
+ sd->defer_count++;
+
+ /* kick every time queue length reaches 128.
+ * This should avoid blocking in smp_call_function_single_async().
+	 * This condition should hardly be hit under normal conditions,
+	 * unless the cpu suddenly stops receiving NIC interrupts.
+ */
+ kick = sd->defer_count == 128;
+
+ spin_unlock_irqrestore(&sd->defer_lock, flags);
+
+ /* Make sure to trigger NET_RX_SOFTIRQ on the remote CPU
+ * if we are unlucky enough (this seems very unlikely).
+ */
+ if (unlikely(kick))
+ smp_call_function_single_async(cpu, &sd->defer_csd);
+}
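
Editor's note: the comment above says the remote cpu "will eventually call skb_defer_free_flush()", which is outside this hunk. A plausible shape for that consumer — splice the list out under defer_lock, then free with the lock dropped — is sketched below; this is an assumption about the counterpart, not the verbatim net/core/dev.c code:

	static void skb_defer_free_flush(struct softnet_data *sd)
	{
		struct sk_buff *skb, *next;
		unsigned long flags;

		/* Paired with WRITE_ONCE() in skb_attempt_defer_free() */
		if (!READ_ONCE(sd->defer_list))
			return;

		spin_lock_irqsave(&sd->defer_lock, flags);
		skb = sd->defer_list;
		sd->defer_list = NULL;
		sd->defer_count = 0;
		spin_unlock_irqrestore(&sd->defer_lock, flags);

		while (skb) {
			next = skb->next;
			__kfree_skb(skb);	/* lock no longer held */
			skb = next;
		}
	}
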
diff --combined net/dsa/port.c
index 48e5a309ca5c,bdccb613285d..ecf0395cbddd
--- a/net/dsa/port.c
+++ b/net/dsa/port.c
@@@ -242,59 -242,6 +242,59 @@@ void dsa_port_disable(struct dsa_port *
rtnl_unlock();
}
+static void dsa_port_reset_vlan_filtering(struct dsa_port *dp,
+ struct dsa_bridge bridge)
+{
+ struct netlink_ext_ack extack = {0};
+ bool change_vlan_filtering = false;
+ struct dsa_switch *ds = dp->ds;
+ bool vlan_filtering;
+ int err;
+
+ if (ds->needs_standalone_vlan_filtering &&
+ !br_vlan_enabled(bridge.dev)) {
+ change_vlan_filtering = true;
+ vlan_filtering = true;
+ } else if (!ds->needs_standalone_vlan_filtering &&
+ br_vlan_enabled(bridge.dev)) {
+ change_vlan_filtering = true;
+ vlan_filtering = false;
+ }
+
+ /* If the bridge was vlan_filtering, the bridge core doesn't trigger an
+ * event for changing vlan_filtering setting upon slave ports leaving
+ * it. That is a good thing, because that lets us handle it and also
+ * handle the case where the switch's vlan_filtering setting is global
+ * (not per port). When that happens, the correct moment to trigger the
+ * vlan_filtering callback is only when the last port leaves the last
+ * VLAN-aware bridge.
+ */
+ if (change_vlan_filtering && ds->vlan_filtering_is_global) {
+ dsa_switch_for_each_port(dp, ds) {
+ struct net_device *br = dsa_port_bridge_dev_get(dp);
+
+ if (br && br_vlan_enabled(br)) {
+ change_vlan_filtering = false;
+ break;
+ }
+ }
+ }
+
+ if (!change_vlan_filtering)
+ return;
+
+ err = dsa_port_vlan_filtering(dp, vlan_filtering, &extack);
+ if (extack._msg) {
+ dev_err(ds->dev, "port %d: %s\n", dp->index,
+ extack._msg);
+ }
+ if (err && err != -EOPNOTSUPP) {
+ dev_err(ds->dev,
+ "port %d failed to reset VLAN filtering to %d: %pe\n",
+ dp->index, vlan_filtering, ERR_PTR(err));
+ }
+}
+
static int dsa_port_inherit_brport_flags(struct dsa_port *dp,
struct netlink_ext_ack *extack)
{
@@@ -366,8 -313,7 +366,8 @@@ static int dsa_port_switchdev_sync_attr
return 0;
}
-static void dsa_port_switchdev_unsync_attrs(struct dsa_port *dp)
+static void dsa_port_switchdev_unsync_attrs(struct dsa_port *dp,
+ struct dsa_bridge bridge)
{
/* Configure the port for standalone mode (no address learning,
* flood everything).
@@@ -387,7 -333,7 +387,7 @@@
*/
dsa_port_set_state_now(dp, BR_STATE_FORWARDING, true);
- /* VLAN filtering is handled by dsa_switch_bridge_leave */
+ dsa_port_reset_vlan_filtering(dp, bridge);
/* Ageing time may be global to the switch chip, so don't change it
* here because we have no good reason (or value) to change it to.
@@@ -459,7 -405,9 +459,7 @@@ int dsa_port_bridge_join(struct dsa_por
struct netlink_ext_ack *extack)
{
struct dsa_notifier_bridge_info info = {
- .tree_index = dp->ds->dst->index,
- .sw_index = dp->ds->index,
- .port = dp->index,
+ .dp = dp,
.extack = extack,
};
struct net_device *dev = dp->slave;
@@@ -503,6 -451,7 +503,7 @@@ out_rollback_unoffload
switchdev_bridge_port_unoffload(brport_dev, dp,
&dsa_slave_switchdev_notifier,
&dsa_slave_switchdev_blocking_notifier);
+ dsa_flush_workqueue();
out_rollback_unbridge:
dsa_broadcast(DSA_NOTIFIER_BRIDGE_LEAVE, &info);
out_rollback:
@@@ -528,7 -477,9 +529,7 @@@ void dsa_port_pre_bridge_leave(struct d
void dsa_port_bridge_leave(struct dsa_port *dp, struct net_device *br)
{
struct dsa_notifier_bridge_info info = {
- .tree_index = dp->ds->dst->index,
- .sw_index = dp->ds->index,
- .port = dp->index,
+ .dp = dp,
};
int err;
@@@ -551,14 -502,15 +552,14 @@@
"port %d failed to notify DSA_NOTIFIER_BRIDGE_LEAVE: %pe\n",
dp->index, ERR_PTR(err));
- dsa_port_switchdev_unsync_attrs(dp);
+ dsa_port_switchdev_unsync_attrs(dp, info.bridge);
}
int dsa_port_lag_change(struct dsa_port *dp,
struct netdev_lag_lower_state_info *linfo)
{
struct dsa_notifier_lag_info info = {
- .sw_index = dp->ds->index,
- .port = dp->index,
+ .dp = dp,
};
bool tx_enabled;
@@@ -627,7 -579,8 +628,7 @@@ int dsa_port_lag_join(struct dsa_port *
struct netlink_ext_ack *extack)
{
struct dsa_notifier_lag_info info = {
- .sw_index = dp->ds->index,
- .port = dp->index,
+ .dp = dp,
.info = uinfo,
};
struct net_device *bridge_dev;
@@@ -672,7 -625,8 +673,7 @@@ void dsa_port_lag_leave(struct dsa_por
{
struct net_device *br = dsa_port_bridge_dev_get(dp);
struct dsa_notifier_lag_info info = {
- .sw_index = dp->ds->index,
- .port = dp->index,
+ .dp = dp,
};
int err;
@@@ -930,10 -884,13 +931,10 @@@ int dsa_port_vlan_msti(struct dsa_port
return ds->ops->vlan_msti_set(ds, *dp->bridge, msti);
}
-int dsa_port_mtu_change(struct dsa_port *dp, int new_mtu,
- bool targeted_match)
+int dsa_port_mtu_change(struct dsa_port *dp, int new_mtu)
{
struct dsa_notifier_mtu_info info = {
- .sw_index = dp->ds->index,
- .targeted_match = targeted_match,
- .port = dp->index,
+ .dp = dp,
.mtu = new_mtu,
};
@@@ -944,7 -901,8 +945,7 @@@ int dsa_port_fdb_add(struct dsa_port *d
u16 vid)
{
struct dsa_notifier_fdb_info info = {
- .sw_index = dp->ds->index,
- .port = dp->index,
+ .dp = dp,
.addr = addr,
.vid = vid,
.db = {
@@@ -967,7 -925,8 +968,7 @@@ int dsa_port_fdb_del(struct dsa_port *d
u16 vid)
{
struct dsa_notifier_fdb_info info = {
- .sw_index = dp->ds->index,
- .port = dp->index,
+ .dp = dp,
.addr = addr,
.vid = vid,
.db = {
@@@ -987,7 -946,8 +988,7 @@@ static int dsa_port_host_fdb_add(struc
struct dsa_db db)
{
struct dsa_notifier_fdb_info info = {
- .sw_index = dp->ds->index,
- .port = dp->index,
+ .dp = dp,
.addr = addr,
.vid = vid,
.db = db,
@@@ -1038,7 -998,8 +1039,7 @@@ static int dsa_port_host_fdb_del(struc
struct dsa_db db)
{
struct dsa_notifier_fdb_info info = {
- .sw_index = dp->ds->index,
- .port = dp->index,
+ .dp = dp,
.addr = addr,
.vid = vid,
.db = db,
@@@ -1133,7 -1094,8 +1134,7 @@@ int dsa_port_mdb_add(const struct dsa_p
const struct switchdev_obj_port_mdb *mdb)
{
struct dsa_notifier_mdb_info info = {
- .sw_index = dp->ds->index,
- .port = dp->index,
+ .dp = dp,
.mdb = mdb,
.db = {
.type = DSA_DB_BRIDGE,
@@@ -1151,7 -1113,8 +1152,7 @@@ int dsa_port_mdb_del(const struct dsa_p
const struct switchdev_obj_port_mdb *mdb)
{
struct dsa_notifier_mdb_info info = {
- .sw_index = dp->ds->index,
- .port = dp->index,
+ .dp = dp,
.mdb = mdb,
.db = {
.type = DSA_DB_BRIDGE,
@@@ -1170,7 -1133,8 +1171,7 @@@ static int dsa_port_host_mdb_add(const
struct dsa_db db)
{
struct dsa_notifier_mdb_info info = {
- .sw_index = dp->ds->index,
- .port = dp->index,
+ .dp = dp,
.mdb = mdb,
.db = db,
};
@@@ -1214,7 -1178,8 +1215,7 @@@ static int dsa_port_host_mdb_del(const
struct dsa_db db)
{
struct dsa_notifier_mdb_info info = {
- .sw_index = dp->ds->index,
- .port = dp->index,
+ .dp = dp,
.mdb = mdb,
.db = db,
};
@@@ -1258,7 -1223,8 +1259,7 @@@ int dsa_port_vlan_add(struct dsa_port *
struct netlink_ext_ack *extack)
{
struct dsa_notifier_vlan_info info = {
- .sw_index = dp->ds->index,
- .port = dp->index,
+ .dp = dp,
.vlan = vlan,
.extack = extack,
};
@@@ -1270,7 -1236,8 +1271,7 @@@ int dsa_port_vlan_del(struct dsa_port *
const struct switchdev_obj_port_vlan *vlan)
{
struct dsa_notifier_vlan_info info = {
- .sw_index = dp->ds->index,
- .port = dp->index,
+ .dp = dp,
.vlan = vlan,
};
@@@ -1282,7 -1249,8 +1283,7 @@@ int dsa_port_host_vlan_add(struct dsa_p
struct netlink_ext_ack *extack)
{
struct dsa_notifier_vlan_info info = {
- .sw_index = dp->ds->index,
- .port = dp->index,
+ .dp = dp,
.vlan = vlan,
.extack = extack,
};
@@@ -1302,7 -1270,8 +1303,7 @@@ int dsa_port_host_vlan_del(struct dsa_p
const struct switchdev_obj_port_vlan *vlan)
{
struct dsa_notifier_vlan_info info = {
- .sw_index = dp->ds->index,
- .port = dp->index,
+ .dp = dp,
.vlan = vlan,
};
struct dsa_port *cpu_dp = dp->cpu_dp;
@@@ -1723,7 -1692,9 +1724,7 @@@ void dsa_port_hsr_leave(struct dsa_por
int dsa_port_tag_8021q_vlan_add(struct dsa_port *dp, u16 vid, bool broadcast)
{
struct dsa_notifier_tag_8021q_vlan_info info = {
- .tree_index = dp->ds->dst->index,
- .sw_index = dp->ds->index,
- .port = dp->index,
+ .dp = dp,
.vid = vid,
};
@@@ -1736,7 -1707,9 +1737,7 @@@
void dsa_port_tag_8021q_vlan_del(struct dsa_port *dp, u16 vid, bool broadcast)
{
struct dsa_notifier_tag_8021q_vlan_info info = {
- .tree_index = dp->ds->dst->index,
- .sw_index = dp->ds->index,
- .port = dp->index,
+ .dp = dp,
.vid = vid,
};
int err;
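
Editor's note: a pattern worth noting in the dsa_notifier_*_info conversions above — three identifying fields (tree_index, sw_index, port) collapse into a single struct dsa_port pointer, and any handler that still needs the old values can derive them from it. An illustrative match helper (hypothetical name; field paths are taken from the removed lines):

	static bool example_port_matches(struct dsa_switch *ds, int port,
					 const struct dsa_port *dp)
	{
		return ds->dst->index == dp->ds->dst->index &&
		       ds->index == dp->ds->index &&
		       port == dp->index;
	}
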
diff --combined net/ipv4/ping.c
index 5f8cad2978b3,aa9a11b20d18..1a43ca73f94d
--- a/net/ipv4/ping.c
+++ b/net/ipv4/ping.c
@@@ -305,6 -305,7 +305,7 @@@ static int ping_check_bind_addr(struct
struct net *net = sock_net(sk);
if (sk->sk_family == AF_INET) {
struct sockaddr_in *addr = (struct sockaddr_in *) uaddr;
+ u32 tb_id = RT_TABLE_LOCAL;
int chk_addr_ret;
if (addr_len < sizeof(*addr))
@@@ -318,7 -319,8 +319,8 @@@
pr_debug("ping_check_bind_addr(sk=%p,addr=%pI4,port=%d)\n",
sk, &addr->sin_addr.s_addr, ntohs(addr->sin_port));
- chk_addr_ret = inet_addr_type(net, addr->sin_addr.s_addr);
+ tb_id = l3mdev_fib_table_by_index(net, sk->sk_bound_dev_if) ? : tb_id;
+ chk_addr_ret = inet_addr_type_table(net, addr->sin_addr.s_addr, tb_id);
if (!inet_addr_valid_or_nonlocal(net, inet_sk(sk),
addr->sin_addr.s_addr,
@@@ -355,6 -357,14 +357,14 @@@
return -ENODEV;
}
}
+
+ if (!dev && sk->sk_bound_dev_if) {
+ dev = dev_get_by_index_rcu(net, sk->sk_bound_dev_if);
+ if (!dev) {
+ rcu_read_unlock();
+ return -ENODEV;
+ }
+ }
has_addr = pingv6_ops.ipv6_chk_addr(net, &addr->sin6_addr, dev,
scoped);
rcu_read_unlock();
@@@ -590,7 -600,7 +600,7 @@@ EXPORT_SYMBOL_GPL(ping_err)
int ping_getfrag(void *from, char *to,
int offset, int fraglen, int odd, struct sk_buff *skb)
{
- struct pingfakehdr *pfh = (struct pingfakehdr *)from;
+ struct pingfakehdr *pfh = from;
if (offset == 0) {
fraglen -= sizeof(struct icmphdr);
@@@ -844,8 -854,8 +854,8 @@@ do_confirm
goto out;
}
-int ping_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int noblock,
- int flags, int *addr_len)
+int ping_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int flags,
+ int *addr_len)
{
struct inet_sock *isk = inet_sk(sk);
int family = sk->sk_family;
@@@ -861,7 -871,7 +871,7 @@@
if (flags & MSG_ERRQUEUE)
return inet_recv_error(sk, msg, len, addr_len);
- skb = skb_recv_datagram(sk, flags, noblock, &err);
+ skb = skb_recv_datagram(sk, flags, &err);
if (!skb)
goto out;
@@@ -934,24 -944,16 +944,24 @@@ out
}
EXPORT_SYMBOL_GPL(ping_recvmsg);
-int ping_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
+static enum skb_drop_reason __ping_queue_rcv_skb(struct sock *sk,
+ struct sk_buff *skb)
{
+ enum skb_drop_reason reason;
+
pr_debug("ping_queue_rcv_skb(sk=%p,sk->num=%d,skb=%p)\n",
inet_sk(sk), inet_sk(sk)->inet_num, skb);
- if (sock_queue_rcv_skb(sk, skb) < 0) {
- kfree_skb(skb);
+ if (sock_queue_rcv_skb_reason(sk, skb, &reason) < 0) {
+ kfree_skb_reason(skb, reason);
pr_debug("ping_queue_rcv_skb -> failed\n");
- return -1;
+ return reason;
}
- return 0;
+ return SKB_NOT_DROPPED_YET;
+}
+
+int ping_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
+{
+ return __ping_queue_rcv_skb(sk, skb) ? -1 : 0;
}
EXPORT_SYMBOL_GPL(ping_queue_rcv_skb);
@@@ -960,12 -962,12 +970,12 @@@
* All we need to do is get the socket.
*/
-bool ping_rcv(struct sk_buff *skb)
+enum skb_drop_reason ping_rcv(struct sk_buff *skb)
{
+ enum skb_drop_reason reason = SKB_DROP_REASON_NO_SOCKET;
struct sock *sk;
struct net *net = dev_net(skb->dev);
struct icmphdr *icmph = icmp_hdr(skb);
- bool rc = false;
/* We assume the packet has already been checked by icmp_rcv */
@@@ -980,17 -982,15 +990,17 @@@
struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
pr_debug("rcv on socket %p\n", sk);
- if (skb2 && !ping_queue_rcv_skb(sk, skb2))
- rc = true;
+ if (skb2)
+ reason = __ping_queue_rcv_skb(sk, skb2);
+ else
+ reason = SKB_DROP_REASON_NOMEM;
sock_put(sk);
}
- if (!rc)
+ if (reason)
pr_debug("no socket, dropping\n");
- return rc;
+ return reason;
}
EXPORT_SYMBOL_GPL(ping_rcv);
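
Editor's note: since ping_rcv() now returns an skb_drop_reason (with SKB_NOT_DROPPED_YET == 0 preserving the old truthiness logic), a caller can feed the verdict straight into kfree_skb_reason(). A hypothetical call site, not the actual icmp_rcv() change:

	enum skb_drop_reason reason = ping_rcv(skb);

	if (reason == SKB_NOT_DROPPED_YET)
		consume_skb(skb);	/* a clone was queued to the socket */
	else
		kfree_skb_reason(skb, reason);
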
diff --combined net/ipv4/route.c
index ffbe2e4f8c89,57abd27e842c..444d4a2a422d
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@@ -503,29 -503,28 +503,29 @@@ static void ip_rt_fix_tos(struct flowi
__u8 tos = RT_FL_TOS(fl4);
fl4->flowi4_tos = tos & IPTOS_RT_MASK;
- fl4->flowi4_scope = tos & RTO_ONLINK ?
- RT_SCOPE_LINK : RT_SCOPE_UNIVERSE;
+ if (tos & RTO_ONLINK)
+ fl4->flowi4_scope = RT_SCOPE_LINK;
}
static void __build_flow_key(const struct net *net, struct flowi4 *fl4,
- const struct sock *sk,
- const struct iphdr *iph,
- int oif, u8 tos,
- u8 prot, u32 mark, int flow_flags)
+ const struct sock *sk, const struct iphdr *iph,
+ int oif, __u8 tos, u8 prot, u32 mark,
+ int flow_flags)
{
+ __u8 scope = RT_SCOPE_UNIVERSE;
+
if (sk) {
const struct inet_sock *inet = inet_sk(sk);
oif = sk->sk_bound_dev_if;
mark = sk->sk_mark;
- tos = RT_CONN_FLAGS(sk);
+ tos = ip_sock_rt_tos(sk);
+ scope = ip_sock_rt_scope(sk);
prot = inet->hdrincl ? IPPROTO_RAW : sk->sk_protocol;
}
- flowi4_init_output(fl4, oif, mark, tos,
- RT_SCOPE_UNIVERSE, prot,
- flow_flags,
- iph->daddr, iph->saddr, 0, 0,
+
+ flowi4_init_output(fl4, oif, mark, tos & IPTOS_RT_MASK, scope,
+ prot, flow_flags, iph->daddr, iph->saddr, 0, 0,
sock_net_uid(net, sk));
}
@@@ -535,9 -534,9 +535,9 @@@ static void build_skb_flow_key(struct f
const struct net *net = dev_net(skb->dev);
const struct iphdr *iph = ip_hdr(skb);
int oif = skb->dev->ifindex;
- u8 tos = RT_TOS(iph->tos);
u8 prot = iph->protocol;
u32 mark = skb->mark;
+ __u8 tos = iph->tos;
__build_flow_key(net, fl4, sk, iph, oif, tos, prot, mark, 0);
}
@@@ -553,8 -552,7 +553,8 @@@ static void build_sk_flow_key(struct fl
if (inet_opt && inet_opt->opt.srr)
daddr = inet_opt->opt.faddr;
flowi4_init_output(fl4, sk->sk_bound_dev_if, sk->sk_mark,
- RT_CONN_FLAGS(sk), RT_SCOPE_UNIVERSE,
+ ip_sock_rt_tos(sk) & IPTOS_RT_MASK,
+ ip_sock_rt_scope(sk),
inet->hdrincl ? IPPROTO_RAW : sk->sk_protocol,
inet_sk_flowi_flags(sk),
daddr, inet->inet_saddr, 0, 0, sk->sk_uid);
@@@ -827,13 -825,14 +827,13 @@@ static void ip_do_redirect(struct dst_e
const struct iphdr *iph = (const struct iphdr *) skb->data;
struct net *net = dev_net(skb->dev);
int oif = skb->dev->ifindex;
- u8 tos = RT_TOS(iph->tos);
u8 prot = iph->protocol;
u32 mark = skb->mark;
+ __u8 tos = iph->tos;
rt = (struct rtable *) dst;
__build_flow_key(net, &fl4, sk, iph, oif, tos, prot, mark, 0);
- ip_rt_fix_tos(&fl4);
__ip_do_redirect(rt, skb, &fl4, true);
}
@@@ -946,7 -945,6 +946,7 @@@ static int ip_error(struct sk_buff *skb
struct inet_peer *peer;
unsigned long now;
struct net *net;
+ SKB_DR(reason);
bool send;
int code;
@@@ -966,12 -964,10 +966,12 @@@
if (!IN_DEV_FORWARD(in_dev)) {
switch (rt->dst.error) {
case EHOSTUNREACH:
+ SKB_DR_SET(reason, IP_INADDRERRORS);
__IP_INC_STATS(net, IPSTATS_MIB_INADDRERRORS);
break;
case ENETUNREACH:
+ SKB_DR_SET(reason, IP_INNOROUTES);
__IP_INC_STATS(net, IPSTATS_MIB_INNOROUTES);
break;
}
@@@ -987,7 -983,6 +987,7 @@@
break;
case ENETUNREACH:
code = ICMP_NET_UNREACH;
+ SKB_DR_SET(reason, IP_INNOROUTES);
__IP_INC_STATS(net, IPSTATS_MIB_INNOROUTES);
break;
case EACCES:
@@@ -1014,7 -1009,7 +1014,7 @@@
if (send)
icmp_send(skb, ICMP_DEST_UNREACH, code, 0);
-out: kfree_skb(skb);
+out: kfree_skb_reason(skb, reason);
return 0;
}
@@@ -1062,6 -1057,7 +1062,6 @@@ static void ip_rt_update_pmtu(struct ds
struct flowi4 fl4;
ip_rt_build_flow_key(&fl4, sk, skb);
- ip_rt_fix_tos(&fl4);
/* Don't make lookup fail for bridged encapsulations */
if (skb && netif_is_any_bridge_port(skb->dev))
@@@ -1078,8 -1074,8 +1078,8 @@@ void ipv4_update_pmtu(struct sk_buff *s
struct rtable *rt;
u32 mark = IP4_REPLY_MARK(net, skb->mark);
- __build_flow_key(net, &fl4, NULL, iph, oif,
- RT_TOS(iph->tos), protocol, mark, 0);
+ __build_flow_key(net, &fl4, NULL, iph, oif, iph->tos, protocol, mark,
+ 0);
rt = __ip_route_output_key(net, &fl4);
if (!IS_ERR(rt)) {
__ip_rt_update_pmtu(rt, &fl4, mtu);
@@@ -1136,6 -1132,8 +1136,6 @@@ void ipv4_sk_update_pmtu(struct sk_buf
goto out;
new = true;
- } else {
- ip_rt_fix_tos(&fl4);
}
__ip_rt_update_pmtu((struct rtable *)xfrm_dst_path(&rt->dst), &fl4, mtu);
@@@ -1167,7 -1165,8 +1167,7 @@@ void ipv4_redirect(struct sk_buff *skb
struct flowi4 fl4;
struct rtable *rt;
- __build_flow_key(net, &fl4, NULL, iph, oif,
- RT_TOS(iph->tos), protocol, 0, 0);
+ __build_flow_key(net, &fl4, NULL, iph, oif, iph->tos, protocol, 0, 0);
rt = __ip_route_output_key(net, &fl4);
if (!IS_ERR(rt)) {
__ip_do_redirect(rt, skb, &fl4, false);
@@@ -1754,6 -1753,7 +1754,7 @@@ static int ip_route_input_mc(struct sk_
#endif
RT_CACHE_STAT_INC(in_slow_mc);
+ skb_dst_drop(skb);
skb_dst_set(skb, &rth->dst);
return 0;
}
@@@ -3395,7 -3395,7 +3396,7 @@@ static int inet_rtm_getroute(struct sk_
fri.tb_id = table_id;
fri.dst = res.prefix;
fri.dst_len = res.prefixlen;
- fri.tos = fl4.flowi4_tos;
+ fri.dscp = inet_dsfield_to_dscp(fl4.flowi4_tos);
fri.type = rt->rt_type;
fri.offload = 0;
fri.trap = 0;
@@@ -3408,7 -3408,7 +3409,7 @@@
if (fa->fa_slen == slen &&
fa->tb_id == fri.tb_id &&
- fa->fa_dscp == inet_dsfield_to_dscp(fri.tos) &&
+ fa->fa_dscp == fri.dscp &&
fa->fa_info == res.fi &&
fa->fa_type == fri.type) {
fri.offload = READ_ONCE(fa->offload);
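
Editor's note: the flow-key changes above replace RT_CONN_FLAGS(sk) — which smuggled the RTO_ONLINK bit inside the tos — with an explicit tos/scope pair. The two helpers referenced, ip_sock_rt_tos() and ip_sock_rt_scope(), live in include/net/route.h; their approximate shape (paraphrased, assuming SOCK_LOCALROUTE still drives the link-scope decision):

	static inline __u8 ip_sock_rt_scope(const struct sock *sk)
	{
		if (sock_flag(sk, SOCK_LOCALROUTE))
			return RT_SCOPE_LINK;

		return RT_SCOPE_UNIVERSE;
	}

	static inline __u8 ip_sock_rt_tos(const struct sock *sk)
	{
		return RT_TOS(inet_sk(sk)->tos);
	}
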
diff --combined net/mac80211/mlme.c
index b857915881e0,dc8aec1a5d3d..07a96f7c5dc3
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@@ -3342,7 -3342,7 +3342,7 @@@ static bool ieee80211_twt_req_supported
if (!(elems->ext_capab[9] & WLAN_EXT_CAPA10_TWT_RESPONDER_SUPPORT))
return false;
- return sta->sta.he_cap.he_cap_elem.mac_cap_info[0] &
+ return sta->sta.deflink.he_cap.he_cap_elem.mac_cap_info[0] &
IEEE80211_HE_MAC_CAP0_TWT_RES;
}
@@@ -3369,7 -3369,7 +3369,7 @@@ static bool ieee80211_twt_bcast_support
ieee80211_vif_type_p2p(&sdata->vif));
return bss_conf->he_support &&
- (sta->sta.he_cap.he_cap_elem.mac_cap_info[2] &
+ (sta->sta.deflink.he_cap.he_cap_elem.mac_cap_info[2] &
IEEE80211_HE_MAC_CAP2_BCAST_TWT) &&
own_he_cap &&
(own_he_cap->he_cap_elem.mac_cap_info[2] &
@@@ -3587,7 -3587,7 +3587,7 @@@ static bool ieee80211_assoc_success(str
elems->he_6ghz_capa,
sta);
- bss_conf->he_support = sta->sta.he_cap.has_he;
+ bss_conf->he_support = sta->sta.deflink.he_cap.has_he;
if (elems->rsnx && elems->rsnx_len &&
(elems->rsnx[0] & WLAN_RSNX_CAPA_PROTECTED_TWT) &&
wiphy_ext_feature_isset(local->hw.wiphy,
@@@ -3607,7 -3607,7 +3607,7 @@@
elems->eht_cap_len,
sta);
- bss_conf->eht_support = sta->sta.eht_cap.has_eht;
+ bss_conf->eht_support = sta->sta.deflink.eht_cap.has_eht;
} else {
bss_conf->eht_support = false;
}
@@@ -3657,6 -3657,12 +3657,12 @@@
cbss->transmitted_bss->bssid);
bss_conf->bssid_indicator = cbss->max_bssid_indicator;
bss_conf->bssid_index = cbss->bssid_index;
+ } else {
+ bss_conf->nontransmitted = false;
+ memset(bss_conf->transmitter_bssid, 0,
+ sizeof(bss_conf->transmitter_bssid));
+ bss_conf->bssid_indicator = 0;
+ bss_conf->bssid_index = 0;
}
/*
@@@ -3678,7 -3684,7 +3684,7 @@@
nss = *elems->opmode_notif & IEEE80211_OPMODE_NOTIF_RX_NSS_MASK;
nss >>= IEEE80211_OPMODE_NOTIF_RX_NSS_SHIFT;
nss += 1;
- sta->sta.rx_nss = nss;
+ sta->sta.deflink.rx_nss = nss;
}
rate_control_rate_init(sta);
@@@ -4836,9 -4842,9 +4842,9 @@@ static void ieee80211_sta_conn_mon_time
if (!sta)
return;
- timeout = sta->status_stats.last_ack;
- if (time_before(sta->status_stats.last_ack, sta->rx_stats.last_rx))
- timeout = sta->rx_stats.last_rx;
+ timeout = sta->deflink.status_stats.last_ack;
+	if (time_before(sta->deflink.status_stats.last_ack,
+			sta->deflink.rx_stats.last_rx))
+ timeout = sta->deflink.rx_stats.last_rx;
timeout += IEEE80211_CONNECTION_IDLE_TIME;
/* If timeout is after now, then update timer to fire at
@@@ -5638,7 -5644,7 +5644,7 @@@ static int ieee80211_prep_connection(st
}
if (rates)
- new_sta->sta.supp_rates[cbss->channel->band] = rates;
+ new_sta->sta.deflink.supp_rates[cbss->channel->band] = rates;
else
sdata_info(sdata,
"No rates found, keeping mandatory only\n");
diff --combined net/mac80211/rx.c
index 959a36fd658b,88d797fa82ff..3c08ae04ddbc
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@@ -221,7 -221,7 +221,7 @@@ static void __ieee80211_queue_skb_to_if
skb_queue_tail(&sdata->skb_queue, skb);
ieee80211_queue_work(&sdata->local->hw, &sdata->work);
if (sta)
- sta->rx_stats.packets++;
+ sta->deflink.rx_stats.packets++;
}
static void ieee80211_queue_skb_to_iface(struct ieee80211_sub_if_data *sdata,
@@@ -1405,8 -1405,7 +1405,7 @@@ static void ieee80211_rx_reorder_ampdu(
goto dont_reorder;
/* not part of a BA session */
- if (ack_policy != IEEE80211_QOS_CTL_ACK_POLICY_BLOCKACK &&
- ack_policy != IEEE80211_QOS_CTL_ACK_POLICY_NORMAL)
+ if (ack_policy == IEEE80211_QOS_CTL_ACK_POLICY_NOACK)
goto dont_reorder;
/* new, potentially un-ordered, ampdu frame - process it */
@@@ -1465,7 -1464,7 +1464,7 @@@ ieee80211_rx_h_check_dup(struct ieee802
if (unlikely(ieee80211_has_retry(hdr->frame_control) &&
rx->sta->last_seq_ctrl[rx->seqno_idx] == hdr->seq_ctrl)) {
I802_DEBUG_INC(rx->local->dot11FrameDuplicateCount);
- rx->sta->rx_stats.num_duplicates++;
+ rx->sta->deflink.rx_stats.num_duplicates++;
return RX_DROP_UNUSABLE;
} else if (!(status->flag & RX_FLAG_AMSDU_MORE)) {
rx->sta->last_seq_ctrl[rx->seqno_idx] = hdr->seq_ctrl;
@@@ -1761,47 -1760,46 +1760,47 @@@ ieee80211_rx_h_sta_process(struct ieee8
NL80211_IFTYPE_ADHOC);
if (ether_addr_equal(bssid, rx->sdata->u.ibss.bssid) &&
test_sta_flag(sta, WLAN_STA_AUTHORIZED)) {
- sta->rx_stats.last_rx = jiffies;
+ sta->deflink.rx_stats.last_rx = jiffies;
if (ieee80211_is_data(hdr->frame_control) &&
!is_multicast_ether_addr(hdr->addr1))
- sta->rx_stats.last_rate =
+ sta->deflink.rx_stats.last_rate =
sta_stats_encode_rate(status);
}
} else if (rx->sdata->vif.type == NL80211_IFTYPE_OCB) {
- sta->rx_stats.last_rx = jiffies;
+ sta->deflink.rx_stats.last_rx = jiffies;
} else if (!ieee80211_is_s1g_beacon(hdr->frame_control) &&
!is_multicast_ether_addr(hdr->addr1)) {
/*
* Mesh beacons will update last_rx if they are found to
* match the current local configuration when processed.
*/
- sta->rx_stats.last_rx = jiffies;
+ sta->deflink.rx_stats.last_rx = jiffies;
if (ieee80211_is_data(hdr->frame_control))
- sta->rx_stats.last_rate = sta_stats_encode_rate(status);
+ sta->deflink.rx_stats.last_rate = sta_stats_encode_rate(status);
}
- sta->rx_stats.fragments++;
+ sta->deflink.rx_stats.fragments++;
- u64_stats_update_begin(&rx->sta->rx_stats.syncp);
- sta->rx_stats.bytes += rx->skb->len;
- u64_stats_update_end(&rx->sta->rx_stats.syncp);
+ u64_stats_update_begin(&rx->sta->deflink.rx_stats.syncp);
+ sta->deflink.rx_stats.bytes += rx->skb->len;
+ u64_stats_update_end(&rx->sta->deflink.rx_stats.syncp);
if (!(status->flag & RX_FLAG_NO_SIGNAL_VAL)) {
- sta->rx_stats.last_signal = status->signal;
- ewma_signal_add(&sta->rx_stats_avg.signal, -status->signal);
+ sta->deflink.rx_stats.last_signal = status->signal;
+ ewma_signal_add(&sta->deflink.rx_stats_avg.signal,
+ -status->signal);
}
if (status->chains) {
- sta->rx_stats.chains = status->chains;
+ sta->deflink.rx_stats.chains = status->chains;
for (i = 0; i < ARRAY_SIZE(status->chain_signal); i++) {
int signal = status->chain_signal[i];
if (!(status->chains & BIT(i)))
continue;
- sta->rx_stats.chain_signal_last[i] = signal;
- ewma_signal_add(&sta->rx_stats_avg.chain_signal[i],
+ sta->deflink.rx_stats.chain_signal_last[i] = signal;
+ ewma_signal_add(&sta->deflink.rx_stats_avg.chain_signal[i],
-signal);
}
}
@@@ -1862,7 -1860,7 +1861,7 @@@
* Update counter and free packet here to avoid
* counting this as a dropped packet.
*/
- sta->rx_stats.packets++;
+ sta->deflink.rx_stats.packets++;
dev_kfree_skb(rx->skb);
return RX_QUEUED;
}
@@@ -1894,11 -1892,11 +1893,11 @@@ ieee80211_rx_get_bigtk(struct ieee80211
}
if (rx->sta)
- key = rcu_dereference(rx->sta->gtk[idx]);
+ key = rcu_dereference(rx->sta->deflink.gtk[idx]);
if (!key)
key = rcu_dereference(sdata->keys[idx]);
if (!key && rx->sta)
- key = rcu_dereference(rx->sta->gtk[idx2]);
+ key = rcu_dereference(rx->sta->deflink.gtk[idx2]);
if (!key)
key = rcu_dereference(sdata->keys[idx2]);
@@@ -2013,7 -2011,7 +2012,7 @@@ ieee80211_rx_h_decrypt(struct ieee80211
test_sta_flag(rx->sta, WLAN_STA_MFP))
return RX_DROP_MONITOR;
- rx->key = rcu_dereference(rx->sta->gtk[mmie_keyidx]);
+ rx->key = rcu_dereference(rx->sta->deflink.gtk[mmie_keyidx]);
}
if (!rx->key)
rx->key = rcu_dereference(rx->sdata->keys[mmie_keyidx]);
@@@ -2036,7 -2034,7 +2035,7 @@@
} else {
if (rx->sta) {
for (i = 0; i < NUM_DEFAULT_KEYS; i++) {
- key = rcu_dereference(rx->sta->gtk[i]);
+ key = rcu_dereference(rx->sta->deflink.gtk[i]);
if (key)
break;
}
@@@ -2073,7 -2071,7 +2072,7 @@@
/* check per-station GTK first, if multicast packet */
if (is_multicast_ether_addr(hdr->addr1) && rx->sta)
- rx->key = rcu_dereference(rx->sta->gtk[keyidx]);
+ rx->key = rcu_dereference(rx->sta->deflink.gtk[keyidx]);
/* if not found, try default key */
if (!rx->key) {
@@@ -2399,7 -2397,7 +2398,7 @@@ ieee80211_rx_h_defragment(struct ieee80
out:
ieee80211_led_rx(rx->local);
if (rx->sta)
- rx->sta->rx_stats.packets++;
+ rx->sta->deflink.rx_stats.packets++;
return RX_CONTINUE;
}
@@@ -2646,9 -2644,9 +2645,9 @@@ ieee80211_deliver_skb(struct ieee80211_
* for non-QoS-data frames. Here we know it's a data
* frame, so count MSDUs.
*/
- u64_stats_update_begin(&rx->sta->rx_stats.syncp);
- rx->sta->rx_stats.msdu[rx->seqno_idx]++;
- u64_stats_update_end(&rx->sta->rx_stats.syncp);
+ u64_stats_update_begin(&rx->sta->deflink.rx_stats.syncp);
+ rx->sta->deflink.rx_stats.msdu[rx->seqno_idx]++;
+ u64_stats_update_end(&rx->sta->deflink.rx_stats.syncp);
}
if ((sdata->vif.type == NL80211_IFTYPE_AP ||
@@@ -3179,49 -3177,6 +3178,49 @@@ static void ieee80211_process_sa_query_
ieee80211_tx_skb(sdata, skb);
}
+static void
+ieee80211_rx_check_bss_color_collision(struct ieee80211_rx_data *rx)
+{
+ struct ieee80211_mgmt *mgmt = (void *)rx->skb->data;
+ const struct element *ie;
+ size_t baselen;
+
+ if (!wiphy_ext_feature_isset(rx->local->hw.wiphy,
+ NL80211_EXT_FEATURE_BSS_COLOR))
+ return;
+
+ if (ieee80211_hw_check(&rx->local->hw, DETECTS_COLOR_COLLISION))
+ return;
+
+ if (rx->sdata->vif.csa_active)
+ return;
+
+ baselen = mgmt->u.beacon.variable - rx->skb->data;
+ if (baselen > rx->skb->len)
+ return;
+
+ ie = cfg80211_find_ext_elem(WLAN_EID_EXT_HE_OPERATION,
+ mgmt->u.beacon.variable,
+ rx->skb->len - baselen);
+ if (ie && ie->datalen >= sizeof(struct ieee80211_he_operation) &&
+ ie->datalen >= ieee80211_he_oper_size(ie->data + 1)) {
+ struct ieee80211_bss_conf *bss_conf = &rx->sdata->vif.bss_conf;
+ const struct ieee80211_he_operation *he_oper;
+ u8 color;
+
+ he_oper = (void *)(ie->data + 1);
+ if (le32_get_bits(he_oper->he_oper_params,
+ IEEE80211_HE_OPERATION_BSS_COLOR_DISABLED))
+ return;
+
+ color = le32_get_bits(he_oper->he_oper_params,
+ IEEE80211_HE_OPERATION_BSS_COLOR_MASK);
+ if (color == bss_conf->he_bss_color.color)
+ ieeee80211_obss_color_collision_notify(&rx->sdata->vif,
+ BIT_ULL(color));
+ }
+}
+
static ieee80211_rx_result debug_noinline
ieee80211_rx_h_mgmt_check(struct ieee80211_rx_data *rx)
{
@@@ -3247,9 -3202,6 +3246,9 @@@
!(rx->flags & IEEE80211_RX_BEACON_REPORTED)) {
int sig = 0;
+ /* sw bss color collision detection */
+ ieee80211_rx_check_bss_color_collision(rx);
+
if (ieee80211_hw_check(&rx->local->hw, SIGNAL_DBM) &&
!(status->flag & RX_FLAG_NO_SIGNAL_VAL))
sig = status->signal;
@@@ -3343,7 -3295,7 +3342,7 @@@ ieee80211_rx_h_action(struct ieee80211_
switch (mgmt->u.action.category) {
case WLAN_CATEGORY_HT:
/* reject HT action frames from stations not supporting HT */
- if (!rx->sta->sta.ht_cap.ht_supported)
+ if (!rx->sta->sta.deflink.ht_cap.ht_supported)
goto invalid;
if (sdata->vif.type != NL80211_IFTYPE_STATION &&
@@@ -3407,7 -3359,7 +3406,7 @@@
struct sta_opmode_info sta_opmode = {};
/* If it doesn't support 40 MHz it can't change ... */
- if (!(rx->sta->sta.ht_cap.cap &
+ if (!(rx->sta->sta.deflink.ht_cap.cap &
IEEE80211_HT_CAP_SUP_WIDTH_20_40))
goto handled;
@@@ -3417,13 -3369,13 +3416,13 @@@
max_bw = ieee80211_sta_cap_rx_bw(rx->sta);
/* set cur_max_bandwidth and recalc sta bw */
- rx->sta->cur_max_bandwidth = max_bw;
+ rx->sta->deflink.cur_max_bandwidth = max_bw;
new_bw = ieee80211_sta_cur_vht_bw(rx->sta);
- if (rx->sta->sta.bandwidth == new_bw)
+ if (rx->sta->sta.deflink.bandwidth == new_bw)
goto handled;
- rx->sta->sta.bandwidth = new_bw;
+ rx->sta->sta.deflink.bandwidth = new_bw;
sband = rx->local->hw.wiphy->bands[status->band];
sta_opmode.bw =
ieee80211_sta_rx_bw_to_chan_width(rx->sta);
@@@ -3620,7 -3572,7 +3619,7 @@@
handled:
if (rx->sta)
- rx->sta->rx_stats.packets++;
+ rx->sta->deflink.rx_stats.packets++;
dev_kfree_skb(rx->skb);
return RX_QUEUED;
@@@ -3654,7 -3606,7 +3653,7 @@@ ieee80211_rx_h_userspace_mgmt(struct ie
ieee80211_rx_status_to_khz(status), sig,
rx->skb->data, rx->skb->len, 0)) {
if (rx->sta)
- rx->sta->rx_stats.packets++;
+ rx->sta->deflink.rx_stats.packets++;
dev_kfree_skb(rx->skb);
return RX_QUEUED;
}
@@@ -3692,7 -3644,7 +3691,7 @@@ ieee80211_rx_h_action_post_userspace(st
handled:
if (rx->sta)
- rx->sta->rx_stats.packets++;
+ rx->sta->deflink.rx_stats.packets++;
dev_kfree_skb(rx->skb);
return RX_QUEUED;
}
@@@ -3912,7 -3864,7 +3911,7 @@@ static void ieee80211_rx_handlers_resul
case RX_DROP_MONITOR:
I802_DEBUG_INC(rx->sdata->local->rx_handlers_drop);
if (rx->sta)
- rx->sta->rx_stats.dropped++;
+ rx->sta->deflink.rx_stats.dropped++;
fallthrough;
case RX_CONTINUE: {
struct ieee80211_rate *rate = NULL;
@@@ -3931,7 -3883,7 +3930,7 @@@
case RX_DROP_UNUSABLE:
I802_DEBUG_INC(rx->sdata->local->rx_handlers_drop);
if (rx->sta)
- rx->sta->rx_stats.dropped++;
+ rx->sta->deflink.rx_stats.dropped++;
dev_kfree_skb(rx->skb);
break;
case RX_QUEUED:
@@@ -4483,15 -4435,15 +4482,15 @@@ static void ieee80211_rx_8023(struct ie
void *sa = skb->data + ETH_ALEN;
void *da = skb->data;
- stats = &sta->rx_stats;
+ stats = &sta->deflink.rx_stats;
if (fast_rx->uses_rss)
- stats = this_cpu_ptr(sta->pcpu_rx_stats);
+ stats = this_cpu_ptr(sta->deflink.pcpu_rx_stats);
/* statistics part of ieee80211_rx_h_sta_process() */
if (!(status->flag & RX_FLAG_NO_SIGNAL_VAL)) {
stats->last_signal = status->signal;
if (!fast_rx->uses_rss)
- ewma_signal_add(&sta->rx_stats_avg.signal,
+ ewma_signal_add(&sta->deflink.rx_stats_avg.signal,
-status->signal);
}
@@@ -4507,7 -4459,7 +4506,7 @@@
stats->chain_signal_last[i] = signal;
if (!fast_rx->uses_rss)
- ewma_signal_add(&sta->rx_stats_avg.chain_signal[i],
+ ewma_signal_add(&sta->deflink.rx_stats_avg.chain_signal[i],
-signal);
}
}
@@@ -4583,7 -4535,7 +4582,7 @@@ static bool ieee80211_invoke_fast_rx(st
u8 da[ETH_ALEN];
u8 sa[ETH_ALEN];
} addrs __aligned(2);
- struct ieee80211_sta_rx_stats *stats = &sta->rx_stats;
+ struct ieee80211_sta_rx_stats *stats = &sta->deflink.rx_stats;
/* for parallel-rx, we need to have DUP_VALIDATED, otherwise we write
* to a common data structure; drivers can implement that per queue
@@@ -4685,7 -4637,7 +4684,7 @@@
drop:
dev_kfree_skb(skb);
if (fast_rx->uses_rss)
- stats = this_cpu_ptr(sta->pcpu_rx_stats);
+ stats = this_cpu_ptr(sta->deflink.pcpu_rx_stats);
stats->dropped++;
return true;
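Among the rx.c changes, the new ieee80211_rx_check_bss_color_collision()
pulls the 6-bit BSS colour out of the little-endian HE operation
parameters with le32_get_bits(). A self-contained sketch of that style
of field extraction; the mask placements below are assumptions for
illustration, not the real IEEE80211_HE_OPERATION_* values:

	#include <stdint.h>
	#include <stdio.h>

	#define BSS_COLOR_MASK     0x3f000000u	/* hypothetical placement */
	#define BSS_COLOR_DISABLED 0x80000000u	/* hypothetical placement */

	/* extract (word & mask) shifted down by the mask's lowest set bit */
	static uint32_t get_bits(uint32_t word, uint32_t mask)
	{
		return (word & mask) / (mask & -mask);
	}

	int main(void)
	{
		uint32_t params = 0x2a000000;	/* colour 42, not disabled */

		if (!(params & BSS_COLOR_DISABLED))
			printf("bss color %u\n",
			       get_bits(params, BSS_COLOR_MASK));
		return 0;
	}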
diff --combined net/netlink/af_netlink.c
index 1b5a9c2e1c29,73e9c0a9c187..0cd91f813a3b
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@@ -1931,6 -1931,7 +1931,6 @@@ static int netlink_recvmsg(struct socke
struct scm_cookie scm;
struct sock *sk = sock->sk;
struct netlink_sock *nlk = nlk_sk(sk);
- int noblock = flags & MSG_DONTWAIT;
size_t copied;
struct sk_buff *skb, *data_skb;
int err, ret;
@@@ -1940,7 -1941,7 +1940,7 @@@
copied = 0;
- skb = skb_recv_datagram(sk, flags, noblock, &err);
+ skb = skb_recv_datagram(sk, flags, &err);
if (skb == NULL)
goto out;
@@@ -1974,7 -1975,6 +1974,6 @@@
copied = len;
}
- skb_reset_transport_header(data_skb);
err = skb_copy_datagram_msg(data_skb, 0, msg, copied);
if (msg->msg_name) {
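The af_netlink.c hunk tracks a tree-wide cleanup: skb_recv_datagram()
no longer takes a separate noblock argument, and non-blocking behaviour
rides in the flags word (MSG_DONTWAIT) that netlink_recvmsg() already
holds. The userspace socket API has expressed it this way all along; a
minimal runnable illustration:

	#include <errno.h>
	#include <stdio.h>
	#include <string.h>
	#include <sys/socket.h>

	int main(void)
	{
		int fds[2];
		char buf[16];

		if (socketpair(AF_UNIX, SOCK_DGRAM, 0, fds))
			return 1;
		/* nothing queued: MSG_DONTWAIT makes recv() fail with
		 * EAGAIN immediately instead of blocking */
		if (recv(fds[0], buf, sizeof(buf), MSG_DONTWAIT) < 0)
			printf("recv: %s\n", strerror(errno));
		return 0;
	}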
diff --combined net/sched/act_pedit.c
index e01ef7f109f4,0eaaf1f45de1..d1221daa0952
--- a/net/sched/act_pedit.c
+++ b/net/sched/act_pedit.c
@@@ -149,7 -149,7 +149,7 @@@ static int tcf_pedit_init(struct net *n
struct nlattr *pattr;
struct tcf_pedit *p;
int ret = 0, err;
- int ksize;
+ int i, ksize;
u32 index;
if (!nla) {
@@@ -228,6 -228,18 +228,18 @@@
p->tcfp_nkeys = parm->nkeys;
}
memcpy(p->tcfp_keys, parm->keys, ksize);
+ p->tcfp_off_max_hint = 0;
+ for (i = 0; i < p->tcfp_nkeys; ++i) {
+ u32 cur = p->tcfp_keys[i].off;
+
+ /* The AT option can read a single byte, so we can bound the
+ * actual value with uchar max.
+ */
+ cur += (0xff & p->tcfp_keys[i].offmask) >> p->tcfp_keys[i].shift;
+
+ /* Each key touches 4 bytes starting from the computed offset */
+ p->tcfp_off_max_hint = max(p->tcfp_off_max_hint, cur + 4);
+ }
p->tcfp_flags = parm->flags;
goto_ch = tcf_action_set_ctrlact(*a, parm->action, goto_ch);
@@@ -308,13 -320,18 +320,18 @@@ static int tcf_pedit_act(struct sk_buf
struct tcf_result *res)
{
struct tcf_pedit *p = to_pedit(a);
+ u32 max_offset;
int i;
- if (skb_unclone(skb, GFP_ATOMIC))
- return p->tcf_action;
-
spin_lock(&p->tcf_lock);
+ max_offset = (skb_transport_header_was_set(skb) ?
+ skb_transport_offset(skb) :
+ skb_network_offset(skb)) +
+ p->tcfp_off_max_hint;
+ if (skb_ensure_writable(skb, min(skb->len, max_offset)))
+ goto unlock;
+
tcf_lastuse_update(&p->tcf_tm);
if (p->tcfp_nkeys > 0) {
@@@ -403,6 -420,7 +420,7 @@@ bad
p->tcf_qstats.overlimits++;
done:
bstats_update(&p->tcf_bstats, skb);
+ unlock:
spin_unlock(&p->tcf_lock);
return p->tcf_action;
}
@@@ -488,8 -506,7 +506,8 @@@ static int tcf_pedit_search(struct net
}
static int tcf_pedit_offload_act_setup(struct tc_action *act, void *entry_data,
- u32 *index_inc, bool bind)
+ u32 *index_inc, bool bind,
+ struct netlink_ext_ack *extack)
{
if (bind) {
struct flow_action_entry *entry = entry_data;
@@@ -504,7 -521,6 +522,7 @@@
entry->id = FLOW_ACTION_ADD;
break;
default:
+ NL_SET_ERR_MSG_MOD(extack, "Unsupported pedit command offload");
return -EOPNOTSUPP;
}
entry->mangle.htype = tcf_pedit_htype(act, k);
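The heart of the act_pedit.c fix is the precomputed tcfp_off_max_hint:
the worst-case byte any key may touch, so tcf_pedit_act() can ask
skb_ensure_writable() for exactly that much headroom instead of
uncloning the whole skb unconditionally. A standalone sketch of the
bound, mirroring the loop above (off/offmask/shift follow struct
tc_pedit_key; the scaffolding around them is invented):

	#include <stdint.h>
	#include <stdio.h>

	struct key { uint32_t off, offmask, shift; };

	static uint32_t off_max_hint(const struct key *k, int nkeys)
	{
		uint32_t hint = 0;

		for (int i = 0; i < nkeys; i++) {
			/* AT reads one packet byte, so 0xff bounds the
			 * masked, shifted contribution */
			uint32_t cur = k[i].off +
				       ((0xff & k[i].offmask) >> k[i].shift);

			/* each key rewrites 4 bytes from that offset */
			if (cur + 4 > hint)
				hint = cur + 4;
		}
		return hint;
	}

	int main(void)
	{
		struct key ks[] = { { .off = 8, .offmask = 0xf0, .shift = 4 } };

		printf("%u\n", off_max_hint(ks, 1));	/* 8 + 15 + 4 = 27 */
		return 0;
	}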
diff --combined net/sunrpc/xprtsock.c
index 5c91c5457197,650102a9c86a..fcdd0fca408e
--- a/net/sunrpc/xprtsock.c
+++ b/net/sunrpc/xprtsock.c
@@@ -1355,7 -1355,7 +1355,7 @@@ static void xs_udp_data_receive(struct
if (sk == NULL)
goto out;
for (;;) {
- skb = skb_recv_udp(sk, 0, 1, &err);
+ skb = skb_recv_udp(sk, MSG_DONTWAIT, &err);
if (skb == NULL)
break;
xs_udp_data_read_skb(&transport->xprt, sk, skb);
@@@ -1418,6 -1418,26 +1418,26 @@@ static size_t xs_tcp_bc_maxpayload(stru
}
#endif /* CONFIG_SUNRPC_BACKCHANNEL */
+ /**
+ * xs_local_state_change - callback to handle AF_LOCAL socket state changes
+ * @sk: socket whose state has changed
+ *
+ */
+ static void xs_local_state_change(struct sock *sk)
+ {
+ struct rpc_xprt *xprt;
+ struct sock_xprt *transport;
+
+ if (!(xprt = xprt_from_sock(sk)))
+ return;
+ transport = container_of(xprt, struct sock_xprt, xprt);
+ if (sk->sk_shutdown & SHUTDOWN_MASK) {
+ clear_bit(XPRT_CONNECTED, &xprt->state);
+ /* Trigger the socket release */
+ xs_run_error_worker(transport, XPRT_SOCK_WAKE_DISCONNECT);
+ }
+ }
+
/**
* xs_tcp_state_change - callback to handle TCP socket state changes
* @sk: socket whose state has changed
@@@ -1866,6 -1886,7 +1886,7 @@@ static int xs_local_finish_connecting(s
sk->sk_user_data = xprt;
sk->sk_data_ready = xs_data_ready;
sk->sk_write_space = xs_udp_write_space;
+ sk->sk_state_change = xs_local_state_change;
sk->sk_error_report = xs_error_report;
xprt_clear_connected(xprt);
@@@ -1950,6 -1971,9 +1971,9 @@@ static void xs_local_connect(struct rpc
struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
int ret;
+ if (transport->file)
+ goto force_disconnect;
+
if (RPC_IS_ASYNC(task)) {
/*
* We want the AF_LOCAL connect to be resolved in the
@@@ -1962,11 -1986,17 +1986,17 @@@
*/
task->tk_rpc_status = -ENOTCONN;
rpc_exit(task, -ENOTCONN);
- return;
+ goto out_wake;
}
ret = xs_local_setup_socket(transport);
if (ret && !RPC_IS_SOFTCONN(task))
msleep_interruptible(15000);
+ return;
+ force_disconnect:
+ xprt_force_disconnect(xprt);
+ out_wake:
+ xprt_clear_connecting(xprt);
+ xprt_wake_pending_tasks(xprt, -ENOTCONN);
}
#if IS_ENABLED(CONFIG_SUNRPC_SWAP)
@@@ -2845,9 -2875,6 +2875,6 @@@ static struct rpc_xprt *xs_setup_local(
}
xprt_set_bound(xprt);
xs_format_peer_addresses(xprt, "local", RPCBIND_NETID_LOCAL);
- ret = ERR_PTR(xs_local_setup_socket(transport));
- if (ret)
- goto out_err;
break;
default:
ret = ERR_PTR(-EAFNOSUPPORT);
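The xs_local_connect() rework funnels its failure cases through
labelled exits: a stale transport->file forces a disconnect, and every
early return now wakes pending tasks with -ENOTCONN instead of leaving
them stuck. A toy sketch of that control shape, with stubbed helpers
whose names are placeholders:

	#include <stdbool.h>
	#include <stdio.h>

	static bool have_file;	/* stands in for transport->file */

	static void force_disconnect(void) { puts("force disconnect"); }
	static void clear_connecting(void) { puts("clear connecting"); }
	static void wake_pending(void)     { puts("wake pending: -ENOTCONN"); }

	static void local_connect(bool async)
	{
		if (have_file)
			goto force;
		if (async) {
			/* AF_LOCAL connects must resolve synchronously */
			goto out_wake;
		}
		puts("synchronous socket setup");
		return;
	force:
		force_disconnect();
	out_wake:
		clear_connecting();
		wake_pending();
	}

	int main(void)
	{
		have_file = true;
		local_connect(false);
		return 0;
	}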
diff --combined net/tls/tls_device.c
index b12f81a2b44c,3919fe2c58c5..bca00521ebc1
--- a/net/tls/tls_device.c
+++ b/net/tls/tls_device.c
@@@ -964,9 -964,11 +964,9 @@@ int tls_device_decrypted(struct sock *s
tls_ctx->rx.rec_seq, rxm->full_len,
is_encrypted, is_decrypted);
- ctx->sw.decrypted |= is_decrypted;
-
if (unlikely(test_bit(TLS_RX_DEV_DEGRADED, &tls_ctx->flags))) {
if (likely(is_encrypted || is_decrypted))
- return 0;
+ return is_decrypted;
/* After tls_device_down disables the offload, the next SKB will
* likely have initial fragments decrypted, and final ones not
@@@ -981,7 -983,7 +981,7 @@@
*/
if (is_decrypted) {
ctx->resync_nh_reset = 1;
- return 0;
+ return is_decrypted;
}
if (is_encrypted) {
tls_device_core_ctrl_rx_resync(tls_ctx, ctx, sk, skb);
@@@ -1345,7 -1347,10 +1345,10 @@@ static int tls_device_down(struct net_d
/* Device contexts for RX and TX will be freed on sk_destruct
* by tls_device_free_ctx. rx_conf and tx_conf stay in TLS_HW.
+ * Now release the ref taken above.
*/
+ if (refcount_dec_and_test(&ctx->refcount))
+ tls_device_free_ctx(ctx);
}
up_write(&device_offload_lock);
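The tls_device_down() change pairs the reference taken earlier in the
function with an explicit put, freeing the context only when the count
reaches zero, which is the usual refcount_dec_and_test() idiom. A
self-contained analogue with C11 atomics (struct and helper names are
invented):

	#include <stdatomic.h>
	#include <stdio.h>
	#include <stdlib.h>

	struct ctx { atomic_int refcount; };

	static void put_ctx(struct ctx *c)
	{
		/* fetch_sub returns the old value: 1 means we just
		 * dropped the final reference */
		if (atomic_fetch_sub(&c->refcount, 1) == 1) {
			puts("last ref gone, freeing");
			free(c);
		}
	}

	int main(void)
	{
		struct ctx *c = malloc(sizeof(*c));

		if (!c)
			return 1;
		atomic_init(&c->refcount, 2);	/* e.g. socket ref + temp ref */
		put_ctx(c);	/* the temp ref taken at the top of the function */
		put_ctx(c);	/* final ref: frees the context */
		return 0;
	}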
diff --combined net/wireless/nl80211.c
index 945ed87d12e0,1a3551b6d18b..02a29052e41d
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@@ -3173,6 -3173,15 +3173,15 @@@ int nl80211_parse_chandef(struct cfg802
} else if (attrs[NL80211_ATTR_CHANNEL_WIDTH]) {
chandef->width =
nla_get_u32(attrs[NL80211_ATTR_CHANNEL_WIDTH]);
+ if (chandef->chan->band == NL80211_BAND_S1GHZ) {
+ /* User input error: the channel width doesn't match the channel */
+ if (chandef->width != ieee80211_s1g_channel_width(chandef->chan)) {
+ NL_SET_ERR_MSG_ATTR(extack,
+ attrs[NL80211_ATTR_CHANNEL_WIDTH],
+ "bad channel width");
+ return -EINVAL;
+ }
+ }
if (attrs[NL80211_ATTR_CENTER_FREQ1]) {
chandef->center_freq1 =
nla_get_u32(attrs[NL80211_ATTR_CENTER_FREQ1]);
@@@ -3710,7 -3719,6 +3719,7 @@@ static int nl80211_send_iface(struct sk
wdev_lock(wdev);
switch (wdev->iftype) {
case NL80211_IFTYPE_AP:
+ case NL80211_IFTYPE_P2P_GO:
if (wdev->ssid_len &&
nla_put(msg, NL80211_ATTR_SSID, wdev->ssid_len, wdev->ssid))
goto nla_put_failure_locked;
@@@ -11658,18 -11666,23 +11667,23 @@@ static int nl80211_set_tx_bitrate_mask(
struct cfg80211_bitrate_mask mask;
struct cfg80211_registered_device *rdev = info->user_ptr[0];
struct net_device *dev = info->user_ptr[1];
+ struct wireless_dev *wdev = dev->ieee80211_ptr;
int err;
if (!rdev->ops->set_bitrate_mask)
return -EOPNOTSUPP;
+ wdev_lock(wdev);
err = nl80211_parse_tx_bitrate_mask(info, info->attrs,
NL80211_ATTR_TX_RATES, &mask,
dev, true);
if (err)
- return err;
+ goto out;
- return rdev_set_bitrate_mask(rdev, dev, NULL, &mask);
+ err = rdev_set_bitrate_mask(rdev, dev, NULL, &mask);
+ out:
+ wdev_unlock(wdev);
+ return err;
}
static int nl80211_register_mgmt(struct sk_buff *skb, struct genl_info *info)
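The set_bitrate_mask fix widens the wdev lock so that parsing the mask
and applying it happen inside one critical section instead of racing
other configuration paths between the two steps. A pthreads sketch of
the same lock-across-both-steps shape, with stubbed parse/apply
helpers:

	#include <pthread.h>
	#include <stdio.h>

	static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

	static int parse_mask(int *mask) { *mask = 0xff; return 0; }
	static int apply_mask(int mask)  { printf("mask %#x\n", mask); return 0; }

	static int set_bitrate_mask(void)
	{
		int mask, err;

		pthread_mutex_lock(&lock);
		err = parse_mask(&mask);
		if (err)
			goto out;	/* unlock on the error path too */
		err = apply_mask(mask);
	out:
		pthread_mutex_unlock(&lock);
		return err;
	}

	int main(void) { return set_bitrate_mask(); }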
diff --combined tools/testing/selftests/net/Makefile
index 0fbdacfdcd6a,e1f998defd10..811e9b712bea
--- a/tools/testing/selftests/net/Makefile
+++ b/tools/testing/selftests/net/Makefile
@@@ -25,6 -25,7 +25,7 @@@ TEST_PROGS += bareudp.s
TEST_PROGS += amt.sh
TEST_PROGS += unicast_extensions.sh
TEST_PROGS += udpgro_fwd.sh
+ TEST_PROGS += udpgro_frglist.sh
TEST_PROGS += veth.sh
TEST_PROGS += ioam6.sh
TEST_PROGS += gro.sh
@@@ -36,7 -37,6 +37,7 @@@ TEST_PROGS += srv6_end_dt4_l3vpn_test.s
TEST_PROGS += srv6_end_dt6_l3vpn_test.sh
TEST_PROGS += vrf_strict_mode_test.sh
TEST_PROGS += arp_ndisc_evict_nocarrier.sh
+TEST_PROGS += ndisc_unsolicited_na_test.sh
TEST_PROGS_EXTENDED := in_netns.sh setup_loopback.sh setup_veth.sh
TEST_PROGS_EXTENDED += toeplitz_client.sh toeplitz.sh
TEST_GEN_FILES = socket nettest
@@@ -62,6 -62,8 +63,8 @@@ TEST_FILES := setting
KSFT_KHDR_INSTALL := 1
include ../lib.mk
+ include bpf/Makefile
+
$(OUTPUT)/reuseport_bpf_numa: LDLIBS += -lnuma
$(OUTPUT)/tcp_mmap: LDLIBS += -lpthread
$(OUTPUT)/tcp_inq: LDLIBS += -lpthread