The following commit has been merged into the master branch:
commit 45d9bc1304e6e4b441dd1b1da32bbb4d99dcd7f0
Merge: d117b0ca25b672c94f011d2f5ee48823cccb8e7c d6fc1923d6839a564861e7da06016377a795f6a9
Author: Stephen Rothwell <sfr(a)canb.auug.org.au>
Date: Thu Aug 27 10:33:29 2020 +1000
Merge remote-tracking branch 'net-next/master' into master
# Conflicts:
# drivers/net/ethernet/ibm/ibmvnic.c
diff --combined MAINTAINERS
index 8383735974e4,36ec0bd50a8f..97b5ef6b878c
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@@ -2220,8 -2220,8 +2220,8 @@@ ARM/OPENMOKO NEO FREERUNNER (GTA02) MAC
L: openmoko-kernel(a)lists.openmoko.org (subscribers-only)
S: Orphan
W: http://wiki.openmoko.org/wiki/Neo_FreeRunner
-F: arch/arm/mach-s3c24xx/gta02.h
-F: arch/arm/mach-s3c24xx/mach-gta02.c
+F: arch/arm/mach-s3c/gta02.h
+F: arch/arm/mach-s3c/mach-gta02.c
ARM/Orion SoC/Technologic Systems TS-78xx platform support
M: Alexander Clouter <alex(a)digriz.org.uk>
@@@ -2410,8 -2410,10 +2410,8 @@@ F: arch/arm/boot/dts/exynos
F: arch/arm/boot/dts/s3c*
F: arch/arm/boot/dts/s5p*
F: arch/arm/mach-exynos*/
-F: arch/arm/mach-s3c24*/
-F: arch/arm/mach-s3c64xx/
+F: arch/arm/mach-s3c/
F: arch/arm/mach-s5p*/
-F: arch/arm/plat-samsung/
F: arch/arm64/boot/dts/exynos/
F: drivers/*/*/*s3c24*
F: drivers/*/*s3c24*
@@@ -2422,9 -2424,6 +2422,9 @@@ F: drivers/soc/samsung
F: drivers/tty/serial/samsung*
F: include/linux/soc/samsung/
N: exynos
+N: s3c2410
+N: s3c64xx
+N: s5pv210
ARM/SAMSUNG MOBILE MACHINE SUPPORT
M: Kyungmin Park <kyungmin.park(a)samsung.com>
@@@ -3206,7 -3205,6 +3206,7 @@@ S: Maintaine
T: git git://git.kernel.org/pub/scm/linux/kernel/git/axboe/linux-block.git
F: block/
F: drivers/block/
+F: include/linux/blk*
F: kernel/trace/blktrace.c
F: lib/sbitmap.c
@@@ -3390,7 -3388,6 +3390,7 @@@ M: Florian Fainelli <f.fainelli(a)gmail.c
L: netdev(a)vger.kernel.org
L: openwrt-devel(a)lists.openwrt.org (subscribers-only)
S: Supported
+F: Documentation/devicetree/bindings/net/dsa/b53.txt
F: drivers/net/dsa/b53/*
F: include/linux/platform_data/b53.h
@@@ -3576,28 -3573,13 +3576,28 @@@ L: bcm-kernel-feedback-list(a)broadcom.co
S: Maintained
F: drivers/phy/broadcom/phy-brcm-usb*
+BROADCOM ETHERNET PHY DRIVERS
+M: Florian Fainelli <f.fainelli(a)gmail.com>
+L: bcm-kernel-feedback-list(a)broadcom.com
+L: netdev(a)vger.kernel.org
+S: Supported
+F: Documentation/devicetree/bindings/net/broadcom-bcm87xx.txt
+F: drivers/net/phy/bcm*.[ch]
+F: drivers/net/phy/broadcom.c
+F: include/linux/brcmphy.h
+
BROADCOM GENET ETHERNET DRIVER
M: Doug Berger <opendmb(a)gmail.com>
M: Florian Fainelli <f.fainelli(a)gmail.com>
L: bcm-kernel-feedback-list(a)broadcom.com
L: netdev(a)vger.kernel.org
S: Supported
+F: Documentation/devicetree/bindings/net/brcm,bcmgenet.txt
+F: Documentation/devicetree/bindings/net/brcm,unimac-mdio.txt
F: drivers/net/ethernet/broadcom/genet/
+F: drivers/net/mdio/mdio-bcm-unimac.c
+F: include/linux/platform_data/bcmgenet.h
+F: include/linux/platform_data/mdio-bcm-unimac.h
BROADCOM IPROC ARM ARCHITECTURE
M: Ray Jui <rjui(a)broadcom.com>
@@@ -4265,7 -4247,6 +4265,7 @@@ W: https://clangbuiltlinux.github.io
B: https://github.com/ClangBuiltLinux/linux/issues
C: irc://chat.freenode.net/clangbuiltlinux
F: Documentation/kbuild/llvm.rst
+F: scripts/clang-tools/
K: \b(?i:clang|llvm)\b
CLEANCACHE API
@@@ -4711,6 -4692,15 +4711,15 @@@ S: Supporte
W: http://www.chelsio.com
F: drivers/crypto/chelsio
+ CXGB4 INLINE CRYPTO DRIVER
+ M: Ayush Sawal <ayush.sawal(a)chelsio.com>
+ M: Vinay Kumar Yadav <vinay.yadav(a)chelsio.com>
+ M: Rohit Maheshwari <rohitm(a)chelsio.com>
+ L: netdev(a)vger.kernel.org
+ S: Supported
+ W: http://www.chelsio.com
+ F: drivers/net/ethernet/chelsio/inline_crypto/
+
CXGB4 ETHERNET DRIVER (CXGB4)
M: Vishal Kulkarni <vishal(a)chelsio.com>
L: netdev(a)vger.kernel.org
@@@ -5258,7 -5248,6 +5267,7 @@@ DOCUMENTATIO
M: Jonathan Corbet <corbet(a)lwn.net>
L: linux-doc(a)vger.kernel.org
S: Maintained
+P: Documentation/doc-guide/maintainer-profile.rst
T: git git://git.lwn.net/linux.git docs-next
F: Documentation/
F: scripts/documentation-file-ref-check
@@@ -6514,6 -6503,7 +6523,6 @@@ F: net/bridge
ETHERNET PHY LIBRARY
M: Andrew Lunn <andrew(a)lunn.ch>
-M: Florian Fainelli <f.fainelli(a)gmail.com>
M: Heiner Kallweit <hkallweit1(a)gmail.com>
R: Russell King <linux(a)armlinux.org.uk>
L: netdev(a)vger.kernel.org
@@@ -6903,14 -6893,6 +6912,14 @@@ L: linuxppc-dev(a)lists.ozlabs.or
S: Maintained
F: drivers/dma/fsldma.*
+FREESCALE DSPI DRIVER
+M: Vladimir Oltean <olteanv(a)gmail.com>
+L: linux-spi(a)vger.kernel.org
+S: Maintained
+F: Documentation/devicetree/bindings/spi/spi-fsl-dspi.txt
+F: drivers/spi/spi-fsl-dspi.c
+F: include/linux/spi/spi-fsl-dspi.h
+
FREESCALE ENETC ETHERNET DRIVERS
M: Claudiu Manoil <claudiu.manoil(a)nxp.com>
L: netdev(a)vger.kernel.org
@@@ -7199,7 -7181,7 +7208,7 @@@ FUSE: FILESYSTEM IN USERSPAC
M: Miklos Szeredi <miklos(a)szeredi.hu>
L: linux-fsdevel(a)vger.kernel.org
S: Maintained
-W: http://fuse.sourceforge.net/
+W: https://github.com/libfuse/
T: git git://git.kernel.org/pub/scm/linux/kernel/git/mszeredi/fuse.git
F: Documentation/filesystems/fuse.rst
F: fs/fuse/
@@@ -13951,7 -13933,6 +13960,7 @@@ PRINT
M: Petr Mladek <pmladek(a)suse.com>
M: Sergey Senozhatsky <sergey.senozhatsky(a)gmail.com>
R: Steven Rostedt <rostedt(a)goodmis.org>
+R: John Ogness <john.ogness(a)linutronix.de>
S: Maintained
F: include/linux/printk.h
F: kernel/printk/
@@@ -14070,13 -14051,6 +14079,13 @@@ T: git git://linuxtv.org/media_tree.gi
F: Documentation/admin-guide/media/pulse8-cec.rst
F: drivers/media/cec/usb/pulse8/
+PURISM LIBREM 5
+M: Purism Kernel Team <kernel(a)puri.sm>
+S: Supported
+B: https://source.puri.sm/Librem5/linux-next/issues
+T: https://source.puri.sm/Librem5/linux-next
+F: arch/arm64/boot/dts/freescale/imx8mq-librem5*
+
PVRUSB2 VIDEO4LINUX DRIVER
M: Mike Isely <isely(a)pobox.com>
L: pvrusb2(a)isely.net (subscribers-only)
@@@ -15336,8 -15310,6 +15345,8 @@@ F: Documentation/devicetree/bindings/cl
F: Documentation/devicetree/bindings/clock/samsung,s5p*
F: drivers/clk/samsung/
F: include/dt-bindings/clock/exynos*.h
+F: include/linux/clk/samsung.h
+F: include/linux/platform_data/clk-s3c2410.h
SAMSUNG SPI DRIVERS
M: Kukjin Kim <kgene(a)kernel.org>
@@@ -15349,7 -15321,6 +15358,7 @@@ S: Maintaine
F: Documentation/devicetree/bindings/spi/spi-samsung.txt
F: drivers/spi/spi-s3c*
F: include/linux/platform_data/spi-s3c64xx.h
+F: include/linux/spi/s3c24xx-fiq.h
SAMSUNG SXGBE DRIVERS
M: Byungho An <bh74.an(a)samsung.com>
@@@ -15863,17 -15834,19 +15872,17 @@@ F: drivers/video/fbdev/simplefb.
F: include/linux/platform_data/simplefb.h
SIMTEC EB110ATX (Chalice CATS)
-M: Vincent Sanders <vince(a)simtec.co.uk>
M: Simtec Linux Team <linux(a)simtec.co.uk>
S: Supported
W: http://www.simtec.co.uk/products/EB110ATX/
SIMTEC EB2410ITX (BAST)
-M: Vincent Sanders <vince(a)simtec.co.uk>
M: Simtec Linux Team <linux(a)simtec.co.uk>
S: Supported
W: http://www.simtec.co.uk/products/EB2410ITX/
-F: arch/arm/mach-s3c24xx/bast-ide.c
-F: arch/arm/mach-s3c24xx/bast-irq.c
-F: arch/arm/mach-s3c24xx/mach-bast.c
+F: arch/arm/mach-s3c/bast-ide.c
+F: arch/arm/mach-s3c/bast-irq.c
+F: arch/arm/mach-s3c/mach-bast.c
SIOX
M: Thorsten Scherer <t.scherer(a)eckelmann.de>
@@@ -18792,7 -18765,7 +18801,7 @@@ F: Documentation/devicetree/bindings/mf
F: Documentation/devicetree/bindings/regulator/wlf,arizona.yaml
F: Documentation/devicetree/bindings/sound/wlf,arizona.yaml
F: Documentation/hwmon/wm83??.rst
-F: arch/arm/mach-s3c64xx/mach-crag6410*
+F: arch/arm/mach-s3c/mach-crag6410*
F: drivers/clk/clk-wm83*.c
F: drivers/extcon/extcon-arizona.c
F: drivers/gpio/gpio-*wm*.c
diff --combined drivers/net/ethernet/8390/axnet_cs.c
index a00b36f91d9f,a001bc902359..2488bfdb9133
--- a/drivers/net/ethernet/8390/axnet_cs.c
+++ b/drivers/net/ethernet/8390/axnet_cs.c
@@@ -610,7 -610,7 +610,7 @@@ static int axnet_ioctl(struct net_devic
switch (cmd) {
case SIOCGMIIPHY:
data->phy_id = info->phy_id;
- /* Fall through */
+ fallthrough;
case SIOCGMIIREG: /* Read MII PHY register. */
data->val_out = mdio_read(mii_addr, data->phy_id, data->reg_num & 0x1f);
return 0;
@@@ -657,8 -657,10 +657,10 @@@ static void block_input(struct net_devi
outb_p(E8390_RREAD+E8390_START, nic_base + AXNET_CMD);
insw(nic_base + AXNET_DATAPORT,buf,count>>1);
- if (count & 0x01)
- buf[count-1] = inb(nic_base + AXNET_DATAPORT), xfer_count++;
+ if (count & 0x01) {
+ buf[count-1] = inb(nic_base + AXNET_DATAPORT);
+ xfer_count++;
+ }
}
@@@ -1270,10 -1272,12 +1272,12 @@@ static void ei_tx_intr(struct net_devic
ei_local->txing = 1;
NS8390_trigger_send(dev, ei_local->tx2, ei_local->tx_start_page + 6);
netif_trans_update(dev);
- ei_local->tx2 = -1,
+ ei_local->tx2 = -1;
ei_local->lasttx = 2;
+ } else {
+ ei_local->lasttx = 20;
+ ei_local->txing = 0;
}
- else ei_local->lasttx = 20, ei_local->txing = 0;
}
else if (ei_local->tx2 < 0)
{
@@@ -1289,9 -1293,10 +1293,10 @@@
netif_trans_update(dev);
ei_local->tx1 = -1;
ei_local->lasttx = 1;
+ } else {
+ ei_local->lasttx = 10;
+ ei_local->txing = 0;
}
- else
- ei_local->lasttx = 10, ei_local->txing = 0;
}
// else
// netdev_warn(dev, "unexpected TX-done interrupt, lasttx=%d\n",
diff --combined drivers/net/ethernet/8390/pcnet_cs.c
index 164c3ed550bf,c383f16889f4..9d3b1e0e425c
--- a/drivers/net/ethernet/8390/pcnet_cs.c
+++ b/drivers/net/ethernet/8390/pcnet_cs.c
@@@ -1108,7 -1108,7 +1108,7 @@@ static int ei_ioctl(struct net_device *
switch (cmd) {
case SIOCGMIIPHY:
data->phy_id = info->phy_id;
- /* fall through */
+ fallthrough;
case SIOCGMIIREG: /* Read MII PHY register. */
data->val_out = mdio_read(mii_addr, data->phy_id, data->reg_num & 0x1f);
return 0;
@@@ -1178,8 -1178,10 +1178,10 @@@ static void dma_block_input(struct net_
outb_p(E8390_RREAD+E8390_START, nic_base + PCNET_CMD);
insw(nic_base + PCNET_DATAPORT,buf,count>>1);
- if (count & 0x01)
- buf[count-1] = inb(nic_base + PCNET_DATAPORT), xfer_count++;
+ if (count & 0x01) {
+ buf[count-1] = inb(nic_base + PCNET_DATAPORT);
+ xfer_count++;
+ }
/* This was for the ALPHA version only, but enough people have been
encountering problems that it is still here. */
diff --combined drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
index fa3367966f4b,e49370f9d59b..98d01a7497ec
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
@@@ -4745,9 -4745,11 +4745,11 @@@ static void le_intr_handler(struct adap
static struct intr_info t6_le_intr_info[] = {
{ T6_LIPMISS_F, "LE LIP miss", -1, 0 },
{ T6_LIP0_F, "LE 0 LIP error", -1, 0 },
+ { CMDTIDERR_F, "LE cmd tid error", -1, 1 },
{ TCAMINTPERR_F, "LE parity error", -1, 1 },
{ T6_UNKNOWNCMD_F, "LE unknown command", -1, 1 },
{ SSRAMINTPERR_F, "LE request queue parity error", -1, 1 },
+ { HASHTBLMEMCRCERR_F, "LE hash table mem crc error", -1, 0 },
{ 0 }
};
@@@ -7656,13 -7658,13 +7658,13 @@@ int t4_alloc_vi(struct adapter *adap, u
switch (nmac) {
case 5:
memcpy(mac + 24, c.nmac3, sizeof(c.nmac3));
- /* Fall through */
+ fallthrough;
case 4:
memcpy(mac + 18, c.nmac2, sizeof(c.nmac2));
- /* Fall through */
+ fallthrough;
case 3:
memcpy(mac + 12, c.nmac1, sizeof(c.nmac1));
- /* Fall through */
+ fallthrough;
case 2:
memcpy(mac + 6, c.nmac0, sizeof(c.nmac0));
}
diff --combined drivers/net/ethernet/hisilicon/hns/hns_enet.c
index 22522f8a5299,3af33ade7b60..b13f3a5cdf59
--- a/drivers/net/ethernet/hisilicon/hns/hns_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
@@@ -557,10 -557,7 +557,7 @@@ static int hns_nic_poll_rx_skb(struct h
va = (unsigned char *)desc_cb->buf + desc_cb->page_offset;
/* prefetch first cache line of first page */
- prefetch(va);
- #if L1_CACHE_BYTES < 128
- prefetch(va + L1_CACHE_BYTES);
- #endif
+ net_prefetch(va);
skb = *out_skb = napi_alloc_skb(&ring_data->napi,
HNS_RX_HEAD_SIZE);
@@@ -2282,10 -2279,8 +2279,10 @@@ static int hns_nic_dev_probe(struct pla
priv->enet_ver = AE_VERSION_1;
else if (acpi_dev_found(hns_enet_acpi_match[1].id))
priv->enet_ver = AE_VERSION_2;
- else
- return -ENXIO;
+ else {
+ ret = -ENXIO;
+ goto out_read_prop_fail;
+ }
/* try to find port-idx-in-ae first */
ret = acpi_node_get_property_reference(dev->fwnode,
@@@ -2301,8 -2296,7 +2298,8 @@@
priv->fwnode = args.fwnode;
} else {
dev_err(dev, "cannot read cfg data from OF or acpi\n");
- return -ENXIO;
+ ret = -ENXIO;
+ goto out_read_prop_fail;
}
ret = device_property_read_u32(dev, "port-idx-in-ae", &port_id);
diff --combined drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
index a4f1d515e5e0,1a1ba6a41bfe..47ab2a5c7391
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
@@@ -21,7 -21,6 +21,7 @@@
#include <net/pkt_cls.h>
#include <net/tcp.h>
#include <net/vxlan.h>
+#include <net/geneve.h>
#include "hnae3.h"
#include "hns3_enet.h"
@@@ -781,7 -780,7 +781,7 @@@ static int hns3_get_l4_protocol(struct
* and it is udp packet, which has a dest port as the IANA assigned.
* the hardware is expected to do the checksum offload, but the
* hardware will not do the checksum offload when udp dest port is
- * 4789.
+ * 4789 or 6081.
*/
static bool hns3_tunnel_csum_bug(struct sk_buff *skb)
{
@@@ -790,8 -789,7 +790,8 @@@
l4.hdr = skb_transport_header(skb);
if (!(!skb->encapsulation &&
- l4.udp->dest == htons(IANA_VXLAN_UDP_PORT)))
+ (l4.udp->dest == htons(IANA_VXLAN_UDP_PORT) ||
+ l4.udp->dest == htons(GENEVE_UDP_PORT))))
return false;
skb_checksum_help(skb);
@@@ -2748,7 -2746,7 +2748,7 @@@ static void hns3_rx_checksum(struct hns
case HNS3_OL4_TYPE_MAC_IN_UDP:
case HNS3_OL4_TYPE_NVGRE:
skb->csum_level = 1;
- /* fall through */
+ fallthrough;
case HNS3_OL4_TYPE_NO_TUN:
l3_type = hnae3_get_field(l234info, HNS3_RXD_L3ID_M,
HNS3_RXD_L3ID_S);
@@@ -3093,10 -3091,7 +3093,7 @@@ static int hns3_handle_rx_bd(struct hns
* lines. In such a case, single fetch would suffice to cache in the
* relevant part of the header.
*/
- prefetch(ring->va);
- #if L1_CACHE_BYTES < 128
- prefetch(ring->va + L1_CACHE_BYTES);
- #endif
+ net_prefetch(ring->va);
if (!skb) {
ret = hns3_alloc_skb(ring, length, ring->va);
diff --combined drivers/net/ethernet/ibm/ibmvnic.c
index d3a774331afc,86a83e53dce5..ed7fd9a6a0c9
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@@ -104,8 -104,7 +104,7 @@@ static int send_login(struct ibmvnic_ad
static void send_cap_queries(struct ibmvnic_adapter *adapter);
static int init_sub_crqs(struct ibmvnic_adapter *);
static int init_sub_crq_irqs(struct ibmvnic_adapter *adapter);
- static int ibmvnic_init(struct ibmvnic_adapter *);
- static int ibmvnic_reset_init(struct ibmvnic_adapter *);
+ static int ibmvnic_reset_init(struct ibmvnic_adapter *, bool reset);
static void release_crq_queue(struct ibmvnic_adapter *);
static int __ibmvnic_set_mac(struct net_device *, u8 *);
static int init_crq_queue(struct ibmvnic_adapter *adapter);
@@@ -297,8 -296,7 +296,7 @@@ static void deactivate_rx_pools(struct
{
int i;
- for (i = 0; i < be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
- i++)
+ for (i = 0; i < adapter->num_active_rx_pools; i++)
adapter->rx_pool[i].active = 0;
}
@@@ -306,6 -304,7 +304,7 @@@ static void replenish_rx_pool(struct ib
struct ibmvnic_rx_pool *pool)
{
int count = pool->size - atomic_read(&pool->available);
+ u64 handle = adapter->rx_scrq[pool->index]->handle;
struct device *dev = &adapter->vdev->dev;
int buffers_added = 0;
unsigned long lpar_rc;
@@@ -314,7 -313,6 +313,6 @@@
unsigned int offset;
dma_addr_t dma_addr;
unsigned char *dst;
- u64 *handle_array;
int shift = 0;
int index;
int i;
@@@ -322,10 -320,6 +320,6 @@@
if (!pool->active)
return;
- handle_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
- be32_to_cpu(adapter->login_rsp_buf->
- off_rxadd_subcrqs));
-
for (i = 0; i < count; ++i) {
skb = alloc_skb(pool->buff_size, GFP_ATOMIC);
if (!skb) {
@@@ -369,8 -363,7 +363,7 @@@
#endif
sub_crq.rx_add.len = cpu_to_be32(pool->buff_size << shift);
- lpar_rc = send_subcrq(adapter, handle_array[pool->index],
- &sub_crq);
+ lpar_rc = send_subcrq(adapter, handle, &sub_crq);
if (lpar_rc != H_SUCCESS)
goto failure;
@@@ -407,8 -400,7 +400,7 @@@ static void replenish_pools(struct ibmv
int i;
adapter->replenish_task_cycles++;
- for (i = 0; i < be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
- i++) {
+ for (i = 0; i < adapter->num_active_rx_pools; i++) {
if (adapter->rx_pool[i].active)
replenish_rx_pool(adapter, &adapter->rx_pool[i]);
}
@@@ -475,25 -467,20 +467,23 @@@ static int init_stats_token(struct ibmv
static int reset_rx_pools(struct ibmvnic_adapter *adapter)
{
struct ibmvnic_rx_pool *rx_pool;
+ u64 buff_size;
int rx_scrqs;
int i, j, rc;
- u64 *size_array;
+ if (!adapter->rx_pool)
+ return -1;
+
- size_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
- be32_to_cpu(adapter->login_rsp_buf->off_rxadd_buff_size));
-
- rx_scrqs = be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
+ buff_size = adapter->cur_rx_buf_sz;
+ rx_scrqs = adapter->num_active_rx_pools;
for (i = 0; i < rx_scrqs; i++) {
rx_pool = &adapter->rx_pool[i];
netdev_dbg(adapter->netdev, "Re-setting rx_pool[%d]\n", i);
- if (rx_pool->buff_size != be64_to_cpu(size_array[i])) {
+ if (rx_pool->buff_size != buff_size) {
free_long_term_buff(adapter, &rx_pool->long_term_buff);
- rx_pool->buff_size = be64_to_cpu(size_array[i]);
+ rx_pool->buff_size = buff_size;
rc = alloc_long_term_buff(adapter,
&rx_pool->long_term_buff,
rx_pool->size *
@@@ -561,13 -548,11 +551,11 @@@ static int init_rx_pools(struct net_dev
struct device *dev = &adapter->vdev->dev;
struct ibmvnic_rx_pool *rx_pool;
int rxadd_subcrqs;
- u64 *size_array;
+ u64 buff_size;
int i, j;
- rxadd_subcrqs =
- be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
- size_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
- be32_to_cpu(adapter->login_rsp_buf->off_rxadd_buff_size));
+ rxadd_subcrqs = adapter->num_active_rx_scrqs;
+ buff_size = adapter->cur_rx_buf_sz;
adapter->rx_pool = kcalloc(rxadd_subcrqs,
sizeof(struct ibmvnic_rx_pool),
@@@ -585,11 -570,11 +573,11 @@@
netdev_dbg(adapter->netdev,
"Initializing rx_pool[%d], %lld buffs, %lld bytes each\n",
i, adapter->req_rx_add_entries_per_subcrq,
- be64_to_cpu(size_array[i]));
+ buff_size);
rx_pool->size = adapter->req_rx_add_entries_per_subcrq;
rx_pool->index = i;
- rx_pool->buff_size = be64_to_cpu(size_array[i]);
+ rx_pool->buff_size = buff_size;
rx_pool->active = 1;
rx_pool->free_map = kcalloc(rx_pool->size, sizeof(int),
@@@ -652,10 -637,7 +640,10 @@@ static int reset_tx_pools(struct ibmvni
int tx_scrqs;
int i, rc;
+ if (!adapter->tx_pool)
+ return -1;
+
- tx_scrqs = be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs);
+ tx_scrqs = adapter->num_active_tx_pools;
for (i = 0; i < tx_scrqs; i++) {
rc = reset_one_tx_pool(adapter, &adapter->tso_pool[i]);
if (rc)
@@@ -744,7 -726,7 +732,7 @@@ static int init_tx_pools(struct net_dev
int tx_subcrqs;
int i, rc;
- tx_subcrqs = be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs);
+ tx_subcrqs = adapter->num_active_tx_scrqs;
adapter->tx_pool = kcalloc(tx_subcrqs,
sizeof(struct ibmvnic_tx_pool), GFP_KERNEL);
if (!adapter->tx_pool)
@@@ -980,7 -962,7 +968,7 @@@ static int set_link_state(struct ibmvni
return -1;
}
- if (adapter->init_done_rc == 1) {
+ if (adapter->init_done_rc == PARTIALSUCCESS) {
/* Partuial success, delay and re-send */
mdelay(1000);
resend = true;
@@@ -1530,9 -1512,9 +1518,9 @@@ static netdev_tx_t ibmvnic_xmit(struct
unsigned int offset;
int num_entries = 1;
unsigned char *dst;
- u64 *handle_array;
int index = 0;
u8 proto = 0;
+ u64 handle;
netdev_tx_t ret = NETDEV_TX_OK;
if (test_bit(0, &adapter->resetting)) {
@@@ -1559,8 -1541,7 +1547,7 @@@
tx_scrq = adapter->tx_scrq[queue_num];
txq = netdev_get_tx_queue(netdev, skb_get_queue_mapping(skb));
- handle_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
- be32_to_cpu(adapter->login_rsp_buf->off_txsubm_subcrqs));
+ handle = tx_scrq->handle;
index = tx_pool->free_map[tx_pool->consumer_index];
@@@ -1672,14 -1653,14 +1659,14 @@@
ret = NETDEV_TX_OK;
goto tx_err_out;
}
- lpar_rc = send_subcrq_indirect(adapter, handle_array[queue_num],
+ lpar_rc = send_subcrq_indirect(adapter, handle,
(u64)tx_buff->indir_dma,
(u64)num_entries);
dma_unmap_single(dev, tx_buff->indir_dma,
sizeof(tx_buff->indir_arr), DMA_TO_DEVICE);
} else {
tx_buff->num_entries = num_entries;
- lpar_rc = send_subcrq(adapter, handle_array[queue_num],
+ lpar_rc = send_subcrq(adapter, handle,
&tx_crq);
}
if (lpar_rc != H_SUCCESS) {
@@@ -1874,7 -1855,7 +1861,7 @@@ static int do_change_param_reset(struc
return rc;
}
- rc = ibmvnic_reset_init(adapter);
+ rc = ibmvnic_reset_init(adapter, true);
if (rc)
return IBMVNIC_INIT_FAILED;
@@@ -1992,7 -1973,7 +1979,7 @@@ static int do_reset(struct ibmvnic_adap
goto out;
}
- rc = ibmvnic_reset_init(adapter);
+ rc = ibmvnic_reset_init(adapter, true);
if (rc) {
rc = IBMVNIC_INIT_FAILED;
goto out;
@@@ -2017,10 -1998,7 +2004,10 @@@
adapter->req_rx_add_entries_per_subcrq !=
old_num_rx_slots ||
adapter->req_tx_entries_per_subcrq !=
- old_num_tx_slots) {
+ old_num_tx_slots ||
+ !adapter->rx_pool ||
+ !adapter->tso_pool ||
+ !adapter->tx_pool) {
release_rx_pools(adapter);
release_tx_pools(adapter);
release_napi(adapter);
@@@ -2033,14 -2011,10 +2020,14 @@@
} else {
rc = reset_tx_pools(adapter);
if (rc)
+ netdev_dbg(adapter->netdev, "reset tx pools failed (%d)\n",
+ rc);
goto out;
rc = reset_rx_pools(adapter);
if (rc)
+ netdev_dbg(adapter->netdev, "reset rx pools failed (%d)\n",
+ rc);
goto out;
}
ibmvnic_disable_irqs(adapter);
@@@ -2106,7 -2080,7 +2093,7 @@@ static int do_hard_reset(struct ibmvnic
return rc;
}
- rc = ibmvnic_init(adapter);
+ rc = ibmvnic_reset_init(adapter, false);
if (rc)
return rc;
@@@ -3581,8 -3555,7 +3568,7 @@@ static int ibmvnic_send_crq(struct ibmv
if (rc) {
if (rc == H_CLOSED) {
dev_warn(dev, "CRQ Queue closed\n");
- if (test_bit(0, &adapter->resetting))
- ibmvnic_reset(adapter, VNIC_RESET_FATAL);
+ /* do not reset, report the fail, wait for passive init from server */
}
dev_warn(dev, "Send error (rc=%d)\n", rc);
@@@ -4305,6 -4278,11 +4291,11 @@@ static int handle_login_rsp(union ibmvn
struct net_device *netdev = adapter->netdev;
struct ibmvnic_login_rsp_buffer *login_rsp = adapter->login_rsp_buf;
struct ibmvnic_login_buffer *login = adapter->login_buf;
+ u64 *tx_handle_array;
+ u64 *rx_handle_array;
+ int num_tx_pools;
+ int num_rx_pools;
+ u64 *size_array;
int i;
dma_unmap_single(dev, adapter->login_buf_token, adapter->login_buf_sz,
@@@ -4339,6 -4317,30 +4330,30 @@@
ibmvnic_remove(adapter->vdev);
return -EIO;
}
+ size_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
+ be32_to_cpu(adapter->login_rsp_buf->off_rxadd_buff_size));
+ /* variable buffer sizes are not supported, so just read the
+ * first entry.
+ */
+ adapter->cur_rx_buf_sz = be64_to_cpu(size_array[0]);
+
+ num_tx_pools = be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs);
+ num_rx_pools = be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
+
+ tx_handle_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
+ be32_to_cpu(adapter->login_rsp_buf->off_txsubm_subcrqs));
+ rx_handle_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
+ be32_to_cpu(adapter->login_rsp_buf->off_rxadd_subcrqs));
+
+ for (i = 0; i < num_tx_pools; i++)
+ adapter->tx_scrq[i]->handle = tx_handle_array[i];
+
+ for (i = 0; i < num_rx_pools; i++)
+ adapter->rx_scrq[i]->handle = rx_handle_array[i];
+
+ adapter->num_active_tx_scrqs = num_tx_pools;
+ adapter->num_active_rx_scrqs = num_rx_pools;
+ release_login_rsp_buffer(adapter);
release_login_buffer(adapter);
complete(&adapter->init_done);
@@@ -4984,7 -4986,7 +4999,7 @@@ map_failed
return retrc;
}
- static int ibmvnic_reset_init(struct ibmvnic_adapter *adapter)
+ static int ibmvnic_reset_init(struct ibmvnic_adapter *adapter, bool reset)
{
struct device *dev = &adapter->vdev->dev;
unsigned long timeout = msecs_to_jiffies(30000);
@@@ -4993,12 -4995,19 +5008,19 @@@
adapter->from_passive_init = false;
- old_num_rx_queues = adapter->req_rx_queues;
- old_num_tx_queues = adapter->req_tx_queues;
+ if (reset) {
+ old_num_rx_queues = adapter->req_rx_queues;
+ old_num_tx_queues = adapter->req_tx_queues;
+ reinit_completion(&adapter->init_done);
+ }
- reinit_completion(&adapter->init_done);
adapter->init_done_rc = 0;
- ibmvnic_send_crq_init(adapter);
+ rc = ibmvnic_send_crq_init(adapter);
+ if (rc) {
+ dev_err(dev, "Send crq init failed with error %d\n", rc);
+ return rc;
+ }
+
if (!wait_for_completion_timeout(&adapter->init_done, timeout)) {
dev_err(dev, "Initialization sequence timed out\n");
return -1;
@@@ -5009,13 -5018,8 +5031,8 @@@
return adapter->init_done_rc;
}
- if (adapter->from_passive_init) {
- adapter->state = VNIC_OPEN;
- adapter->from_passive_init = false;
- return -1;
- }
-
- if (test_bit(0, &adapter->resetting) && !adapter->wait_for_reset &&
+ if (reset &&
+ test_bit(0, &adapter->resetting) && !adapter->wait_for_reset &&
adapter->reset_reason != VNIC_RESET_MOBILITY) {
if (adapter->req_rx_queues != old_num_rx_queues ||
adapter->req_tx_queues != old_num_tx_queues) {
@@@ -5043,48 -5047,6 +5060,6 @@@
return rc;
}
- static int ibmvnic_init(struct ibmvnic_adapter *adapter)
- {
- struct device *dev = &adapter->vdev->dev;
- unsigned long timeout = msecs_to_jiffies(30000);
- int rc;
-
- adapter->from_passive_init = false;
-
- adapter->init_done_rc = 0;
- ibmvnic_send_crq_init(adapter);
- if (!wait_for_completion_timeout(&adapter->init_done, timeout)) {
- dev_err(dev, "Initialization sequence timed out\n");
- return -1;
- }
-
- if (adapter->init_done_rc) {
- release_crq_queue(adapter);
- return adapter->init_done_rc;
- }
-
- if (adapter->from_passive_init) {
- adapter->state = VNIC_OPEN;
- adapter->from_passive_init = false;
- return -1;
- }
-
- rc = init_sub_crqs(adapter);
- if (rc) {
- dev_err(dev, "Initialization of sub crqs failed\n");
- release_crq_queue(adapter);
- return rc;
- }
-
- rc = init_sub_crq_irqs(adapter);
- if (rc) {
- dev_err(dev, "Failed to initialize sub crq irqs\n");
- release_crq_queue(adapter);
- }
-
- return rc;
- }
-
static struct device_attribute dev_attr_failover;
static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
@@@ -5147,7 -5109,7 +5122,7 @@@
goto ibmvnic_init_fail;
}
- rc = ibmvnic_init(adapter);
+ rc = ibmvnic_reset_init(adapter, false);
if (rc && rc != EAGAIN)
goto ibmvnic_init_fail;
} while (rc == EAGAIN);
@@@ -5297,8 -5259,7 +5272,7 @@@ static unsigned long ibmvnic_get_desire
for (i = 0; i < adapter->req_tx_queues + adapter->req_rx_queues; i++)
ret += 4 * PAGE_SIZE; /* the scrq message queue */
- for (i = 0; i < be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
- i++)
+ for (i = 0; i < adapter->num_active_rx_pools; i++)
ret += adapter->rx_pool[i].size *
IOMMU_PAGE_ALIGN(adapter->rx_pool[i].buff_size, tbl);
diff --combined drivers/net/ethernet/intel/igb/igb_main.c
index d9c3a6b169f9,698bb6a4b088..e1e37d0b7703
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@@ -718,6 -718,7 +718,6 @@@ static void igb_cache_ring_register(str
case e1000_i354:
case e1000_i210:
case e1000_i211:
- fallthrough;
default:
for (; i < adapter->num_rx_queues; i++)
adapter->rx_ring[i]->reg_idx = rbase_offset + i;
@@@ -8046,10 -8047,7 +8046,7 @@@ static struct sk_buff *igb_construct_sk
struct sk_buff *skb;
/* prefetch first cache line of first page */
- prefetch(va);
- #if L1_CACHE_BYTES < 128
- prefetch(va + L1_CACHE_BYTES);
- #endif
+ net_prefetch(va);
/* allocate a skb to store the frags */
skb = napi_alloc_skb(&rx_ring->q_vector->napi, IGB_RX_HDR_LEN);
@@@ -8103,10 -8101,7 +8100,7 @@@ static struct sk_buff *igb_build_skb(st
struct sk_buff *skb;
/* prefetch first cache line of first page */
- prefetch(va);
- #if L1_CACHE_BYTES < 128
- prefetch(va + L1_CACHE_BYTES);
- #endif
+ net_prefetch(va);
/* build an skb around the page buffer */
skb = build_skb(va - IGB_SKB_PAD, truesize);
diff --combined drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
index 01a793105599,bb8c607cdcba..08181fc5f5d4
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
@@@ -737,7 -737,7 +737,7 @@@ static int rvu_nix_aq_enq_inst(struct r
else if (req->ctype == NIX_AQ_CTYPE_MCE)
memcpy(mask, &req->mce_mask,
sizeof(struct nix_rx_mce_s));
- /* Fall through */
+ fallthrough;
case NIX_AQ_INSTOP_INIT:
if (req->ctype == NIX_AQ_CTYPE_RQ)
memcpy(ctx, &req->rq, sizeof(struct nix_rq_ctx_s));
@@@ -3319,6 -3319,49 +3319,49 @@@ void rvu_nix_lf_teardown(struct rvu *rv
nix_ctx_free(rvu, pfvf);
}
+ #define NIX_AF_LFX_TX_CFG_PTP_EN BIT_ULL(32)
+
+ static int rvu_nix_lf_ptp_tx_cfg(struct rvu *rvu, u16 pcifunc, bool enable)
+ {
+ struct rvu_hwinfo *hw = rvu->hw;
+ struct rvu_block *block;
+ int blkaddr;
+ int nixlf;
+ u64 cfg;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
+ if (blkaddr < 0)
+ return NIX_AF_ERR_AF_LF_INVALID;
+
+ block = &hw->block[blkaddr];
+ nixlf = rvu_get_lf(rvu, block, pcifunc, 0);
+ if (nixlf < 0)
+ return NIX_AF_ERR_AF_LF_INVALID;
+
+ cfg = rvu_read64(rvu, blkaddr, NIX_AF_LFX_TX_CFG(nixlf));
+
+ if (enable)
+ cfg |= NIX_AF_LFX_TX_CFG_PTP_EN;
+ else
+ cfg &= ~NIX_AF_LFX_TX_CFG_PTP_EN;
+
+ rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_CFG(nixlf), cfg);
+
+ return 0;
+ }
+
+ int rvu_mbox_handler_nix_lf_ptp_tx_enable(struct rvu *rvu, struct msg_req *req,
+ struct msg_rsp *rsp)
+ {
+ return rvu_nix_lf_ptp_tx_cfg(rvu, req->hdr.pcifunc, true);
+ }
+
+ int rvu_mbox_handler_nix_lf_ptp_tx_disable(struct rvu *rvu, struct msg_req *req,
+ struct msg_rsp *rsp)
+ {
+ return rvu_nix_lf_ptp_tx_cfg(rvu, req->hdr.pcifunc, false);
+ }
+
int rvu_mbox_handler_nix_lso_format_cfg(struct rvu *rvu,
struct nix_lso_format_cfg *req,
struct nix_lso_format_cfg_rsp *rsp)
diff --combined drivers/net/ethernet/netronome/nfp/flower/offload.c
index 36356f96661d,44cf738636ef..1c59aff2163c
--- a/drivers/net/ethernet/netronome/nfp/flower/offload.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/offload.c
@@@ -31,6 -31,7 +31,7 @@@
BIT(FLOW_DISSECTOR_KEY_PORTS) | \
BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) | \
BIT(FLOW_DISSECTOR_KEY_VLAN) | \
+ BIT(FLOW_DISSECTOR_KEY_CVLAN) | \
BIT(FLOW_DISSECTOR_KEY_ENC_KEYID) | \
BIT(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) | \
BIT(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS) | \
@@@ -66,7 -67,8 +67,8 @@@
NFP_FLOWER_LAYER_IPV6)
#define NFP_FLOWER_PRE_TUN_RULE_FIELDS \
- (NFP_FLOWER_LAYER_PORT | \
+ (NFP_FLOWER_LAYER_EXT_META | \
+ NFP_FLOWER_LAYER_PORT | \
NFP_FLOWER_LAYER_MAC | \
NFP_FLOWER_LAYER_IPV4 | \
NFP_FLOWER_LAYER_IPV6)
@@@ -285,6 -287,30 +287,30 @@@ nfp_flower_calculate_key_layers(struct
NL_SET_ERR_MSG_MOD(extack, "unsupported offload: loaded firmware does not support VLAN PCP offload");
return -EOPNOTSUPP;
}
+ if (priv->flower_ext_feats & NFP_FL_FEATS_VLAN_QINQ &&
+ !(key_layer_two & NFP_FLOWER_LAYER2_QINQ)) {
+ key_layer |= NFP_FLOWER_LAYER_EXT_META;
+ key_size += sizeof(struct nfp_flower_ext_meta);
+ key_size += sizeof(struct nfp_flower_vlan);
+ key_layer_two |= NFP_FLOWER_LAYER2_QINQ;
+ }
+ }
+
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CVLAN)) {
+ struct flow_match_vlan cvlan;
+
+ if (!(priv->flower_ext_feats & NFP_FL_FEATS_VLAN_QINQ)) {
+ NL_SET_ERR_MSG_MOD(extack, "unsupported offload: loaded firmware does not support VLAN QinQ offload");
+ return -EOPNOTSUPP;
+ }
+
+ flow_rule_match_vlan(rule, &cvlan);
+ if (!(key_layer_two & NFP_FLOWER_LAYER2_QINQ)) {
+ key_layer |= NFP_FLOWER_LAYER_EXT_META;
+ key_size += sizeof(struct nfp_flower_ext_meta);
+ key_size += sizeof(struct nfp_flower_vlan);
+ key_layer_two |= NFP_FLOWER_LAYER2_QINQ;
+ }
}
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_CONTROL)) {
@@@ -784,7 -810,7 +810,7 @@@ nfp_flower_copy_pre_actions(char *act_d
case NFP_FL_ACTION_OPCODE_PRE_TUNNEL:
if (tunnel_act)
*tunnel_act = true;
- /* fall through */
+ fallthrough;
case NFP_FL_ACTION_OPCODE_PRE_LAG:
memcpy(act_dst + act_off, act_src + act_off, act_len);
break;
@@@ -1066,6 -1092,7 +1092,7 @@@ err_destroy_merge_flow
* nfp_flower_validate_pre_tun_rule()
* @app: Pointer to the APP handle
* @flow: Pointer to NFP flow representation of rule
+ * @key_ls: Pointer to NFP key layers structure
* @extack: Netlink extended ACK report
*
* Verifies the flow as a pre-tunnel rule.
@@@ -1075,10 -1102,13 +1102,13 @@@
static int
nfp_flower_validate_pre_tun_rule(struct nfp_app *app,
struct nfp_fl_payload *flow,
+ struct nfp_fl_key_ls *key_ls,
struct netlink_ext_ack *extack)
{
+ struct nfp_flower_priv *priv = app->priv;
struct nfp_flower_meta_tci *meta_tci;
struct nfp_flower_mac_mpls *mac;
+ u8 *ext = flow->unmasked_data;
struct nfp_fl_act_head *act;
u8 *mask = flow->mask_data;
bool vlan = false;
@@@ -1086,20 -1116,25 +1116,25 @@@
u8 key_layer;
meta_tci = (struct nfp_flower_meta_tci *)flow->unmasked_data;
- if (meta_tci->tci & cpu_to_be16(NFP_FLOWER_MASK_VLAN_PRESENT)) {
- u16 vlan_tci = be16_to_cpu(meta_tci->tci);
-
- vlan_tci &= ~NFP_FLOWER_MASK_VLAN_PRESENT;
- flow->pre_tun_rule.vlan_tci = cpu_to_be16(vlan_tci);
- vlan = true;
- } else {
- flow->pre_tun_rule.vlan_tci = cpu_to_be16(0xffff);
+ key_layer = key_ls->key_layer;
+ if (!(priv->flower_ext_feats & NFP_FL_FEATS_VLAN_QINQ)) {
+ if (meta_tci->tci & cpu_to_be16(NFP_FLOWER_MASK_VLAN_PRESENT)) {
+ u16 vlan_tci = be16_to_cpu(meta_tci->tci);
+
+ vlan_tci &= ~NFP_FLOWER_MASK_VLAN_PRESENT;
+ flow->pre_tun_rule.vlan_tci = cpu_to_be16(vlan_tci);
+ vlan = true;
+ } else {
+ flow->pre_tun_rule.vlan_tci = cpu_to_be16(0xffff);
+ }
}
- key_layer = meta_tci->nfp_flow_key_layer;
if (key_layer & ~NFP_FLOWER_PRE_TUN_RULE_FIELDS) {
NL_SET_ERR_MSG_MOD(extack, "unsupported pre-tunnel rule: too many match fields");
return -EOPNOTSUPP;
+ } else if (key_ls->key_layer_two & ~NFP_FLOWER_LAYER2_QINQ) {
+ NL_SET_ERR_MSG_MOD(extack, "unsupported pre-tunnel rule: non-vlan in extended match fields");
+ return -EOPNOTSUPP;
}
if (!(key_layer & NFP_FLOWER_LAYER_MAC)) {
@@@ -1109,7 -1144,13 +1144,13 @@@
/* Skip fields known to exist. */
mask += sizeof(struct nfp_flower_meta_tci);
+ ext += sizeof(struct nfp_flower_meta_tci);
+ if (key_ls->key_layer_two) {
+ mask += sizeof(struct nfp_flower_ext_meta);
+ ext += sizeof(struct nfp_flower_ext_meta);
+ }
mask += sizeof(struct nfp_flower_in_port);
+ ext += sizeof(struct nfp_flower_in_port);
/* Ensure destination MAC address is fully matched. */
mac = (struct nfp_flower_mac_mpls *)mask;
@@@ -1118,6 -1159,8 +1159,8 @@@
return -EOPNOTSUPP;
}
+ mask += sizeof(struct nfp_flower_mac_mpls);
+ ext += sizeof(struct nfp_flower_mac_mpls);
if (key_layer & NFP_FLOWER_LAYER_IPV4 ||
key_layer & NFP_FLOWER_LAYER_IPV6) {
/* Flags and proto fields have same offset in IPv4 and IPv6. */
@@@ -1130,7 -1173,6 +1173,6 @@@
sizeof(struct nfp_flower_ipv4) :
sizeof(struct nfp_flower_ipv6);
- mask += sizeof(struct nfp_flower_mac_mpls);
/* Ensure proto and flags are the only IP layer fields. */
for (i = 0; i < size; i++)
@@@ -1138,6 -1180,25 +1180,25 @@@
NL_SET_ERR_MSG_MOD(extack, "unsupported pre-tunnel rule: only flags and proto can be matched in ip header");
return -EOPNOTSUPP;
}
+ ext += size;
+ mask += size;
+ }
+
+ if ((priv->flower_ext_feats & NFP_FL_FEATS_VLAN_QINQ)) {
+ if (key_ls->key_layer_two & NFP_FLOWER_LAYER2_QINQ) {
+ struct nfp_flower_vlan *vlan_tags;
+ u16 vlan_tci;
+
+ vlan_tags = (struct nfp_flower_vlan *)ext;
+
+ vlan_tci = be16_to_cpu(vlan_tags->outer_tci);
+
+ vlan_tci &= ~NFP_FLOWER_MASK_VLAN_PRESENT;
+ flow->pre_tun_rule.vlan_tci = cpu_to_be16(vlan_tci);
+ vlan = true;
+ } else {
+ flow->pre_tun_rule.vlan_tci = cpu_to_be16(0xffff);
+ }
}
/* Action must be a single egress or pop_vlan and egress. */
@@@ -1220,7 -1281,7 +1281,7 @@@ nfp_flower_add_offload(struct nfp_app *
goto err_destroy_flow;
if (flow_pay->pre_tun_rule.dev) {
- err = nfp_flower_validate_pre_tun_rule(app, flow_pay, extack);
+ err = nfp_flower_validate_pre_tun_rule(app, flow_pay, key_layer, extack);
if (err)
goto err_destroy_flow;
}
diff --combined drivers/net/ethernet/qlogic/qed/qed_dev.c
index b8f076e4e6b8,00f2d7f13de6..f7f08e6a3acf
--- a/drivers/net/ethernet/qlogic/qed/qed_dev.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c
@@@ -3109,14 -3109,14 +3109,14 @@@ int qed_hw_init(struct qed_dev *cdev, s
p_hwfn->hw_info.hw_mode);
if (rc)
break;
- /* Fall through */
+ fallthrough;
case FW_MSG_CODE_DRV_LOAD_PORT:
rc = qed_hw_init_port(p_hwfn, p_hwfn->p_main_ptt,
p_hwfn->hw_info.hw_mode);
if (rc)
break;
- /* Fall through */
+ fallthrough;
case FW_MSG_CODE_DRV_LOAD_FUNCTION:
rc = qed_hw_init_pf(p_hwfn, p_hwfn->p_main_ptt,
p_params->p_tunn,
@@@ -3973,6 -3973,7 +3973,7 @@@ static int qed_hw_get_nvm_info(struct q
struct qed_mcp_link_speed_params *ext_speed;
struct qed_mcp_link_capabilities *p_caps;
struct qed_mcp_link_params *link;
+ int i;
/* Read global nvm_cfg address */
nvm_cfg_addr = qed_rd(p_hwfn, p_ptt, MISC_REG_GEN_PURP_CR0);
@@@ -4290,6 -4291,14 +4291,14 @@@
__set_bit(QED_DEV_CAP_ROCE,
&p_hwfn->hw_info.device_capabilities);
+ /* Read device serial number information from shmem */
+ addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
+ offsetof(struct nvm_cfg1, glob) +
+ offsetof(struct nvm_cfg1_glob, serial_number);
+
+ for (i = 0; i < 4; i++)
+ p_hwfn->hw_info.part_num[i] = qed_rd(p_hwfn, p_ptt, addr + i * 4);
+
return qed_mcp_fill_shmem_func_info(p_hwfn, p_ptt);
}
diff --combined drivers/net/ethernet/qlogic/qed/qed_main.c
index f39f629242a1,db5d003770ba..5b149ceff6b6
--- a/drivers/net/ethernet/qlogic/qed/qed_main.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_main.c
@@@ -39,6 -39,7 +39,7 @@@
#include "qed_hw.h"
#include "qed_selftest.h"
#include "qed_debug.h"
+ #include "qed_devlink.h"
#define QED_ROCE_QPS (8192)
#define QED_ROCE_DPIS (8)
@@@ -478,6 -479,7 +479,7 @@@ int qed_fill_dev_info(struct qed_dev *c
}
dev_info->mtu = hw_info->mtu;
+ cdev->common_dev_info = *dev_info;
return 0;
}
@@@ -510,107 -512,6 +512,6 @@@ static int qed_set_power_state(struct q
return 0;
}
- struct qed_devlink {
- struct qed_dev *cdev;
- };
-
- enum qed_devlink_param_id {
- QED_DEVLINK_PARAM_ID_BASE = DEVLINK_PARAM_GENERIC_ID_MAX,
- QED_DEVLINK_PARAM_ID_IWARP_CMT,
- };
-
- static int qed_dl_param_get(struct devlink *dl, u32 id,
- struct devlink_param_gset_ctx *ctx)
- {
- struct qed_devlink *qed_dl;
- struct qed_dev *cdev;
-
- qed_dl = devlink_priv(dl);
- cdev = qed_dl->cdev;
- ctx->val.vbool = cdev->iwarp_cmt;
-
- return 0;
- }
-
- static int qed_dl_param_set(struct devlink *dl, u32 id,
- struct devlink_param_gset_ctx *ctx)
- {
- struct qed_devlink *qed_dl;
- struct qed_dev *cdev;
-
- qed_dl = devlink_priv(dl);
- cdev = qed_dl->cdev;
- cdev->iwarp_cmt = ctx->val.vbool;
-
- return 0;
- }
-
- static const struct devlink_param qed_devlink_params[] = {
- DEVLINK_PARAM_DRIVER(QED_DEVLINK_PARAM_ID_IWARP_CMT,
- "iwarp_cmt", DEVLINK_PARAM_TYPE_BOOL,
- BIT(DEVLINK_PARAM_CMODE_RUNTIME),
- qed_dl_param_get, qed_dl_param_set, NULL),
- };
-
- static const struct devlink_ops qed_dl_ops;
-
- static int qed_devlink_register(struct qed_dev *cdev)
- {
- union devlink_param_value value;
- struct qed_devlink *qed_dl;
- struct devlink *dl;
- int rc;
-
- dl = devlink_alloc(&qed_dl_ops, sizeof(*qed_dl));
- if (!dl)
- return -ENOMEM;
-
- qed_dl = devlink_priv(dl);
-
- cdev->dl = dl;
- qed_dl->cdev = cdev;
-
- rc = devlink_register(dl, &cdev->pdev->dev);
- if (rc)
- goto err_free;
-
- rc = devlink_params_register(dl, qed_devlink_params,
- ARRAY_SIZE(qed_devlink_params));
- if (rc)
- goto err_unregister;
-
- value.vbool = false;
- devlink_param_driverinit_value_set(dl,
- QED_DEVLINK_PARAM_ID_IWARP_CMT,
- value);
-
- devlink_params_publish(dl);
- cdev->iwarp_cmt = false;
-
- return 0;
-
- err_unregister:
- devlink_unregister(dl);
-
- err_free:
- cdev->dl = NULL;
- devlink_free(dl);
-
- return rc;
- }
-
- static void qed_devlink_unregister(struct qed_dev *cdev)
- {
- if (!cdev->dl)
- return;
-
- devlink_params_unregister(cdev->dl, qed_devlink_params,
- ARRAY_SIZE(qed_devlink_params));
-
- devlink_unregister(cdev->dl);
- devlink_free(cdev->dl);
- }
-
/* probing */
static struct qed_dev *qed_probe(struct pci_dev *pdev,
struct qed_probe_params *params)
@@@ -639,12 -540,6 +540,6 @@@
}
DP_INFO(cdev, "PCI init completed successfully\n");
- rc = qed_devlink_register(cdev);
- if (rc) {
- DP_INFO(cdev, "Failed to register devlink.\n");
- goto err2;
- }
-
rc = qed_hw_prepare(cdev, QED_PCI_DEFAULT);
if (rc) {
DP_ERR(cdev, "hw prepare failed\n");
@@@ -674,8 -569,6 +569,6 @@@ static void qed_remove(struct qed_dev *
qed_set_power_state(cdev, PCI_D3hot);
- qed_devlink_unregister(cdev);
-
qed_free_cdev(cdev);
}
@@@ -761,7 -654,7 +654,7 @@@ static int qed_set_int_mode(struct qed_
kfree(int_params->msix_table);
if (force_mode)
goto out;
- /* Fallthrough */
+ fallthrough;
case QED_INT_MODE_MSI:
if (cdev->num_hwfns == 1) {
@@@ -775,7 -668,7 +668,7 @@@
if (force_mode)
goto out;
}
- /* Fallthrough */
+ fallthrough;
case QED_INT_MODE_INTA:
int_params->out.int_mode = QED_INT_MODE_INTA;
@@@ -2924,7 -2817,7 +2817,7 @@@ static int qed_set_led(struct qed_dev *
return status;
}
- static int qed_recovery_process(struct qed_dev *cdev)
+ int qed_recovery_process(struct qed_dev *cdev)
{
struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
struct qed_ptt *p_ptt;
@@@ -3112,6 -3005,9 +3005,9 @@@ const struct qed_common_ops qed_common_
.get_link = &qed_get_current_link,
.drain = &qed_drain,
.update_msglvl = &qed_init_dp,
+ .devlink_register = qed_devlink_register,
+ .devlink_unregister = qed_devlink_unregister,
+ .report_fatal_error = qed_report_fatal_error,
.dbg_all_data = &qed_dbg_all_data,
.dbg_all_data_size = &qed_dbg_all_data_size,
.chain_alloc = &qed_chain_alloc,
diff --combined drivers/net/ethernet/realtek/r8169_main.c
index fc9e6626db55,c427865d51a4..9e4e6a883877
--- a/drivers/net/ethernet/realtek/r8169_main.c
+++ b/drivers/net/ethernet/realtek/r8169_main.c
@@@ -617,7 -617,6 +617,6 @@@ struct rtl8169_private
struct work_struct work;
} wk;
- unsigned irq_enabled:1;
unsigned supports_gmii:1;
unsigned aspm_manageable:1;
dma_addr_t counters_phys_addr;
@@@ -1280,12 -1279,10 +1279,10 @@@ static void rtl_irq_disable(struct rtl8
RTL_W32(tp, IntrMask_8125, 0);
else
RTL_W16(tp, IntrMask, 0);
- tp->irq_enabled = 0;
}
static void rtl_irq_enable(struct rtl8169_private *tp)
{
- tp->irq_enabled = 1;
if (rtl_is_8125(tp))
RTL_W32(tp, IntrMask_8125, tp->irq_mask);
else
@@@ -4541,8 -4538,7 +4538,7 @@@ static irqreturn_t rtl8169_interrupt(in
struct rtl8169_private *tp = dev_instance;
u32 status = rtl_get_events(tp);
- if (!tp->irq_enabled || (status & 0xffff) == 0xffff ||
- !(status & tp->irq_mask))
+ if ((status & 0xffff) == 0xffff || !(status & tp->irq_mask))
return IRQ_NONE;
if (unlikely(status & SYSErr)) {
@@@ -4596,10 -4592,8 +4592,8 @@@ static int rtl8169_poll(struct napi_str
rtl_tx(dev, tp, budget);
- if (work_done < budget) {
- napi_complete_done(napi, work_done);
+ if (work_done < budget && napi_complete_done(napi, work_done))
rtl_irq_enable(tp);
- }
return work_done;
}
@@@ -4994,7 -4988,7 +4988,7 @@@ static int rtl_alloc_irq(struct rtl8169
rtl_unlock_config_regs(tp);
RTL_W8(tp, Config2, RTL_R8(tp, Config2) & ~MSIEnable);
rtl_lock_config_regs(tp);
- /* fall through */
+ fallthrough;
case RTL_GIGA_MAC_VER_07 ... RTL_GIGA_MAC_VER_17:
flags = PCI_IRQ_LEGACY;
break;
@@@ -5137,7 -5131,7 +5131,7 @@@ static void rtl_hw_initialize(struct rt
switch (tp->mac_version) {
case RTL_GIGA_MAC_VER_49 ... RTL_GIGA_MAC_VER_52:
rtl8168ep_stop_cmac(tp);
- /* fall through */
+ fallthrough;
case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_48:
rtl_hw_init_8168g(tp);
break;
diff --combined drivers/net/phy/dp83640.c
index 79e67f2fe00a,fc3d747eba55..f2caccaf4408
--- a/drivers/net/phy/dp83640.c
+++ b/drivers/net/phy/dp83640.c
@@@ -766,13 -766,13 +766,13 @@@ static int decode_evnt(struct dp83640_p
switch (words) {
case 3:
dp83640->edata.sec_hi = phy_txts->sec_hi;
- /* fall through */
+ fallthrough;
case 2:
dp83640->edata.sec_lo = phy_txts->sec_lo;
- /* fall through */
+ fallthrough;
case 1:
dp83640->edata.ns_hi = phy_txts->ns_hi;
- /* fall through */
+ fallthrough;
case 0:
dp83640->edata.ns_lo = phy_txts->ns_lo;
}
@@@ -798,51 -798,32 +798,32 @@@
return parsed;
}
- #define DP83640_PACKET_HASH_OFFSET 20
#define DP83640_PACKET_HASH_LEN 10
static int match(struct sk_buff *skb, unsigned int type, struct rxts *rxts)
{
- unsigned int offset = 0;
- u8 *msgtype, *data = skb_mac_header(skb);
- __be16 *seqid;
+ struct ptp_header *hdr;
+ u8 msgtype;
+ u16 seqid;
u16 hash;
/* check sequenceID, messageType, 12 bit hash of offset 20-29 */
- if (type & PTP_CLASS_VLAN)
- offset += VLAN_HLEN;
-
- switch (type & PTP_CLASS_PMASK) {
- case PTP_CLASS_IPV4:
- offset += ETH_HLEN + IPV4_HLEN(data + offset) + UDP_HLEN;
- break;
- case PTP_CLASS_IPV6:
- offset += ETH_HLEN + IP6_HLEN + UDP_HLEN;
- break;
- case PTP_CLASS_L2:
- offset += ETH_HLEN;
- break;
- default:
+ hdr = ptp_parse_header(skb, type);
+ if (!hdr)
return 0;
- }
- if (skb->len + ETH_HLEN < offset + OFF_PTP_SEQUENCE_ID + sizeof(*seqid))
- return 0;
+ msgtype = ptp_get_msgtype(hdr, type);
- if (unlikely(type & PTP_CLASS_V1))
- msgtype = data + offset + OFF_PTP_CONTROL;
- else
- msgtype = data + offset;
- if (rxts->msgtype != (*msgtype & 0xf))
+ if (rxts->msgtype != (msgtype & 0xf))
return 0;
- seqid = (__be16 *)(data + offset + OFF_PTP_SEQUENCE_ID);
- if (rxts->seqid != ntohs(*seqid))
+ seqid = be16_to_cpu(hdr->sequence_id);
+ if (rxts->seqid != seqid)
return 0;
hash = ether_crc(DP83640_PACKET_HASH_LEN,
- data + offset + DP83640_PACKET_HASH_OFFSET) >> 20;
+ (unsigned char *)&hdr->source_port_identity) >> 20;
if (rxts->hash != hash)
return 0;
@@@ -982,35 -963,16 +963,16 @@@ static void decode_status_frame(struct
static int is_sync(struct sk_buff *skb, int type)
{
- u8 *data = skb->data, *msgtype;
- unsigned int offset = 0;
-
- if (type & PTP_CLASS_VLAN)
- offset += VLAN_HLEN;
-
- switch (type & PTP_CLASS_PMASK) {
- case PTP_CLASS_IPV4:
- offset += ETH_HLEN + IPV4_HLEN(data + offset) + UDP_HLEN;
- break;
- case PTP_CLASS_IPV6:
- offset += ETH_HLEN + IP6_HLEN + UDP_HLEN;
- break;
- case PTP_CLASS_L2:
- offset += ETH_HLEN;
- break;
- default:
- return 0;
- }
-
- if (type & PTP_CLASS_V1)
- offset += OFF_PTP_CONTROL;
+ struct ptp_header *hdr;
+ u8 msgtype;
- if (skb->len < offset + 1)
+ hdr = ptp_parse_header(skb, type);
+ if (!hdr)
return 0;
- msgtype = data + offset;
+ msgtype = ptp_get_msgtype(hdr, type);
- return (*msgtype & 0xf) == 0;
+ return (msgtype & 0xf) == 0;
}
static void dp83640_free_clocks(void)
@@@ -1409,7 -1371,7 +1371,7 @@@ static void dp83640_txtstamp(struct mii
kfree_skb(skb);
return;
}
- /* fall through */
+ fallthrough;
case HWTSTAMP_TX_ON:
skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
skb_info->tmo = jiffies + SKB_TIMESTAMP_TIMEOUT;
diff --combined fs/io_uring.c
index 8a53af8e5fe2,1fd03a38400c..97b6907fc4c7
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@@ -2563,7 -2563,7 +2563,7 @@@ static inline void io_rw_done(struct ki
* IO with EINTR.
*/
ret = -EINTR;
- /* fall through */
+ fallthrough;
default:
kiocb->ki_complete(kiocb, ret, 0);
}
@@@ -4898,6 -4898,12 +4898,12 @@@ static bool io_arm_poll_handler(struct
mask |= POLLIN | POLLRDNORM;
if (def->pollout)
mask |= POLLOUT | POLLWRNORM;
+
+ /* If reading from MSG_ERRQUEUE using recvmsg, ignore POLLIN */
+ if ((req->opcode == IORING_OP_RECVMSG) &&
+ (req->sr_msg.msg_flags & MSG_ERRQUEUE))
+ mask &= ~POLLIN;
+
mask |= POLLERR | POLLPRI;
ipt.pt._qproc = io_async_queue_proc;
diff --combined net/batman-adv/bat_v_ogm.c
index 717fe657561d,11c3f98ba938..8c1148fc73d7
--- a/net/batman-adv/bat_v_ogm.c
+++ b/net/batman-adv/bat_v_ogm.c
@@@ -20,6 -20,7 +20,7 @@@
#include <linux/lockdep.h>
#include <linux/mutex.h>
#include <linux/netdevice.h>
+ #include <linux/prandom.h>
#include <linux/random.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
@@@ -881,12 -882,6 +882,12 @@@ static void batadv_v_ogm_process(const
ntohl(ogm_packet->seqno), ogm_throughput, ogm_packet->ttl,
ogm_packet->version, ntohs(ogm_packet->tvlv_len));
+ if (batadv_is_my_mac(bat_priv, ogm_packet->orig)) {
+ batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
+ "Drop packet: originator packet from ourself\n");
+ return;
+ }
+
/* If the throughput metric is 0, immediately drop the packet. No need
* to create orig_node / neigh_node for an unusable route.
*/
@@@ -1014,6 -1009,11 +1015,6 @@@ int batadv_v_ogm_packet_recv(struct sk_
if (batadv_is_my_mac(bat_priv, ethhdr->h_source))
goto free_skb;
- ogm_packet = (struct batadv_ogm2_packet *)skb->data;
-
- if (batadv_is_my_mac(bat_priv, ogm_packet->orig))
- goto free_skb;
-
batadv_inc_counter(bat_priv, BATADV_CNT_MGMT_RX);
batadv_add_counter(bat_priv, BATADV_CNT_MGMT_RX_BYTES,
skb->len + ETH_HLEN);
diff --combined net/batman-adv/bridge_loop_avoidance.c
index 8500f56cbd10,5c41cc52bc53..ab6cec3c7586
--- a/net/batman-adv/bridge_loop_avoidance.c
+++ b/net/batman-adv/bridge_loop_avoidance.c
@@@ -437,10 -437,7 +437,10 @@@ static void batadv_bla_send_claim(struc
batadv_add_counter(bat_priv, BATADV_CNT_RX_BYTES,
skb->len + ETH_HLEN);
- netif_rx(skb);
+ if (in_interrupt())
+ netif_rx(skb);
+ else
+ netif_rx_ni(skb);
out:
if (primary_if)
batadv_hardif_put(primary_if);
@@@ -1798,7 -1795,7 +1798,7 @@@ batadv_bla_loopdetect_check(struct bata
ret = queue_work(batadv_event_workqueue, &backbone_gw->report_work);
- /* backbone_gw is unreferenced in the report work function function
+ /* backbone_gw is unreferenced in the report work function
* if queue_work() call was successful
*/
if (!ret)
diff --combined net/core/devlink.c
index 80ec1cd81c64,58c8bb07fa19..49e911c19881
--- a/net/core/devlink.c
+++ b/net/core/devlink.c
@@@ -5895,6 -5895,7 +5895,7 @@@ devlink_nl_cmd_health_reporter_get_dump
list_for_each_entry(devlink, &devlink_list, list) {
if (!net_eq(devlink_net(devlink), sock_net(msg->sk)))
continue;
+ mutex_lock(&devlink->lock);
list_for_each_entry(port, &devlink->port_list, list) {
mutex_lock(&port->reporters_lock);
list_for_each_entry(reporter, &port->reporter_list, list) {
@@@ -5909,12 -5910,14 +5910,14 @@@
NLM_F_MULTI);
if (err) {
mutex_unlock(&port->reporters_lock);
+ mutex_unlock(&devlink->lock);
goto out;
}
idx++;
}
mutex_unlock(&port->reporters_lock);
}
+ mutex_unlock(&devlink->lock);
}
out:
mutex_unlock(&devlink_mutex);
@@@ -6196,8 -6199,8 +6199,8 @@@ devlink_trap_action_get_from_info(struc
val = nla_get_u8(info->attrs[DEVLINK_ATTR_TRAP_ACTION]);
switch (val) {
- case DEVLINK_TRAP_ACTION_DROP: /* fall-through */
- case DEVLINK_TRAP_ACTION_TRAP: /* fall-through */
+ case DEVLINK_TRAP_ACTION_DROP:
+ case DEVLINK_TRAP_ACTION_TRAP:
case DEVLINK_TRAP_ACTION_MIRROR:
*p_trap_action = val;
break;
@@@ -7555,11 -7558,11 +7558,11 @@@ int devlink_port_register(struct devlin
devlink_port->index = port_index;
devlink_port->registered = true;
spin_lock_init(&devlink_port->type_lock);
+ INIT_LIST_HEAD(&devlink_port->reporter_list);
+ mutex_init(&devlink_port->reporters_lock);
list_add_tail(&devlink_port->list, &devlink->port_list);
INIT_LIST_HEAD(&devlink_port->param_list);
mutex_unlock(&devlink->lock);
- INIT_LIST_HEAD(&devlink_port->reporter_list);
- mutex_init(&devlink_port->reporters_lock);
INIT_DELAYED_WORK(&devlink_port->type_warn_dw, &devlink_port_type_warn);
devlink_port_type_warn_schedule(devlink_port);
devlink_port_notify(devlink_port, DEVLINK_CMD_PORT_NEW);
@@@ -7576,13 -7579,13 +7579,13 @@@ void devlink_port_unregister(struct dev
{
struct devlink *devlink = devlink_port->devlink;
- WARN_ON(!list_empty(&devlink_port->reporter_list));
- mutex_destroy(&devlink_port->reporters_lock);
devlink_port_type_warn_cancel(devlink_port);
devlink_port_notify(devlink_port, DEVLINK_CMD_PORT_DEL);
mutex_lock(&devlink->lock);
list_del(&devlink_port->list);
mutex_unlock(&devlink->lock);
+ WARN_ON(!list_empty(&devlink_port->reporter_list));
+ mutex_destroy(&devlink_port->reporters_lock);
}
EXPORT_SYMBOL_GPL(devlink_port_unregister);
diff --combined net/core/skbuff.c
index 6faf73d6a0f7,a5c11aae9c89..bfd748346f20
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@@ -820,7 -820,6 +820,7 @@@ void skb_tx_error(struct sk_buff *skb
}
EXPORT_SYMBOL(skb_tx_error);
+#ifdef CONFIG_TRACEPOINTS
/**
* consume_skb - free an skbuff
* @skb: buffer to free
@@@ -838,7 -837,6 +838,7 @@@ void consume_skb(struct sk_buff *skb
__kfree_skb(skb);
}
EXPORT_SYMBOL(consume_skb);
+#endif
/**
* consume_stateless_skb - free an skbuff, assuming it is stateless
@@@ -5955,8 -5953,7 +5955,7 @@@ static int pskb_carve_inside_nonlinear(
size = SKB_WITH_OVERHEAD(ksize(data));
memcpy((struct skb_shared_info *)(data + size),
- skb_shinfo(skb), offsetof(struct skb_shared_info,
- frags[skb_shinfo(skb)->nr_frags]));
+ skb_shinfo(skb), offsetof(struct skb_shared_info, frags[0]));
if (skb_orphan_frags(skb, gfp_mask)) {
kfree(data);
return -ENOMEM;
diff --combined net/core/sock.c
index f8e5ccc45272,64d2aec5ed45..19324489939e
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@@ -413,18 -413,6 +413,6 @@@ static int sock_set_timeout(long *timeo
return 0;
}
- static void sock_warn_obsolete_bsdism(const char *name)
- {
- static int warned;
- static char warncomm[TASK_COMM_LEN];
- if (strcmp(warncomm, current->comm) && warned < 5) {
- strcpy(warncomm, current->comm);
- pr_warn("process `%s' is using obsolete %s SO_BSDCOMPAT\n",
- warncomm, name);
- warned++;
- }
- }
-
static bool sock_needs_netstamp(const struct sock *sk)
{
switch (sk->sk_family) {
@@@ -984,7 -972,6 +972,6 @@@ set_sndbuf
break;
case SO_BSDCOMPAT:
- sock_warn_obsolete_bsdism("setsockopt");
break;
case SO_PASSCRED:
@@@ -1008,7 -995,7 +995,7 @@@
break;
case SO_TIMESTAMPING_NEW:
sock_set_flag(sk, SOCK_TSTAMP_NEW);
- /* fall through */
+ fallthrough;
case SO_TIMESTAMPING_OLD:
if (val & ~SOF_TIMESTAMPING_MASK) {
ret = -EINVAL;
@@@ -1387,7 -1374,6 +1374,6 @@@ int sock_getsockopt(struct socket *sock
break;
case SO_BSDCOMPAT:
- sock_warn_obsolete_bsdism("getsockopt");
break;
case SO_TIMESTAMP_OLD:
diff --combined net/netlink/policy.c
index 641ffbdd977a,7b1f50531cd3..e6de719e6634
--- a/net/netlink/policy.c
+++ b/net/netlink/policy.c
@@@ -188,7 -188,7 +188,7 @@@ send_attribute
goto next;
case NLA_NESTED:
type = NL_ATTR_TYPE_NESTED;
- /* fall through */
+ fallthrough;
case NLA_NESTED_ARRAY:
if (pt->type == NLA_NESTED_ARRAY)
type = NL_ATTR_TYPE_NESTED_ARRAY;
@@@ -254,12 -254,6 +254,6 @@@
pt->bitfield32_valid))
goto nla_put_failure;
break;
- case NLA_EXACT_LEN:
- type = NL_ATTR_TYPE_BINARY;
- if (nla_put_u32(skb, NL_POLICY_TYPE_ATTR_MIN_LENGTH, pt->len) ||
- nla_put_u32(skb, NL_POLICY_TYPE_ATTR_MAX_LENGTH, pt->len))
- goto nla_put_failure;
- break;
case NLA_STRING:
case NLA_NUL_STRING:
case NLA_BINARY:
@@@ -269,14 -263,26 +263,26 @@@
type = NL_ATTR_TYPE_NUL_STRING;
else
type = NL_ATTR_TYPE_BINARY;
- if (pt->len && nla_put_u32(skb, NL_POLICY_TYPE_ATTR_MAX_LENGTH,
- pt->len))
- goto nla_put_failure;
- break;
- case NLA_MIN_LEN:
- type = NL_ATTR_TYPE_BINARY;
- if (nla_put_u32(skb, NL_POLICY_TYPE_ATTR_MIN_LENGTH, pt->len))
+
+ if (pt->validation_type != NLA_VALIDATE_NONE) {
+ struct netlink_range_validation range;
+
+ nla_get_range_unsigned(pt, &range);
+
+ if (range.min &&
+ nla_put_u32(skb, NL_POLICY_TYPE_ATTR_MIN_LENGTH,
+ range.min))
+ goto nla_put_failure;
+
+ if (range.max < U16_MAX &&
+ nla_put_u32(skb, NL_POLICY_TYPE_ATTR_MAX_LENGTH,
+ range.max))
+ goto nla_put_failure;
+ } else if (pt->len &&
+ nla_put_u32(skb, NL_POLICY_TYPE_ATTR_MAX_LENGTH,
+ pt->len)) {
goto nla_put_failure;
+ }
break;
case NLA_FLAG:
type = NL_ATTR_TYPE_FLAG;
diff --combined net/sctp/sm_make_chunk.c
index c11c24524652,467bd77b6986..9a56ae2f3651
--- a/net/sctp/sm_make_chunk.c
+++ b/net/sctp/sm_make_chunk.c
@@@ -1235,7 -1235,7 +1235,7 @@@ nodata
/* Create an Operation Error chunk of a fixed size, specifically,
* min(asoc->pathmtu, SCTP_DEFAULT_MAXSEGMENT) - overheads.
- * This is a helper function to allocate an error chunk for for those
+ * This is a helper function to allocate an error chunk for those
* invalid parameter codes in which we may not want to report all the
* errors, if the incoming chunk is large. If it can't fit in a single
* packet, we ignore it.
@@@ -1780,7 -1780,7 +1780,7 @@@ no_hmac
* for init collision case of lost COOKIE ACK.
* If skb has been timestamped, then use the stamp, otherwise
* use current time. This introduces a small possibility that
- * that a cookie may be considered expired, but his would only slow
+ * a cookie may be considered expired, but this would only slow
* down the new association establishment instead of every packet.
*/
if (sock_flag(ep->base.sk, SOCK_TIMESTAMP))
@@@ -2077,7 -2077,7 +2077,7 @@@ static enum sctp_ierror sctp_process_un
break;
case SCTP_PARAM_ACTION_DISCARD_ERR:
retval = SCTP_IERROR_ERROR;
- /* Fall through */
+ fallthrough;
case SCTP_PARAM_ACTION_SKIP_ERR:
/* Make an ERROR chunk, preparing enough room for
* returning multiple unknown parameters.
@@@ -2319,7 -2319,7 +2319,7 @@@ int sctp_process_init(struct sctp_assoc
/* This implementation defaults to making the first transport
* added as the primary transport. The source address seems to
- * be a a better choice than any of the embedded addresses.
+ * be a better choice than any of the embedded addresses.
*/
if (!sctp_assoc_add_peer(asoc, peer_addr, gfp, SCTP_ACTIVE))
goto nomem;
diff --combined net/wireless/nl80211.c
index 2c9e9a2d1688,e45b95446ea0..fe33dd5d270f
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@@ -654,10 -654,8 +654,8 @@@ static const struct nla_policy nl80211_
[NL80211_ATTR_RECEIVE_MULTICAST] = { .type = NLA_FLAG },
[NL80211_ATTR_WIPHY_FREQ_OFFSET] = NLA_POLICY_RANGE(NLA_U32, 0, 999),
[NL80211_ATTR_SCAN_FREQ_KHZ] = { .type = NLA_NESTED },
- [NL80211_ATTR_HE_6GHZ_CAPABILITY] = {
- .type = NLA_EXACT_LEN,
- .len = sizeof(struct ieee80211_he_6ghz_capa),
- },
+ [NL80211_ATTR_HE_6GHZ_CAPABILITY] =
+ NLA_POLICY_EXACT_LEN(sizeof(struct ieee80211_he_6ghz_capa)),
};
/* policy for the key attributes */
@@@ -703,7 -701,7 +701,7 @@@ nl80211_wowlan_tcp_policy[NUM_NL80211_W
[NL80211_WOWLAN_TCP_DST_MAC] = NLA_POLICY_EXACT_LEN_WARN(ETH_ALEN),
[NL80211_WOWLAN_TCP_SRC_PORT] = { .type = NLA_U16 },
[NL80211_WOWLAN_TCP_DST_PORT] = { .type = NLA_U16 },
- [NL80211_WOWLAN_TCP_DATA_PAYLOAD] = { .type = NLA_MIN_LEN, .len = 1 },
+ [NL80211_WOWLAN_TCP_DATA_PAYLOAD] = NLA_POLICY_MIN_LEN(1),
[NL80211_WOWLAN_TCP_DATA_PAYLOAD_SEQ] = {
.len = sizeof(struct nl80211_wowlan_tcp_data_seq)
},
@@@ -711,8 -709,8 +709,8 @@@
.len = sizeof(struct nl80211_wowlan_tcp_data_token)
},
[NL80211_WOWLAN_TCP_DATA_INTERVAL] = { .type = NLA_U32 },
- [NL80211_WOWLAN_TCP_WAKE_PAYLOAD] = { .type = NLA_MIN_LEN, .len = 1 },
- [NL80211_WOWLAN_TCP_WAKE_MASK] = { .type = NLA_MIN_LEN, .len = 1 },
+ [NL80211_WOWLAN_TCP_WAKE_PAYLOAD] = NLA_POLICY_MIN_LEN(1),
+ [NL80211_WOWLAN_TCP_WAKE_MASK] = NLA_POLICY_MIN_LEN(1),
};
#endif /* CONFIG_PM */
@@@ -2107,7 -2105,7 +2105,7 @@@ static int nl80211_send_wiphy(struct cf
state->split_start++;
if (state->split)
break;
- /* fall through */
+ fallthrough;
case 1:
if (nla_put(msg, NL80211_ATTR_CIPHER_SUITES,
sizeof(u32) * rdev->wiphy.n_cipher_suites,
@@@ -2154,7 -2152,7 +2152,7 @@@
state->split_start++;
if (state->split)
break;
- /* fall through */
+ fallthrough;
case 2:
if (nl80211_put_iftypes(msg, NL80211_ATTR_SUPPORTED_IFTYPES,
rdev->wiphy.interface_modes))
@@@ -2162,7 -2160,7 +2160,7 @@@
state->split_start++;
if (state->split)
break;
- /* fall through */
+ fallthrough;
case 3:
nl_bands = nla_nest_start_noflag(msg,
NL80211_ATTR_WIPHY_BANDS);
@@@ -2189,7 -2187,7 +2187,7 @@@
state->chan_start++;
if (state->split)
break;
- /* fall through */
+ fallthrough;
default:
/* add frequencies */
nl_freqs = nla_nest_start_noflag(msg,
@@@ -2244,7 -2242,7 +2242,7 @@@
state->split_start++;
if (state->split)
break;
- /* fall through */
+ fallthrough;
case 4:
nl_cmds = nla_nest_start_noflag(msg,
NL80211_ATTR_SUPPORTED_COMMANDS);
@@@ -2273,7 -2271,7 +2271,7 @@@
state->split_start++;
if (state->split)
break;
- /* fall through */
+ fallthrough;
case 5:
if (rdev->ops->remain_on_channel &&
(rdev->wiphy.flags & WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL) &&
@@@ -2291,7 -2289,7 +2289,7 @@@
state->split_start++;
if (state->split)
break;
- /* fall through */
+ fallthrough;
case 6:
#ifdef CONFIG_PM
if (nl80211_send_wowlan(msg, rdev, state->split))
@@@ -2302,7 -2300,7 +2300,7 @@@
#else
state->split_start++;
#endif
- /* fall through */
+ fallthrough;
case 7:
if (nl80211_put_iftypes(msg, NL80211_ATTR_SOFTWARE_IFTYPES,
rdev->wiphy.software_iftypes))
@@@ -2315,7 -2313,7 +2313,7 @@@
state->split_start++;
if (state->split)
break;
- /* fall through */
+ fallthrough;
case 8:
if ((rdev->wiphy.flags & WIPHY_FLAG_HAVE_AP_SME) &&
nla_put_u32(msg, NL80211_ATTR_DEVICE_AP_SME,
@@@ -5207,7 -5205,7 +5205,7 @@@ bool nl80211_put_sta_rate(struct sk_buf
break;
default:
WARN_ON(1);
- /* fall through */
+ fallthrough;
case RATE_INFO_BW_20:
rate_flg = 0;
break;
@@@ -6011,7 -6009,7 +6009,7 @@@ static int nl80211_set_station(struct s
if (info->attrs[NL80211_ATTR_HE_6GHZ_CAPABILITY])
params.he_6ghz_capa =
- nla_data(info->attrs[NL80211_ATTR_HE_CAPABILITY]);
+ nla_data(info->attrs[NL80211_ATTR_HE_6GHZ_CAPABILITY]);
if (info->attrs[NL80211_ATTR_AIRTIME_WEIGHT])
params.airtime_weight =