The following commit has been merged in the master branch:

commit 27058af401e49d88a905df000dd26f443fcfa8ce
Merge: 357f4aae859b5d74554b0ccbb18556f1df4166c3 2a26d99b251b8625d27aed14e97fc10707a3a81f
Author: David S. Miller <davem@davemloft.net>
Date:   Sun Oct 30 12:42:58 2016 -0400
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Mostly simple overlapping changes.
For example, David Ahern's adjacency list revamp in 'net-next' conflicted with an adjacency list traversal bug fix in 'net'.
Signed-off-by: David S. Miller <davem@davemloft.net>
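A note on the conflict called out above: net-next replaced the open-coded netdev_for_each_all_upper_dev_rcu() iteration with the callback-based netdev_walk_all_upper_dev_rcu(); the ipoib and ixgbe hunks below are conversions to it. A minimal sketch of the new pattern follows, with illustrative struct and function names — only netdev_walk_all_upper_dev_rcu(), dev_hold() and ether_addr_equal() are real kernel APIs here.

#include <linux/netdevice.h>
#include <linux/etherdevice.h>

struct walk_data {
	const unsigned char *addr;	/* address we are looking for */
	struct net_device *found;	/* result, if any */
};

static int match_upper(struct net_device *upper, void *_data)
{
	struct walk_data *data = _data;

	if (ether_addr_equal(upper->dev_addr, data->addr)) {
		dev_hold(upper);	/* caller drops this reference */
		data->found = upper;
		return 1;		/* non-zero stops the walk */
	}
	return 0;			/* keep walking */
}

/* Caller must hold rcu_read_lock(). */
static struct net_device *find_upper_by_addr(struct net_device *dev,
					     const unsigned char *addr)
{
	struct walk_data data = { .addr = addr };

	netdev_walk_all_upper_dev_rcu(dev, match_upper, &data);
	return data.found;	/* NULL if nothing matched */
}

A non-zero return from the callback terminates the traversal and is propagated to the caller, which is how ipoib_upper_walk() and get_macvlan_queue() in the hunks below report a match.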
diff --combined MAINTAINERS index f18b546,4012c2f..e5c17a9 --- a/MAINTAINERS +++ b/MAINTAINERS @@@ -316,6 -316,14 +316,14 @@@ W: https://01.org/linux-acp S: Supported F: drivers/acpi/fan.c
+ ACPI FOR ARM64 (ACPI/arm64) + M: Lorenzo Pieralisi lorenzo.pieralisi@arm.com + M: Hanjun Guo hanjun.guo@linaro.org + M: Sudeep Holla sudeep.holla@arm.com + L: linux-acpi@vger.kernel.org + S: Maintained + F: drivers/acpi/arm64 + ACPI THERMAL DRIVER M: Zhang Rui rui.zhang@intel.com L: linux-acpi@vger.kernel.org @@@ -1434,6 -1442,7 +1442,7 @@@ F: drivers/cpufreq/mvebu-cpufreq. F: arch/arm/configs/mvebu_*_defconfig
ARM/Marvell Berlin SoC support + M: Jisheng Zhang jszhang@marvell.com M: Sebastian Hesselbarth sebastian.hesselbarth@gmail.com L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) S: Maintained @@@ -2521,8 -2530,6 +2530,8 @@@ L: netdev@vger.kernel.or L: linux-kernel@vger.kernel.org S: Supported F: kernel/bpf/ +F: tools/testing/selftests/bpf/ +F: lib/test_bpf.c
BROADCOM B44 10/100 ETHERNET DRIVER M: Michael Chan michael.chan@broadcom.com @@@ -2545,15 -2552,18 +2554,18 @@@ S: Supporte F: drivers/net/ethernet/broadcom/genet/
BROADCOM BNX2 GIGABIT ETHERNET DRIVER - M: Sony Chacko sony.chacko@qlogic.com - M: Dept-HSGLinuxNICDev@qlogic.com + M: Rasesh Mody rasesh.mody@cavium.com + M: Harish Patil harish.patil@cavium.com + M: Dept-GELinuxNICDev@cavium.com L: netdev@vger.kernel.org S: Supported F: drivers/net/ethernet/broadcom/bnx2.* F: drivers/net/ethernet/broadcom/bnx2_*
BROADCOM BNX2X 10 GIGABIT ETHERNET DRIVER - M: Ariel Elior ariel.elior@qlogic.com + M: Yuval Mintz Yuval.Mintz@cavium.com + M: Ariel Elior ariel.elior@cavium.com + M: everest-linux-l2@cavium.com L: netdev@vger.kernel.org S: Supported F: drivers/net/ethernet/broadcom/bnx2x/ @@@ -2760,7 -2770,9 +2772,9 @@@ S: Supporte F: drivers/scsi/bfa/
BROCADE BNA 10 GIGABIT ETHERNET DRIVER - M: Rasesh Mody rasesh.mody@qlogic.com + M: Rasesh Mody rasesh.mody@cavium.com + M: Sudarsana Kalluru sudarsana.kalluru@cavium.com + M: Dept-GELinuxNICDev@cavium.com L: netdev@vger.kernel.org S: Supported F: drivers/net/ethernet/brocade/bna/ @@@ -4614,8 -4626,9 +4628,9 @@@ F: sound/usb/misc/ua101.
EXTENSIBLE FIRMWARE INTERFACE (EFI) M: Matt Fleming matt@codeblueprint.co.uk + M: Ard Biesheuvel ard.biesheuvel@linaro.org L: linux-efi@vger.kernel.org - T: git git://git.kernel.org/pub/scm/linux/kernel/git/mfleming/efi.git + T: git git://git.kernel.org/pub/scm/linux/kernel/git/efi/efi.git S: Maintained F: Documentation/efi-stub.txt F: arch/ia64/kernel/efi.c @@@ -5280,6 -5293,12 +5295,12 @@@ M: Joe Perches <joe@perches.com S: Maintained F: scripts/get_maintainer.pl
+ GENWQE (IBM Generic Workqueue Card) + M: Frank Haverkamp haver@linux.vnet.ibm.com + M: Gabriel Krisman Bertazi krisman@linux.vnet.ibm.com + S: Supported + F: drivers/misc/genwqe/ + GFS2 FILE SYSTEM M: Steven Whitehouse swhiteho@redhat.com M: Bob Peterson rpeterso@redhat.com @@@ -6125,6 -6144,12 +6146,12 @@@ M: Stanislaw Gruszka <stf_xl@wp.pl S: Maintained F: drivers/usb/atm/ueagle-atm.c
+ IMGTEC ASCII LCD DRIVER + M: Paul Burton paul.burton@imgtec.com + S: Maintained + F: Documentation/devicetree/bindings/auxdisplay/img-ascii-lcd.txt + F: drivers/auxdisplay/img-ascii-lcd.c + INA209 HARDWARE MONITOR DRIVER M: Guenter Roeck linux@roeck-us.net L: linux-hwmon@vger.kernel.org @@@ -6436,6 -6461,7 +6463,7 @@@ F: include/linux/mei_cl_bus. F: drivers/misc/mei/* F: drivers/watchdog/mei_wdt.c F: Documentation/misc-devices/mei/* + F: samples/mei/*
INTEL MIC DRIVERS (mic) M: Sudeep Dutt sudeep.dutt@intel.com @@@ -6622,10 -6648,10 +6650,10 @@@ S: Maintaine F: drivers/firmware/iscsi_ibft*
ISCSI - M: Mike Christie michaelc@cs.wisc.edu + M: Lee Duncan lduncan@suse.com + M: Chris Leech cleech@redhat.com L: open-iscsi@googlegroups.com - W: www.open-iscsi.org - T: git git://git.kernel.org/pub/scm/linux/kernel/git/mnc/linux-2.6-iscsi.git + W: www.open-iscsi.com S: Maintained F: drivers/scsi/*iscsi* F: include/scsi/*iscsi* @@@ -7203,17 -7229,11 +7231,11 @@@ F: drivers/lightnvm F: include/linux/lightnvm.h F: include/uapi/linux/lightnvm.h
- LINUX FOR IBM pSERIES (RS/6000) - M: Paul Mackerras paulus@au.ibm.com - W: http://www.ibm.com/linux/ltc/projects/ppc - S: Supported - F: arch/powerpc/boot/rs6000.h - LINUX FOR POWERPC (32-BIT AND 64-BIT) M: Benjamin Herrenschmidt benh@kernel.crashing.org M: Paul Mackerras paulus@samba.org M: Michael Ellerman mpe@ellerman.id.au - W: http://www.penguinppc.org/ + W: https://github.com/linuxppc/linux/wiki L: linuxppc-dev@lists.ozlabs.org Q: http://patchwork.ozlabs.org/project/linuxppc-dev/list/ T: git git://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux.git @@@ -7228,6 -7248,7 +7250,7 @@@ F: drivers/net/ethernet/ibm/ibmvnic. F: drivers/pci/hotplug/pnv_php.c F: drivers/pci/hotplug/rpa* F: drivers/scsi/ibmvscsi/ + F: tools/testing/selftests/powerpc N: opal N: /pmac N: powermac @@@ -7284,9 -7305,8 +7307,8 @@@ F: arch/powerpc/platforms/83xx F: arch/powerpc/platforms/85xx/
LINUX FOR POWERPC PA SEMI PWRFICIENT - M: Olof Johansson olof@lixom.net L: linuxppc-dev@lists.ozlabs.org - S: Maintained + S: Orphan F: arch/powerpc/platforms/pasemi/ F: drivers/*/*pasemi* F: drivers/*/*/*pasemi* @@@ -7829,6 -7849,13 +7851,13 @@@ F: Documentation/scsi/megaraid.tx F: drivers/scsi/megaraid.* F: drivers/scsi/megaraid/
+ MELFAS MIP4 TOUCHSCREEN DRIVER + M: Sangwon Jee jeesw@melfas.com + W: http://www.melfas.com + S: Supported + F: drivers/input/touchscreen/melfas_mip4.c + F: Documentation/devicetree/bindings/input/touchscreen/melfas_mip4.txt + MELLANOX ETHERNET DRIVER (mlx4_en) M: Tariq Toukan tariqt@mellanox.com L: netdev@vger.kernel.org @@@ -8085,6 -8112,7 +8114,7 @@@ S: Maintaine F: drivers/media/dvb-frontends/mn88473*
MODULE SUPPORT + M: Jessica Yu jeyu@redhat.com M: Rusty Russell rusty@rustcorp.com.au S: Maintained F: include/linux/module.h @@@ -8198,7 -8226,7 +8228,7 @@@ F: include/linux/mfd MULTIMEDIA CARD (MMC), SECURE DIGITAL (SD) AND SDIO SUBSYSTEM M: Ulf Hansson ulf.hansson@linaro.org L: linux-mmc@vger.kernel.org - T: git git://git.linaro.org/people/ulf.hansson/mmc.git + T: git git://git.kernel.org/pub/scm/linux/kernel/git/ulfh/mmc.git S: Maintained F: Documentation/devicetree/bindings/mmc/ F: drivers/mmc/ @@@ -8415,6 -8443,7 +8445,6 @@@ F: include/uapi/linux/net_namespace. F: tools/net/ F: tools/testing/selftests/net/ F: lib/random32.c -F: lib/test_bpf.c
NETWORKING [IPv4/IPv6] M: "David S. Miller" davem@davemloft.net @@@ -8493,11 -8522,10 +8523,10 @@@ F: Documentation/devicetree/bindings/ne F: drivers/net/wireless/
NETXEN (1/10) GbE SUPPORT - M: Manish Chopra manish.chopra@qlogic.com - M: Sony Chacko sony.chacko@qlogic.com - M: Rajesh Borundia rajesh.borundia@qlogic.com + M: Manish Chopra manish.chopra@cavium.com + M: Rahul Verma rahul.verma@cavium.com + M: Dept-GELinuxNICDev@cavium.com L: netdev@vger.kernel.org - W: http://www.qlogic.com S: Supported F: drivers/net/ethernet/qlogic/netxen/
@@@ -9020,15 -9048,13 +9049,13 @@@ S: Maintaine F: drivers/net/wireless/intersil/p54/
PA SEMI ETHERNET DRIVER - M: Olof Johansson olof@lixom.net L: netdev@vger.kernel.org - S: Maintained + S: Orphan F: drivers/net/ethernet/pasemi/*
PA SEMI SMBUS DRIVER - M: Olof Johansson olof@lixom.net L: linux-i2c@vger.kernel.org - S: Maintained + S: Orphan F: drivers/i2c/busses/i2c-pasemi.c
PADATA PARALLEL EXECUTION MECHANISM @@@ -9286,7 -9312,7 +9313,7 @@@ S: Maintaine F: drivers/pci/host/*designware*
PCI DRIVER FOR SYNOPSYS PROTOTYPING DEVICE - M: Joao Pinto jpinto@synopsys.com + M: Jose Abreu Jose.Abreu@synopsys.com L: linux-pci@vger.kernel.org S: Maintained F: Documentation/devicetree/bindings/pci/designware-pcie.txt @@@ -9377,6 -9403,7 +9404,7 @@@ W: http://lists.infradead.org/mailman/l T: git git://git.kernel.org/pub/scm/linux/kernel/git/brodo/pcmcia.git S: Maintained F: Documentation/pcmcia/ + F: tools/pcmcia/ F: drivers/pcmcia/ F: include/pcmcia/
@@@ -9874,33 -9901,32 +9902,32 @@@ F: Documentation/scsi/LICENSE.qla4xx F: drivers/scsi/qla4xxx/
QLOGIC QLA3XXX NETWORK DRIVER - M: Jitendra Kalsaria jitendra.kalsaria@qlogic.com - M: Ron Mercer ron.mercer@qlogic.com - M: linux-driver@qlogic.com + M: Dept-GELinuxNICDev@cavium.com L: netdev@vger.kernel.org S: Supported F: Documentation/networking/LICENSE.qla3xxx F: drivers/net/ethernet/qlogic/qla3xxx.*
QLOGIC QLCNIC (1/10)Gb ETHERNET DRIVER - M: Dept-GELinuxNICDev@qlogic.com + M: Harish Patil harish.patil@cavium.com + M: Manish Chopra manish.chopra@cavium.com + M: Dept-GELinuxNICDev@cavium.com L: netdev@vger.kernel.org S: Supported F: drivers/net/ethernet/qlogic/qlcnic/
QLOGIC QLGE 10Gb ETHERNET DRIVER - M: Harish Patil harish.patil@qlogic.com - M: Sudarsana Kalluru sudarsana.kalluru@qlogic.com - M: Dept-GELinuxNICDev@qlogic.com - M: linux-driver@qlogic.com + M: Harish Patil harish.patil@cavium.com + M: Manish Chopra manish.chopra@cavium.com + M: Dept-GELinuxNICDev@cavium.com L: netdev@vger.kernel.org S: Supported F: drivers/net/ethernet/qlogic/qlge/
QLOGIC QL4xxx ETHERNET DRIVER - M: Yuval Mintz Yuval.Mintz@qlogic.com - M: Ariel Elior Ariel.Elior@qlogic.com - M: everest-linux-l2@qlogic.com + M: Yuval Mintz Yuval.Mintz@cavium.com + M: Ariel Elior Ariel.Elior@cavium.com + M: everest-linux-l2@cavium.com L: netdev@vger.kernel.org S: Supported F: drivers/net/ethernet/qlogic/qed/ diff --combined drivers/infiniband/ulp/ipoib/ipoib_main.c index ae5d7cd,b58d9dc..c50794f --- a/drivers/infiniband/ulp/ipoib/ipoib_main.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c @@@ -292,25 -292,6 +292,25 @@@ static struct net_device *ipoib_get_mas return dev; }
+struct ipoib_walk_data { + const struct sockaddr *addr; + struct net_device *result; +}; + +static int ipoib_upper_walk(struct net_device *upper, void *_data) +{ + struct ipoib_walk_data *data = _data; + int ret = 0; + + if (ipoib_is_dev_match_addr_rcu(data->addr, upper)) { + dev_hold(upper); + data->result = upper; + ret = 1; + } + + return ret; +} + /** * Find a net_device matching the given address, which is an upper device of * the given net_device. @@@ -323,21 -304,27 +323,21 @@@ static struct net_device *ipoib_get_net_dev_match_addr( const struct sockaddr *addr, struct net_device *dev) { - struct net_device *upper, - *result = NULL; - struct list_head *iter; + struct ipoib_walk_data data = { + .addr = addr, + };
rcu_read_lock(); if (ipoib_is_dev_match_addr_rcu(addr, dev)) { dev_hold(dev); - result = dev; + data.result = dev; goto out; }
- netdev_for_each_all_upper_dev_rcu(dev, upper, iter) { - if (ipoib_is_dev_match_addr_rcu(addr, upper)) { - dev_hold(upper); - result = upper; - break; - } - } + netdev_walk_all_upper_dev_rcu(dev, ipoib_upper_walk, &data); out: rcu_read_unlock(); - return result; + return data.result; }
/* returns the number of IPoIB netdevs on top a given ipoib device matching a @@@ -938,9 -925,12 +938,12 @@@ static void neigh_add_path(struct sk_bu ipoib_neigh_free(neigh); goto err_drop; } - if (skb_queue_len(&neigh->queue) < IPOIB_MAX_PATH_REC_QUEUE) + if (skb_queue_len(&neigh->queue) < + IPOIB_MAX_PATH_REC_QUEUE) { + /* put pseudoheader back on for next time */ + skb_push(skb, IPOIB_PSEUDO_LEN); __skb_queue_tail(&neigh->queue, skb); - else { + } else { ipoib_warn(priv, "queue length limit %d. Packet drop.\n", skb_queue_len(&neigh->queue)); goto err_drop; @@@ -977,7 -967,7 +980,7 @@@ err_drop }
static void unicast_arp_send(struct sk_buff *skb, struct net_device *dev, - struct ipoib_cb *cb) + struct ipoib_pseudo_header *phdr) { struct ipoib_dev_priv *priv = netdev_priv(dev); struct ipoib_path *path; @@@ -985,16 -975,18 +988,18 @@@
spin_lock_irqsave(&priv->lock, flags);
- path = __path_find(dev, cb->hwaddr + 4); + path = __path_find(dev, phdr->hwaddr + 4); if (!path || !path->valid) { int new_path = 0;
if (!path) { - path = path_rec_create(dev, cb->hwaddr + 4); + path = path_rec_create(dev, phdr->hwaddr + 4); new_path = 1; } if (path) { if (skb_queue_len(&path->queue) < IPOIB_MAX_PATH_REC_QUEUE) { + /* put pseudoheader back on for next time */ + skb_push(skb, IPOIB_PSEUDO_LEN); __skb_queue_tail(&path->queue, skb); } else { ++dev->stats.tx_dropped; @@@ -1022,10 -1014,12 +1027,12 @@@ be16_to_cpu(path->pathrec.dlid));
spin_unlock_irqrestore(&priv->lock, flags); - ipoib_send(dev, skb, path->ah, IPOIB_QPN(cb->hwaddr)); + ipoib_send(dev, skb, path->ah, IPOIB_QPN(phdr->hwaddr)); return; } else if ((path->query || !path_rec_start(dev, path)) && skb_queue_len(&path->queue) < IPOIB_MAX_PATH_REC_QUEUE) { + /* put pseudoheader back on for next time */ + skb_push(skb, IPOIB_PSEUDO_LEN); __skb_queue_tail(&path->queue, skb); } else { ++dev->stats.tx_dropped; @@@ -1039,13 -1033,15 +1046,15 @@@ static int ipoib_start_xmit(struct sk_b { struct ipoib_dev_priv *priv = netdev_priv(dev); struct ipoib_neigh *neigh; - struct ipoib_cb *cb = ipoib_skb_cb(skb); + struct ipoib_pseudo_header *phdr; struct ipoib_header *header; unsigned long flags;
+ phdr = (struct ipoib_pseudo_header *) skb->data; + skb_pull(skb, sizeof(*phdr)); header = (struct ipoib_header *) skb->data;
- if (unlikely(cb->hwaddr[4] == 0xff)) { + if (unlikely(phdr->hwaddr[4] == 0xff)) { /* multicast, arrange "if" according to probability */ if ((header->proto != htons(ETH_P_IP)) && (header->proto != htons(ETH_P_IPV6)) && @@@ -1058,13 -1054,13 +1067,13 @@@ return NETDEV_TX_OK; } /* Add in the P_Key for multicast*/ - cb->hwaddr[8] = (priv->pkey >> 8) & 0xff; - cb->hwaddr[9] = priv->pkey & 0xff; + phdr->hwaddr[8] = (priv->pkey >> 8) & 0xff; + phdr->hwaddr[9] = priv->pkey & 0xff;
- neigh = ipoib_neigh_get(dev, cb->hwaddr); + neigh = ipoib_neigh_get(dev, phdr->hwaddr); if (likely(neigh)) goto send_using_neigh; - ipoib_mcast_send(dev, cb->hwaddr, skb); + ipoib_mcast_send(dev, phdr->hwaddr, skb); return NETDEV_TX_OK; }
@@@ -1073,16 -1069,16 +1082,16 @@@ case htons(ETH_P_IP): case htons(ETH_P_IPV6): case htons(ETH_P_TIPC): - neigh = ipoib_neigh_get(dev, cb->hwaddr); + neigh = ipoib_neigh_get(dev, phdr->hwaddr); if (unlikely(!neigh)) { - neigh_add_path(skb, cb->hwaddr, dev); + neigh_add_path(skb, phdr->hwaddr, dev); return NETDEV_TX_OK; } break; case htons(ETH_P_ARP): case htons(ETH_P_RARP): /* for unicast ARP and RARP should always perform path find */ - unicast_arp_send(skb, dev, cb); + unicast_arp_send(skb, dev, phdr); return NETDEV_TX_OK; default: /* ethertype not supported by IPoIB */ @@@ -1099,11 -1095,13 +1108,13 @@@ send_using_neigh goto unref; } } else if (neigh->ah) { - ipoib_send(dev, skb, neigh->ah, IPOIB_QPN(cb->hwaddr)); + ipoib_send(dev, skb, neigh->ah, IPOIB_QPN(phdr->hwaddr)); goto unref; }
if (skb_queue_len(&neigh->queue) < IPOIB_MAX_PATH_REC_QUEUE) { + /* put pseudoheader back on for next time */ + skb_push(skb, sizeof(*phdr)); spin_lock_irqsave(&priv->lock, flags); __skb_queue_tail(&neigh->queue, skb); spin_unlock_irqrestore(&priv->lock, flags); @@@ -1135,8 -1133,8 +1146,8 @@@ static int ipoib_hard_header(struct sk_ unsigned short type, const void *daddr, const void *saddr, unsigned len) { + struct ipoib_pseudo_header *phdr; struct ipoib_header *header; - struct ipoib_cb *cb = ipoib_skb_cb(skb);
header = (struct ipoib_header *) skb_push(skb, sizeof *header);
@@@ -1145,12 -1143,13 +1156,13 @@@
/* * we don't rely on dst_entry structure, always stuff the - * destination address into skb->cb so we can figure out where + * destination address into skb hard header so we can figure out where * to send the packet later. */ - memcpy(cb->hwaddr, daddr, INFINIBAND_ALEN); + phdr = (struct ipoib_pseudo_header *) skb_push(skb, sizeof(*phdr)); + memcpy(phdr->hwaddr, daddr, INFINIBAND_ALEN);
- return sizeof *header; + return IPOIB_HARD_LEN; }
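The ipoib hunks above stop storing the destination address in skb->cb (which can be clobbered between hard_header and start_xmit) and instead push a pseudoheader into the skb headroom. A minimal sketch of the idea, mirroring the driver's struct but omitting the real IPoIB plumbing and error handling:

#include <linux/skbuff.h>
#include <linux/if_infiniband.h>	/* INFINIBAND_ALEN */
#include <linux/string.h>

struct ipoib_pseudo_header {
	u8 hwaddr[INFINIBAND_ALEN];
};
#define IPOIB_PSEUDO_LEN sizeof(struct ipoib_pseudo_header)

/* hard_header time: stash the destination in the skb headroom */
static void stash_daddr(struct sk_buff *skb, const void *daddr)
{
	struct ipoib_pseudo_header *phdr;

	phdr = (struct ipoib_pseudo_header *) skb_push(skb, IPOIB_PSEUDO_LEN);
	memcpy(phdr->hwaddr, daddr, INFINIBAND_ALEN);
}

/* start_xmit time, possibly much later: take it back off */
static void fetch_daddr(struct sk_buff *skb, u8 *hwaddr)
{
	struct ipoib_pseudo_header *phdr;

	phdr = (struct ipoib_pseudo_header *) skb->data;
	memcpy(hwaddr, phdr->hwaddr, INFINIBAND_ALEN);
	skb_pull(skb, IPOIB_PSEUDO_LEN);	/* expose the real header */
}

Because start_xmit pulls the pseudoheader off, every path that requeues the skb for a later transmit attempt has to skb_push() it back on first — hence the repeated "put pseudoheader back on for next time" hunks above.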
static void ipoib_set_mcast_list(struct net_device *dev) @@@ -1772,7 -1771,7 +1784,7 @@@ void ipoib_setup(struct net_device *dev
dev->flags |= IFF_BROADCAST | IFF_MULTICAST;
- dev->hard_header_len = IPOIB_ENCAP_LEN; + dev->hard_header_len = IPOIB_HARD_LEN; dev->addr_len = INFINIBAND_ALEN; dev->type = ARPHRD_INFINIBAND; dev->tx_queue_len = ipoib_sendq_size * 2; @@@ -2017,7 -2016,6 +2029,7 @@@ static struct net_device *ipoib_add_por /* MTU will be reset when mcast join happens */ priv->dev->mtu = IPOIB_UD_MTU(priv->max_ib_mtu); priv->mcast_mtu = priv->admin_mtu = priv->dev->mtu; + priv->dev->max_mtu = IPOIB_CM_MTU;
priv->dev->neigh_priv_len = sizeof(struct ipoib_neigh);
diff --combined drivers/net/ethernet/aurora/nb8800.c index 99c4055,00c38bf..b59aa35 --- a/drivers/net/ethernet/aurora/nb8800.c +++ b/drivers/net/ethernet/aurora/nb8800.c @@@ -975,10 -975,8 +975,10 @@@ static int nb8800_open(struct net_devic phydev = of_phy_connect(dev, priv->phy_node, nb8800_link_reconfigure, 0, priv->phy_mode); - if (!phydev) + if (!phydev) { + err = -ENODEV; goto err_free_irq; + }
nb8800_pause_adv(dev);
@@@ -1034,6 -1032,7 +1034,6 @@@ static const struct net_device_ops nb88 .ndo_set_mac_address = nb8800_set_mac_address, .ndo_set_rx_mode = nb8800_set_rx_mode, .ndo_do_ioctl = nb8800_ioctl, - .ndo_change_mtu = eth_change_mtu, .ndo_validate_addr = eth_validate_addr, };
@@@ -1359,6 -1358,7 +1359,7 @@@ static const struct of_device_id nb8800 }, { } }; + MODULE_DEVICE_TABLE(of, nb8800_dt_ids);
static int nb8800_probe(struct platform_device *pdev) { diff --combined drivers/net/ethernet/broadcom/bcm63xx_enet.c index 7e513ca,5370909..5c7acef --- a/drivers/net/ethernet/broadcom/bcm63xx_enet.c +++ b/drivers/net/ethernet/broadcom/bcm63xx_enet.c @@@ -1126,7 -1126,8 +1126,8 @@@ out_freeirq free_irq(dev->irq, dev);
out_phy_disconnect: - phy_disconnect(phydev); + if (priv->has_phy) + phy_disconnect(phydev);
return ret; } @@@ -1622,19 -1623,20 +1623,19 @@@ static int bcm_enet_ioctl(struct net_de }
/* - * calculate actual hardware mtu + * adjust mtu, can't be called while device is running */ -static int compute_hw_mtu(struct bcm_enet_priv *priv, int mtu) +static int bcm_enet_change_mtu(struct net_device *dev, int new_mtu) { - int actual_mtu; + struct bcm_enet_priv *priv = netdev_priv(dev); + int actual_mtu = new_mtu;
- actual_mtu = mtu; + if (netif_running(dev)) + return -EBUSY;
/* add ethernet header + vlan tag size */ actual_mtu += VLAN_ETH_HLEN;
- if (actual_mtu < 64 || actual_mtu > BCMENET_MAX_MTU) - return -EINVAL; - /* * setup maximum size before we get overflow mark in * descriptor, note that this will not prevent reception of @@@ -1649,7 -1651,22 +1650,7 @@@ */ priv->rx_skb_size = ALIGN(actual_mtu + ETH_FCS_LEN, priv->dma_maxburst * 4); - return 0; -}
-/* - * adjust mtu, can't be called while device is running - */ -static int bcm_enet_change_mtu(struct net_device *dev, int new_mtu) -{ - int ret; - - if (netif_running(dev)) - return -EBUSY; - - ret = compute_hw_mtu(netdev_priv(dev), new_mtu); - if (ret) - return ret; dev->mtu = new_mtu; return 0; } @@@ -1739,7 -1756,7 +1740,7 @@@ static int bcm_enet_probe(struct platfo priv->enet_is_sw = false; priv->dma_maxburst = BCMENET_DMA_MAXBURST;
- ret = compute_hw_mtu(priv, dev->mtu); + ret = bcm_enet_change_mtu(dev, dev->mtu); if (ret) goto out;
@@@ -1872,9 -1889,6 +1873,9 @@@ netif_napi_add(dev, &priv->napi, bcm_enet_poll, 16);
dev->ethtool_ops = &bcm_enet_ethtool_ops; + /* MTU range: 46 - 2028 */ + dev->min_mtu = ETH_ZLEN - ETH_HLEN; + dev->max_mtu = BCMENET_MAX_MTU - VLAN_ETH_HLEN; SET_NETDEV_DEV(dev, &pdev->dev);
ret = register_netdev(dev); @@@ -2729,7 -2743,7 +2730,7 @@@ static int bcm_enetsw_probe(struct plat priv->dma_chan_width = pd->dma_chan_width; }
- ret = compute_hw_mtu(priv, dev->mtu); + ret = bcm_enet_change_mtu(dev, dev->mtu); if (ret) goto out;
diff --combined drivers/net/ethernet/broadcom/bnx2.c index 2a5df3f,b3791b3..eab49ff --- a/drivers/net/ethernet/broadcom/bnx2.c +++ b/drivers/net/ethernet/broadcom/bnx2.c @@@ -271,22 -271,25 +271,25 @@@ static inline u32 bnx2_tx_avail(struct static u32 bnx2_reg_rd_ind(struct bnx2 *bp, u32 offset) { + unsigned long flags; u32 val;
- spin_lock_bh(&bp->indirect_lock); + spin_lock_irqsave(&bp->indirect_lock, flags); BNX2_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset); val = BNX2_RD(bp, BNX2_PCICFG_REG_WINDOW); - spin_unlock_bh(&bp->indirect_lock); + spin_unlock_irqrestore(&bp->indirect_lock, flags); return val; }
static void bnx2_reg_wr_ind(struct bnx2 *bp, u32 offset, u32 val) { - spin_lock_bh(&bp->indirect_lock); + unsigned long flags; + + spin_lock_irqsave(&bp->indirect_lock, flags); BNX2_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset); BNX2_WR(bp, BNX2_PCICFG_REG_WINDOW, val); - spin_unlock_bh(&bp->indirect_lock); + spin_unlock_irqrestore(&bp->indirect_lock, flags); }
static void @@@ -304,8 -307,10 +307,10 @@@ bnx2_shmem_rd(struct bnx2 *bp, u32 offs static void bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val) { + unsigned long flags; + offset += cid_addr; - spin_lock_bh(&bp->indirect_lock); + spin_lock_irqsave(&bp->indirect_lock, flags); if (BNX2_CHIP(bp) == BNX2_CHIP_5709) { int i;
@@@ -322,7 -327,7 +327,7 @@@ BNX2_WR(bp, BNX2_CTX_DATA_ADR, offset); BNX2_WR(bp, BNX2_CTX_DATA, val); } - spin_unlock_bh(&bp->indirect_lock); + spin_unlock_irqrestore(&bp->indirect_lock, flags); }
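The bnx2 hunks above switch the indirect-register lock from spin_lock_bh() to spin_lock_irqsave(): _bh only masks softirqs, so the lock was not safe against a hardirq path that takes it. A minimal sketch of the pattern, with hypothetical WINDOW_ADDRESS/WINDOW_DATA register offsets:

#include <linux/spinlock.h>
#include <linux/io.h>

#define WINDOW_ADDRESS	0x78	/* hypothetical register offsets */
#define WINDOW_DATA	0x7c

static DEFINE_SPINLOCK(indirect_lock);

static u32 reg_read_indirect(void __iomem *base, u32 offset)
{
	unsigned long flags;	/* saved interrupt state */
	u32 val;

	spin_lock_irqsave(&indirect_lock, flags);
	writel(offset, base + WINDOW_ADDRESS);	/* select the window */
	val = readl(base + WINDOW_DATA);	/* read through it */
	spin_unlock_irqrestore(&indirect_lock, flags);

	return val;
}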
#ifdef BCM_CNIC @@@ -2298,7 -2303,7 +2303,7 @@@ bnx2_init_5706s_phy(struct bnx2 *bp, in if (BNX2_CHIP(bp) == BNX2_CHIP_5706) BNX2_WR(bp, BNX2_MISC_GP_HW_CTL0, 0x300);
- if (bp->dev->mtu > 1500) { + if (bp->dev->mtu > ETH_DATA_LEN) { u32 val;
/* Set extended packet length bit */ @@@ -2352,7 -2357,7 +2357,7 @@@ bnx2_init_copper_phy(struct bnx2 *bp, i bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val); }
- if (bp->dev->mtu > 1500) { + if (bp->dev->mtu > ETH_DATA_LEN) { /* Set extended packet length bit */ bnx2_write_phy(bp, 0x18, 0x7); bnx2_read_phy(bp, 0x18, &val); @@@ -4985,12 -4990,12 +4990,12 @@@ bnx2_init_chip(struct bnx2 *bp /* Program the MTU. Also include 4 bytes for CRC32. */ mtu = bp->dev->mtu; val = mtu + ETH_HLEN + ETH_FCS_LEN; - if (val > (MAX_ETHERNET_PACKET_SIZE + 4)) + if (val > (MAX_ETHERNET_PACKET_SIZE + ETH_HLEN + 4)) val |= BNX2_EMAC_RX_MTU_SIZE_JUMBO_ENA; BNX2_WR(bp, BNX2_EMAC_RX_MTU_SIZE, val);
- if (mtu < 1500) - mtu = 1500; + if (mtu < ETH_DATA_LEN) + mtu = ETH_DATA_LEN;
bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG, BNX2_RBUF_CONFIG_VAL(mtu)); bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG2, BNX2_RBUF_CONFIG2_VAL(mtu)); @@@ -7896,6 -7901,10 +7901,6 @@@ bnx2_change_mtu(struct net_device *dev { struct bnx2 *bp = netdev_priv(dev);
- if (((new_mtu + ETH_HLEN) > MAX_ETHERNET_JUMBO_PACKET_SIZE) || - ((new_mtu + ETH_HLEN) < MIN_ETHERNET_PACKET_SIZE)) - return -EINVAL; - dev->mtu = new_mtu; return bnx2_change_ring_size(bp, bp->rx_ring_size, bp->tx_ring_size, false); @@@ -8585,8 -8594,6 +8590,8 @@@ bnx2_init_one(struct pci_dev *pdev, con dev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX; dev->features |= dev->hw_features; dev->priv_flags |= IFF_UNICAST_FLT; + dev->min_mtu = MIN_ETHERNET_PACKET_SIZE; + dev->max_mtu = MAX_ETHERNET_JUMBO_PACKET_SIZE;
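bnx2, like several other drivers in this merge, drops the open-coded bounds check from its ndo_change_mtu and instead advertises dev->min_mtu/dev->max_mtu, the new net-next fields the core validates before the callback runs. A minimal sketch, assuming an illustrative driver name and a hypothetical ring-reconfiguration helper:

#include <linux/netdevice.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>

static int example_reconfigure_rings(struct net_device *dev);	/* hypothetical */

/* The core has already checked min_mtu <= new_mtu <= max_mtu. */
static int example_change_mtu(struct net_device *dev, int new_mtu)
{
	dev->mtu = new_mtu;
	return example_reconfigure_rings(dev);
}

static void example_setup(struct net_device *dev)
{
	dev->min_mtu = ETH_MIN_MTU;	/* 68 */
	dev->max_mtu = 9600 - (ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
}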
if (!(bp->flags & BNX2_FLAG_CAN_KEEP_VLAN)) dev->hw_features &= ~NETIF_F_HW_VLAN_CTAG_RX; diff --combined drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c index 67b6180,0cee4c0..ab990da --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c @@@ -12080,7 -12080,8 +12080,7 @@@ static int bnx2x_get_hwinfo(struct bnx2 mtu_size, mtu);
/* if valid: update device mtu */ - if (((mtu_size + ETH_HLEN) >= - ETH_MIN_PACKET_SIZE) && + if ((mtu_size >= ETH_MIN_PACKET_SIZE) && (mtu_size <= ETH_MAX_JUMBO_PACKET_SIZE)) bp->dev->mtu = mtu_size; @@@ -13314,10 -13315,6 +13314,10 @@@ static int bnx2x_init_dev(struct bnx2x dev->dcbnl_ops = &bnx2x_dcbnl_ops; #endif
+ /* MTU range, 46 - 9600 */ + dev->min_mtu = ETH_MIN_PACKET_SIZE; + dev->max_mtu = ETH_MAX_JUMBO_PACKET_SIZE; + /* get_port_hwinfo() will set prtad and mmds properly */ bp->mdio.prtad = MDIO_PRTAD_NONE; bp->mdio.mmds = 0; @@@ -15244,7 -15241,7 +15244,7 @@@ static void bnx2x_init_cyclecounter(str memset(&bp->cyclecounter, 0, sizeof(bp->cyclecounter)); bp->cyclecounter.read = bnx2x_cyclecounter_read; bp->cyclecounter.mask = CYCLECOUNTER_MASK(64); - bp->cyclecounter.shift = 1; + bp->cyclecounter.shift = 0; bp->cyclecounter.mult = 1; }
diff --combined drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c index b0bb23f,57eb4e1..c0cc2ee --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c @@@ -2502,6 -2502,8 +2502,6 @@@ static int cxgb_change_mtu(struct net_d int ret; struct port_info *pi = netdev_priv(dev);
- if (new_mtu < 81 || new_mtu > MAX_MTU) /* accommodate SACK */ - return -EINVAL; ret = t4_set_rxmode(pi->adapter, pi->adapter->pf, pi->viid, new_mtu, -1, -1, -1, -1, true); if (!ret) @@@ -4055,7 -4057,7 +4055,7 @@@ static void cfg_queues(struct adapter * * capped by the number of available cores. */ if (n10g) { - i = num_online_cpus(); + i = min_t(int, MAX_OFLD_QSETS, num_online_cpus()); s->ofldqsets = roundup(i, adap->params.nports); } else { s->ofldqsets = adap->params.nports; @@@ -4801,10 -4803,6 +4801,10 @@@ static int init_one(struct pci_dev *pde
netdev->priv_flags |= IFF_UNICAST_FLT;
+ /* MTU range: 81 - 9600 */ + netdev->min_mtu = 81; + netdev->max_mtu = MAX_MTU; + netdev->netdev_ops = &cxgb4_netdev_ops; #ifdef CONFIG_CHELSIO_T4_DCB netdev->dcbnl_ops = &cxgb4_dcb_ops; diff --combined drivers/net/ethernet/freescale/fec_main.c index 43b2839,5aa9d4d..01aebc0 --- a/drivers/net/ethernet/freescale/fec_main.c +++ b/drivers/net/ethernet/freescale/fec_main.c @@@ -1430,14 -1430,14 +1430,14 @@@ fec_enet_rx_queue(struct net_device *nd skb_put(skb, pkt_len - 4); data = skb->data;
+ if (!is_copybreak && need_swap) + swap_buffer(data, pkt_len); + #if !defined(CONFIG_M5272) if (fep->quirks & FEC_QUIRK_HAS_RACC) data = skb_pull_inline(skb, 2); #endif
- if (!is_copybreak && need_swap) - swap_buffer(data, pkt_len); - /* Extract the enhanced buffer descriptor */ ebdp = NULL; if (fep->bufdesc_ex) @@@ -1841,11 -1841,11 +1841,11 @@@ static int fec_enet_clk_enable(struct n ret = clk_prepare_enable(fep->clk_ahb); if (ret) return ret; - if (fep->clk_enet_out) { - ret = clk_prepare_enable(fep->clk_enet_out); - if (ret) - goto failed_clk_enet_out; - } + + ret = clk_prepare_enable(fep->clk_enet_out); + if (ret) + goto failed_clk_enet_out; + if (fep->clk_ptp) { mutex_lock(&fep->ptp_clk_mutex); ret = clk_prepare_enable(fep->clk_ptp); @@@ -1857,20 -1857,23 +1857,20 @@@ } mutex_unlock(&fep->ptp_clk_mutex); } - if (fep->clk_ref) { - ret = clk_prepare_enable(fep->clk_ref); - if (ret) - goto failed_clk_ref; - } + + ret = clk_prepare_enable(fep->clk_ref); + if (ret) + goto failed_clk_ref; } else { clk_disable_unprepare(fep->clk_ahb); - if (fep->clk_enet_out) - clk_disable_unprepare(fep->clk_enet_out); + clk_disable_unprepare(fep->clk_enet_out); if (fep->clk_ptp) { mutex_lock(&fep->ptp_clk_mutex); clk_disable_unprepare(fep->clk_ptp); fep->ptp_clk_on = false; mutex_unlock(&fep->ptp_clk_mutex); } - if (fep->clk_ref) - clk_disable_unprepare(fep->clk_ref); + clk_disable_unprepare(fep->clk_ref); }
return 0; @@@ -3052,6 -3055,7 +3052,6 @@@ static const struct net_device_ops fec_ .ndo_stop = fec_enet_close, .ndo_start_xmit = fec_enet_start_xmit, .ndo_set_rx_mode = set_multicast_list, - .ndo_change_mtu = eth_change_mtu, .ndo_validate_addr = eth_validate_addr, .ndo_tx_timeout = fec_timeout, .ndo_set_mac_address = fec_set_mac_address, diff --combined drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c index d8e9941,ec8c738..55cbb6c --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c @@@ -141,9 -141,10 +141,10 @@@ void hns_mac_adjust_link(struct hns_mac *@port_num:port number * */ - static int hns_mac_get_inner_port_num(struct hns_mac_cb *mac_cb, - u8 vmid, u8 *port_num) + int hns_mac_get_inner_port_num(struct hns_mac_cb *mac_cb, u8 vmid, u8 *port_num) { + int q_num_per_vf, vf_num_per_port; + int vm_queue_id; u8 tmp_port;
if (mac_cb->dsaf_dev->dsaf_mode <= DSAF_MODE_ENABLE) { @@@ -174,6 -175,12 +175,12 @@@ return -EINVAL; }
+ q_num_per_vf = mac_cb->dsaf_dev->rcb_common[0]->max_q_per_vf; + vf_num_per_port = mac_cb->dsaf_dev->rcb_common[0]->max_vfn; + + vm_queue_id = vmid * q_num_per_vf + + vf_num_per_port * q_num_per_vf * mac_cb->mac_id; + switch (mac_cb->dsaf_dev->dsaf_mode) { case DSAF_MODE_ENABLE_FIX: tmp_port = 0; @@@ -193,7 -200,7 +200,7 @@@ case DSAF_MODE_DISABLE_6PORT_2VM: case DSAF_MODE_DISABLE_6PORT_4VM: case DSAF_MODE_DISABLE_6PORT_16VM: - tmp_port = vmid; + tmp_port = vm_queue_id; break; default: dev_err(mac_cb->dev, "dsaf mode invalid, %s mac%d!\n", @@@ -446,7 -453,8 +453,7 @@@ int hns_mac_set_mtu(struct hns_mac_cb * if (mac_cb->mac_type == HNAE_PORT_DEBUG) max_frm = MAC_MAX_MTU_DBG;
- if ((new_mtu < MAC_MIN_MTU) || (new_frm > max_frm) || - (new_frm > HNS_RCB_RING_MAX_BD_PER_PKT * buf_size)) + if (new_frm > HNS_RCB_RING_MAX_BD_PER_PKT * buf_size) return -EINVAL;
if (!drv->config_max_frame_length) diff --combined drivers/net/ethernet/hisilicon/hns/hns_enet.c index a720867,dff7b60..60831a2 --- a/drivers/net/ethernet/hisilicon/hns/hns_enet.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.c @@@ -22,7 -22,6 +22,7 @@@
#include "hnae.h" #include "hns_enet.h" +#include "hns_dsaf_mac.h"
#define NIC_MAX_Q_PER_VF 16 #define HNS_NIC_TX_TIMEOUT (5 * HZ) @@@ -575,7 -574,6 +575,6 @@@ static int hns_nic_poll_rx_skb(struct h struct sk_buff *skb; struct hnae_desc *desc; struct hnae_desc_cb *desc_cb; - struct ethhdr *eh; unsigned char *va; int bnum, length, i; int pull_len; @@@ -601,7 -599,6 +600,6 @@@ ring->stats.sw_err_cnt++; return -ENOMEM; } - skb_reset_mac_header(skb);
prefetchw(skb->data); length = le16_to_cpu(desc->rx.pkt_len); @@@ -683,14 -680,6 +681,6 @@@ out_bnum_err return -EFAULT; }
- /* filter out multicast pkt with the same src mac as this port */ - eh = eth_hdr(skb); - if (unlikely(is_multicast_ether_addr(eh->h_dest) && - ether_addr_equal(ndev->dev_addr, eh->h_source))) { - dev_kfree_skb_any(skb); - return -EFAULT; - } - ring->stats.rx_pkts++; ring->stats.rx_bytes += skb->len;
@@@ -748,25 -737,37 +738,37 @@@ static void hns_nic_rx_up_pro(struct hn ndev->last_rx = jiffies; }
+ static int hns_desc_unused(struct hnae_ring *ring) + { + int ntc = ring->next_to_clean; + int ntu = ring->next_to_use; + + return ((ntc >= ntu) ? 0 : ring->desc_num) + ntc - ntu; + } + static int hns_nic_rx_poll_one(struct hns_nic_ring_data *ring_data, int budget, void *v) { struct hnae_ring *ring = ring_data->ring; struct sk_buff *skb; - int num, bnum, ex_num; + int num, bnum; #define RCB_NOF_ALLOC_RX_BUFF_ONCE 16 int recv_pkts, recv_bds, clean_count, err; + int unused_count = hns_desc_unused(ring);
num = readl_relaxed(ring->io_base + RCB_REG_FBDNUM); rmb(); /* make sure num taken effect before the other data is touched */
recv_pkts = 0, recv_bds = 0, clean_count = 0; - recv: + num -= unused_count; + while (recv_pkts < budget && recv_bds < num) { /* reuse or realloc buffers */ - if (clean_count >= RCB_NOF_ALLOC_RX_BUFF_ONCE) { - hns_nic_alloc_rx_buffers(ring_data, clean_count); + if (clean_count + unused_count >= RCB_NOF_ALLOC_RX_BUFF_ONCE) { + hns_nic_alloc_rx_buffers(ring_data, + clean_count + unused_count); clean_count = 0; + unused_count = hns_desc_unused(ring); }
/* poll one pkt */ @@@ -787,21 -788,11 +789,11 @@@ recv_pkts++; }
- /* make all data has been write before submit */ - if (recv_pkts < budget) { - ex_num = readl_relaxed(ring->io_base + RCB_REG_FBDNUM); - - if (ex_num > clean_count) { - num += ex_num - clean_count; - rmb(); /*complete read rx ring bd number*/ - goto recv; - } - } - out: /* make all data has been write before submit */ - if (clean_count > 0) - hns_nic_alloc_rx_buffers(ring_data, clean_count); + if (clean_count + unused_count > 0) + hns_nic_alloc_rx_buffers(ring_data, + clean_count + unused_count);
return recv_pkts; } @@@ -811,6 -802,8 +803,8 @@@ static void hns_nic_rx_fini_pro(struct struct hnae_ring *ring = ring_data->ring; int num = 0;
+ ring_data->ring->q->handle->dev->ops->toggle_ring_irq(ring, 0); + /* for hardware bug fixed */ num = readl_relaxed(ring->io_base + RCB_REG_FBDNUM);
@@@ -822,6 -815,20 +816,20 @@@ } }
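The hns_desc_unused() helper added above counts the slots of the circular descriptor ring that are not currently handed to the hardware, i.e. available for the driver to refill. A standalone model of the arithmetic (plain userspace C, not driver code):

#include <stdio.h>

static int desc_unused(int desc_num, int next_to_clean, int next_to_use)
{
	return ((next_to_clean >= next_to_use) ? 0 : desc_num)
	       + next_to_clean - next_to_use;
}

int main(void)
{
	/* 1024-entry ring, use pointer 4 ahead of clean pointer */
	printf("%d\n", desc_unused(1024, 10, 14));	/* prints 1020 */
	/* use pointer wrapped, now 4 behind the clean pointer */
	printf("%d\n", desc_unused(1024, 14, 10));	/* prints 4 */
	return 0;
}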
+ static void hns_nic_rx_fini_pro_v2(struct hns_nic_ring_data *ring_data) + { + struct hnae_ring *ring = ring_data->ring; + int num = 0; + + num = readl_relaxed(ring->io_base + RCB_REG_FBDNUM); + + if (num == 0) + ring_data->ring->q->handle->dev->ops->toggle_ring_irq( + ring, 0); + else + napi_schedule(&ring_data->napi); + } + static inline void hns_nic_reclaim_one_desc(struct hnae_ring *ring, int *bytes, int *pkts) { @@@ -923,7 -930,11 +931,11 @@@ static int hns_nic_tx_poll_one(struct h static void hns_nic_tx_fini_pro(struct hns_nic_ring_data *ring_data) { struct hnae_ring *ring = ring_data->ring; - int head = readl_relaxed(ring->io_base + RCB_REG_HEAD); + int head; + + ring_data->ring->q->handle->dev->ops->toggle_ring_irq(ring, 0); + + head = readl_relaxed(ring->io_base + RCB_REG_HEAD);
if (head != ring->next_to_clean) { ring_data->ring->q->handle->dev->ops->toggle_ring_irq( @@@ -933,6 -944,18 +945,18 @@@ } }
+ static void hns_nic_tx_fini_pro_v2(struct hns_nic_ring_data *ring_data) + { + struct hnae_ring *ring = ring_data->ring; + int head = readl_relaxed(ring->io_base + RCB_REG_HEAD); + + if (head == ring->next_to_clean) + ring_data->ring->q->handle->dev->ops->toggle_ring_irq( + ring, 0); + else + napi_schedule(&ring_data->napi); + } + static void hns_nic_tx_clr_all_bufs(struct hns_nic_ring_data *ring_data) { struct hnae_ring *ring = ring_data->ring; @@@ -964,10 -987,7 +988,7 @@@ static int hns_nic_common_poll(struct n
if (clean_complete >= 0 && clean_complete < budget) { napi_complete(napi); - ring_data->ring->q->handle->dev->ops->toggle_ring_irq( - ring_data->ring, 0); - if (ring_data->fini_process) - ring_data->fini_process(ring_data); + ring_data->fini_process(ring_data); return 0; }
@@@ -1406,6 -1426,10 +1427,6 @@@ static int hns_nic_change_mtu(struct ne struct hnae_handle *h = priv->ae_handle; int ret;
- /* MTU < 68 is an error and causes problems on some kernels */ - if (new_mtu < 68) - return -EINVAL; - if (!h->dev->ops->set_mtu) return -ENOTSUPP;
@@@ -1559,6 -1583,21 +1580,21 @@@ struct rtnl_link_stats64 *hns_nic_get_s return stats; }
+ static u16 + hns_nic_select_queue(struct net_device *ndev, struct sk_buff *skb, + void *accel_priv, select_queue_fallback_t fallback) + { + struct ethhdr *eth_hdr = (struct ethhdr *)skb->data; + struct hns_nic_priv *priv = netdev_priv(ndev); + + /* fix hardware broadcast/multicast packets queue loopback */ + if (!AE_IS_VER1(priv->enet_ver) && + is_multicast_ether_addr(eth_hdr->h_dest)) + return 0; + else + return fallback(ndev, skb); + } + static const struct net_device_ops hns_nic_netdev_ops = { .ndo_open = hns_nic_net_open, .ndo_stop = hns_nic_net_stop, @@@ -1574,6 -1613,7 +1610,7 @@@ .ndo_poll_controller = hns_nic_poll_controller, #endif .ndo_set_rx_mode = hns_nic_set_rx_mode, + .ndo_select_queue = hns_nic_select_queue, };
static void hns_nic_update_link_status(struct net_device *netdev) @@@ -1735,7 -1775,8 +1772,8 @@@ static int hns_nic_init_ring_data(struc rd->queue_index = i; rd->ring = &h->qs[i]->tx_ring; rd->poll_one = hns_nic_tx_poll_one; - rd->fini_process = is_ver1 ? hns_nic_tx_fini_pro : NULL; + rd->fini_process = is_ver1 ? hns_nic_tx_fini_pro : + hns_nic_tx_fini_pro_v2;
netif_napi_add(priv->netdev, &rd->napi, hns_nic_common_poll, NIC_TX_CLEAN_MAX_NUM); @@@ -1747,7 -1788,8 +1785,8 @@@ rd->ring = &h->qs[i - h->q_num]->rx_ring; rd->poll_one = hns_nic_rx_poll_one; rd->ex_process = hns_nic_rx_up_pro; - rd->fini_process = is_ver1 ? hns_nic_rx_fini_pro : NULL; + rd->fini_process = is_ver1 ? hns_nic_rx_fini_pro : + hns_nic_rx_fini_pro_v2;
netif_napi_add(priv->netdev, &rd->napi, hns_nic_common_poll, NIC_RX_CLEAN_MAX_NUM); @@@ -1950,20 -1992,14 +1989,20 @@@ static int hns_nic_dev_probe(struct pla NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM; ndev->vlan_features |= NETIF_F_SG | NETIF_F_GSO | NETIF_F_GRO;
+ /* MTU range: 68 - 9578 (v1) or 9706 (v2) */ + ndev->min_mtu = MAC_MIN_MTU; switch (priv->enet_ver) { case AE_VERSION_2: ndev->features |= NETIF_F_TSO | NETIF_F_TSO6; ndev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO | NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6; + ndev->max_mtu = MAC_MAX_MTU_V2 - + (ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN); break; default: + ndev->max_mtu = MAC_MAX_MTU - + (ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN); break; }
diff --combined drivers/net/ethernet/ibm/ibmvnic.c index 657206b,5f44c55..d54405b4 --- a/drivers/net/ethernet/ibm/ibmvnic.c +++ b/drivers/net/ethernet/ibm/ibmvnic.c @@@ -902,6 -902,17 +902,6 @@@ static int ibmvnic_set_mac(struct net_d return 0; }
-static int ibmvnic_change_mtu(struct net_device *netdev, int new_mtu) -{ - struct ibmvnic_adapter *adapter = netdev_priv(netdev); - - if (new_mtu > adapter->req_mtu || new_mtu < adapter->min_mtu) - return -EINVAL; - - netdev->mtu = new_mtu; - return 0; -} - static void ibmvnic_tx_timeout(struct net_device *dev) { struct ibmvnic_adapter *adapter = netdev_priv(dev); @@@ -1018,6 -1029,7 +1018,6 @@@ static const struct net_device_ops ibmv .ndo_set_rx_mode = ibmvnic_set_multi, .ndo_set_mac_address = ibmvnic_set_mac, .ndo_validate_addr = eth_validate_addr, - .ndo_change_mtu = ibmvnic_change_mtu, .ndo_tx_timeout = ibmvnic_tx_timeout, #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_poll_controller = ibmvnic_netpoll_controller, @@@ -1178,7 -1190,7 +1178,7 @@@ static struct ibmvnic_sub_crq_queue *in if (!scrq) return NULL;
- scrq->msgs = (union sub_crq *)__get_free_pages(GFP_KERNEL, 2); + scrq->msgs = (union sub_crq *)__get_free_pages(GFP_ATOMIC, 2); memset(scrq->msgs, 0, 4 * PAGE_SIZE); if (!scrq->msgs) { dev_warn(dev, "Couldn't allocate crq queue messages page\n"); @@@ -1449,14 -1461,16 +1449,16 @@@ static int init_sub_crq_irqs(struct ibm return rc;
req_rx_irq_failed: - for (j = 0; j < i; j++) + for (j = 0; j < i; j++) { free_irq(adapter->rx_scrq[j]->irq, adapter->rx_scrq[j]); irq_dispose_mapping(adapter->rx_scrq[j]->irq); + } i = adapter->req_tx_queues; req_tx_irq_failed: - for (j = 0; j < i; j++) + for (j = 0; j < i; j++) { free_irq(adapter->tx_scrq[j]->irq, adapter->tx_scrq[j]); irq_dispose_mapping(adapter->rx_scrq[j]->irq); + } release_sub_crqs_no_irqs(adapter); return rc; } @@@ -2626,12 -2640,10 +2628,12 @@@ static void handle_query_cap_rsp(union break; case MIN_MTU: adapter->min_mtu = be64_to_cpu(crq->query_capability.number); + netdev->min_mtu = adapter->min_mtu; netdev_dbg(netdev, "min_mtu = %lld\n", adapter->min_mtu); break; case MAX_MTU: adapter->max_mtu = be64_to_cpu(crq->query_capability.number); + netdev->max_mtu = adapter->max_mtu; netdev_dbg(netdev, "max_mtu = %lld\n", adapter->max_mtu); break; case MAX_MULTICAST_FILTERS: @@@ -3222,6 -3234,27 +3224,27 @@@ static void ibmvnic_free_inflight(struc spin_unlock_irqrestore(&adapter->inflight_lock, flags); }
+ static void ibmvnic_xport_event(struct work_struct *work) + { + struct ibmvnic_adapter *adapter = container_of(work, + struct ibmvnic_adapter, + ibmvnic_xport); + struct device *dev = &adapter->vdev->dev; + long rc; + + ibmvnic_free_inflight(adapter); + release_sub_crqs(adapter); + if (adapter->migrated) { + rc = ibmvnic_reenable_crq_queue(adapter); + if (rc) + dev_err(dev, "Error after enable rc=%ld\n", rc); + adapter->migrated = false; + rc = ibmvnic_send_crq_init(adapter); + if (rc) + dev_err(dev, "Error sending init rc=%ld\n", rc); + } + } + static void ibmvnic_handle_crq(union ibmvnic_crq *crq, struct ibmvnic_adapter *adapter) { @@@ -3257,15 -3290,7 +3280,7 @@@ if (gen_crq->cmd == IBMVNIC_PARTITION_MIGRATED) { dev_info(dev, "Re-enabling adapter\n"); adapter->migrated = true; - ibmvnic_free_inflight(adapter); - release_sub_crqs(adapter); - rc = ibmvnic_reenable_crq_queue(adapter); - if (rc) - dev_err(dev, "Error after enable rc=%ld\n", rc); - adapter->migrated = false; - rc = ibmvnic_send_crq_init(adapter); - if (rc) - dev_err(dev, "Error sending init rc=%ld\n", rc); + schedule_work(&adapter->ibmvnic_xport); } else if (gen_crq->cmd == IBMVNIC_DEVICE_FAILOVER) { dev_info(dev, "Backing device failover detected\n"); netif_carrier_off(netdev); @@@ -3274,8 -3299,7 +3289,7 @@@ /* The adapter lost the connection */ dev_err(dev, "Virtual Adapter failed (rc=%d)\n", gen_crq->cmd); - ibmvnic_free_inflight(adapter); - release_sub_crqs(adapter); + schedule_work(&adapter->ibmvnic_xport); } return; case IBMVNIC_CRQ_CMD_RSP: @@@ -3644,8 -3668,7 +3658,9 @@@ static void handle_crq_init_rsp(struct goto task_failed;
netdev->real_num_tx_queues = adapter->req_tx_queues; + netdev->mtu = adapter->req_mtu; + netdev->min_mtu = adapter->min_mtu; + netdev->max_mtu = adapter->max_mtu;
if (adapter->failover) { adapter->failover = false; @@@ -3717,6 -3740,7 +3732,7 @@@ static int ibmvnic_probe(struct vio_de SET_NETDEV_DEV(netdev, &dev->dev);
INIT_WORK(&adapter->vnic_crq_init, handle_crq_init_rsp); + INIT_WORK(&adapter->ibmvnic_xport, ibmvnic_xport_event);
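The new ibmvnic_xport_event() work function above moves queue teardown and re-initialization out of the CRQ interrupt handler, where the code previously ran and where sleeping is not allowed. A minimal sketch of the deferral pattern, with illustrative names and hypothetical helpers:

#include <linux/workqueue.h>
#include <linux/interrupt.h>

struct my_adapter {
	struct work_struct reset_work;
	/* ... device state ... */
};

static void my_release_queues(struct my_adapter *adap);	/* hypothetical */
static void my_reinit_queues(struct my_adapter *adap);	/* hypothetical */

static void my_reset_work(struct work_struct *work)
{
	struct my_adapter *adap =
		container_of(work, struct my_adapter, reset_work);

	/* process context: sleeping, allocating, taking mutexes is fine */
	my_release_queues(adap);
	my_reinit_queues(adap);
}

static irqreturn_t my_irq_handler(int irq, void *dev_id)
{
	struct my_adapter *adap = dev_id;

	schedule_work(&adap->reset_work);	/* must not sleep here */
	return IRQ_HANDLED;
}

/* at probe time: INIT_WORK(&adap->reset_work, my_reset_work); */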
spin_lock_init(&adapter->stats_lock);
@@@ -3784,6 -3808,7 +3800,7 @@@ }
netdev->real_num_tx_queues = adapter->req_tx_queues; + netdev->mtu = adapter->req_mtu;
rc = register_netdev(netdev); if (rc) { diff --combined drivers/net/ethernet/intel/i40e/i40e.h index 01cce5b,6d61e44..5a6f851 --- a/drivers/net/ethernet/intel/i40e/i40e.h +++ b/drivers/net/ethernet/intel/i40e/i40e.h @@@ -92,6 -92,7 +92,7 @@@ #define I40E_AQ_LEN 256 #define I40E_AQ_WORK_LIMIT 66 /* max number of VFs + a little */ #define I40E_MAX_USER_PRIORITY 8 + #define I40E_DEFAULT_TRAFFIC_CLASS BIT(0) #define I40E_DEFAULT_MSG_ENABLE 4 #define I40E_QUEUE_WAIT_RETRY_LIMIT 10 #define I40E_INT_NAME_STR_LEN (IFNAMSIZ + 16) @@@ -607,8 -608,6 +608,8 @@@ struct i40e_q_vector unsigned long hung_detected; /* Set/Reset for hung_detection logic */
cpumask_t affinity_mask; + struct irq_affinity_notify affinity_notify; + struct rcu_head rcu; /* to avoid race with update stats on free */ char name[I40E_INT_NAME_STR_LEN]; bool arm_wb_state; @@@ -730,6 -729,8 +731,6 @@@ int i40e_sync_vsi_filters(struct i40e_v struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type, u16 uplink, u32 param1); int i40e_vsi_release(struct i40e_vsi *vsi); -struct i40e_vsi *i40e_vsi_lookup(struct i40e_pf *pf, enum i40e_vsi_type type, - struct i40e_vsi *start_vsi); #ifdef I40E_FCOE void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi, struct i40e_vsi_context *ctxt, diff --combined drivers/net/ethernet/intel/i40e/i40e_main.c index 7fa535f,31c97e3..d78a4dc --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@@ -41,7 -41,7 +41,7 @@@ static const char i40e_driver_string[]
#define DRV_VERSION_MAJOR 1 #define DRV_VERSION_MINOR 6 -#define DRV_VERSION_BUILD 16 +#define DRV_VERSION_BUILD 21 #define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \ __stringify(DRV_VERSION_MINOR) "." \ __stringify(DRV_VERSION_BUILD) DRV_KERN @@@ -93,8 -93,8 +93,8 @@@ MODULE_DEVICE_TABLE(pci, i40e_pci_tbl)
#define I40E_MAX_VF_COUNT 128 static int debug = -1; -module_param(debug, int, 0); -MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)"); +module_param(debug, uint, 0); +MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all), Debug mask (0x8XXXXXXX)");
MODULE_AUTHOR("Intel Corporation, e1000-devel@lists.sourceforge.net"); MODULE_DESCRIPTION("Intel(R) Ethernet Connection XL710 Network Driver"); @@@ -1287,6 -1287,39 +1287,6 @@@ int i40e_del_mac_all_vlan(struct i40e_v }
/** - * i40e_rm_default_mac_filter - Remove the default MAC filter set by NVM - * @vsi: the PF Main VSI - inappropriate for any other VSI - * @macaddr: the MAC address - * - * Remove whatever filter the firmware set up so the driver can manage - * its own filtering intelligently. - **/ -static void i40e_rm_default_mac_filter(struct i40e_vsi *vsi, u8 *macaddr) -{ - struct i40e_aqc_remove_macvlan_element_data element; - struct i40e_pf *pf = vsi->back; - - /* Only appropriate for the PF main VSI */ - if (vsi->type != I40E_VSI_MAIN) - return; - - memset(&element, 0, sizeof(element)); - ether_addr_copy(element.mac_addr, macaddr); - element.vlan_tag = 0; - /* Ignore error returns, some firmware does it this way... */ - element.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH; - i40e_aq_remove_macvlan(&pf->hw, vsi->seid, &element, 1, NULL); - - memset(&element, 0, sizeof(element)); - ether_addr_copy(element.mac_addr, macaddr); - element.vlan_tag = 0; - /* ...and some firmware does it this way. */ - element.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH | - I40E_AQC_MACVLAN_DEL_IGNORE_VLAN; - i40e_aq_remove_macvlan(&pf->hw, vsi->seid, &element, 1, NULL); -} - -/** * i40e_add_filter - Add a mac/vlan filter to the VSI * @vsi: the VSI to be searched * @macaddr: the MAC address @@@ -2206,8 -2239,13 +2206,8 @@@ static void i40e_sync_filters_subtask(s static int i40e_change_mtu(struct net_device *netdev, int new_mtu) { struct i40e_netdev_priv *np = netdev_priv(netdev); - int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN; struct i40e_vsi *vsi = np->vsi;
- /* MTU < 68 is an error and causes problems on some kernels */ - if ((new_mtu < 68) || (max_frame > I40E_MAX_RXBUFFER)) - return -EINVAL; - netdev_info(netdev, "changing MTU from %d to %d\n", netdev->mtu, new_mtu); netdev->mtu = new_mtu; @@@ -3284,33 -3322,6 +3284,33 @@@ static irqreturn_t i40e_msix_clean_ring }
/** + * i40e_irq_affinity_notify - Callback for affinity changes + * @notify: context as to what irq was changed + * @mask: the new affinity mask + * + * This is a callback function used by the irq_set_affinity_notifier function + * so that we may register to receive changes to the irq affinity masks. + **/ +static void i40e_irq_affinity_notify(struct irq_affinity_notify *notify, + const cpumask_t *mask) +{ + struct i40e_q_vector *q_vector = + container_of(notify, struct i40e_q_vector, affinity_notify); + + q_vector->affinity_mask = *mask; +} + +/** + * i40e_irq_affinity_release - Callback for affinity notifier release + * @ref: internal core kernel usage + * + * This is a callback function used by the irq_set_affinity_notifier function + * to inform the current notification subscriber that they will no longer + * receive notifications. + **/ +static void i40e_irq_affinity_release(struct kref *ref) {} + +/** * i40e_vsi_request_irq_msix - Initialize MSI-X interrupts * @vsi: the VSI being configured * @basename: name for the vector @@@ -3325,13 -3336,10 +3325,13 @@@ static int i40e_vsi_request_irq_msix(st int rx_int_idx = 0; int tx_int_idx = 0; int vector, err; + int irq_num;
for (vector = 0; vector < q_vectors; vector++) { struct i40e_q_vector *q_vector = vsi->q_vectors[vector];
+ irq_num = pf->msix_entries[base + vector].vector; + if (q_vector->tx.ring && q_vector->rx.ring) { snprintf(q_vector->name, sizeof(q_vector->name) - 1, "%s-%s-%d", basename, "TxRx", rx_int_idx++); @@@ -3346,7 -3354,7 +3346,7 @@@ /* skip this unused q_vector */ continue; } - err = request_irq(pf->msix_entries[base + vector].vector, + err = request_irq(irq_num, vsi->irq_handler, 0, q_vector->name, @@@ -3356,13 -3364,9 +3356,13 @@@ "MSIX request_irq failed, error: %d\n", err); goto free_queue_irqs; } + + /* register for affinity change notifications */ + q_vector->affinity_notify.notify = i40e_irq_affinity_notify; + q_vector->affinity_notify.release = i40e_irq_affinity_release; + irq_set_affinity_notifier(irq_num, &q_vector->affinity_notify); /* assign the mask for this irq */ - irq_set_affinity_hint(pf->msix_entries[base + vector].vector, - &q_vector->affinity_mask); + irq_set_affinity_hint(irq_num, &q_vector->affinity_mask); }
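The i40e hunk above registers an affinity notifier so the driver's per-vector cpumask tracks changes userspace makes via /proc/irq/N/smp_affinity. A minimal sketch of the hookup, with illustrative struct names — irq_set_affinity_notifier() and struct irq_affinity_notify are the real kernel API:

#include <linux/interrupt.h>
#include <linux/cpumask.h>

struct my_vector {
	cpumask_t affinity_mask;
	struct irq_affinity_notify affinity_notify;
};

static void my_affinity_notify(struct irq_affinity_notify *notify,
			       const cpumask_t *mask)
{
	struct my_vector *vec =
		container_of(notify, struct my_vector, affinity_notify);

	vec->affinity_mask = *mask;	/* remember the new placement */
}

/* Called when the notifier is torn down; nothing extra to free here. */
static void my_affinity_release(struct kref *ref)
{
}

static void my_register_notifier(struct my_vector *vec, int irq)
{
	vec->affinity_notify.notify = my_affinity_notify;
	vec->affinity_notify.release = my_affinity_release;
	irq_set_affinity_notifier(irq, &vec->affinity_notify);
}

/* On teardown, unregister with irq_set_affinity_notifier(irq, NULL). */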
vsi->irqs_ready = true; @@@ -3371,10 -3375,10 +3371,10 @@@ free_queue_irqs: while (vector) { vector--; - irq_set_affinity_hint(pf->msix_entries[base + vector].vector, - NULL); - free_irq(pf->msix_entries[base + vector].vector, - &(vsi->q_vectors[vector])); + irq_num = pf->msix_entries[base + vector].vector; + irq_set_affinity_notifier(irq_num, NULL); + irq_set_affinity_hint(irq_num, NULL); + free_irq(irq_num, &vsi->q_vectors[vector]); } return err; } @@@ -4013,23 -4017,19 +4013,23 @@@ static void i40e_vsi_free_irq(struct i4
vsi->irqs_ready = false; for (i = 0; i < vsi->num_q_vectors; i++) { - u16 vector = i + base; + int irq_num; + u16 vector; + + vector = i + base; + irq_num = pf->msix_entries[vector].vector;
/* free only the irqs that were actually requested */ if (!vsi->q_vectors[i] || !vsi->q_vectors[i]->num_ringpairs) continue;
+ /* clear the affinity notifier in the IRQ descriptor */ + irq_set_affinity_notifier(irq_num, NULL); /* clear the affinity_mask in the IRQ descriptor */ - irq_set_affinity_hint(pf->msix_entries[vector].vector, - NULL); - synchronize_irq(pf->msix_entries[vector].vector); - free_irq(pf->msix_entries[vector].vector, - vsi->q_vectors[i]); + irq_set_affinity_hint(irq_num, NULL); + synchronize_irq(irq_num); + free_irq(irq_num, vsi->q_vectors[i]);
/* Tear down the interrupt queue link list * @@@ -4641,29 -4641,6 +4641,6 @@@ static u8 i40e_pf_get_num_tc(struct i40 }
/** - * i40e_pf_get_default_tc - Get bitmap for first enabled TC - * @pf: PF being queried - * - * Return a bitmap for first enabled traffic class for this PF. - **/ - static u8 i40e_pf_get_default_tc(struct i40e_pf *pf) - { - u8 enabled_tc = pf->hw.func_caps.enabled_tcmap; - u8 i = 0; - - if (!enabled_tc) - return 0x1; /* TC0 */ - - /* Find the first enabled TC */ - for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { - if (enabled_tc & BIT(i)) - break; - } - - return BIT(i); - } - - /** * i40e_pf_get_pf_tc_map - Get bitmap for enabled traffic classes * @pf: PF being queried * @@@ -4673,7 -4650,7 +4650,7 @@@ static u8 i40e_pf_get_tc_map(struct i40 { /* If DCB is not enabled for this PF then just return default TC */ if (!(pf->flags & I40E_FLAG_DCB_ENABLED)) - return i40e_pf_get_default_tc(pf); + return I40E_DEFAULT_TRAFFIC_CLASS;
/* SFP mode we want PF to be enabled for all TCs */ if (!(pf->flags & I40E_FLAG_MFP_ENABLED)) @@@ -4683,7 -4660,7 +4660,7 @@@ if (pf->hw.func_caps.iscsi) return i40e_get_iscsi_tc_map(pf); else - return i40e_pf_get_default_tc(pf); + return I40E_DEFAULT_TRAFFIC_CLASS; }
/** @@@ -5029,7 -5006,7 +5006,7 @@@ static void i40e_dcb_reconfigure(struc if (v == pf->lan_vsi) tc_map = i40e_pf_get_tc_map(pf); else - tc_map = i40e_pf_get_default_tc(pf); + tc_map = I40E_DEFAULT_TRAFFIC_CLASS; #ifdef I40E_FCOE if (pf->vsi[v]->type == I40E_VSI_FCOE) tc_map = i40e_get_fcoe_tc_map(pf); @@@ -5717,7 -5694,7 +5694,7 @@@ static int i40e_handle_lldp_event(struc u8 type;
/* Not DCB capable or capability disabled */ - if (!(pf->flags & I40E_FLAG_DCB_ENABLED)) + if (!(pf->flags & I40E_FLAG_DCB_CAPABLE)) return ret;
/* Ignore if event is not for Nearest Bridge */ @@@ -7707,6 -7684,7 +7684,7 @@@ static int i40e_init_msix(struct i40e_p pf->flags &= ~I40E_FLAG_MSIX_ENABLED; kfree(pf->msix_entries); pf->msix_entries = NULL; + pci_disable_msix(pf->pdev); return -ENODEV;
} else if (v_actual == I40E_MIN_MSIX) { @@@ -8367,8 -8345,8 +8345,8 @@@ int i40e_reconfig_rss_queues(struct i40
i40e_pf_config_rss(pf); } - dev_info(&pf->pdev->dev, "RSS count/HW max RSS count: %d/%d\n", - pf->alloc_rss_size, pf->rss_size_max); + dev_info(&pf->pdev->dev, "User requested queue count/HW max RSS count: %d/%d\n", + vsi->req_queue_pairs, pf->rss_size_max); return pf->alloc_rss_size; }
@@@ -8511,6 -8489,15 +8489,6 @@@ static int i40e_sw_init(struct i40e_pf int err = 0; int size;
- pf->msg_enable = netif_msg_init(I40E_DEFAULT_MSG_ENABLE, - (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK)); - if (debug != -1 && debug != I40E_DEFAULT_MSG_ENABLE) { - if (I40E_DEBUG_USER & debug) - pf->hw.debug_mask = debug; - pf->msg_enable = netif_msg_init((debug & ~I40E_DEBUG_USER), - I40E_DEFAULT_MSG_ENABLE); - } - /* Set default capability flags */ pf->flags = I40E_FLAG_RX_CSUM_ENABLED | I40E_FLAG_MSI_ENABLED | @@@ -9047,7 -9034,7 +9025,7 @@@ static int i40e_ndo_bridge_getlink(stru return 0;
return ndo_dflt_bridge_getlink(skb, pid, seq, dev, veb->bridge_mode, - nlflags, 0, 0, filter_mask, NULL); + 0, 0, nlflags, filter_mask, NULL); }
/* Hardware supports L4 tunnel length of 128B (=2^7) which includes @@@ -9176,6 -9163,12 +9154,6 @@@ static int i40e_config_netdev(struct i4 if (vsi->type == I40E_VSI_MAIN) { SET_NETDEV_DEV(netdev, &pf->pdev->dev); ether_addr_copy(mac_addr, hw->mac.perm_addr); - /* The following steps are necessary to prevent reception - * of tagged packets - some older NVM configurations load a - * default a MAC-VLAN filter that accepts any tagged packet - * which must be replaced by a normal filter. - */ - i40e_rm_default_mac_filter(vsi, mac_addr); spin_lock_bh(&vsi->mac_filter_list_lock); i40e_add_filter(vsi, mac_addr, I40E_VLAN_ANY, false, true); spin_unlock_bh(&vsi->mac_filter_list_lock); @@@ -9205,11 -9198,6 +9183,11 @@@ i40e_fcoe_config_netdev(netdev, vsi); #endif
+ /* MTU range: 68 - 9706 */ + netdev->min_mtu = ETH_MIN_MTU; + netdev->max_mtu = I40E_MAX_RXBUFFER - + (ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN); + return 0; }
@@@ -9693,6 -9681,8 +9671,6 @@@ static struct i40e_vsi *i40e_vsi_reinit pf->vsi[pf->lan_vsi]->tc_config.enabled_tc = 0; pf->vsi[pf->lan_vsi]->seid = pf->main_vsi_seid; i40e_vsi_config_tc(pf->vsi[pf->lan_vsi], enabled_tc); - if (vsi->type == I40E_VSI_MAIN) - i40e_rm_default_mac_filter(vsi, pf->hw.mac.perm_addr);
/* assign it some queues */ ret = i40e_alloc_rings(vsi); @@@ -10816,12 -10806,10 +10794,12 @@@ static int i40e_probe(struct pci_dev *p mutex_init(&hw->aq.asq_mutex); mutex_init(&hw->aq.arq_mutex);
- if (debug != -1) { - pf->msg_enable = pf->hw.debug_mask; - pf->msg_enable = debug; - } + pf->msg_enable = netif_msg_init(debug, + NETIF_MSG_DRV | + NETIF_MSG_PROBE | + NETIF_MSG_LINK); + if (debug < -1) + pf->hw.debug_mask = debug;
/* do a special CORER for clearing PXE mode once at init */ if (hw->revision_id == 0 && @@@ -10963,7 -10951,7 +10941,7 @@@ err = i40e_init_pf_dcb(pf); if (err) { dev_info(&pdev->dev, "DCB init failed %d, disabled\n", err); - pf->flags &= ~(I40E_FLAG_DCB_CAPABLE & I40E_FLAG_DCB_ENABLED); + pf->flags &= ~(I40E_FLAG_DCB_CAPABLE | I40E_FLAG_DCB_ENABLED); /* Continue without DCB enabled */ } #endif /* CONFIG_I40E_DCB */ diff --combined drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index cbd2cfa,bd93d82..5e1f57c --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@@ -5012,23 -5012,24 +5012,23 @@@ fwd_queue_err return err; }
-static void ixgbe_configure_dfwd(struct ixgbe_adapter *adapter) +static int ixgbe_upper_dev_walk(struct net_device *upper, void *data) { - struct net_device *upper; - struct list_head *iter; - int err; - - netdev_for_each_all_upper_dev_rcu(adapter->netdev, upper, iter) { - if (netif_is_macvlan(upper)) { - struct macvlan_dev *dfwd = netdev_priv(upper); - struct ixgbe_fwd_adapter *vadapter = dfwd->fwd_priv; + if (netif_is_macvlan(upper)) { + struct macvlan_dev *dfwd = netdev_priv(upper); + struct ixgbe_fwd_adapter *vadapter = dfwd->fwd_priv;
- if (dfwd->fwd_priv) { - err = ixgbe_fwd_ring_up(upper, vadapter); - if (err) - continue; - } - } + if (dfwd->fwd_priv) + ixgbe_fwd_ring_up(upper, vadapter); } + + return 0; +} + +static void ixgbe_configure_dfwd(struct ixgbe_adapter *adapter) +{ + netdev_walk_all_upper_dev_rcu(adapter->netdev, + ixgbe_upper_dev_walk, NULL); }
static void ixgbe_configure(struct ixgbe_adapter *adapter) @@@ -5447,25 -5448,12 +5447,25 @@@ static void ixgbe_fdir_filter_exit(stru spin_unlock(&adapter->fdir_perfect_lock); }
+static int ixgbe_disable_macvlan(struct net_device *upper, void *data) +{ + if (netif_is_macvlan(upper)) { + struct macvlan_dev *vlan = netdev_priv(upper); + + if (vlan->fwd_priv) { + netif_tx_stop_all_queues(upper); + netif_carrier_off(upper); + netif_tx_disable(upper); + } + } + + return 0; +} + void ixgbe_down(struct ixgbe_adapter *adapter) { struct net_device *netdev = adapter->netdev; struct ixgbe_hw *hw = &adapter->hw; - struct net_device *upper; - struct list_head *iter; int i;
/* signal that we are down to the interrupt handler */ @@@ -5489,8 -5477,17 +5489,8 @@@ netif_tx_disable(netdev);
/* disable any upper devices */ - netdev_for_each_all_upper_dev_rcu(adapter->netdev, upper, iter) { - if (netif_is_macvlan(upper)) { - struct macvlan_dev *vlan = netdev_priv(upper); - - if (vlan->fwd_priv) { - netif_tx_stop_all_queues(upper); - netif_carrier_off(upper); - netif_tx_disable(upper); - } - } - } + netdev_walk_all_upper_dev_rcu(adapter->netdev, + ixgbe_disable_macvlan, NULL);
ixgbe_irq_disable(adapter);
@@@ -6052,6 -6049,11 +6052,6 @@@ static void ixgbe_free_all_rx_resources static int ixgbe_change_mtu(struct net_device *netdev, int new_mtu) { struct ixgbe_adapter *adapter = netdev_priv(netdev); - int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN; - - /* MTU < 68 is an error and causes problems on some kernels */ - if ((new_mtu < 68) || (max_frame > IXGBE_MAX_JUMBO_FRAME_SIZE)) - return -EINVAL;
/* * For 82599EB we cannot allow legacy VFs to enable their receive @@@ -6060,7 -6062,7 +6060,7 @@@ */ if ((adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) && (adapter->hw.mac.type == ixgbe_mac_82599EB) && - (max_frame > (ETH_FRAME_LEN + ETH_FCS_LEN))) + (new_mtu > ETH_DATA_LEN)) e_warn(probe, "Setting MTU > 1500 will disable legacy VFs\n");
e_info(probe, "changing MTU from %d to %d\n", netdev->mtu, new_mtu); @@@ -6726,18 -6728,6 +6726,18 @@@ static void ixgbe_update_default_up(str #endif }
+static int ixgbe_enable_macvlan(struct net_device *upper, void *data) +{ + if (netif_is_macvlan(upper)) { + struct macvlan_dev *vlan = netdev_priv(upper); + + if (vlan->fwd_priv) + netif_tx_wake_all_queues(upper); + } + + return 0; +} + /** * ixgbe_watchdog_link_is_up - update netif_carrier status and * print link up message @@@ -6747,6 -6737,8 +6747,6 @@@ static void ixgbe_watchdog_link_is_up(s { struct net_device *netdev = adapter->netdev; struct ixgbe_hw *hw = &adapter->hw; - struct net_device *upper; - struct list_head *iter; u32 link_speed = adapter->link_speed; const char *speed_str; bool flow_rx, flow_tx; @@@ -6817,8 -6809,14 +6817,8 @@@
/* enable any upper devices */ rtnl_lock(); - netdev_for_each_all_upper_dev_rcu(adapter->netdev, upper, iter) { - if (netif_is_macvlan(upper)) { - struct macvlan_dev *vlan = netdev_priv(upper); - - if (vlan->fwd_priv) - netif_tx_wake_all_queues(upper); - } - } + netdev_walk_all_upper_dev_rcu(adapter->netdev, + ixgbe_enable_macvlan, NULL); rtnl_unlock();
/* update the default user priority for VFs */ @@@ -8352,38 -8350,12 +8352,38 @@@ static int ixgbe_configure_clsu32_del_h }
#ifdef CONFIG_NET_CLS_ACT +struct upper_walk_data { + struct ixgbe_adapter *adapter; + u64 action; + int ifindex; + u8 queue; +}; + +static int get_macvlan_queue(struct net_device *upper, void *_data) +{ + if (netif_is_macvlan(upper)) { + struct macvlan_dev *dfwd = netdev_priv(upper); + struct ixgbe_fwd_adapter *vadapter = dfwd->fwd_priv; + struct upper_walk_data *data = _data; + struct ixgbe_adapter *adapter = data->adapter; + int ifindex = data->ifindex; + + if (vadapter && vadapter->netdev->ifindex == ifindex) { + data->queue = adapter->rx_ring[vadapter->rx_base_queue]->reg_idx; + data->action = data->queue; + return 1; + } + } + + return 0; +} + static int handle_redirect_action(struct ixgbe_adapter *adapter, int ifindex, u8 *queue, u64 *action) { unsigned int num_vfs = adapter->num_vfs, vf; + struct upper_walk_data data; struct net_device *upper; - struct list_head *iter;
/* redirect to a SRIOV VF */ for (vf = 0; vf < num_vfs; ++vf) { @@@ -8401,16 -8373,17 +8401,16 @@@ }
/* redirect to a offloaded macvlan netdev */ - netdev_for_each_all_upper_dev_rcu(adapter->netdev, upper, iter) { - if (netif_is_macvlan(upper)) { - struct macvlan_dev *dfwd = netdev_priv(upper); - struct ixgbe_fwd_adapter *vadapter = dfwd->fwd_priv; - - if (vadapter && vadapter->netdev->ifindex == ifindex) { - *queue = adapter->rx_ring[vadapter->rx_base_queue]->reg_idx; - *action = *queue; - return 0; - } - } + data.adapter = adapter; + data.ifindex = ifindex; + data.action = 0; + data.queue = 0; + if (netdev_walk_all_upper_dev_rcu(adapter->netdev, + get_macvlan_queue, &data)) { + *action = data.action; + *queue = data.queue; + + return 0; }
return -EINVAL; @@@ -8437,7 -8410,7 +8437,7 @@@ static int parse_tc_actions(struct ixgb }
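get_macvlan_queue() above also shows the idiom for carrying state through the walker: inputs (adapter, ifindex) and outputs (queue, action) travel in a caller-owned struct passed through the void *data argument, and returning 1 both stops the traversal and signals a match to handle_redirect_action(). A sketch of the same pattern, reusing the stand-in walker and struct net_device from the earlier mock (add #include <string.h>; all names here are hypothetical):

    struct match_data {
            const char *wanted;             /* input */
            struct net_device *found;       /* output */
    };

    static int match_by_name(struct net_device *upper, void *_data)
    {
            struct match_data *data = _data;

            if (strcmp(upper->name, data->wanted) == 0) {
                    data->found = upper;
                    return 1;       /* stop; walker propagates nonzero */
            }
            return 0;
    }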
/* Redirect to a VF or a offloaded macvlan */ - if (is_tcf_mirred_redirect(a)) { + if (is_tcf_mirred_egress_redirect(a)) { int ifindex = tcf_mirred_ifindex(a);
err = handle_redirect_action(adapter, ifindex, queue, @@@ -9162,10 -9135,14 +9162,14 @@@ static void *ixgbe_fwd_add(struct net_d goto fwd_add_err; fwd_adapter->pool = pool; fwd_adapter->real_adapter = adapter; - err = ixgbe_fwd_ring_up(vdev, fwd_adapter); - if (err) - goto fwd_add_err; - netif_tx_start_all_queues(vdev); + + if (netif_running(pdev)) { + err = ixgbe_fwd_ring_up(vdev, fwd_adapter); + if (err) + goto fwd_add_err; + netif_tx_start_all_queues(vdev); + } + return fwd_adapter; fwd_add_err: /* unwind counter and free adapter struct */ @@@ -9635,10 -9612,6 +9639,10 @@@ skip_sriov netdev->priv_flags |= IFF_UNICAST_FLT; netdev->priv_flags |= IFF_SUPP_NOFCS;
+ /* MTU range: 68 - 9710 */ + netdev->min_mtu = ETH_MIN_MTU; + netdev->max_mtu = IXGBE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN); + #ifdef CONFIG_IXGBE_DCB if (adapter->flags & IXGBE_FLAG_DCB_CAPABLE) netdev->dcbnl_ops = &dcbnl_ops; diff --combined drivers/net/ethernet/marvell/mv643xx_eth.c index 68675d8,bf5cc55b..8302c7e --- a/drivers/net/ethernet/marvell/mv643xx_eth.c +++ b/drivers/net/ethernet/marvell/mv643xx_eth.c @@@ -384,6 -384,8 +384,6 @@@ struct mv643xx_eth_private
struct net_device *dev;
- struct phy_device *phy; - struct timer_list mib_counters_timer; spinlock_t mib_counters_lock; struct mib_counters mib_counters; @@@ -1234,7 -1236,7 +1234,7 @@@ static void mv643xx_eth_adjust_link(str DISABLE_AUTO_NEG_FOR_FLOW_CTRL | DISABLE_AUTO_NEG_FOR_DUPLEX;
- if (mp->phy->autoneg == AUTONEG_ENABLE) { + if (dev->phydev->autoneg == AUTONEG_ENABLE) { /* enable auto negotiation */ pscr &= ~autoneg_disable; goto out_write; @@@ -1242,7 -1244,7 +1242,7 @@@
pscr |= autoneg_disable;
- if (mp->phy->speed == SPEED_1000) { + if (dev->phydev->speed == SPEED_1000) { /* force gigabit, half duplex not supported */ pscr |= SET_GMII_SPEED_TO_1000; pscr |= SET_FULL_DUPLEX_MODE; @@@ -1251,12 -1253,12 +1251,12 @@@
pscr &= ~SET_GMII_SPEED_TO_1000;
- if (mp->phy->speed == SPEED_100) + if (dev->phydev->speed == SPEED_100) pscr |= SET_MII_SPEED_TO_100; else pscr &= ~SET_MII_SPEED_TO_100;
- if (mp->phy->duplex == DUPLEX_FULL) + if (dev->phydev->duplex == DUPLEX_FULL) pscr |= SET_FULL_DUPLEX_MODE; else pscr &= ~SET_FULL_DUPLEX_MODE; @@@ -1495,69 -1497,55 +1495,69 @@@ static const struct mv643xx_eth_stats m };
static int -mv643xx_eth_get_settings_phy(struct mv643xx_eth_private *mp, - struct ethtool_cmd *cmd) +mv643xx_eth_get_link_ksettings_phy(struct mv643xx_eth_private *mp, + struct ethtool_link_ksettings *cmd) { + struct net_device *dev = mp->dev; int err; + u32 supported, advertising;
- err = phy_read_status(mp->phy); + err = phy_read_status(dev->phydev); if (err == 0) - err = phy_ethtool_gset(mp->phy, cmd); + err = phy_ethtool_ksettings_get(dev->phydev, cmd);
/* * The MAC does not support 1000baseT_Half. */ - cmd->supported &= ~SUPPORTED_1000baseT_Half; - cmd->advertising &= ~ADVERTISED_1000baseT_Half; + ethtool_convert_link_mode_to_legacy_u32(&supported, + cmd->link_modes.supported); + ethtool_convert_link_mode_to_legacy_u32(&advertising, + cmd->link_modes.advertising); + supported &= ~SUPPORTED_1000baseT_Half; + advertising &= ~ADVERTISED_1000baseT_Half; + ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported, + supported); + ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising, + advertising);
return err; }
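The convert/mask/convert-back sequence above is the price of the new API: ethtool_link_ksettings keeps supported/advertising as arbitrary-length link-mode bitmaps rather than a single u32, so legacy SUPPORTED_*/ADVERTISED_* masks can only be applied after conversion. A hypothetical helper wrapping the pattern (ethtool_convert_link_mode_to_legacy_u32() returns false when the bitmap contains modes with no legacy equivalent; this sketch ignores that, just as the driver does):

    /* Hypothetical wrapper: clear legacy-representable bits in a bitmap. */
    static void link_mode_clear_legacy(unsigned long *link_mode, u32 legacy_bits)
    {
            u32 legacy;

            ethtool_convert_link_mode_to_legacy_u32(&legacy, link_mode);
            legacy &= ~legacy_bits;
            ethtool_convert_legacy_u32_to_link_mode(link_mode, legacy);
    }

With such a helper the hunk above would reduce to two calls, e.g. link_mode_clear_legacy(cmd->link_modes.supported, SUPPORTED_1000baseT_Half);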
static int -mv643xx_eth_get_settings_phyless(struct mv643xx_eth_private *mp, - struct ethtool_cmd *cmd) +mv643xx_eth_get_link_ksettings_phyless(struct mv643xx_eth_private *mp, + struct ethtool_link_ksettings *cmd) { u32 port_status; + u32 supported, advertising;
port_status = rdlp(mp, PORT_STATUS);
- cmd->supported = SUPPORTED_MII; - cmd->advertising = ADVERTISED_MII; + supported = SUPPORTED_MII; + advertising = ADVERTISED_MII; switch (port_status & PORT_SPEED_MASK) { case PORT_SPEED_10: - ethtool_cmd_speed_set(cmd, SPEED_10); + cmd->base.speed = SPEED_10; break; case PORT_SPEED_100: - ethtool_cmd_speed_set(cmd, SPEED_100); + cmd->base.speed = SPEED_100; break; case PORT_SPEED_1000: - ethtool_cmd_speed_set(cmd, SPEED_1000); + cmd->base.speed = SPEED_1000; break; default: - cmd->speed = -1; + cmd->base.speed = -1; break; } - cmd->duplex = (port_status & FULL_DUPLEX) ? DUPLEX_FULL : DUPLEX_HALF; - cmd->port = PORT_MII; - cmd->phy_address = 0; - cmd->transceiver = XCVR_INTERNAL; - cmd->autoneg = AUTONEG_DISABLE; - cmd->maxtxpkt = 1; - cmd->maxrxpkt = 1; + cmd->base.duplex = (port_status & FULL_DUPLEX) ? + DUPLEX_FULL : DUPLEX_HALF; + cmd->base.port = PORT_MII; + cmd->base.phy_address = 0; + cmd->base.autoneg = AUTONEG_DISABLE; + + ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported, + supported); + ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising, + advertising);
return 0; } @@@ -1565,21 -1553,23 +1565,21 @@@ static void mv643xx_eth_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) { - struct mv643xx_eth_private *mp = netdev_priv(dev); wol->supported = 0; wol->wolopts = 0; - if (mp->phy) - phy_ethtool_get_wol(mp->phy, wol); + if (dev->phydev) + phy_ethtool_get_wol(dev->phydev, wol); }
static int mv643xx_eth_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) { - struct mv643xx_eth_private *mp = netdev_priv(dev); int err;
- if (mp->phy == NULL) + if (!dev->phydev) return -EOPNOTSUPP;
- err = phy_ethtool_set_wol(mp->phy, wol); + err = phy_ethtool_set_wol(dev->phydev, wol); /* Given that mv643xx_eth works without the marvell-specific PHY driver, * this debugging hint is useful to have. */ @@@ -1589,38 -1579,31 +1589,38 @@@ }
static int -mv643xx_eth_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) +mv643xx_eth_get_link_ksettings(struct net_device *dev, + struct ethtool_link_ksettings *cmd) { struct mv643xx_eth_private *mp = netdev_priv(dev);
- if (mp->phy != NULL) - return mv643xx_eth_get_settings_phy(mp, cmd); + if (dev->phydev) + return mv643xx_eth_get_link_ksettings_phy(mp, cmd); else - return mv643xx_eth_get_settings_phyless(mp, cmd); + return mv643xx_eth_get_link_ksettings_phyless(mp, cmd); }
static int -mv643xx_eth_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) +mv643xx_eth_set_link_ksettings(struct net_device *dev, + const struct ethtool_link_ksettings *cmd) { - struct mv643xx_eth_private *mp = netdev_priv(dev); + struct ethtool_link_ksettings c = *cmd; + u32 advertising; int ret;
- if (mp->phy == NULL) + if (!dev->phydev) return -EINVAL;
/* * The MAC does not support 1000baseT_Half. */ - cmd->advertising &= ~ADVERTISED_1000baseT_Half; + ethtool_convert_link_mode_to_legacy_u32(&advertising, + c.link_modes.advertising); + advertising &= ~ADVERTISED_1000baseT_Half; + ethtool_convert_legacy_u32_to_link_mode(c.link_modes.advertising, + advertising);
- ret = phy_ethtool_sset(mp->phy, cmd); + ret = phy_ethtool_ksettings_set(dev->phydev, &c); if (!ret) mv643xx_eth_adjust_link(dev); return ret; @@@ -1639,10 -1622,12 +1639,10 @@@ static void mv643xx_eth_get_drvinfo(str
static int mv643xx_eth_nway_reset(struct net_device *dev) { - struct mv643xx_eth_private *mp = netdev_priv(dev); - - if (mp->phy == NULL) + if (!dev->phydev) return -EINVAL;
- return genphy_restart_aneg(mp->phy); + return genphy_restart_aneg(dev->phydev); }
static int @@@ -1767,6 -1752,8 +1767,6 @@@ static int mv643xx_eth_get_sset_count(s }
static const struct ethtool_ops mv643xx_eth_ethtool_ops = { - .get_settings = mv643xx_eth_get_settings, - .set_settings = mv643xx_eth_set_settings, .get_drvinfo = mv643xx_eth_get_drvinfo, .nway_reset = mv643xx_eth_nway_reset, .get_link = ethtool_op_get_link, @@@ -1780,8 -1767,6 +1780,8 @@@ .get_ts_info = ethtool_op_get_ts_info, .get_wol = mv643xx_eth_get_wol, .set_wol = mv643xx_eth_set_wol, + .get_link_ksettings = mv643xx_eth_get_link_ksettings, + .set_link_ksettings = mv643xx_eth_set_link_ksettings, };
@@@ -2341,21 -2326,19 +2341,21 @@@ static inline void oom_timer_wrapper(un
static void port_start(struct mv643xx_eth_private *mp) { + struct net_device *dev = mp->dev; u32 pscr; int i;
/* * Perform PHY reset, if there is a PHY. */ - if (mp->phy != NULL) { - struct ethtool_cmd cmd; + if (dev->phydev) { + struct ethtool_link_ksettings cmd;
- mv643xx_eth_get_settings(mp->dev, &cmd); - phy_init_hw(mp->phy); - mv643xx_eth_set_settings(mp->dev, &cmd); - phy_start(mp->phy); + mv643xx_eth_get_link_ksettings(dev, &cmd); + phy_init_hw(dev->phydev); + mv643xx_eth_set_link_ksettings( + dev, (const struct ethtool_link_ksettings *)&cmd); + phy_start(dev->phydev); }
/* @@@ -2367,7 -2350,7 +2367,7 @@@ wrlp(mp, PORT_SERIAL_CONTROL, pscr);
pscr |= DO_NOT_FORCE_LINK_FAIL; - if (mp->phy == NULL) + if (!dev->phydev) pscr |= FORCE_LINK_PASS; wrlp(mp, PORT_SERIAL_CONTROL, pscr);
@@@ -2551,8 -2534,8 +2551,8 @@@ static int mv643xx_eth_stop(struct net_ del_timer_sync(&mp->rx_oom);
netif_carrier_off(dev); - if (mp->phy) - phy_stop(mp->phy); + if (dev->phydev) + phy_stop(dev->phydev); free_irq(dev->irq, dev);
port_reset(mp); @@@ -2570,12 -2553,13 +2570,12 @@@
static int mv643xx_eth_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) { - struct mv643xx_eth_private *mp = netdev_priv(dev); int ret;
- if (mp->phy == NULL) + if (!dev->phydev) return -ENOTSUPP;
- ret = phy_mii_ioctl(mp->phy, ifr, cmd); + ret = phy_mii_ioctl(dev->phydev, ifr, cmd); if (!ret) mv643xx_eth_adjust_link(dev); return ret; @@@ -2585,6 -2569,9 +2585,6 @@@ static int mv643xx_eth_change_mtu(struc { struct mv643xx_eth_private *mp = netdev_priv(dev);
- if (new_mtu < 64 || new_mtu > 9500) - return -EINVAL; - dev->mtu = new_mtu; mv643xx_eth_recalc_skb_size(mp); tx_set_rate(mp, 1000000000, 16777216); @@@ -2981,6 -2968,22 +2981,22 @@@ static void set_params(struct mv643xx_e mp->txq_count = pd->tx_queue_count ? : 1; }
+ static int get_phy_mode(struct mv643xx_eth_private *mp) + { + struct device *dev = mp->dev->dev.parent; + int iface = -1; + + if (dev->of_node) + iface = of_get_phy_mode(dev->of_node); + + /* Historical default if unspecified. We could also read/write + * the interface state in the PSC1 + */ + if (iface < 0) + iface = PHY_INTERFACE_MODE_GMII; + return iface; + } + static struct phy_device *phy_scan(struct mv643xx_eth_private *mp, int phy_addr) { @@@ -3007,7 -3010,7 +3023,7 @@@ "orion-mdio-mii", addr);
phydev = phy_connect(mp->dev, phy_id, mv643xx_eth_adjust_link, - PHY_INTERFACE_MODE_GMII); + get_phy_mode(mp)); if (!IS_ERR(phydev)) { phy_addr_set(mp, addr); break; @@@ -3019,8 -3022,7 +3035,8 @@@
static void phy_init(struct mv643xx_eth_private *mp, int speed, int duplex) { - struct phy_device *phy = mp->phy; + struct net_device *dev = mp->dev; + struct phy_device *phy = dev->phydev;
if (speed == 0) { phy->autoneg = AUTONEG_ENABLE; @@@ -3038,7 -3040,6 +3054,7 @@@
static void init_pscr(struct mv643xx_eth_private *mp, int speed, int duplex) { + struct net_device *dev = mp->dev; u32 pscr;
pscr = rdlp(mp, PORT_SERIAL_CONTROL); @@@ -3048,7 -3049,7 +3064,7 @@@ }
pscr = MAX_RX_PACKET_9700BYTE | SERIAL_PORT_CONTROL_RESERVED; - if (mp->phy == NULL) { + if (!dev->phydev) { pscr |= DISABLE_AUTO_NEG_SPEED_GMII; if (speed == SPEED_1000) pscr |= SET_GMII_SPEED_TO_1000; @@@ -3087,7 -3088,6 +3103,7 @@@ static int mv643xx_eth_probe(struct pla struct mv643xx_eth_platform_data *pd; struct mv643xx_eth_private *mp; struct net_device *dev; + struct phy_device *phydev = NULL; struct resource *res; int err;
@@@ -3106,6 -3106,7 +3122,7 @@@ if (!dev) return -ENOMEM;
+ SET_NETDEV_DEV(dev, &pdev->dev); mp = netdev_priv(dev); platform_set_drvdata(pdev, mp);
@@@ -3143,18 -3144,18 +3160,18 @@@
err = 0; if (pd->phy_node) { - mp->phy = of_phy_connect(mp->dev, pd->phy_node, - mv643xx_eth_adjust_link, 0, - get_phy_mode(mp)); - if (!mp->phy) + phydev = of_phy_connect(mp->dev, pd->phy_node, + mv643xx_eth_adjust_link, 0, - PHY_INTERFACE_MODE_GMII); ++ get_phy_mode(mp)); + if (!phydev) err = -ENODEV; else - phy_addr_set(mp, mp->phy->mdio.addr); + phy_addr_set(mp, phydev->mdio.addr); } else if (pd->phy_addr != MV643XX_ETH_PHY_NONE) { - mp->phy = phy_scan(mp, pd->phy_addr); + phydev = phy_scan(mp, pd->phy_addr);
- if (IS_ERR(mp->phy)) - err = PTR_ERR(mp->phy); + if (IS_ERR(phydev)) + err = PTR_ERR(phydev); else phy_init(mp, pd->speed, pd->duplex); } @@@ -3203,12 -3204,6 +3220,15 @@@ dev->priv_flags |= IFF_UNICAST_FLT; dev->gso_max_segs = MV643XX_MAX_TSO_SEGS;
++<<<<<<< HEAD + /* MTU range: 64 - 9500 */ + dev->min_mtu = 64; + dev->max_mtu = 9500; + + SET_NETDEV_DEV(dev, &pdev->dev); + ++======= ++>>>>>>> 2a26d99b251b8625d27aed14e97fc10707a3a81f if (mp->shared->win_protect) wrl(mp, WINDOW_PROTECT(mp->port_num), mp->shared->win_protect);
@@@ -3242,11 -3237,10 +3262,11 @@@ out static int mv643xx_eth_remove(struct platform_device *pdev) { struct mv643xx_eth_private *mp = platform_get_drvdata(pdev); + struct net_device *dev = mp->dev;
unregister_netdev(mp->dev); - if (mp->phy != NULL) - phy_disconnect(mp->phy); + if (dev->phydev) + phy_disconnect(dev->phydev); cancel_work_sync(&mp->tx_timeout_task);
if (!IS_ERR(mp->clk)) diff --combined drivers/net/ethernet/mellanox/mlx4/en_netdev.c index bf35ac4,12c99a2..58b749d --- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c @@@ -1733,6 -1733,13 +1733,13 @@@ int mlx4_en_start_port(struct net_devic udp_tunnel_get_rx_info(dev);
priv->port_up = true; + + /* Process all completions if exist to prevent + * the queues freezing if they are full + */ + for (i = 0; i < priv->rx_ring_num; i++) + napi_schedule(&priv->rx_cq[i]->napi); + netif_tx_start_all_queues(dev); netif_device_attach(dev);
@@@ -1910,8 -1917,9 +1917,9 @@@ static void mlx4_en_clear_stats(struct struct mlx4_en_dev *mdev = priv->mdev; int i;
- if (mlx4_en_DUMP_ETH_STATS(mdev, priv->port, 1)) - en_dbg(HW, priv, "Failed dumping statistics\n"); + if (!mlx4_is_slave(mdev->dev)) + if (mlx4_en_DUMP_ETH_STATS(mdev, priv->port, 1)) + en_dbg(HW, priv, "Failed dumping statistics\n");
memset(&priv->pstats, 0, sizeof(priv->pstats)); memset(&priv->pkstats, 0, sizeof(priv->pkstats)); @@@ -2194,6 -2202,7 +2202,7 @@@ void mlx4_en_destroy_netdev(struct net_
if (!shutdown) free_netdev(dev); + dev->ethtool_ops = NULL; }
static int mlx4_en_change_mtu(struct net_device *dev, int new_mtu) @@@ -2205,6 -2214,10 +2214,6 @@@ en_dbg(DRV, priv, "Change MTU called - current:%d new:%d\n", dev->mtu, new_mtu);
- if ((new_mtu < MLX4_EN_MIN_MTU) || (new_mtu > priv->max_mtu)) { - en_err(priv, "Bad MTU size:%d.\n", new_mtu); - return -EPERM; - } if (priv->xdp_ring_num && MLX4_EN_EFF_MTU(new_mtu) > FRAG_SZ0) { en_err(priv, "MTU size:%d requires frags but XDP running\n", new_mtu); @@@ -3284,10 -3297,6 +3293,10 @@@ int mlx4_en_init_netdev(struct mlx4_en_ dev->gso_partial_features = NETIF_F_GSO_UDP_TUNNEL_CSUM; }
+ /* MTU range: 46 - hw-specific max */ + dev->min_mtu = MLX4_EN_MIN_MTU; + dev->max_mtu = priv->max_mtu; + mdev->pndev[port] = dev; mdev->upper[port] = NULL;
diff --combined drivers/net/ethernet/mellanox/mlx5/core/en_main.c index 03183eb,f4c687c..d3f13b5 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@@ -1971,9 -1971,7 +1971,7 @@@ static void mlx5e_build_tir_ctx_lro(voi MLX5_SET(tirc, tirc, lro_max_ip_payload_size, (priv->params.lro_wqe_sz - ROUGH_MAX_L2_L3_HDR_SZ) >> 8); - MLX5_SET(tirc, tirc, lro_timeout_period_usecs, - MLX5_CAP_ETH(priv->mdev, - lro_timer_supported_periods[2])); + MLX5_SET(tirc, tirc, lro_timeout_period_usecs, priv->params.lro_timeout); }
void mlx5e_build_tir_ctx_hash(void *tirc, struct mlx5e_priv *priv) @@@ -2851,13 -2849,31 +2849,13 @@@ static int mlx5e_set_features(struct ne return err ? -EINVAL : 0; }
-#define MXL5_HW_MIN_MTU 64 -#define MXL5E_MIN_MTU (MXL5_HW_MIN_MTU + ETH_FCS_LEN) - static int mlx5e_change_mtu(struct net_device *netdev, int new_mtu) { struct mlx5e_priv *priv = netdev_priv(netdev); - struct mlx5_core_dev *mdev = priv->mdev; bool was_opened; - u16 max_mtu; - u16 min_mtu; int err = 0; bool reset;
- mlx5_query_port_max_mtu(mdev, &max_mtu, 1); - - max_mtu = MLX5E_HW2SW_MTU(max_mtu); - min_mtu = MLX5E_HW2SW_MTU(MXL5E_MIN_MTU); - - if (new_mtu > max_mtu || new_mtu < min_mtu) { - netdev_err(netdev, - "%s: Bad MTU (%d), valid range is: [%d..%d]\n", - __func__, new_mtu, min_mtu, max_mtu); - return -EINVAL; - } - mutex_lock(&priv->state_lock);
reset = !priv->params.lro_en && @@@ -3383,6 -3399,18 +3381,18 @@@ static void mlx5e_query_min_inline(stru } }
+ u32 mlx5e_choose_lro_timeout(struct mlx5_core_dev *mdev, u32 wanted_timeout) + { + int i; + + /* The supported periods are organized in ascending order */ + for (i = 0; i < MLX5E_LRO_TIMEOUT_ARR_SIZE - 1; i++) + if (MLX5_CAP_ETH(mdev, lro_timer_supported_periods[i]) >= wanted_timeout) + break; + + return MLX5_CAP_ETH(mdev, lro_timer_supported_periods[i]); + } + static void mlx5e_build_nic_netdev_priv(struct mlx5_core_dev *mdev, struct net_device *netdev, const struct mlx5e_profile *profile, @@@ -3401,6 -3429,9 +3411,9 @@@ priv->profile = profile; priv->ppriv = ppriv;
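mlx5e_choose_lro_timeout() selects the smallest firmware-supported LRO timer period that is at least the wanted timeout, and deliberately stops at the last array index so an oversized wish is clamped to the largest supported period; the hunk just below then caches the result in priv->params.lro_timeout. A runnable mimic (the period values are invented for illustration; real ones come from the device's lro_timer_supported_periods capability):

    #include <stdio.h>
    #include <stdint.h>

    #define LRO_TIMEOUT_ARR_SIZE 4

    static uint32_t choose_lro_timeout(const uint32_t *periods, uint32_t wanted)
    {
            int i;

            /* periods[] is assumed ascending; stop at the first entry
             * >= wanted, or at the last entry if none is large enough */
            for (i = 0; i < LRO_TIMEOUT_ARR_SIZE - 1; i++)
                    if (periods[i] >= wanted)
                            break;
            return periods[i];
    }

    int main(void)
    {
            const uint32_t periods[LRO_TIMEOUT_ARR_SIZE] = { 8, 16, 32, 1024 };

            printf("%u\n", choose_lro_timeout(periods, 20));   /* -> 32 */
            printf("%u\n", choose_lro_timeout(periods, 4000)); /* -> 1024, clamped */
            return 0;
    }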
+ priv->params.lro_timeout = + mlx5e_choose_lro_timeout(mdev, MLX5E_DEFAULT_LRO_TIMEOUT); + priv->params.log_sq_size = MLX5E_PARAMS_DEFAULT_LOG_SQ_SIZE;
/* set CQE compression */ @@@ -3817,7 -3848,6 +3830,7 @@@ int mlx5e_attach_netdev(struct mlx5_cor { const struct mlx5e_profile *profile; struct mlx5e_priv *priv; + u16 max_mtu; int err;
priv = netdev_priv(netdev); @@@ -3848,11 -3878,6 +3861,11 @@@
mlx5e_init_l2_addr(priv);
+ /* MTU range: 68 - hw-specific max */ + netdev->min_mtu = ETH_MIN_MTU; + mlx5_query_port_max_mtu(priv->mdev, &max_mtu, 1); + netdev->max_mtu = MLX5E_HW2SW_MTU(max_mtu); + mlx5e_set_dev_port_mtu(netdev);
if (profile->enable) @@@ -4023,7 -4048,6 +4036,6 @@@ void mlx5e_destroy_netdev(struct mlx5_c const struct mlx5e_profile *profile = priv->profile; struct net_device *netdev = priv->netdev;
- unregister_netdev(netdev); destroy_workqueue(priv->wq); if (profile->cleanup) profile->cleanup(priv); @@@ -4040,6 -4064,7 +4052,7 @@@ static void mlx5e_remove(struct mlx5_co for (vport = 1; vport < total_vfs; vport++) mlx5_eswitch_unregister_vport_rep(esw, vport);
+ unregister_netdev(priv->netdev); mlx5e_detach(mdev, vpriv); mlx5e_destroy_netdev(mdev, priv); } diff --combined drivers/net/ethernet/mellanox/mlxsw/pci.c index 63d89f7,912f71f..d5cf1ea --- a/drivers/net/ethernet/mellanox/mlxsw/pci.c +++ b/drivers/net/ethernet/mellanox/mlxsw/pci.c @@@ -48,17 -48,33 +48,17 @@@ #include <linux/seq_file.h> #include <linux/string.h>
+#include "pci_hw.h" #include "pci.h" #include "core.h" #include "cmd.h" #include "port.h" +#include "resources.h"
static const char mlxsw_pci_driver_name[] = "mlxsw_pci";
-static const struct pci_device_id mlxsw_pci_id_table[] = { - {PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_SWITCHX2), 0}, - {PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_SPECTRUM), 0}, - {0, } -}; - static struct dentry *mlxsw_pci_dbg_root;
-static const char *mlxsw_pci_device_kind_get(const struct pci_device_id *id) -{ - switch (id->device) { - case PCI_DEVICE_ID_MELLANOX_SWITCHX2: - return MLXSW_DEVICE_KIND_SWITCHX2; - case PCI_DEVICE_ID_MELLANOX_SPECTRUM: - return MLXSW_DEVICE_KIND_SPECTRUM; - default: - BUG(); - } -} - #define mlxsw_pci_write32(mlxsw_pci, reg, val) \ iowrite32be(val, (mlxsw_pci)->hw_addr + (MLXSW_PCI_ ## reg)) #define mlxsw_pci_read32(mlxsw_pci, reg) \ @@@ -222,9 -238,8 +222,9 @@@ static bool mlxsw_pci_elem_hw_owned(str return owner_bit != !!(q->consumer_counter & q->count); }
-static char *mlxsw_pci_queue_sw_elem_get(struct mlxsw_pci_queue *q, - u32 (*get_elem_owner_func)(char *)) +static char * +mlxsw_pci_queue_sw_elem_get(struct mlxsw_pci_queue *q, + u32 (*get_elem_owner_func)(const char *)) { struct mlxsw_pci_queue_elem_info *elem_info; char *elem; @@@ -1139,8 -1154,76 +1139,8 @@@ mlxsw_pci_config_profile_swid_config(st mlxsw_cmd_mbox_config_profile_swid_config_mask_set(mbox, index, mask); }
-#define MLXSW_RESOURCES_TABLE_END_ID 0xffff -#define MLXSW_MAX_SPAN_ID 0x2420 -#define MLXSW_MAX_LAG_ID 0x2520 -#define MLXSW_MAX_PORTS_IN_LAG_ID 0x2521 -#define MLXSW_KVD_SIZE_ID 0x1001 -#define MLXSW_KVD_SINGLE_MIN_SIZE_ID 0x1002 -#define MLXSW_KVD_DOUBLE_MIN_SIZE_ID 0x1003 -#define MLXSW_MAX_VIRTUAL_ROUTERS_ID 0x2C01 -#define MLXSW_MAX_SYSTEM_PORT_ID 0x2502 -#define MLXSW_MAX_VLAN_GROUPS_ID 0x2906 -#define MLXSW_MAX_REGIONS_ID 0x2901 -#define MLXSW_MAX_RIF_ID 0x2C02 -#define MLXSW_RESOURCES_QUERY_MAX_QUERIES 100 -#define MLXSW_RESOURCES_PER_QUERY 32 - -static void mlxsw_pci_resources_query_parse(int id, u64 val, - struct mlxsw_resources *resources) -{ - switch (id) { - case MLXSW_MAX_SPAN_ID: - resources->max_span = val; - resources->max_span_valid = 1; - break; - case MLXSW_MAX_LAG_ID: - resources->max_lag = val; - resources->max_lag_valid = 1; - break; - case MLXSW_MAX_PORTS_IN_LAG_ID: - resources->max_ports_in_lag = val; - resources->max_ports_in_lag_valid = 1; - break; - case MLXSW_KVD_SIZE_ID: - resources->kvd_size = val; - resources->kvd_size_valid = 1; - break; - case MLXSW_KVD_SINGLE_MIN_SIZE_ID: - resources->kvd_single_min_size = val; - resources->kvd_single_min_size_valid = 1; - break; - case MLXSW_KVD_DOUBLE_MIN_SIZE_ID: - resources->kvd_double_min_size = val; - resources->kvd_double_min_size_valid = 1; - break; - case MLXSW_MAX_VIRTUAL_ROUTERS_ID: - resources->max_virtual_routers = val; - resources->max_virtual_routers_valid = 1; - break; - case MLXSW_MAX_SYSTEM_PORT_ID: - resources->max_system_ports = val; - resources->max_system_ports_valid = 1; - break; - case MLXSW_MAX_VLAN_GROUPS_ID: - resources->max_vlan_groups = val; - resources->max_vlan_groups_valid = 1; - break; - case MLXSW_MAX_REGIONS_ID: - resources->max_regions = val; - resources->max_regions_valid = 1; - break; - case MLXSW_MAX_RIF_ID: - resources->max_rif = val; - resources->max_rif_valid = 1; - break; - default: - break; - } -} - static int mlxsw_pci_resources_query(struct mlxsw_pci *mlxsw_pci, char *mbox, - struct mlxsw_resources *resources, + struct mlxsw_res *res, u8 query_enabled) { int index, i; @@@ -1154,20 -1237,19 +1154,20 @@@
mlxsw_cmd_mbox_zero(mbox);
- for (index = 0; index < MLXSW_RESOURCES_QUERY_MAX_QUERIES; index++) { + for (index = 0; index < MLXSW_CMD_QUERY_RESOURCES_MAX_QUERIES; + index++) { err = mlxsw_cmd_query_resources(mlxsw_pci->core, mbox, index); if (err) return err;
- for (i = 0; i < MLXSW_RESOURCES_PER_QUERY; i++) { + for (i = 0; i < MLXSW_CMD_QUERY_RESOURCES_PER_QUERY; i++) { id = mlxsw_cmd_mbox_query_resource_id_get(mbox, i); data = mlxsw_cmd_mbox_query_resource_data_get(mbox, i);
- if (id == MLXSW_RESOURCES_TABLE_END_ID) + if (id == MLXSW_CMD_QUERY_RESOURCES_TABLE_END_ID) return 0;
- mlxsw_pci_resources_query_parse(id, data, resources); + mlxsw_res_parse(res, id, data); } }
@@@ -1177,14 -1259,13 +1177,14 @@@ return -EIO; }
-static int mlxsw_pci_profile_get_kvd_sizes(const struct mlxsw_config_profile *profile, - struct mlxsw_resources *resources) +static int +mlxsw_pci_profile_get_kvd_sizes(const struct mlxsw_config_profile *profile, + struct mlxsw_res *res) { - u32 singles_size, doubles_size, linear_size; + u32 single_size, double_size, linear_size;
- if (!resources->kvd_single_min_size_valid || - !resources->kvd_double_min_size_valid || + if (!MLXSW_RES_VALID(res, KVD_SINGLE_MIN_SIZE) || + !MLXSW_RES_VALID(res, KVD_DOUBLE_MIN_SIZE) || !profile->used_kvd_split_data) return -EIO;
@@@ -1196,31 -1277,31 +1196,31 @@@ * Both sizes must be a multiplications of the * granularity from the profile. */ - doubles_size = (resources->kvd_size - linear_size); - doubles_size *= profile->kvd_hash_double_parts; - doubles_size /= (profile->kvd_hash_double_parts + - profile->kvd_hash_single_parts); - doubles_size /= profile->kvd_hash_granularity; - doubles_size *= profile->kvd_hash_granularity; - singles_size = resources->kvd_size - doubles_size - - linear_size; + double_size = MLXSW_RES_GET(res, KVD_SIZE) - linear_size; + double_size *= profile->kvd_hash_double_parts; + double_size /= profile->kvd_hash_double_parts + + profile->kvd_hash_single_parts; + double_size /= profile->kvd_hash_granularity; + double_size *= profile->kvd_hash_granularity; + single_size = MLXSW_RES_GET(res, KVD_SIZE) - double_size - + linear_size;
/* Check results are legal. */ - if (singles_size < resources->kvd_single_min_size || - doubles_size < resources->kvd_double_min_size || - resources->kvd_size < linear_size) + if (single_size < MLXSW_RES_GET(res, KVD_SINGLE_MIN_SIZE) || + double_size < MLXSW_RES_GET(res, KVD_DOUBLE_MIN_SIZE) || + MLXSW_RES_GET(res, KVD_SIZE) < linear_size) return -EIO;
- resources->kvd_single_size = singles_size; - resources->kvd_double_size = doubles_size; - resources->kvd_linear_size = linear_size; + MLXSW_RES_SET(res, KVD_SINGLE_SIZE, single_size); + MLXSW_RES_SET(res, KVD_DOUBLE_SIZE, double_size); + MLXSW_RES_SET(res, KVD_LINEAR_SIZE, linear_size);
return 0; }
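The KVD sizing above splits whatever remains of the key-value database after the linear region between the "double" and "single" hash areas in the ratio kvd_hash_double_parts : kvd_hash_single_parts, rounding the double area down to the profile granularity (divide then multiply). A runnable walk-through with invented numbers (real sizes come from the firmware resource query):

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
            uint32_t kvd_size = 240 * 1024;         /* illustrative entries */
            uint32_t linear_size = 96 * 1024;
            uint32_t double_parts = 1, single_parts = 2;
            uint32_t granularity = 128;

            uint32_t double_size = kvd_size - linear_size;  /* 144K left */
            double_size *= double_parts;
            double_size /= double_parts + single_parts;     /* 1/3 -> 48K */
            double_size /= granularity;     /* round down to granularity */
            double_size *= granularity;
            uint32_t single_size = kvd_size - double_size - linear_size;

            printf("double=%u single=%u\n", double_size, single_size);
            /* prints double=49152 single=98304, i.e. 48K and 96K */
            return 0;
    }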
static int mlxsw_pci_config_profile(struct mlxsw_pci *mlxsw_pci, char *mbox, const struct mlxsw_config_profile *profile, - struct mlxsw_resources *resources) + struct mlxsw_res *res) { int i; int err; @@@ -1309,22 -1390,22 +1309,22 @@@ mlxsw_cmd_mbox_config_profile_adaptive_routing_group_cap_set( mbox, profile->adaptive_routing_group_cap); } - if (resources->kvd_size_valid) { - err = mlxsw_pci_profile_get_kvd_sizes(profile, resources); + if (MLXSW_RES_VALID(res, KVD_SIZE)) { + err = mlxsw_pci_profile_get_kvd_sizes(profile, res); if (err) return err;
mlxsw_cmd_mbox_config_profile_set_kvd_linear_size_set(mbox, 1); mlxsw_cmd_mbox_config_profile_kvd_linear_size_set(mbox, - resources->kvd_linear_size); + MLXSW_RES_GET(res, KVD_LINEAR_SIZE)); mlxsw_cmd_mbox_config_profile_set_kvd_hash_single_size_set(mbox, 1); mlxsw_cmd_mbox_config_profile_kvd_hash_single_size_set(mbox, - resources->kvd_single_size); + MLXSW_RES_GET(res, KVD_SINGLE_SIZE)); mlxsw_cmd_mbox_config_profile_set_kvd_hash_double_size_set( mbox, 1); mlxsw_cmd_mbox_config_profile_kvd_hash_double_size_set(mbox, - resources->kvd_double_size); + MLXSW_RES_GET(res, KVD_DOUBLE_SIZE)); }
for (i = 0; i < MLXSW_CONFIG_PROFILE_SWID_COUNT; i++) @@@ -1462,7 -1543,7 +1462,7 @@@ static void mlxsw_pci_mbox_free(struct
static int mlxsw_pci_init(void *bus_priv, struct mlxsw_core *mlxsw_core, const struct mlxsw_config_profile *profile, - struct mlxsw_resources *resources) + struct mlxsw_res *res) { struct mlxsw_pci *mlxsw_pci = bus_priv; struct pci_dev *pdev = mlxsw_pci->pdev; @@@ -1521,12 -1602,12 +1521,12 @@@ if (err) goto err_boardinfo;
- err = mlxsw_pci_resources_query(mlxsw_pci, mbox, resources, + err = mlxsw_pci_resources_query(mlxsw_pci, mbox, res, profile->resource_query_enable); if (err) goto err_query_resources;
- err = mlxsw_pci_config_profile(mlxsw_pci, mbox, profile, resources); + err = mlxsw_pci_config_profile(mlxsw_pci, mbox, profile, res); if (err) goto err_config_profile;
@@@ -1536,7 -1617,7 +1536,7 @@@
err = request_irq(mlxsw_pci->msix_entry.vector, mlxsw_pci_eq_irq_handler, 0, - mlxsw_pci_driver_name, mlxsw_pci); + mlxsw_pci->bus_info.device_kind, mlxsw_pci); if (err) { dev_err(&pdev->dev, "IRQ request failed\n"); goto err_request_eq_irq; @@@ -1757,11 -1838,17 +1757,17 @@@ static const struct mlxsw_bus mlxsw_pci .cmd_exec = mlxsw_pci_cmd_exec, };
- static int mlxsw_pci_sw_reset(struct mlxsw_pci *mlxsw_pci) + static int mlxsw_pci_sw_reset(struct mlxsw_pci *mlxsw_pci, + const struct pci_device_id *id) { unsigned long end;
mlxsw_pci_write32(mlxsw_pci, SW_RESET, MLXSW_PCI_SW_RESET_RST_BIT); + if (id->device == PCI_DEVICE_ID_MELLANOX_SWITCHX2) { + msleep(MLXSW_PCI_SW_RESET_TIMEOUT_MSECS); + return 0; + } + wmb(); /* reset needs to be written before we read control register */ end = jiffies + msecs_to_jiffies(MLXSW_PCI_SW_RESET_TIMEOUT_MSECS); do { @@@ -1776,7 -1863,6 +1782,7 @@@
static int mlxsw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) { + const char *driver_name = pdev->driver->name; struct mlxsw_pci *mlxsw_pci; int err;
@@@ -1790,7 -1876,7 +1796,7 @@@ goto err_pci_enable_device; }
- err = pci_request_regions(pdev, mlxsw_pci_driver_name); + err = pci_request_regions(pdev, driver_name); if (err) { dev_err(&pdev->dev, "pci_request_regions failed\n"); goto err_pci_request_regions; @@@ -1829,7 -1915,7 +1835,7 @@@ mlxsw_pci->pdev = pdev; pci_set_drvdata(pdev, mlxsw_pci);
- err = mlxsw_pci_sw_reset(mlxsw_pci); + err = mlxsw_pci_sw_reset(mlxsw_pci, id); if (err) { dev_err(&pdev->dev, "Software reset failed\n"); goto err_sw_reset; @@@ -1841,7 -1927,7 +1847,7 @@@ goto err_msix_init; }
- mlxsw_pci->bus_info.device_kind = mlxsw_pci_device_kind_get(id); + mlxsw_pci->bus_info.device_kind = driver_name; mlxsw_pci->bus_info.device_name = pci_name(mlxsw_pci->pdev); mlxsw_pci->bus_info.dev = &pdev->dev;
@@@ -1893,30 -1979,33 +1899,30 @@@ static void mlxsw_pci_remove(struct pci kfree(mlxsw_pci); }
-static struct pci_driver mlxsw_pci_driver = { - .name = mlxsw_pci_driver_name, - .id_table = mlxsw_pci_id_table, - .probe = mlxsw_pci_probe, - .remove = mlxsw_pci_remove, -}; +int mlxsw_pci_driver_register(struct pci_driver *pci_driver) +{ + pci_driver->probe = mlxsw_pci_probe; + pci_driver->remove = mlxsw_pci_remove; + return pci_register_driver(pci_driver); +} +EXPORT_SYMBOL(mlxsw_pci_driver_register);
-static int __init mlxsw_pci_module_init(void) +void mlxsw_pci_driver_unregister(struct pci_driver *pci_driver) { - int err; + pci_unregister_driver(pci_driver); +} +EXPORT_SYMBOL(mlxsw_pci_driver_unregister);
+static int __init mlxsw_pci_module_init(void) +{ mlxsw_pci_dbg_root = debugfs_create_dir(mlxsw_pci_driver_name, NULL); if (!mlxsw_pci_dbg_root) return -ENOMEM; - err = pci_register_driver(&mlxsw_pci_driver); - if (err) - goto err_register_driver; return 0; - -err_register_driver: - debugfs_remove_recursive(mlxsw_pci_dbg_root); - return err; }
static void __exit mlxsw_pci_module_exit(void) { - pci_unregister_driver(&mlxsw_pci_driver); debugfs_remove_recursive(mlxsw_pci_dbg_root); }
@@@ -1926,3 -2015,4 +1932,3 @@@ module_exit(mlxsw_pci_module_exit) MODULE_LICENSE("Dual BSD/GPL"); MODULE_AUTHOR("Jiri Pirko jiri@mellanox.com"); MODULE_DESCRIPTION("Mellanox switch PCI interface driver"); -MODULE_DEVICE_TABLE(pci, mlxsw_pci_id_table); diff --combined drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c index 107934f,4573da2..348c773 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c @@@ -320,6 -320,8 +320,8 @@@ mlxsw_sp_lpm_tree_create(struct mlxsw_s lpm_tree); if (err) goto err_left_struct_set; + memcpy(&lpm_tree->prefix_usage, prefix_usage, + sizeof(lpm_tree->prefix_usage)); return lpm_tree;
err_left_struct_set: @@@ -343,7 -345,8 +345,8 @@@ mlxsw_sp_lpm_tree_get(struct mlxsw_sp *
for (i = 0; i < MLXSW_SP_LPM_TREE_COUNT; i++) { lpm_tree = &mlxsw_sp->router.lpm_trees[i]; - if (lpm_tree->proto == proto && + if (lpm_tree->ref_count != 0 && + lpm_tree->proto == proto && mlxsw_sp_prefix_usage_eq(&lpm_tree->prefix_usage, prefix_usage)) goto inc_ref_count; @@@ -379,10 -382,12 +382,10 @@@ static void mlxsw_sp_lpm_init(struct ml
static struct mlxsw_sp_vr *mlxsw_sp_vr_find_unused(struct mlxsw_sp *mlxsw_sp) { - struct mlxsw_resources *resources; struct mlxsw_sp_vr *vr; int i;
- resources = mlxsw_core_resources_get(mlxsw_sp->core); - for (i = 0; i < resources->max_virtual_routers; i++) { + for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) { vr = &mlxsw_sp->router.vrs[i]; if (!vr->used) return vr; @@@ -424,12 -429,14 +427,12 @@@ static struct mlxsw_sp_vr *mlxsw_sp_vr_ u32 tb_id, enum mlxsw_sp_l3proto proto) { - struct mlxsw_resources *resources; struct mlxsw_sp_vr *vr; int i;
tb_id = mlxsw_sp_fix_tb_id(tb_id);
- resources = mlxsw_core_resources_get(mlxsw_sp->core); - for (i = 0; i < resources->max_virtual_routers; i++) { + for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) { vr = &mlxsw_sp->router.vrs[i]; if (vr->used && vr->proto == proto && vr->tb_id == tb_id) return vr; @@@ -565,20 -572,21 +568,20 @@@ static void mlxsw_sp_vr_put(struct mlxs
static int mlxsw_sp_vrs_init(struct mlxsw_sp *mlxsw_sp) { - struct mlxsw_resources *resources; struct mlxsw_sp_vr *vr; + u64 max_vrs; int i;
- resources = mlxsw_core_resources_get(mlxsw_sp->core); - if (!resources->max_virtual_routers_valid) + if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_VRS)) return -EIO;
- mlxsw_sp->router.vrs = kcalloc(resources->max_virtual_routers, - sizeof(struct mlxsw_sp_vr), + max_vrs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); + mlxsw_sp->router.vrs = kcalloc(max_vrs, sizeof(struct mlxsw_sp_vr), GFP_KERNEL); if (!mlxsw_sp->router.vrs) return -ENOMEM;
- for (i = 0; i < resources->max_virtual_routers; i++) { + for (i = 0; i < max_vrs; i++) { vr = &mlxsw_sp->router.vrs[i]; vr->id = i; } @@@ -1815,19 -1823,17 +1818,17 @@@ err_fib_entry_insert return err; }
- static int mlxsw_sp_router_fib4_del(struct mlxsw_sp *mlxsw_sp, - struct fib_entry_notifier_info *fen_info) + static void mlxsw_sp_router_fib4_del(struct mlxsw_sp *mlxsw_sp, + struct fib_entry_notifier_info *fen_info) { struct mlxsw_sp_fib_entry *fib_entry;
if (mlxsw_sp->router.aborted) - return 0; + return;
fib_entry = mlxsw_sp_fib_entry_find(mlxsw_sp, fen_info); - if (!fib_entry) { - dev_warn(mlxsw_sp->bus_info->dev, "Failed to find FIB4 entry being removed.\n"); - return -ENOENT; - } + if (!fib_entry) + return;
if (fib_entry->ref_count == 1) { mlxsw_sp_fib_entry_del(mlxsw_sp, fib_entry); @@@ -1835,7 -1841,6 +1836,6 @@@ }
mlxsw_sp_fib_entry_put(mlxsw_sp, fib_entry); - return 0; }
static int mlxsw_sp_router_set_abort_trap(struct mlxsw_sp *mlxsw_sp) @@@ -1857,7 -1862,8 +1857,8 @@@ if (err) return err;
- mlxsw_reg_raltb_pack(raltb_pl, 0, MLXSW_REG_RALXX_PROTOCOL_IPV4, 0); + mlxsw_reg_raltb_pack(raltb_pl, 0, MLXSW_REG_RALXX_PROTOCOL_IPV4, + MLXSW_SP_LPM_TREE_MIN); err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raltb), raltb_pl); if (err) return err; @@@ -1870,13 -1876,15 +1871,13 @@@
static void mlxsw_sp_router_fib4_abort(struct mlxsw_sp *mlxsw_sp) { - struct mlxsw_resources *resources; struct mlxsw_sp_fib_entry *fib_entry; struct mlxsw_sp_fib_entry *tmp; struct mlxsw_sp_vr *vr; int i; int err;
- resources = mlxsw_core_resources_get(mlxsw_sp->core); - for (i = 0; i < resources->max_virtual_routers; i++) { + for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) { vr = &mlxsw_sp->router.vrs[i]; if (!vr->used) continue; @@@ -1901,21 -1909,21 +1902,21 @@@
static int __mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp) { - struct mlxsw_resources *resources; char rgcr_pl[MLXSW_REG_RGCR_LEN]; + u64 max_rifs; int err;
- resources = mlxsw_core_resources_get(mlxsw_sp->core); - if (!resources->max_rif_valid) + if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_RIFS)) return -EIO;
- mlxsw_sp->rifs = kcalloc(resources->max_rif, - sizeof(struct mlxsw_sp_rif *), GFP_KERNEL); + max_rifs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); + mlxsw_sp->rifs = kcalloc(max_rifs, sizeof(struct mlxsw_sp_rif *), + GFP_KERNEL); if (!mlxsw_sp->rifs) return -ENOMEM;
mlxsw_reg_rgcr_pack(rgcr_pl, true); - mlxsw_reg_rgcr_max_router_interfaces_set(rgcr_pl, resources->max_rif); + mlxsw_reg_rgcr_max_router_interfaces_set(rgcr_pl, max_rifs); err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rgcr), rgcr_pl); if (err) goto err_rgcr_fail; @@@ -1929,13 -1937,15 +1930,13 @@@ err_rgcr_fail
static void __mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp) { - struct mlxsw_resources *resources; char rgcr_pl[MLXSW_REG_RGCR_LEN]; int i;
mlxsw_reg_rgcr_pack(rgcr_pl, false); mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rgcr), rgcr_pl);
- resources = mlxsw_core_resources_get(mlxsw_sp->core); - for (i = 0; i < resources->max_rif; i++) + for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++) WARN_ON_ONCE(mlxsw_sp->rifs[i]);
kfree(mlxsw_sp->rifs); @@@ -1980,7 -1990,7 +1981,7 @@@ int mlxsw_sp_router_init(struct mlxsw_s if (err) goto err_vrs_init;
- err = mlxsw_sp_neigh_init(mlxsw_sp); + err = mlxsw_sp_neigh_init(mlxsw_sp); if (err) goto err_neigh_init;
diff --combined drivers/net/ethernet/mellanox/mlxsw/switchx2.c index dc3c5ed,92bda87..8c8f5d8 --- a/drivers/net/ethernet/mellanox/mlxsw/switchx2.c +++ b/drivers/net/ethernet/mellanox/mlxsw/switchx2.c @@@ -37,7 -37,6 +37,7 @@@ #include <linux/kernel.h> #include <linux/module.h> #include <linux/types.h> +#include <linux/pci.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/slab.h> @@@ -47,7 -46,6 +47,7 @@@ #include <net/switchdev.h> #include <generated/utsrelease.h>
+#include "pci.h" #include "core.h" #include "reg.h" #include "port.h" @@@ -412,7 -410,7 +412,7 @@@ static void mlxsw_sx_port_get_drvinfo(s
struct mlxsw_sx_port_hw_stats { char str[ETH_GSTRING_LEN]; - u64 (*getter)(char *payload); + u64 (*getter)(const char *payload); };
static const struct mlxsw_sx_port_hw_stats mlxsw_sx_port_hw_stats[] = { @@@ -968,7 -966,6 +968,7 @@@ static int mlxsw_sx_port_create(struct dev = alloc_etherdev(sizeof(struct mlxsw_sx_port)); if (!dev) return -ENOMEM; + SET_NETDEV_DEV(dev, mlxsw_sx->bus_info->dev); mlxsw_sx_port = netdev_priv(dev); mlxsw_sx_port->dev = dev; mlxsw_sx_port->mlxsw_sx = mlxsw_sx; @@@ -997,9 -994,6 +997,9 @@@ dev->features |= NETIF_F_NETNS_LOCAL | NETIF_F_LLTX | NETIF_F_SG | NETIF_F_VLAN_CHALLENGED;
+ dev->min_mtu = 0; + dev->max_mtu = ETH_MAX_MTU; + /* Each packet needs to have a Tx header (metadata) on top all other * headers. */ @@@ -1094,6 -1088,7 +1094,7 @@@ err_port_stp_state_set err_port_admin_status_set: err_port_mtu_set: err_port_speed_set: + mlxsw_sx_port_swid_set(mlxsw_sx_port, MLXSW_PORT_SWID_DISABLED_PORT); err_port_swid_set: err_port_system_port_mapping_set: port_not_usable: @@@ -1547,7 -1542,8 +1548,7 @@@ static struct mlxsw_config_profile mlxs };
static struct mlxsw_driver mlxsw_sx_driver = { - .kind = MLXSW_DEVICE_KIND_SWITCHX2, - .owner = THIS_MODULE, + .kind = mlxsw_sx_driver_name, .priv_size = sizeof(struct mlxsw_sx), .init = mlxsw_sx_init, .fini = mlxsw_sx_fini, @@@ -1556,38 -1552,13 +1557,38 @@@ .profile = &mlxsw_sx_config_profile, };
+static const struct pci_device_id mlxsw_sx_pci_id_table[] = { + {PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_SWITCHX2), 0}, + {0, }, +}; + +static struct pci_driver mlxsw_sx_pci_driver = { + .name = mlxsw_sx_driver_name, + .id_table = mlxsw_sx_pci_id_table, +}; + static int __init mlxsw_sx_module_init(void) { - return mlxsw_core_driver_register(&mlxsw_sx_driver); + int err; + + err = mlxsw_core_driver_register(&mlxsw_sx_driver); + if (err) + return err; + + err = mlxsw_pci_driver_register(&mlxsw_sx_pci_driver); + if (err) + goto err_pci_driver_register; + + return 0; + +err_pci_driver_register: + mlxsw_core_driver_unregister(&mlxsw_sx_driver); + return err; }
static void __exit mlxsw_sx_module_exit(void) { + mlxsw_pci_driver_unregister(&mlxsw_sx_pci_driver); mlxsw_core_driver_unregister(&mlxsw_sx_driver); }
@@@ -1597,4 -1568,4 +1598,4 @@@ module_exit(mlxsw_sx_module_exit) MODULE_LICENSE("Dual BSD/GPL"); MODULE_AUTHOR("Jiri Pirko jiri@mellanox.com"); MODULE_DESCRIPTION("Mellanox SwitchX-2 driver"); -MODULE_MLXSW_DRIVER_ALIAS(MLXSW_DEVICE_KIND_SWITCHX2); +MODULE_DEVICE_TABLE(pci, mlxsw_sx_pci_id_table); diff --combined drivers/net/ethernet/qlogic/qed/qed_roce.c index b11beb5,f3a825a..6a353ff --- a/drivers/net/ethernet/qlogic/qed/qed_roce.c +++ b/drivers/net/ethernet/qlogic/qed/qed_roce.c @@@ -129,17 -129,12 +129,12 @@@ static void qed_bmap_release_id(struct } }
- u32 qed_rdma_get_sb_id(void *p_hwfn, u32 rel_sb_id) + static u32 qed_rdma_get_sb_id(void *p_hwfn, u32 rel_sb_id) { /* First sb id for RoCE is after all the l2 sb */ return FEAT_NUM((struct qed_hwfn *)p_hwfn, QED_PF_L2_QUE) + rel_sb_id; }
- u32 qed_rdma_query_cau_timer_res(void *rdma_cxt) - { - return QED_CAU_DEF_RX_TIMER_RES; - } - static int qed_rdma_alloc(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, struct qed_rdma_start_in_params *params) @@@ -162,7 -157,8 +157,8 @@@ p_hwfn->p_rdma_info = p_rdma_info; p_rdma_info->proto = PROTOCOLID_ROCE;
- num_cons = qed_cxt_get_proto_cid_count(p_hwfn, p_rdma_info->proto, 0); + num_cons = qed_cxt_get_proto_cid_count(p_hwfn, p_rdma_info->proto, + NULL);
p_rdma_info->num_qps = num_cons / 2;
@@@ -275,7 -271,7 +271,7 @@@ free_rdma_info return rc; }
- void qed_rdma_resc_free(struct qed_hwfn *p_hwfn) + static void qed_rdma_resc_free(struct qed_hwfn *p_hwfn) { struct qed_rdma_info *p_rdma_info = p_hwfn->p_rdma_info;
@@@ -527,6 -523,26 +523,26 @@@ static int qed_rdma_start_fw(struct qed return qed_spq_post(p_hwfn, p_ent, NULL); }
+ static int qed_rdma_alloc_tid(void *rdma_cxt, u32 *itid) + { + struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt; + int rc; + + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Allocate TID\n"); + + spin_lock_bh(&p_hwfn->p_rdma_info->lock); + rc = qed_rdma_bmap_alloc_id(p_hwfn, + &p_hwfn->p_rdma_info->tid_map, itid); + spin_unlock_bh(&p_hwfn->p_rdma_info->lock); + if (rc) + goto out; + + rc = qed_cxt_dynamic_ilt_alloc(p_hwfn, QED_ELEM_TASK, *itid); + out: + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Allocate TID - done, rc = %d\n", rc); + return rc; + } + static int qed_rdma_reserve_lkey(struct qed_hwfn *p_hwfn) { struct qed_rdma_device *dev = p_hwfn->p_rdma_info->dev; @@@ -573,7 -589,7 +589,7 @@@ static int qed_rdma_setup(struct qed_hw return qed_rdma_start_fw(p_hwfn, params, p_ptt); }
- int qed_rdma_stop(void *rdma_cxt) + static int qed_rdma_stop(void *rdma_cxt) { struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt; struct rdma_close_func_ramrod_data *p_ramrod; @@@ -629,8 -645,8 +645,8 @@@ out return rc; }
- int qed_rdma_add_user(void *rdma_cxt, - struct qed_rdma_add_user_out_params *out_params) + static int qed_rdma_add_user(void *rdma_cxt, + struct qed_rdma_add_user_out_params *out_params) { struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt; u32 dpi_start_offset; @@@ -664,7 -680,7 +680,7 @@@ return rc; }
- struct qed_rdma_port *qed_rdma_query_port(void *rdma_cxt) + static struct qed_rdma_port *qed_rdma_query_port(void *rdma_cxt) { struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt; struct qed_rdma_port *p_port = p_hwfn->p_rdma_info->port; @@@ -680,7 -696,7 +696,7 @@@ return p_port; }
- struct qed_rdma_device *qed_rdma_query_device(void *rdma_cxt) + static struct qed_rdma_device *qed_rdma_query_device(void *rdma_cxt) { struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
@@@ -690,7 -706,7 +706,7 @@@ return p_hwfn->p_rdma_info->dev; }
- void qed_rdma_free_tid(void *rdma_cxt, u32 itid) + static void qed_rdma_free_tid(void *rdma_cxt, u32 itid) { struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
@@@ -701,27 -717,7 +717,7 @@@ spin_unlock_bh(&p_hwfn->p_rdma_info->lock); }
- int qed_rdma_alloc_tid(void *rdma_cxt, u32 *itid) - { - struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt; - int rc; - - DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Allocate TID\n"); - - spin_lock_bh(&p_hwfn->p_rdma_info->lock); - rc = qed_rdma_bmap_alloc_id(p_hwfn, - &p_hwfn->p_rdma_info->tid_map, itid); - spin_unlock_bh(&p_hwfn->p_rdma_info->lock); - if (rc) - goto out; - - rc = qed_cxt_dynamic_ilt_alloc(p_hwfn, QED_ELEM_TASK, *itid); - out: - DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Allocate TID - done, rc = %d\n", rc); - return rc; - } - - void qed_rdma_cnq_prod_update(void *rdma_cxt, u8 qz_offset, u16 prod) + static void qed_rdma_cnq_prod_update(void *rdma_cxt, u8 qz_offset, u16 prod) { struct qed_hwfn *p_hwfn; u16 qz_num; @@@ -816,7 -812,7 +812,7 @@@ static int qed_rdma_get_int(struct qed_ return 0; }
- int qed_rdma_alloc_pd(void *rdma_cxt, u16 *pd) + static int qed_rdma_alloc_pd(void *rdma_cxt, u16 *pd) { struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt; u32 returned_id; @@@ -836,7 -832,7 +832,7 @@@ return rc; }
- void qed_rdma_free_pd(void *rdma_cxt, u16 pd) + static void qed_rdma_free_pd(void *rdma_cxt, u16 pd) { struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
@@@ -873,8 -869,9 +869,9 @@@ qed_rdma_toggle_bit_create_resize_cq(st return toggle_bit; }
- int qed_rdma_create_cq(void *rdma_cxt, - struct qed_rdma_create_cq_in_params *params, u16 *icid) + static int qed_rdma_create_cq(void *rdma_cxt, + struct qed_rdma_create_cq_in_params *params, + u16 *icid) { struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt; struct qed_rdma_info *p_info = p_hwfn->p_rdma_info; @@@ -957,98 -954,10 +954,10 @@@ err return rc; }
- int qed_rdma_resize_cq(void *rdma_cxt, - struct qed_rdma_resize_cq_in_params *in_params, - struct qed_rdma_resize_cq_out_params *out_params) - { - struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt; - struct rdma_resize_cq_output_params *p_ramrod_res; - struct rdma_resize_cq_ramrod_data *p_ramrod; - enum qed_rdma_toggle_bit toggle_bit; - struct qed_sp_init_data init_data; - struct qed_spq_entry *p_ent; - dma_addr_t ramrod_res_phys; - u8 fw_return_code; - int rc = -ENOMEM; - - DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", in_params->icid); - - p_ramrod_res = - (struct rdma_resize_cq_output_params *) - dma_alloc_coherent(&p_hwfn->cdev->pdev->dev, - sizeof(struct rdma_resize_cq_output_params), - &ramrod_res_phys, GFP_KERNEL); - if (!p_ramrod_res) { - DP_NOTICE(p_hwfn, - "qed resize cq failed: cannot allocate memory (ramrod)\n"); - return rc; - } - - /* Get SPQ entry */ - memset(&init_data, 0, sizeof(init_data)); - init_data.cid = in_params->icid; - init_data.opaque_fid = p_hwfn->hw_info.opaque_fid; - init_data.comp_mode = QED_SPQ_MODE_EBLOCK; - - rc = qed_sp_init_request(p_hwfn, &p_ent, - RDMA_RAMROD_RESIZE_CQ, - p_hwfn->p_rdma_info->proto, &init_data); - if (rc) - goto err; - - p_ramrod = &p_ent->ramrod.rdma_resize_cq; - - p_ramrod->flags = 0; - - /* toggle the bit for every resize or create cq for a given icid */ - toggle_bit = qed_rdma_toggle_bit_create_resize_cq(p_hwfn, - in_params->icid); - - SET_FIELD(p_ramrod->flags, - RDMA_RESIZE_CQ_RAMROD_DATA_TOGGLE_BIT, toggle_bit); - - SET_FIELD(p_ramrod->flags, - RDMA_RESIZE_CQ_RAMROD_DATA_IS_TWO_LEVEL_PBL, - in_params->pbl_two_level); - - p_ramrod->pbl_log_page_size = in_params->pbl_page_size_log - 12; - p_ramrod->pbl_num_pages = cpu_to_le16(in_params->pbl_num_pages); - p_ramrod->max_cqes = cpu_to_le32(in_params->cq_size); - DMA_REGPAIR_LE(p_ramrod->pbl_addr, in_params->pbl_ptr); - DMA_REGPAIR_LE(p_ramrod->output_params_addr, ramrod_res_phys); - - rc = qed_spq_post(p_hwfn, p_ent, &fw_return_code); - if (rc) - goto err; - - if (fw_return_code != RDMA_RETURN_OK) { - DP_NOTICE(p_hwfn, "fw_return_code = %d\n", fw_return_code); - rc = -EINVAL; - goto err; - } - - out_params->prod = le32_to_cpu(p_ramrod_res->old_cq_prod); - out_params->cons = le32_to_cpu(p_ramrod_res->old_cq_cons); - - dma_free_coherent(&p_hwfn->cdev->pdev->dev, - sizeof(struct rdma_resize_cq_output_params), - p_ramrod_res, ramrod_res_phys); - - DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Resized CQ, rc = %d\n", rc); - - return rc; - - err: dma_free_coherent(&p_hwfn->cdev->pdev->dev, - sizeof(struct rdma_resize_cq_output_params), - p_ramrod_res, ramrod_res_phys); - DP_NOTICE(p_hwfn, "Resized CQ, Failed - rc = %d\n", rc); - - return rc; - } - - int qed_rdma_destroy_cq(void *rdma_cxt, - struct qed_rdma_destroy_cq_in_params *in_params, - struct qed_rdma_destroy_cq_out_params *out_params) + static int + qed_rdma_destroy_cq(void *rdma_cxt, + struct qed_rdma_destroy_cq_in_params *in_params, + struct qed_rdma_destroy_cq_out_params *out_params) { struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt; struct rdma_destroy_cq_output_params *p_ramrod_res; @@@ -1169,7 -1078,7 +1078,7 @@@ static enum roce_flavor qed_roce_mode_t return flavor; }
- int qed_roce_alloc_cid(struct qed_hwfn *p_hwfn, u16 *cid) + static int qed_roce_alloc_cid(struct qed_hwfn *p_hwfn, u16 *cid) { struct qed_rdma_info *p_rdma_info = p_hwfn->p_rdma_info; u32 responder_icid; @@@ -1793,9 -1702,9 +1702,9 @@@ err return rc; }
- int qed_roce_query_qp(struct qed_hwfn *p_hwfn, - struct qed_rdma_qp *qp, - struct qed_rdma_query_qp_out_params *out_params) + static int qed_roce_query_qp(struct qed_hwfn *p_hwfn, + struct qed_rdma_qp *qp, + struct qed_rdma_query_qp_out_params *out_params) { struct roce_query_qp_resp_output_params *p_resp_ramrod_res; struct roce_query_qp_req_output_params *p_req_ramrod_res; @@@ -1936,7 -1845,7 +1845,7 @@@ err_resp return rc; }
- int qed_roce_destroy_qp(struct qed_hwfn *p_hwfn, struct qed_rdma_qp *qp) + static int qed_roce_destroy_qp(struct qed_hwfn *p_hwfn, struct qed_rdma_qp *qp) { u32 num_invalidated_mw = 0; u32 num_bound_mw = 0; @@@ -1985,9 -1894,9 +1894,9 @@@ return 0; }
- int qed_rdma_query_qp(void *rdma_cxt, - struct qed_rdma_qp *qp, - struct qed_rdma_query_qp_out_params *out_params) + static int qed_rdma_query_qp(void *rdma_cxt, + struct qed_rdma_qp *qp, + struct qed_rdma_query_qp_out_params *out_params) { struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt; int rc; @@@ -2022,7 -1931,7 +1931,7 @@@ return rc; }
- int qed_rdma_destroy_qp(void *rdma_cxt, struct qed_rdma_qp *qp) + static int qed_rdma_destroy_qp(void *rdma_cxt, struct qed_rdma_qp *qp) { struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt; int rc = 0; @@@ -2038,7 -1947,7 +1947,7 @@@ return rc; }
- struct qed_rdma_qp * + static struct qed_rdma_qp * qed_rdma_create_qp(void *rdma_cxt, struct qed_rdma_create_qp_in_params *in_params, struct qed_rdma_create_qp_out_params *out_params) @@@ -2215,9 -2124,9 +2124,9 @@@ static int qed_roce_modify_qp(struct qe return rc; }
- int qed_rdma_modify_qp(void *rdma_cxt, - struct qed_rdma_qp *qp, - struct qed_rdma_modify_qp_in_params *params) + static int qed_rdma_modify_qp(void *rdma_cxt, + struct qed_rdma_qp *qp, + struct qed_rdma_modify_qp_in_params *params) { struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt; enum qed_roce_qp_state prev_state; @@@ -2312,8 -2221,9 +2221,9 @@@ return rc; }
- int qed_rdma_register_tid(void *rdma_cxt, - struct qed_rdma_register_tid_in_params *params) + static int + qed_rdma_register_tid(void *rdma_cxt, + struct qed_rdma_register_tid_in_params *params) { struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt; struct rdma_register_tid_ramrod_data *p_ramrod; @@@ -2450,7 -2360,7 +2360,7 @@@ return rc; }
- int qed_rdma_deregister_tid(void *rdma_cxt, u32 itid) + static int qed_rdma_deregister_tid(void *rdma_cxt, u32 itid) { struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt; struct rdma_deregister_tid_ramrod_data *p_ramrod; @@@ -2561,7 -2471,8 +2471,8 @@@ void qed_rdma_dpm_bar(struct qed_hwfn * qed_rdma_dpm_conf(p_hwfn, p_ptt); }
- int qed_rdma_start(void *rdma_cxt, struct qed_rdma_start_in_params *params) + static int qed_rdma_start(void *rdma_cxt, + struct qed_rdma_start_in_params *params) { struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt; struct qed_ptt *p_ptt; @@@ -2601,7 -2512,7 +2512,7 @@@ static int qed_rdma_init(struct qed_de return qed_rdma_start(QED_LEADING_HWFN(cdev), params); }
- void qed_rdma_remove_user(void *rdma_cxt, u16 dpi) + static void qed_rdma_remove_user(void *rdma_cxt, u16 dpi) { struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
@@@ -2747,6 -2658,7 +2658,6 @@@ static int qed_roce_ll2_start(struct qe DP_ERR(cdev, "qed roce ll2 start: failed memory allocation\n"); return -ENOMEM; } - memset(roce_ll2, 0, sizeof(*roce_ll2)); roce_ll2->handle = QED_LL2_UNUSED_HANDLE; roce_ll2->cbs = params->cbs; roce_ll2->cb_cookie = params->cb_cookie; @@@ -2808,11 -2720,6 +2719,6 @@@ static int qed_roce_ll2_stop(struct qed struct qed_roce_ll2_info *roce_ll2 = hwfn->ll2; int rc;
- if (!cdev) { - DP_ERR(cdev, "qed roce ll2 stop: invalid cdev\n"); - return -EINVAL; - } - if (roce_ll2->handle == QED_LL2_UNUSED_HANDLE) { DP_ERR(cdev, "qed roce ll2 stop: cannot stop an unused LL2\n"); return -EINVAL; @@@ -2849,7 -2756,7 +2755,7 @@@ static int qed_roce_ll2_tx(struct qed_d int rc; int i;
- if (!cdev || !pkt || !params) { + if (!pkt || !params) { DP_ERR(cdev, "roce ll2 tx: failed tx because one of the following is NULL - drv=%p, pkt=%p, params=%p\n", cdev, pkt, params); diff --combined drivers/net/ethernet/qlogic/qed/qed_sp.h index 27c450f,b2c08e4..9c897bc --- a/drivers/net/ethernet/qlogic/qed/qed_sp.h +++ b/drivers/net/ethernet/qlogic/qed/qed_sp.h @@@ -80,7 -80,6 +80,6 @@@ union ramrod_data struct roce_destroy_qp_resp_ramrod_data roce_destroy_qp_resp; struct roce_destroy_qp_req_ramrod_data roce_destroy_qp_req; struct rdma_create_cq_ramrod_data rdma_create_cq; - struct rdma_resize_cq_ramrod_data rdma_resize_cq; struct rdma_destroy_cq_ramrod_data rdma_destroy_cq; struct rdma_srq_create_ramrod_data rdma_create_srq; struct rdma_srq_destroy_ramrod_data rdma_destroy_srq; @@@ -111,8 -110,8 +110,8 @@@ union qed_spq_req_comp };
struct qed_spq_comp_done { - u64 done; - u8 fw_return_code; + unsigned int done; + u8 fw_return_code; };
struct qed_spq_entry { diff --combined drivers/net/ethernet/qlogic/qed/qed_spq.c index 6c05402,9fbaf94..019960b --- a/drivers/net/ethernet/qlogic/qed/qed_spq.c +++ b/drivers/net/ethernet/qlogic/qed/qed_spq.c @@@ -28,20 -28,14 +28,18 @@@ #include "qed_reg_addr.h" #include "qed_sp.h" #include "qed_sriov.h" - #if IS_ENABLED(CONFIG_INFINIBAND_QEDR) #include "qed_roce.h" - #endif
/*************************************************************************** * Structures & Definitions ***************************************************************************/
#define SPQ_HIGH_PRI_RESERVE_DEFAULT (1) -#define SPQ_BLOCK_SLEEP_LENGTH (1000) + +#define SPQ_BLOCK_DELAY_MAX_ITER (10) +#define SPQ_BLOCK_DELAY_US (10) +#define SPQ_BLOCK_SLEEP_MAX_ITER (1000) +#define SPQ_BLOCK_SLEEP_MS (5)
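With these constants the reworked wait is two-phase: a quick busy-poll of at most SPQ_BLOCK_DELAY_MAX_ITER * SPQ_BLOCK_DELAY_US = 10 * 10 us = 100 us for ramrods that complete almost immediately, then sleeping polls of up to SPQ_BLOCK_SLEEP_MAX_ITER * SPQ_BLOCK_SLEEP_MS = 1000 * 5 ms = 5 s before an MCP drain is requested. This replaces the old single loop of 1000 usleep_range(5000, 10000) iterations, i.e. roughly 5-10 s of worst-case sleeping.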
/*************************************************************************** * Blocking Imp. (BLOCK/EBLOCK mode) @@@ -54,88 -48,60 +52,88 @@@ static void qed_spq_blocking_cb(struct
comp_done = (struct qed_spq_comp_done *)cookie;
- comp_done->done = 0x1; - comp_done->fw_return_code = fw_return_code; + comp_done->fw_return_code = fw_return_code;
- /* make update visible to waiting thread */ - smp_wmb(); + /* Make sure completion done is visible on waiting thread */ + smp_store_release(&comp_done->done, 0x1); }
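The callback now writes the FW return code first and only then raises the done flag with smp_store_release(), pairing with the READ_ONCE() plus read barrier on the waiter side in __qed_spq_block() below (this is also why done shrinks from u64 to unsigned int in qed_sp.h above: it is a flag, not a counter). The canonical shape of this pairing, sketched with illustrative names; smp_load_acquire() is the more common reader half:

	static unsigned int flag;
	static u8 payload;

	static void producer(u8 value)
	{
		payload = value;		/* 1. write the data... */
		smp_store_release(&flag, 1);	/* 2. ...then publish the flag */
	}

	static int consumer(u8 *out)
	{
		if (smp_load_acquire(&flag) != 1)	/* pairs with the release */
			return -EAGAIN;
		*out = payload;		/* guaranteed to see step 1 */
		return 0;
	}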
-static int qed_spq_block(struct qed_hwfn *p_hwfn, - struct qed_spq_entry *p_ent, - u8 *p_fw_ret) +static int __qed_spq_block(struct qed_hwfn *p_hwfn, + struct qed_spq_entry *p_ent, + u8 *p_fw_ret, bool sleep_between_iter) { - int sleep_count = SPQ_BLOCK_SLEEP_LENGTH; struct qed_spq_comp_done *comp_done; - int rc; + u32 iter_cnt;
comp_done = (struct qed_spq_comp_done *)p_ent->comp_cb.cookie; - while (sleep_count) { - /* validate we receive completion update */ - smp_rmb(); - if (comp_done->done == 1) { + iter_cnt = sleep_between_iter ? SPQ_BLOCK_SLEEP_MAX_ITER + : SPQ_BLOCK_DELAY_MAX_ITER; + + while (iter_cnt--) { + /* Validate we receive completion update */ + if (READ_ONCE(comp_done->done) == 1) { + /* Read updated FW return value */ + smp_read_barrier_depends(); if (p_fw_ret) *p_fw_ret = comp_done->fw_return_code; return 0; } - usleep_range(5000, 10000); - sleep_count--; + + if (sleep_between_iter) + msleep(SPQ_BLOCK_SLEEP_MS); + else + udelay(SPQ_BLOCK_DELAY_US); + } + + return -EBUSY; +} + +static int qed_spq_block(struct qed_hwfn *p_hwfn, + struct qed_spq_entry *p_ent, + u8 *p_fw_ret, bool skip_quick_poll) +{ + struct qed_spq_comp_done *comp_done; + int rc; + + /* A relatively short polling period w/o sleeping, to allow the FW to + * complete the ramrod and thus possibly to avoid the following sleeps. + */ + if (!skip_quick_poll) { + rc = __qed_spq_block(p_hwfn, p_ent, p_fw_ret, false); + if (!rc) + return 0; }
+ /* Move to polling with a sleeping period between iterations */ + rc = __qed_spq_block(p_hwfn, p_ent, p_fw_ret, true); + if (!rc) + return 0; + DP_INFO(p_hwfn, "Ramrod is stuck, requesting MCP drain\n"); rc = qed_mcp_drain(p_hwfn, p_hwfn->p_main_ptt); - if (rc != 0) + if (rc) { DP_NOTICE(p_hwfn, "MCP drain failed\n"); + goto err; + }
/* Retry after drain */ - sleep_count = SPQ_BLOCK_SLEEP_LENGTH; - while (sleep_count) { - /* validate we receive completion update */ - smp_rmb(); - if (comp_done->done == 1) { - if (p_fw_ret) - *p_fw_ret = comp_done->fw_return_code; - return 0; - } - usleep_range(5000, 10000); - sleep_count--; - } + rc = __qed_spq_block(p_hwfn, p_ent, p_fw_ret, true); + if (!rc) + return 0;
+ comp_done = (struct qed_spq_comp_done *)p_ent->comp_cb.cookie; if (comp_done->done == 1) { if (p_fw_ret) *p_fw_ret = comp_done->fw_return_code; return 0; } - - DP_NOTICE(p_hwfn, "Ramrod is stuck, MCP drain failed\n"); +err: + DP_NOTICE(p_hwfn, + "Ramrod is stuck [CID %08x cmd %02x protocol %02x echo %04x]\n", + le32_to_cpu(p_ent->elem.hdr.cid), + p_ent->elem.hdr.cmd_id, + p_ent->elem.hdr.protocol_id, + le16_to_cpu(p_ent->elem.hdr.echo));
return -EBUSY; } @@@ -272,11 -238,9 +270,9 @@@ qed_async_event_completion(struct qed_h struct event_ring_entry *p_eqe) { switch (p_eqe->protocol_id) { - #if IS_ENABLED(CONFIG_INFINIBAND_QEDR) case PROTOCOLID_ROCE: qed_async_roce_event(p_hwfn, p_eqe); return 0; - #endif case PROTOCOLID_COMMON: return qed_sriov_eqe_event(p_hwfn, p_eqe->opcode, @@@ -761,8 -725,7 +757,8 @@@ int qed_spq_post(struct qed_hwfn *p_hwf * access p_ent here to see whether it's successful or not. * Thus, after gaining the answer perform the cleanup here. */ - rc = qed_spq_block(p_hwfn, p_ent, fw_return_code); + rc = qed_spq_block(p_hwfn, p_ent, fw_return_code, + p_ent->queue == &p_spq->unlimited_pending);
if (p_ent->queue == &p_spq->unlimited_pending) { /* This is an allocated p_ent which does not need to diff --combined drivers/net/ethernet/qlogic/qede/qede.h index 9135b9d,974689a..cf8d354 --- a/drivers/net/ethernet/qlogic/qede/qede.h +++ b/drivers/net/ethernet/qlogic/qede/qede.h @@@ -320,7 -320,6 +320,7 @@@ struct qede_fastpath #define XMIT_L4_CSUM BIT(0) #define XMIT_LSO BIT(1) #define XMIT_ENC BIT(2) +#define XMIT_ENC_GSO_L4_CSUM BIT(3)
#define QEDE_CSUM_ERROR BIT(0) #define QEDE_CSUM_UNNECESSARY BIT(1) @@@ -349,12 -348,13 +349,13 @@@ bool qede_has_rx_work(struct qede_rx_qu int qede_txq_has_work(struct qede_tx_queue *txq); void qede_recycle_rx_bd_ring(struct qede_rx_queue *rxq, struct qede_dev *edev, u8 count); + void qede_update_rx_prod(struct qede_dev *edev, struct qede_rx_queue *rxq);
#define RX_RING_SIZE_POW 13 #define RX_RING_SIZE ((u16)BIT(RX_RING_SIZE_POW)) #define NUM_RX_BDS_MAX (RX_RING_SIZE - 1) #define NUM_RX_BDS_MIN 128 - #define NUM_RX_BDS_DEF NUM_RX_BDS_MAX + #define NUM_RX_BDS_DEF ((u16)BIT(10) - 1)
#define TX_RING_SIZE_POW 13 #define TX_RING_SIZE ((u16)BIT(TX_RING_SIZE_POW)) @@@ -362,9 -362,8 +363,9 @@@ #define NUM_TX_BDS_MIN 128 #define NUM_TX_BDS_DEF NUM_TX_BDS_MAX
-#define QEDE_MIN_PKT_LEN 64 -#define QEDE_RX_HDR_SIZE 256 +#define QEDE_MIN_PKT_LEN 64 +#define QEDE_RX_HDR_SIZE 256 +#define QEDE_MAX_JUMBO_PACKET_SIZE 9600 #define for_each_queue(i) for (i = 0; i < edev->num_queues; i++)
#endif /* _QEDE_H_ */ diff --combined drivers/net/ethernet/qlogic/qede/qede_ethtool.c index b7dbb44,12251a1..0100f5c --- a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c +++ b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c @@@ -723,11 -723,19 +723,11 @@@ static void qede_update_mtu(struct qede }
/* Netdevice NDOs */ -#define ETH_MAX_JUMBO_PACKET_SIZE 9600 -#define ETH_MIN_PACKET_SIZE 60 int qede_change_mtu(struct net_device *ndev, int new_mtu) { struct qede_dev *edev = netdev_priv(ndev); union qede_reload_args args;
- if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) || - ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE)) { - DP_ERR(edev, "Can't support requested MTU size\n"); - return -EINVAL; - } - DP_VERBOSE(edev, (NETIF_MSG_IFUP | NETIF_MSG_IFDOWN), "Configuring MTU size of %d\n", new_mtu);
@@@ -748,6 -756,8 +748,8 @@@ static void qede_get_channels(struct ne struct qede_dev *edev = netdev_priv(dev);
channels->max_combined = QEDE_MAX_RSS_CNT(edev); + channels->max_rx = QEDE_MAX_RSS_CNT(edev); + channels->max_tx = QEDE_MAX_RSS_CNT(edev); channels->combined_count = QEDE_QUEUE_CNT(edev) - edev->fp_num_tx - edev->fp_num_rx; channels->tx_count = edev->fp_num_tx; @@@ -812,6 -822,13 +814,13 @@@ static int qede_set_channels(struct net edev->req_queues = count; edev->req_num_tx = channels->tx_count; edev->req_num_rx = channels->rx_count; + /* Reset the indirection table if rx queue count is updated */ + if ((edev->req_queues - edev->req_num_tx) != QEDE_RSS_COUNT(edev)) { + edev->rss_params_inited &= ~QEDE_RSS_INDIR_INITED; + memset(&edev->rss_params.rss_ind_table, 0, + sizeof(edev->rss_params.rss_ind_table)); + } + if (netif_running(dev)) qede_reload(edev, NULL, NULL);
@@@ -1045,6 -1062,12 +1054,12 @@@ static int qede_set_rxfh(struct net_dev struct qede_dev *edev = netdev_priv(dev); int i;
+ if (edev->dev_info.common.num_hwfns > 1) { + DP_INFO(edev, + "RSS configuration is not supported for 100G devices\n"); + return -EOPNOTSUPP; + } + if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP) return -EOPNOTSUPP;
@@@ -1176,8 -1199,8 +1191,8 @@@ static int qede_selftest_transmit_traff }
first_bd = (struct eth_tx_1st_bd *)qed_chain_consume(&txq->tx_pbl); - dma_unmap_page(&edev->pdev->dev, BD_UNMAP_ADDR(first_bd), - BD_UNMAP_LEN(first_bd), DMA_TO_DEVICE); + dma_unmap_single(&edev->pdev->dev, BD_UNMAP_ADDR(first_bd), + BD_UNMAP_LEN(first_bd), DMA_TO_DEVICE); txq->sw_tx_cons++; txq->sw_tx_ring[idx].skb = NULL;
@@@ -1191,8 -1214,8 +1206,8 @@@ static int qede_selftest_receive_traffi struct qede_rx_queue *rxq = NULL; struct sw_rx_data *sw_rx_data; union eth_rx_cqe *cqe; + int i, rc = 0; u8 *data_ptr; - int i;
for_each_queue(i) { if (edev->fp_array[i].type & QEDE_FASTPATH_RX) { @@@ -1211,46 -1234,60 +1226,60 @@@ * queue and that the loopback traffic is not IP. */ for (i = 0; i < QEDE_SELFTEST_POLL_COUNT; i++) { - if (qede_has_rx_work(rxq)) + if (!qede_has_rx_work(rxq)) { + usleep_range(100, 200); + continue; + } + + hw_comp_cons = le16_to_cpu(*rxq->hw_cons_ptr); + sw_comp_cons = qed_chain_get_cons_idx(&rxq->rx_comp_ring); + + /* Memory barrier to prevent the CPU from doing speculative + * reads of CQE/BD before reading hw_comp_cons. If the CQE is + * read before it is written by FW, then FW writes CQE and SB, + * and then the CPU reads the hw_comp_cons, it will use an old + * CQE. + */ + rmb(); + + /* Get the CQE from the completion ring */ + cqe = (union eth_rx_cqe *)qed_chain_consume(&rxq->rx_comp_ring); + + /* Get the data from the SW ring */ + sw_rx_index = rxq->sw_rx_cons & NUM_RX_BDS_MAX; + sw_rx_data = &rxq->sw_rx_ring[sw_rx_index]; + fp_cqe = &cqe->fast_path_regular; + len = le16_to_cpu(fp_cqe->len_on_first_bd); + data_ptr = (u8 *)(page_address(sw_rx_data->data) + + fp_cqe->placement_offset + + sw_rx_data->page_offset); + if (ether_addr_equal(data_ptr, edev->ndev->dev_addr) && + ether_addr_equal(data_ptr + ETH_ALEN, + edev->ndev->dev_addr)) { + for (i = ETH_HLEN; i < len; i++) + if (data_ptr[i] != (unsigned char)(i & 0xff)) { + rc = -1; + break; + } + + qede_recycle_rx_bd_ring(rxq, edev, 1); + qed_chain_recycle_consumed(&rxq->rx_comp_ring); break; - usleep_range(100, 200); + } + + DP_INFO(edev, "Not the transmitted packet\n"); + qede_recycle_rx_bd_ring(rxq, edev, 1); + qed_chain_recycle_consumed(&rxq->rx_comp_ring); }
- if (!qede_has_rx_work(rxq)) { + if (i == QEDE_SELFTEST_POLL_COUNT) { DP_NOTICE(edev, "Failed to receive the traffic\n"); return -1; }
- hw_comp_cons = le16_to_cpu(*rxq->hw_cons_ptr); - sw_comp_cons = qed_chain_get_cons_idx(&rxq->rx_comp_ring); + qede_update_rx_prod(edev, rxq);
- /* Memory barrier to prevent the CPU from doing speculative reads of CQE - * / BD before reading hw_comp_cons. If the CQE is read before it is - * written by FW, then FW writes CQE and SB, and then the CPU reads the - * hw_comp_cons, it will use an old CQE. - */ - rmb(); - - /* Get the CQE from the completion ring */ - cqe = (union eth_rx_cqe *)qed_chain_consume(&rxq->rx_comp_ring); - - /* Get the data from the SW ring */ - sw_rx_index = rxq->sw_rx_cons & NUM_RX_BDS_MAX; - sw_rx_data = &rxq->sw_rx_ring[sw_rx_index]; - fp_cqe = &cqe->fast_path_regular; - len = le16_to_cpu(fp_cqe->len_on_first_bd); - data_ptr = (u8 *)(page_address(sw_rx_data->data) + - fp_cqe->placement_offset + sw_rx_data->page_offset); - for (i = ETH_HLEN; i < len; i++) - if (data_ptr[i] != (unsigned char)(i & 0xff)) { - DP_NOTICE(edev, "Loopback test failed\n"); - qede_recycle_rx_bd_ring(rxq, edev, 1); - return -1; - } - - qede_recycle_rx_bd_ring(rxq, edev, 1); - - return 0; + return rc; }
static int qede_selftest_run_loopback(struct qede_dev *edev, u32 loopback_mode) diff --combined drivers/net/ethernet/qlogic/qede/qede_main.c index 4f29865,7def29a..8488ad3 --- a/drivers/net/ethernet/qlogic/qede/qede_main.c +++ b/drivers/net/ethernet/qlogic/qede/qede_main.c @@@ -171,14 -171,10 +171,14 @@@ static struct pci_driver qede_pci_drive #endif };
-static void qede_force_mac(void *dev, u8 *mac) +static void qede_force_mac(void *dev, u8 *mac, bool forced) { struct qede_dev *edev = dev;
+ /* MAC hints take effect only if we haven't set one already */ + if (is_valid_ether_addr(edev->ndev->dev_addr) && !forced) + return; + ether_addr_copy(edev->ndev->dev_addr, mac); ether_addr_copy(edev->primary_mac, mac); } @@@ -317,8 -313,8 +317,8 @@@ static int qede_free_tx_pkt(struct qede split_bd_len = BD_UNMAP_LEN(split); bds_consumed++; } - dma_unmap_page(&edev->pdev->dev, BD_UNMAP_ADDR(first_bd), - BD_UNMAP_LEN(first_bd) + split_bd_len, DMA_TO_DEVICE); + dma_unmap_single(&edev->pdev->dev, BD_UNMAP_ADDR(first_bd), + BD_UNMAP_LEN(first_bd) + split_bd_len, DMA_TO_DEVICE);
/* Unmap the data of the skb frags */ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++, bds_consumed++) { @@@ -363,8 -359,8 +363,8 @@@ static void qede_free_failed_tx_pkt(str nbd--; }
- dma_unmap_page(&edev->pdev->dev, BD_UNMAP_ADDR(first_bd), - BD_UNMAP_LEN(first_bd) + split_bd_len, DMA_TO_DEVICE); + dma_unmap_single(&edev->pdev->dev, BD_UNMAP_ADDR(first_bd), + BD_UNMAP_LEN(first_bd) + split_bd_len, DMA_TO_DEVICE);
/* Unmap the data of the skb frags */ for (i = 0; i < nbd; i++) { @@@ -400,19 -396,8 +400,19 @@@ static u32 qede_xmit_type(struct qede_d (ipv6_hdr(skb)->nexthdr == NEXTHDR_IPV6)) *ipv6_ext = 1;
- if (skb->encapsulation) + if (skb->encapsulation) { rc |= XMIT_ENC; + if (skb_is_gso(skb)) { + unsigned short gso_type = skb_shinfo(skb)->gso_type; + + if ((gso_type & SKB_GSO_UDP_TUNNEL_CSUM) || + (gso_type & SKB_GSO_GRE_CSUM)) + rc |= XMIT_ENC_GSO_L4_CSUM; + + rc |= XMIT_LSO; + return rc; + } + }
if (skb_is_gso(skb)) rc |= XMIT_LSO; @@@ -648,12 -633,6 +648,12 @@@ static netdev_tx_t qede_start_xmit(stru if (unlikely(xmit_type & XMIT_ENC)) { first_bd->data.bd_flags.bitfields |= 1 << ETH_TX_1ST_BD_FLAGS_TUNN_IP_CSUM_SHIFT; + + if (xmit_type & XMIT_ENC_GSO_L4_CSUM) { + u8 tmp = ETH_TX_1ST_BD_FLAGS_TUNN_L4_CSUM_SHIFT; + + first_bd->data.bd_flags.bitfields |= 1 << tmp; + } hlen = qede_get_skb_hlen(skb, true); } else { first_bd->data.bd_flags.bitfields |= @@@ -964,8 -943,7 +964,7 @@@ static inline int qede_realloc_rx_buffe return 0; }
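The new XMIT_ENC_GSO_L4_CSUM bit asks the hardware to also compute the inner L4 checksum for GSO'd tunnel traffic, and qede_start_xmit() above translates it into the TUNN_L4_CSUM BD flag. A condensed restatement of the classification as a sketch (not a driver helper):

	static bool needs_tunn_l4_csum(const struct sk_buff *skb)
	{
		unsigned int gso = skb_shinfo(skb)->gso_type;

		return skb->encapsulation &&
		       (gso & (SKB_GSO_UDP_TUNNEL_CSUM | SKB_GSO_GRE_CSUM));
	}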
- static inline void qede_update_rx_prod(struct qede_dev *edev, - struct qede_rx_queue *rxq) + void qede_update_rx_prod(struct qede_dev *edev, struct qede_rx_queue *rxq) { u16 bd_prod = qed_chain_get_prod_idx(&rxq->rx_bd_ring); u16 cqe_prod = qed_chain_get_prod_idx(&rxq->rx_comp_ring); @@@ -2240,40 -2218,6 +2239,40 @@@ static void qede_udp_tunnel_del(struct schedule_delayed_work(&edev->sp_task, 0); }
+/* 8B udp header + 8B base tunnel header + 32B option length */ +#define QEDE_MAX_TUN_HDR_LEN 48 + +static netdev_features_t qede_features_check(struct sk_buff *skb, + struct net_device *dev, + netdev_features_t features) +{ + if (skb->encapsulation) { + u8 l4_proto = 0; + + switch (vlan_get_protocol(skb)) { + case htons(ETH_P_IP): + l4_proto = ip_hdr(skb)->protocol; + break; + case htons(ETH_P_IPV6): + l4_proto = ipv6_hdr(skb)->nexthdr; + break; + default: + return features; + } + + /* Disable offloads for geneve tunnels, as HW can't parse + * the geneve header which has option length greater than 32B. + */ + if ((l4_proto == IPPROTO_UDP) && + ((skb_inner_mac_header(skb) - + skb_transport_header(skb)) > QEDE_MAX_TUN_HDR_LEN)) + return features & ~(NETIF_F_CSUM_MASK | + NETIF_F_GSO_MASK); + } + + return features; +} + static const struct net_device_ops qede_netdev_ops = { .ndo_open = qede_open, .ndo_stop = qede_close, @@@ -2298,7 -2242,6 +2297,7 @@@ #endif .ndo_udp_tunnel_add = qede_udp_tunnel_add, .ndo_udp_tunnel_del = qede_udp_tunnel_del, + .ndo_features_check = qede_features_check, };
/* ------------------------------------------------------------------------- @@@ -2365,8 -2308,6 +2364,8 @@@ static void qede_init_ndev(struct qede_
qede_set_ethtool_ops(ndev);
+ ndev->priv_flags = IFF_UNICAST_FLT; + /* user-changeble features */ hw_features = NETIF_F_GRO | NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | @@@ -2374,14 -2315,11 +2373,14 @@@
/* Encap features*/ hw_features |= NETIF_F_GSO_GRE | NETIF_F_GSO_UDP_TUNNEL | - NETIF_F_TSO_ECN; + NETIF_F_TSO_ECN | NETIF_F_GSO_UDP_TUNNEL_CSUM | + NETIF_F_GSO_GRE_CSUM; ndev->hw_enc_features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6 | NETIF_F_GSO_GRE | - NETIF_F_GSO_UDP_TUNNEL | NETIF_F_RXCSUM; + NETIF_F_GSO_UDP_TUNNEL | NETIF_F_RXCSUM | + NETIF_F_GSO_UDP_TUNNEL_CSUM | + NETIF_F_GSO_GRE_CSUM;
ndev->vlan_features = hw_features | NETIF_F_RXHASH | NETIF_F_RXCSUM | NETIF_F_HIGHDMA; @@@ -2391,10 -2329,6 +2390,10 @@@
ndev->hw_features = hw_features;
+ /* MTU range: 46 - 9600 */ + ndev->min_mtu = ETH_ZLEN - ETH_HLEN; + ndev->max_mtu = QEDE_MAX_JUMBO_PACKET_SIZE; + /* Set network device HW mac */ ether_addr_copy(edev->ndev->dev_addr, edev->dev_info.common.hw_mac); } @@@ -3006,7 -2940,7 +3005,7 @@@ static int qede_alloc_mem_txq(struct qe txq->num_tx_buffers = edev->q_num_tx_buffers;
/* Allocate the parallel driver ring for Tx buffers */ - size = sizeof(*txq->sw_tx_ring) * NUM_TX_BDS_MAX; + size = sizeof(*txq->sw_tx_ring) * TX_RING_SIZE; txq->sw_tx_ring = kzalloc(size, GFP_KERNEL); if (!txq->sw_tx_ring) { DP_NOTICE(edev, "Tx buffers ring allocation failed\n"); @@@ -3017,7 -2951,7 +3016,7 @@@ QED_CHAIN_USE_TO_CONSUME_PRODUCE, QED_CHAIN_MODE_PBL, QED_CHAIN_CNT_TYPE_U16, - NUM_TX_BDS_MAX, + TX_RING_SIZE, sizeof(*p_virt), &txq->tx_pbl); if (rc) goto err; @@@ -3943,7 -3877,7 +3942,7 @@@ static void qede_config_rx_mode(struct
/* Check for promiscuous */ if ((ndev->flags & IFF_PROMISC) || - (uc_count > 15)) { /* @@@TBD resource allocation - 1 */ + (uc_count > edev->dev_info.num_mac_filters - 1)) { accept_flags = QED_FILTER_RX_MODE_TYPE_PROMISC; } else { /* Add MAC filters according to the unicast secondary macs */ diff --combined drivers/net/ethernet/qualcomm/emac/emac.c index e4e1925,4fede4b..8be526a --- a/drivers/net/ethernet/qualcomm/emac/emac.c +++ b/drivers/net/ethernet/qualcomm/emac/emac.c @@@ -239,8 -239,15 +239,8 @@@ static void emac_rx_mode_set(struct net /* Change the Maximum Transfer Unit (MTU) */ static int emac_change_mtu(struct net_device *netdev, int new_mtu) { - unsigned int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN; struct emac_adapter *adpt = netdev_priv(netdev);
- if ((max_frame < EMAC_MIN_ETH_FRAME_SIZE) || - (max_frame > EMAC_MAX_ETH_FRAME_SIZE)) { - netdev_err(adpt->netdev, "error: invalid MTU setting\n"); - return -EINVAL; - } - netif_info(adpt, hw, adpt->netdev, "changing MTU from %d to %d\n", netdev->mtu, new_mtu); @@@ -568,6 -575,7 +568,7 @@@ static const struct of_device_id emac_d }, {} }; + MODULE_DEVICE_TABLE(of, emac_dt_match);
#if IS_ENABLED(CONFIG_ACPI) static const struct acpi_device_id emac_acpi_match[] = { @@@ -672,12 -680,6 +673,12 @@@ static int emac_probe(struct platform_d netdev->vlan_features |= NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_TSO | NETIF_F_TSO6;
+ /* MTU range: 46 - 9194 */ + netdev->min_mtu = EMAC_MIN_ETH_FRAME_SIZE - + (ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN); + netdev->max_mtu = EMAC_MAX_ETH_FRAME_SIZE - + (ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN); + INIT_WORK(&adpt->work_thread, emac_work_thread);
/* Initialize queues */ diff --combined drivers/net/ethernet/realtek/r8169.c index b698ea5,bf000d8..2830190 --- a/drivers/net/ethernet/realtek/r8169.c +++ b/drivers/net/ethernet/realtek/r8169.c @@@ -6673,6 -6673,10 +6673,6 @@@ static int rtl8169_change_mtu(struct ne { struct rtl8169_private *tp = netdev_priv(dev);
- if (new_mtu < ETH_ZLEN || - new_mtu > rtl_chip_infos[tp->mac_version].jumbo_max) - return -EINVAL; - if (new_mtu > ETH_DATA_LEN) rtl_hw_jumbo_enable(tp); else @@@ -8269,7 -8273,8 +8269,8 @@@ static int rtl_init_one(struct pci_dev if ((sizeof(dma_addr_t) > 4) && (use_dac == 1 || (use_dac == -1 && pci_is_pcie(pdev) && tp->mac_version >= RTL_GIGA_MAC_VER_18)) && - !pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) { + !pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) && + !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) {
/* CPlusCmd Dual Access Cycle is only needed for non-PCIe */ if (!pci_is_pcie(pdev)) @@@ -8426,10 -8431,6 +8427,10 @@@ dev->hw_features |= NETIF_F_RXALL; dev->hw_features |= NETIF_F_RXFCS;
+ /* MTU range: 60 - hw-specific max */ + dev->min_mtu = ETH_ZLEN; + dev->max_mtu = rtl_chip_infos[chipset].jumbo_max; + tp->hw_start = cfg->hw_start; tp->event_slow = cfg->event_slow;
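r8169 here, like qede, emac and the other drivers touched by this merge, drops its private bounds check from ndo_change_mtu and instead publishes the valid range through net_device's new min_mtu/max_mtu fields, letting the core reject out-of-range values before the driver callback ever runs. The pattern, sketched with illustrative limits:

	static void example_publish_mtu_range(struct net_device *ndev)
	{
		ndev->min_mtu = ETH_MIN_MTU;	/* 68 */
		ndev->max_mtu = 9000;		/* hypothetical jumbo limit */
	}

	static int example_change_mtu(struct net_device *ndev, int new_mtu)
	{
		/* new_mtu has already been range-checked by the core */
		ndev->mtu = new_mtu;
		return 0;
	}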
diff --combined drivers/net/ethernet/rocker/rocker_main.c index 55b2ab9,24b7464..67df4cf --- a/drivers/net/ethernet/rocker/rocker_main.c +++ b/drivers/net/ethernet/rocker/rocker_main.c @@@ -1471,7 -1471,7 +1471,7 @@@ static int rocker_world_check_init(stru if (rocker->wops) { if (rocker->wops->mode != mode) { dev_err(&rocker->pdev->dev, "hardware has ports in different worlds, which is not supported\n"); - return err; + return -EINVAL; } return 0; } @@@ -1953,6 -1953,12 +1953,6 @@@ static int rocker_port_change_mtu(struc int running = netif_running(dev); int err;
-#define ROCKER_PORT_MIN_MTU 68 -#define ROCKER_PORT_MAX_MTU 9000 - - if (new_mtu < ROCKER_PORT_MIN_MTU || new_mtu > ROCKER_PORT_MAX_MTU) - return -EINVAL; - if (running) rocker_port_stop(dev);
@@@ -2530,11 -2536,9 +2530,11 @@@ static void rocker_port_dev_addr_init(s } }
+#define ROCKER_PORT_MIN_MTU ETH_MIN_MTU +#define ROCKER_PORT_MAX_MTU 9000 static int rocker_probe_port(struct rocker *rocker, unsigned int port_number) { - const struct pci_dev *pdev = rocker->pdev; + struct pci_dev *pdev = rocker->pdev; struct rocker_port *rocker_port; struct net_device *dev; int err; @@@ -2542,7 -2546,6 +2542,7 @@@ dev = alloc_etherdev(sizeof(struct rocker_port)); if (!dev) return -ENOMEM; + SET_NETDEV_DEV(dev, &pdev->dev); rocker_port = netdev_priv(dev); rocker_port->dev = dev; rocker_port->rocker = rocker; @@@ -2567,10 -2570,6 +2567,10 @@@
dev->features |= NETIF_F_NETNS_LOCAL | NETIF_F_SG;
+ /* MTU range: 68 - 9000 */ + dev->min_mtu = ROCKER_PORT_MIN_MTU; + dev->max_mtu = ROCKER_PORT_MAX_MTU; + err = rocker_world_port_pre_init(rocker_port); if (err) { dev_err(&pdev->dev, "port world pre-init failed\n"); @@@ -2840,37 -2839,20 +2840,37 @@@ static bool rocker_port_dev_check_under return true; }
+struct rocker_walk_data { + struct rocker *rocker; + struct rocker_port *port; +}; + +static int rocker_lower_dev_walk(struct net_device *lower_dev, void *_data) +{ + struct rocker_walk_data *data = _data; + int ret = 0; + + if (rocker_port_dev_check_under(lower_dev, data->rocker)) { + data->port = netdev_priv(lower_dev); + ret = 1; + } + + return ret; +} + struct rocker_port *rocker_port_dev_lower_find(struct net_device *dev, struct rocker *rocker) { - struct net_device *lower_dev; - struct list_head *iter; + struct rocker_walk_data data;
if (rocker_port_dev_check_under(dev, rocker)) return netdev_priv(dev);
- netdev_for_each_all_lower_dev(dev, lower_dev, iter) { - if (rocker_port_dev_check_under(lower_dev, rocker)) - return netdev_priv(lower_dev); - } - return NULL; + data.rocker = rocker; + data.port = NULL; + netdev_walk_all_lower_dev(dev, rocker_lower_dev_walk, &data); + + return data.port; }
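This is the 'net-next' side of the adjacency-list conflict mentioned in the merge description: the netdev_for_each_all_lower_dev() iterator macro is gone, and lower devices are now visited through netdev_walk_all_lower_dev(), which calls back once per lower device and stops as soon as the callback returns nonzero. The callback contract, sketched with illustrative names:

	struct find_ctx {
		const struct net_device *target;
		bool found;
	};

	static int find_cb(struct net_device *lower, void *_ctx)
	{
		struct find_ctx *ctx = _ctx;

		if (lower == ctx->target) {
			ctx->found = true;
			return 1;	/* nonzero stops the walk early */
		}
		return 0;		/* zero keeps walking */
	}

	/* usage: netdev_walk_all_lower_dev(dev, find_cb, &ctx); */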
static int rocker_netdevice_event(struct notifier_block *unused, diff --combined drivers/net/ethernet/stmicro/stmmac/stmmac.h index f94e028,b15fc55..758b4e2 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h @@@ -90,6 -90,7 +90,6 @@@ struct stmmac_priv struct mac_device_info *hw; spinlock_t lock;
- struct phy_device *phydev ____cacheline_aligned_in_smp; int oldlink; int speed; int oldduplex; @@@ -144,7 -145,7 +144,7 @@@ int stmmac_mdio_register(struct net_dev int stmmac_mdio_reset(struct mii_bus *mii); void stmmac_set_ethtool_ops(struct net_device *netdev);
- int stmmac_ptp_register(struct stmmac_priv *priv); + void stmmac_ptp_register(struct stmmac_priv *priv); void stmmac_ptp_unregister(struct stmmac_priv *priv); int stmmac_resume(struct device *dev); int stmmac_suspend(struct device *dev); diff --combined drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index fa4a82f,48e71fa..10909c9 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@@ -221,8 -221,7 +221,8 @@@ static inline u32 stmmac_rx_dirty(struc */ static inline void stmmac_hw_fix_mac_speed(struct stmmac_priv *priv) { - struct phy_device *phydev = priv->phydev; + struct net_device *ndev = priv->dev; + struct phy_device *phydev = ndev->phydev;
if (likely(priv->plat->fix_mac_speed)) priv->plat->fix_mac_speed(priv->plat->bsp_priv, phydev->speed); @@@ -280,7 -279,6 +280,7 @@@ static void stmmac_eee_ctrl_timer(unsig */ bool stmmac_eee_init(struct stmmac_priv *priv) { + struct net_device *ndev = priv->dev; unsigned long flags; bool ret = false;
@@@ -297,7 -295,7 +297,7 @@@ int tx_lpi_timer = priv->tx_lpi_timer;
/* Check if the PHY supports EEE */ - if (phy_init_eee(priv->phydev, 1)) { + if (phy_init_eee(ndev->phydev, 1)) { /* To manage at run-time if the EEE cannot be supported * anymore (for example because the lp caps have been * changed). @@@ -329,7 -327,7 +329,7 @@@ tx_lpi_timer); } /* Set HW EEE according to the speed */ - priv->hw->mac->set_eee_pls(priv->hw, priv->phydev->link); + priv->hw->mac->set_eee_pls(priv->hw, ndev->phydev->link);
ret = true; spin_unlock_irqrestore(&priv->lock, flags); @@@ -678,7 -676,9 +678,9 @@@ static int stmmac_init_ptp(struct stmma priv->hwts_tx_en = 0; priv->hwts_rx_en = 0;
- return stmmac_ptp_register(priv); + stmmac_ptp_register(priv); + + return 0; }
static void stmmac_release_ptp(struct stmmac_priv *priv) @@@ -700,7 -700,7 +702,7 @@@ static void stmmac_adjust_link(struct net_device *dev) { struct stmmac_priv *priv = netdev_priv(dev); - struct phy_device *phydev = priv->phydev; + struct phy_device *phydev = dev->phydev; unsigned long flags; int new_state = 0; unsigned int fc = priv->flow_ctrl, pause_time = priv->pause; @@@ -883,6 -883,8 +885,6 @@@ static int stmmac_init_phy(struct net_d pr_debug("stmmac_init_phy: %s: attached to PHY (UID 0x%x)" " Link = %d\n", dev->name, phydev->phy_id, phydev->link);
- priv->phydev = phydev; - return 0; }
@@@ -1710,7 -1712,7 +1712,7 @@@ static int stmmac_hw_setup(struct net_d if (init_ptp) { ret = stmmac_init_ptp(priv); if (ret) - netdev_warn(priv->dev, "PTP support cannot init.\n"); + netdev_warn(priv->dev, "failed to init PTP.\n"); }
#ifdef CONFIG_DEBUG_FS @@@ -1807,8 -1809,8 +1809,8 @@@ static int stmmac_open(struct net_devic
stmmac_init_tx_coalesce(priv);
- if (priv->phydev) - phy_start(priv->phydev); + if (dev->phydev) + phy_start(dev->phydev);
/* Request the IRQ lines */ ret = request_irq(dev->irq, stmmac_interrupt, @@@ -1855,8 -1857,8 +1857,8 @@@ wolirq_error init_error: free_dma_desc_resources(priv); dma_desc_error: - if (priv->phydev) - phy_disconnect(priv->phydev); + if (dev->phydev) + phy_disconnect(dev->phydev);
return ret; } @@@ -1875,9 -1877,10 +1877,9 @@@ static int stmmac_release(struct net_de del_timer_sync(&priv->eee_ctrl_timer);
/* Stop and disconnect the PHY */ - if (priv->phydev) { - phy_stop(priv->phydev); - phy_disconnect(priv->phydev); - priv->phydev = NULL; + if (dev->phydev) { + phy_stop(dev->phydev); + phy_disconnect(dev->phydev); }
netif_stop_queue(dev); @@@ -2715,11 -2718,27 +2717,11 @@@ static void stmmac_set_rx_mode(struct n */ static int stmmac_change_mtu(struct net_device *dev, int new_mtu) { - struct stmmac_priv *priv = netdev_priv(dev); - int max_mtu; - if (netif_running(dev)) { pr_err("%s: must be stopped to change its MTU\n", dev->name); return -EBUSY; }
- if ((priv->plat->enh_desc) || (priv->synopsys_id >= DWMAC_CORE_4_00)) - max_mtu = JUMBO_LEN; - else - max_mtu = SKB_MAX_HEAD(NET_SKB_PAD + NET_IP_ALIGN); - - if (priv->plat->maxmtu < max_mtu) - max_mtu = priv->plat->maxmtu; - - if ((new_mtu < 46) || (new_mtu > max_mtu)) { - pr_err("%s: invalid MTU, max MTU is: %d\n", dev->name, max_mtu); - return -EINVAL; - } - dev->mtu = new_mtu;
netdev_update_features(dev); @@@ -2853,6 -2872,7 +2855,6 @@@ static void stmmac_poll_controller(stru */ static int stmmac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) { - struct stmmac_priv *priv = netdev_priv(dev); int ret = -EOPNOTSUPP;
if (!netif_running(dev)) @@@ -2862,9 -2882,9 +2864,9 @@@ case SIOCGMIIPHY: case SIOCGMIIREG: case SIOCSMIIREG: - if (!priv->phydev) + if (!dev->phydev) return -EINVAL; - ret = phy_mii_ioctl(priv->phydev, rq, cmd); + ret = phy_mii_ioctl(dev->phydev, rq, cmd); break; case SIOCSHWTSTAMP: ret = stmmac_hwtstamp_ioctl(dev, rq); @@@ -3301,15 -3321,6 +3303,15 @@@ int stmmac_dvr_probe(struct device *dev #endif priv->msg_enable = netif_msg_init(debug, default_msg_level);
+ /* MTU range: 46 - hw-specific max */ + ndev->min_mtu = ETH_ZLEN - ETH_HLEN; + if ((priv->plat->enh_desc) || (priv->synopsys_id >= DWMAC_CORE_4_00)) + ndev->max_mtu = JUMBO_LEN; + else + ndev->max_mtu = SKB_MAX_HEAD(NET_SKB_PAD + NET_IP_ALIGN); + if (priv->plat->maxmtu < ndev->max_mtu) + ndev->max_mtu = priv->plat->maxmtu; + if (flow_ctrl) priv->flow_ctrl = FLOW_AUTO; /* RX/TX pause on */
@@@ -3426,8 -3437,8 +3428,8 @@@ int stmmac_suspend(struct device *dev if (!ndev || !netif_running(ndev)) return 0;
- if (priv->phydev) - phy_stop(priv->phydev); + if (ndev->phydev) + phy_stop(ndev->phydev);
spin_lock_irqsave(&priv->lock, flags);
@@@ -3521,8 -3532,8 +3523,8 @@@ int stmmac_resume(struct device *dev
spin_unlock_irqrestore(&priv->lock, flags);
- if (priv->phydev) - phy_start(priv->phydev); + if (ndev->phydev) + phy_start(ndev->phydev);
return 0; } diff --combined drivers/net/ethernet/synopsys/dwc_eth_qos.c index eaa51ce,5eedac4..7053301 --- a/drivers/net/ethernet/synopsys/dwc_eth_qos.c +++ b/drivers/net/ethernet/synopsys/dwc_eth_qos.c @@@ -982,11 -982,13 +982,13 @@@ static int dwceqos_mii_probe(struct net if (netif_msg_probe(lp)) phy_attached_info(phydev);
- phydev->supported &= PHY_GBIT_FEATURES; + phydev->supported &= PHY_GBIT_FEATURES | SUPPORTED_Pause | + SUPPORTED_Asym_Pause;
lp->link = 0; lp->speed = 0; lp->duplex = DUPLEX_UNKNOWN; + lp->flowcontrol.autoneg = AUTONEG_ENABLE;
return 0; } @@@ -2211,7 -2213,7 +2213,7 @@@ static int dwceqos_start_xmit(struct sk
tx_error: dwceqos_tx_rollback(lp, &trans); - dev_kfree_skb(skb); + dev_kfree_skb_any(skb); return 0; }
diff --combined drivers/net/geneve.c index 752bcaa,42edd7b..85a423a --- a/drivers/net/geneve.c +++ b/drivers/net/geneve.c @@@ -58,9 -58,9 +58,9 @@@ struct geneve_dev struct hlist_node hlist; /* vni hash table */ struct net *net; /* netns for packet i/o */ struct net_device *dev; /* netdev for geneve tunnel */ - struct geneve_sock *sock4; /* IPv4 socket used for geneve tunnel */ + struct geneve_sock __rcu *sock4; /* IPv4 socket used for geneve tunnel */ #if IS_ENABLED(CONFIG_IPV6) - struct geneve_sock *sock6; /* IPv6 socket used for geneve tunnel */ + struct geneve_sock __rcu *sock6; /* IPv6 socket used for geneve tunnel */ #endif u8 vni[3]; /* virtual network ID for tunnel */ u8 ttl; /* TTL override */ @@@ -453,7 -453,7 +453,7 @@@ static struct sk_buff **geneve_gro_rece
skb_gro_pull(skb, gh_len); skb_gro_postpull_rcsum(skb, gh, gh_len); - pp = ptype->callbacks.gro_receive(head, skb); + pp = call_gro_receive(ptype->callbacks.gro_receive, head, skb); flush = 0;
out_unlock: @@@ -543,9 -543,19 +543,19 @@@ static void __geneve_sock_release(struc
static void geneve_sock_release(struct geneve_dev *geneve) { - __geneve_sock_release(geneve->sock4); + struct geneve_sock *gs4 = rtnl_dereference(geneve->sock4); #if IS_ENABLED(CONFIG_IPV6) - __geneve_sock_release(geneve->sock6); + struct geneve_sock *gs6 = rtnl_dereference(geneve->sock6); + + rcu_assign_pointer(geneve->sock6, NULL); + #endif + + rcu_assign_pointer(geneve->sock4, NULL); + synchronize_net(); + + __geneve_sock_release(gs4); + #if IS_ENABLED(CONFIG_IPV6) + __geneve_sock_release(gs6); #endif }
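geneve's sockets (and vxlan's, later in this diff) now live behind __rcu pointers: teardown unpublishes the pointers, waits a grace period with synchronize_net(), and only then releases the sockets, so transmit paths that did rcu_dereference() can never be left holding a freed socket (they instead see NULL and bail with -EIO, as in geneve_get_v4_rt() below). The generic shape of the pattern, with obj/free_obj() as illustrative stand-ins:

	struct obj;
	void free_obj(struct obj *o);	/* stand-in destructor */

	struct holder {
		struct obj __rcu *obj;
	};

	static void holder_release(struct holder *h)
	{
		struct obj *o = rtnl_dereference(h->obj);	/* writer side */

		RCU_INIT_POINTER(h->obj, NULL);	/* 1. unpublish */
		synchronize_net();		/* 2. wait out readers */
		free_obj(o);			/* 3. now safe to free */
	}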
@@@ -586,10 -596,10 +596,10 @@@ out gs->flags = geneve->flags; #if IS_ENABLED(CONFIG_IPV6) if (ipv6) - geneve->sock6 = gs; + rcu_assign_pointer(geneve->sock6, gs); else #endif - geneve->sock4 = gs; + rcu_assign_pointer(geneve->sock4, gs);
hash = geneve_net_vni_hash(geneve->vni); hlist_add_head_rcu(&geneve->hlist, &gs->vni_list[hash]); @@@ -603,9 -613,7 +613,7 @@@ static int geneve_open(struct net_devic bool metadata = geneve->collect_md; int ret = 0;
- geneve->sock4 = NULL; #if IS_ENABLED(CONFIG_IPV6) - geneve->sock6 = NULL; if (ipv6 || metadata) ret = geneve_sock_add(geneve, true); #endif @@@ -720,6 -728,9 +728,9 @@@ static struct rtable *geneve_get_v4_rt( struct rtable *rt = NULL; __u8 tos;
+ if (!rcu_dereference(geneve->sock4)) + return ERR_PTR(-EIO); + memset(fl4, 0, sizeof(*fl4)); fl4->flowi4_mark = skb->mark; fl4->flowi4_proto = IPPROTO_UDP; @@@ -772,11 -783,15 +783,15 @@@ static struct dst_entry *geneve_get_v6_ { bool use_cache = ip_tunnel_dst_cache_usable(skb, info); struct geneve_dev *geneve = netdev_priv(dev); - struct geneve_sock *gs6 = geneve->sock6; struct dst_entry *dst = NULL; struct dst_cache *dst_cache; + struct geneve_sock *gs6; __u8 prio;
+ gs6 = rcu_dereference(geneve->sock6); + if (!gs6) + return ERR_PTR(-EIO); + memset(fl6, 0, sizeof(*fl6)); fl6->flowi6_mark = skb->mark; fl6->flowi6_proto = IPPROTO_UDP; @@@ -842,7 -857,7 +857,7 @@@ static netdev_tx_t geneve_xmit_skb(stru struct ip_tunnel_info *info) { struct geneve_dev *geneve = netdev_priv(dev); - struct geneve_sock *gs4 = geneve->sock4; + struct geneve_sock *gs4; struct rtable *rt = NULL; const struct iphdr *iip; /* interior IP header */ int err = -EINVAL; @@@ -853,6 -868,10 +868,10 @@@ bool xnet = !net_eq(geneve->net, dev_net(geneve->dev)); u32 flags = geneve->flags;
+ gs4 = rcu_dereference(geneve->sock4); + if (!gs4) + goto tx_error; + if (geneve->collect_md) { if (unlikely(!info || !(info->mode & IP_TUNNEL_INFO_TX))) { netdev_dbg(dev, "no tunnel metadata\n"); @@@ -932,9 -951,9 +951,9 @@@ static netdev_tx_t geneve6_xmit_skb(str struct ip_tunnel_info *info) { struct geneve_dev *geneve = netdev_priv(dev); - struct geneve_sock *gs6 = geneve->sock6; struct dst_entry *dst = NULL; const struct iphdr *iip; /* interior IP header */ + struct geneve_sock *gs6; int err = -EINVAL; struct flowi6 fl6; __u8 prio, ttl; @@@ -943,6 -962,10 +962,10 @@@ bool xnet = !net_eq(geneve->net, dev_net(geneve->dev)); u32 flags = geneve->flags;
+ gs6 = rcu_dereference(geneve->sock6); + if (!gs6) + goto tx_error; + if (geneve->collect_md) { if (unlikely(!info || !(info->mode & IP_TUNNEL_INFO_TX))) { netdev_dbg(dev, "no tunnel metadata\n"); @@@ -1034,18 -1057,39 +1057,18 @@@ static netdev_tx_t geneve_xmit(struct s return geneve_xmit_skb(skb, dev, info); }
-static int __geneve_change_mtu(struct net_device *dev, int new_mtu, bool strict) +static int geneve_change_mtu(struct net_device *dev, int new_mtu) { - struct geneve_dev *geneve = netdev_priv(dev); - /* The max_mtu calculation does not take account of GENEVE - * options, to avoid excluding potentially valid - * configurations. + /* Only possible if called internally, ndo_change_mtu path's new_mtu + * is guaranteed to be between dev->min_mtu and dev->max_mtu. */ - int max_mtu = IP_MAX_MTU - GENEVE_BASE_HLEN - dev->hard_header_len; - - if (geneve->remote.sa.sa_family == AF_INET6) - max_mtu -= sizeof(struct ipv6hdr); - else - max_mtu -= sizeof(struct iphdr); - - if (new_mtu < 68) - return -EINVAL; - - if (new_mtu > max_mtu) { - if (strict) - return -EINVAL; - - new_mtu = max_mtu; - } + if (new_mtu > dev->max_mtu) + new_mtu = dev->max_mtu;
dev->mtu = new_mtu; return 0; }
-static int geneve_change_mtu(struct net_device *dev, int new_mtu) -{ - return __geneve_change_mtu(dev, new_mtu, true); -} - static int geneve_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb) { struct ip_tunnel_info *info = skb_tunnel_info(skb); @@@ -1149,14 -1193,6 +1172,14 @@@ static void geneve_setup(struct net_dev dev->hw_features |= NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_RXCSUM; dev->hw_features |= NETIF_F_GSO_SOFTWARE;
+ /* MTU range: 68 - (something less than 65535) */ + dev->min_mtu = ETH_MIN_MTU; + /* The max_mtu calculation does not take account of GENEVE + * options, to avoid excluding potentially valid + * configurations. This will be further reduced by IPvX hdr size. + */ + dev->max_mtu = IP_MAX_MTU - GENEVE_BASE_HLEN - dev->hard_header_len; + netif_keep_dst(dev); dev->priv_flags &= ~IFF_TX_SKB_SHARING; dev->priv_flags |= IFF_LIVE_ADDR_CHANGE | IFF_NO_QUEUE; @@@ -1272,13 -1308,10 +1295,13 @@@ static int geneve_configure(struct net
/* make enough headroom for basic scenario */ encap_len = GENEVE_BASE_HLEN + ETH_HLEN; - if (remote->sa.sa_family == AF_INET) + if (remote->sa.sa_family == AF_INET) { encap_len += sizeof(struct iphdr); - else + dev->max_mtu -= sizeof(struct iphdr); + } else { encap_len += sizeof(struct ipv6hdr); + dev->max_mtu -= sizeof(struct ipv6hdr); + } dev->needed_headroom = encap_len + ETH_HLEN;
if (metadata) { @@@ -1478,7 -1511,7 +1501,7 @@@ struct net_device *geneve_dev_create_fb /* openvswitch users expect packet sizes to be unrestricted, * so set the largest MTU we can. */ - err = __geneve_change_mtu(dev, IP_MAX_MTU, false); + err = geneve_change_mtu(dev, IP_MAX_MTU); if (err) goto err;
diff --combined drivers/net/hyperv/netvsc_drv.c index 3b28cf1,f638215..9522763 --- a/drivers/net/hyperv/netvsc_drv.c +++ b/drivers/net/hyperv/netvsc_drv.c @@@ -447,7 -447,7 +447,7 @@@ static int netvsc_start_xmit(struct sk_ * Setup the sendside checksum offload only if this is not a * GSO packet. */ - if (skb_is_gso(skb)) { + if ((net_trans_info & (INFO_TCP | INFO_UDP)) && skb_is_gso(skb)) { struct ndis_tcp_lso_info *lso_info;
rndis_msg_size += NDIS_LSO_PPI_SIZE; @@@ -607,15 -607,18 +607,18 @@@ static struct sk_buff *netvsc_alloc_rec packet->total_data_buflen);
skb->protocol = eth_type_trans(skb, net); - if (csum_info) { - /* We only look at the IP checksum here. - * Should we be dropping the packet if checksum - * failed? How do we deal with other checksums - TCP/UDP? - */ - if (csum_info->receive.ip_checksum_succeeded) + + /* skb is already created with CHECKSUM_NONE */ + skb_checksum_none_assert(skb); + + /* + * In Linux, the IP checksum is always checked. + * Do L4 checksum offload if enabled and present. + */ + if (csum_info && (net->features & NETIF_F_RXCSUM)) { + if (csum_info->receive.tcp_checksum_succeeded || + csum_info->receive.udp_checksum_succeeded) skb->ip_summed = CHECKSUM_UNNECESSARY; - else - skb->ip_summed = CHECKSUM_NONE; }
if (vlan_tci & VLAN_TAG_PRESENT) @@@ -696,12 -699,8 +699,8 @@@ int netvsc_recv_callback(struct hv_devi static void netvsc_get_drvinfo(struct net_device *net, struct ethtool_drvinfo *info) { - struct net_device_context *net_device_ctx = netdev_priv(net); - struct hv_device *dev = net_device_ctx->device_ctx; - strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver)); strlcpy(info->fw_version, "N/A", sizeof(info->fw_version)); - strlcpy(info->bus_info, vmbus_dev_name(dev), sizeof(info->bus_info)); }
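The receive-checksum rework above stops treating a successful IP (L3) checksum as proof the packet is good: the skb starts from CHECKSUM_NONE, and only a device-validated TCP or UDP checksum, with RX checksum offload actually enabled, upgrades it to CHECKSUM_UNNECESSARY. The decision, condensed into a sketch where csum_ok stands in for the device's validation bits:

	static void set_rx_csum(struct sk_buff *skb, struct net_device *net,
				bool csum_ok)
	{
		skb_checksum_none_assert(skb);	/* start from CHECKSUM_NONE */

		if ((net->features & NETIF_F_RXCSUM) && csum_ok)
			skb->ip_summed = CHECKSUM_UNNECESSARY;
	}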
static void netvsc_get_channels(struct net_device *net, @@@ -872,12 -871,19 +871,12 @@@ static int netvsc_change_mtu(struct net struct netvsc_device *nvdev = ndevctx->nvdev; struct hv_device *hdev = ndevctx->device_ctx; struct netvsc_device_info device_info; - int limit = ETH_DATA_LEN; u32 num_chn; int ret = 0;
if (ndevctx->start_remove || !nvdev || nvdev->destroy) return -ENODEV;
- if (nvdev->nvsp_version >= NVSP_PROTOCOL_VERSION_2) - limit = NETVSC_MTU - ETH_HLEN; - - if (mtu < NETVSC_MTU_MIN || mtu > limit) - return -EINVAL; - ret = netvsc_close(ndev); if (ret) goto out; @@@ -1395,13 -1401,6 +1394,13 @@@ static int netvsc_probe(struct hv_devic netif_set_real_num_tx_queues(net, nvdev->num_chn); netif_set_real_num_rx_queues(net, nvdev->num_chn);
+ /* MTU range: 68 - 1500 or 65521 */ + net->min_mtu = NETVSC_MTU_MIN; + if (nvdev->nvsp_version >= NVSP_PROTOCOL_VERSION_2) + net->max_mtu = NETVSC_MTU - ETH_HLEN; + else + net->max_mtu = ETH_DATA_LEN; + ret = register_netdev(net); if (ret != 0) { pr_err("Unable to register netdev.\n"); diff --combined drivers/net/macsec.c index 0a715ab,d2e61e0..cc00eb0 --- a/drivers/net/macsec.c +++ b/drivers/net/macsec.c @@@ -397,6 -397,14 +397,14 @@@ static struct macsec_cb *macsec_skb_cb( #define DEFAULT_ENCRYPT false #define DEFAULT_ENCODING_SA 0
+ static bool send_sci(const struct macsec_secy *secy) + { + const struct macsec_tx_sc *tx_sc = &secy->tx_sc; + + return tx_sc->send_sci || + (secy->n_rx_sc > 1 && !tx_sc->end_station && !tx_sc->scb); + } + static sci_t make_sci(u8 *addr, __be16 port) { sci_t sci; @@@ -437,15 -445,15 +445,15 @@@ static unsigned int macsec_extra_len(bo
/* Fill SecTAG according to IEEE 802.1AE-2006 10.5.3 */ static void macsec_fill_sectag(struct macsec_eth_header *h, - const struct macsec_secy *secy, u32 pn) + const struct macsec_secy *secy, u32 pn, + bool sci_present) { const struct macsec_tx_sc *tx_sc = &secy->tx_sc;
- memset(&h->tci_an, 0, macsec_sectag_len(tx_sc->send_sci)); + memset(&h->tci_an, 0, macsec_sectag_len(sci_present)); h->eth.h_proto = htons(ETH_P_MACSEC);
- if (tx_sc->send_sci || - (secy->n_rx_sc > 1 && !tx_sc->end_station && !tx_sc->scb)) { + if (sci_present) { h->tci_an |= MACSEC_TCI_SC; memcpy(&h->secure_channel_id, &secy->sci, sizeof(h->secure_channel_id)); @@@ -650,6 -658,7 +658,7 @@@ static struct sk_buff *macsec_encrypt(s struct macsec_tx_sc *tx_sc; struct macsec_tx_sa *tx_sa; struct macsec_dev *macsec = macsec_priv(dev); + bool sci_present; u32 pn;
secy = &macsec->secy; @@@ -687,7 -696,8 +696,8 @@@
unprotected_len = skb->len; eth = eth_hdr(skb); - hh = (struct macsec_eth_header *)skb_push(skb, macsec_extra_len(tx_sc->send_sci)); + sci_present = send_sci(secy); + hh = (struct macsec_eth_header *)skb_push(skb, macsec_extra_len(sci_present)); memmove(hh, eth, 2 * ETH_ALEN);
pn = tx_sa_update_pn(tx_sa, secy); @@@ -696,7 -706,7 +706,7 @@@ kfree_skb(skb); return ERR_PTR(-ENOLINK); } - macsec_fill_sectag(hh, secy, pn); + macsec_fill_sectag(hh, secy, pn, sci_present); macsec_set_shortlen(hh, unprotected_len - 2 * ETH_ALEN);
skb_put(skb, secy->icv_len); @@@ -726,10 -736,10 +736,10 @@@ skb_to_sgvec(skb, sg, 0, skb->len);
if (tx_sc->encrypt) { - int len = skb->len - macsec_hdr_len(tx_sc->send_sci) - + int len = skb->len - macsec_hdr_len(sci_present) - secy->icv_len; aead_request_set_crypt(req, sg, sg, len, iv); - aead_request_set_ad(req, macsec_hdr_len(tx_sc->send_sci)); + aead_request_set_ad(req, macsec_hdr_len(sci_present)); } else { aead_request_set_crypt(req, sg, sg, 0, iv); aead_request_set_ad(req, skb->len - secy->icv_len); @@@ -1421,7 -1431,14 +1431,7 @@@ static void clear_tx_sa(struct macsec_t macsec_txsa_put(tx_sa); }
-static struct genl_family macsec_fam = { - .id = GENL_ID_GENERATE, - .name = MACSEC_GENL_NAME, - .hdrsize = 0, - .version = MACSEC_GENL_VERSION, - .maxattr = MACSEC_ATTR_MAX, - .netnsok = true, -}; +static struct genl_family macsec_fam;
static struct net_device *get_dev_from_nl(struct net *net, struct nlattr **attrs) @@@ -2648,17 -2665,6 +2658,17 @@@ static const struct genl_ops macsec_gen }, };
+static struct genl_family macsec_fam __ro_after_init = { + .name = MACSEC_GENL_NAME, + .hdrsize = 0, + .version = MACSEC_GENL_VERSION, + .maxattr = MACSEC_ATTR_MAX, + .netnsok = true, + .module = THIS_MODULE, + .ops = macsec_genl_ops, + .n_ops = ARRAY_SIZE(macsec_genl_ops), +}; + static netdev_tx_t macsec_start_xmit(struct sk_buff *skb, struct net_device *dev) { @@@ -2974,8 -2980,6 +2984,8 @@@ static void macsec_free_netdev(struct n static void macsec_setup(struct net_device *dev) { ether_setup(dev); + dev->min_mtu = 0; + dev->max_mtu = ETH_MAX_MTU; dev->priv_flags |= IFF_NO_QUEUE; dev->netdev_ops = &macsec_netdev_ops; dev->destructor = macsec_free_netdev; @@@ -3466,7 -3470,7 +3476,7 @@@ static int __init macsec_init(void if (err) goto notifier;
- err = genl_register_family_with_ops(&macsec_fam, macsec_genl_ops); + err = genl_register_family(&macsec_fam); if (err) goto rtnl;
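macsec (and target_core_user, further down) adopts the new static generic-netlink registration style from this merge window: GENL_ID_GENERATE disappears, the family structure itself carries .module, .ops and .n_ops, and a single genl_register_family() replaces genl_register_family_with_ops(). A sketch with a made-up family, not one from this diff:

	static const struct genl_ops example_ops[] = {
		/* { .cmd = EXAMPLE_CMD_GET, .doit = example_get }, ... */
	};

	static struct genl_family example_fam __ro_after_init = {
		.name    = "example",
		.version = 1,
		.maxattr = 0,
		.module  = THIS_MODULE,
		.ops     = example_ops,
		.n_ops   = ARRAY_SIZE(example_ops),
	};

	static int __init example_init(void)
	{
		/* the family id is now assigned at registration time */
		return genl_register_family(&example_fam);
	}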
diff --combined drivers/net/phy/at803x.c index dd47b69,a52b560..c1e52b9 --- a/drivers/net/phy/at803x.c +++ b/drivers/net/phy/at803x.c @@@ -42,23 -42,27 +42,28 @@@ #define AT803X_MMD_ACCESS_CONTROL 0x0D #define AT803X_MMD_ACCESS_CONTROL_DATA 0x0E #define AT803X_FUNC_DATA 0x4003 + #define AT803X_REG_CHIP_CONFIG 0x1f + #define AT803X_BT_BX_REG_SEL 0x8000
#define AT803X_DEBUG_ADDR 0x1D #define AT803X_DEBUG_DATA 0x1E
+ #define AT803X_MODE_CFG_MASK 0x0F + #define AT803X_MODE_CFG_SGMII 0x01 + + #define AT803X_PSSR 0x11 /* PHY-Specific Status Register */ + #define AT803X_PSSR_MR_AN_COMPLETE 0x0200 + #define AT803X_DEBUG_REG_0 0x00 #define AT803X_DEBUG_RX_CLK_DLY_EN BIT(15)
#define AT803X_DEBUG_REG_5 0x05 #define AT803X_DEBUG_TX_CLK_DLY_EN BIT(8)
- #define AT803X_REG_CHIP_CONFIG 0x1f - #define AT803X_BT_BX_REG_SEL 0x8000 - #define ATH8030_PHY_ID 0x004dd076 #define ATH8031_PHY_ID 0x004dd074 #define ATH8035_PHY_ID 0x004dd072 +#define AT803X_PHY_ID_MASK 0xffffffef
MODULE_DESCRIPTION("Atheros 803x PHY driver"); MODULE_AUTHOR("Matus Ujhelyi"); @@@ -210,7 -214,6 +215,6 @@@ static int at803x_suspend(struct phy_de { int value; int wol_enabled; - int ccr;
mutex_lock(&phydev->lock);
@@@ -226,16 -229,6 +230,6 @@@
phy_write(phydev, MII_BMCR, value);
- if (phydev->interface != PHY_INTERFACE_MODE_SGMII) - goto done; - - /* also power-down SGMII interface */ - ccr = phy_read(phydev, AT803X_REG_CHIP_CONFIG); - phy_write(phydev, AT803X_REG_CHIP_CONFIG, ccr & ~AT803X_BT_BX_REG_SEL); - phy_write(phydev, MII_BMCR, phy_read(phydev, MII_BMCR) | BMCR_PDOWN); - phy_write(phydev, AT803X_REG_CHIP_CONFIG, ccr | AT803X_BT_BX_REG_SEL); - - done: mutex_unlock(&phydev->lock);
return 0; @@@ -244,7 -237,6 +238,6 @@@ static int at803x_resume(struct phy_device *phydev) { int value; - int ccr;
mutex_lock(&phydev->lock);
@@@ -252,17 -244,6 +245,6 @@@ value &= ~(BMCR_PDOWN | BMCR_ISOLATE); phy_write(phydev, MII_BMCR, value);
- if (phydev->interface != PHY_INTERFACE_MODE_SGMII) - goto done; - - /* also power-up SGMII interface */ - ccr = phy_read(phydev, AT803X_REG_CHIP_CONFIG); - phy_write(phydev, AT803X_REG_CHIP_CONFIG, ccr & ~AT803X_BT_BX_REG_SEL); - value = phy_read(phydev, MII_BMCR) & ~(BMCR_PDOWN | BMCR_ISOLATE); - phy_write(phydev, MII_BMCR, value); - phy_write(phydev, AT803X_REG_CHIP_CONFIG, ccr | AT803X_BT_BX_REG_SEL); - - done: mutex_unlock(&phydev->lock);
return 0; @@@ -382,12 -363,42 +364,42 @@@ static void at803x_link_change_notify(s } }
+ static int at803x_aneg_done(struct phy_device *phydev) + { + int ccr; + + int aneg_done = genphy_aneg_done(phydev); + if (aneg_done != BMSR_ANEGCOMPLETE) + return aneg_done; + + /* + * in SGMII mode, if copper side autoneg is successful, + * also check SGMII side autoneg result + */ + ccr = phy_read(phydev, AT803X_REG_CHIP_CONFIG); + if ((ccr & AT803X_MODE_CFG_MASK) != AT803X_MODE_CFG_SGMII) + return aneg_done; + + /* switch to SGMII/fiber page */ + phy_write(phydev, AT803X_REG_CHIP_CONFIG, ccr & ~AT803X_BT_BX_REG_SEL); + + /* check if the SGMII link is OK. */ + if (!(phy_read(phydev, AT803X_PSSR) & AT803X_PSSR_MR_AN_COMPLETE)) { + pr_warn("803x_aneg_done: SGMII link is not ok\n"); + aneg_done = 0; + } + /* switch back to copper page */ + phy_write(phydev, AT803X_REG_CHIP_CONFIG, ccr | AT803X_BT_BX_REG_SEL); + + return aneg_done; + } + static struct phy_driver at803x_driver[] = { { /* ATHEROS 8035 */ .phy_id = ATH8035_PHY_ID, .name = "Atheros 8035 ethernet", - .phy_id_mask = 0xffffffef, + .phy_id_mask = AT803X_PHY_ID_MASK, .probe = at803x_probe, .config_init = at803x_config_init, .set_wol = at803x_set_wol, @@@ -404,7 -415,7 +416,7 @@@ /* ATHEROS 8030 */ .phy_id = ATH8030_PHY_ID, .name = "Atheros 8030 ethernet", - .phy_id_mask = 0xffffffef, + .phy_id_mask = AT803X_PHY_ID_MASK, .probe = at803x_probe, .config_init = at803x_config_init, .link_change_notify = at803x_link_change_notify, @@@ -422,7 -433,7 +434,7 @@@ /* ATHEROS 8031 */ .phy_id = ATH8031_PHY_ID, .name = "Atheros 8031 ethernet", - .phy_id_mask = 0xffffffef, + .phy_id_mask = AT803X_PHY_ID_MASK, .probe = at803x_probe, .config_init = at803x_config_init, .set_wol = at803x_set_wol, @@@ -433,6 -444,7 +445,7 @@@ .flags = PHY_HAS_INTERRUPT, .config_aneg = genphy_config_aneg, .read_status = genphy_read_status, + .aneg_done = at803x_aneg_done, .ack_interrupt = &at803x_ack_interrupt, .config_intr = &at803x_config_intr, } }; @@@ -440,9 -452,9 +453,9 @@@ module_phy_driver(at803x_driver);
static struct mdio_device_id __maybe_unused atheros_tbl[] = { - { ATH8030_PHY_ID, 0xffffffef }, - { ATH8031_PHY_ID, 0xffffffef }, - { ATH8035_PHY_ID, 0xffffffef }, + { ATH8030_PHY_ID, AT803X_PHY_ID_MASK }, + { ATH8031_PHY_ID, AT803X_PHY_ID_MASK }, + { ATH8035_PHY_ID, AT803X_PHY_ID_MASK }, { } };
diff --combined drivers/net/vmxnet3/vmxnet3_drv.c index 0c36de1,ef83ae3..e34b129 --- a/drivers/net/vmxnet3/vmxnet3_drv.c +++ b/drivers/net/vmxnet3/vmxnet3_drv.c @@@ -2279,6 -2279,7 +2279,7 @@@ vmxnet3_set_mc(struct net_device *netde &adapter->shared->devRead.rxFilterConf; u8 *new_table = NULL; dma_addr_t new_table_pa = 0; + bool new_table_pa_valid = false; u32 new_mode = VMXNET3_RXM_UCAST;
if (netdev->flags & IFF_PROMISC) { @@@ -2307,13 -2308,15 +2308,15 @@@ new_table, sz, PCI_DMA_TODEVICE); + if (!dma_mapping_error(&adapter->pdev->dev, + new_table_pa)) { + new_mode |= VMXNET3_RXM_MCAST; + new_table_pa_valid = true; + rxConf->mfTablePA = cpu_to_le64( + new_table_pa); + } } - - if (!dma_mapping_error(&adapter->pdev->dev, - new_table_pa)) { - new_mode |= VMXNET3_RXM_MCAST; - rxConf->mfTablePA = cpu_to_le64(new_table_pa); - } else { + if (!new_table_pa_valid) { netdev_info(netdev, "failed to copy mcast list, setting ALL_MULTI\n"); new_mode |= VMXNET3_RXM_ALL_MULTI; @@@ -2338,7 -2341,7 +2341,7 @@@ VMXNET3_CMD_UPDATE_MAC_FILTERS); spin_unlock_irqrestore(&adapter->cmd_lock, flags);
- if (new_table_pa) + if (new_table_pa_valid) dma_unmap_single(&adapter->pdev->dev, new_table_pa, rxConf->mfTableLen, PCI_DMA_TODEVICE); kfree(new_table); @@@ -2969,6 -2972,9 +2972,6 @@@ vmxnet3_change_mtu(struct net_device *n struct vmxnet3_adapter *adapter = netdev_priv(netdev); int err = 0;
- if (new_mtu < VMXNET3_MIN_MTU || new_mtu > VMXNET3_MAX_MTU) - return -EINVAL; - netdev->mtu = new_mtu;
/* @@@ -3425,10 -3431,6 +3428,10 @@@ vmxnet3_probe_device(struct pci_dev *pd vmxnet3_set_ethtool_ops(netdev); netdev->watchdog_timeo = 5 * HZ;
+ /* MTU range: 60 - 9000 */ + netdev->min_mtu = VMXNET3_MIN_MTU; + netdev->max_mtu = VMXNET3_MAX_MTU; + INIT_WORK(&adapter->work, vmxnet3_reset_work); set_bit(VMXNET3_STATE_BIT_QUIESCED, &adapter->state);
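The multicast-table fix earlier in this vmxnet3 hunk is worth spelling out: a DMA address of 0 can be valid, so instead of testing new_table_pa itself the code records an explicit validity flag immediately after dma_map_single(), and keys both the device programming and the eventual unmap off that flag. The discipline, as a sketch with illustrative buf/len:

	static int map_use_unmap(struct device *dev, void *buf, size_t len)
	{
		dma_addr_t pa;
		bool mapped = false;

		pa = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
		if (!dma_mapping_error(dev, pa))
			mapped = true;	/* only now is 'pa' meaningful */

		/* ... hand 'pa' to the device only when 'mapped' ... */

		if (mapped)
			dma_unmap_single(dev, pa, len, DMA_TO_DEVICE);
		return mapped ? 0 : -ENOMEM;
	}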
diff --combined drivers/net/vxlan.c index c0170b6,f3c2fa3..cb5cc7c --- a/drivers/net/vxlan.c +++ b/drivers/net/vxlan.c @@@ -583,7 -583,7 +583,7 @@@ static struct sk_buff **vxlan_gro_recei } }
- pp = eth_gro_receive(head, skb); + pp = call_gro_receive(eth_gro_receive, head, skb); flush = 0;
out: @@@ -943,17 -943,20 +943,20 @@@ static bool vxlan_snoop(struct net_devi static bool vxlan_group_used(struct vxlan_net *vn, struct vxlan_dev *dev) { struct vxlan_dev *vxlan; + struct vxlan_sock *sock4; + struct vxlan_sock *sock6 = NULL; unsigned short family = dev->default_dst.remote_ip.sa.sa_family;
+ sock4 = rtnl_dereference(dev->vn4_sock); + /* The vxlan_sock is only used by dev, leaving group has * no effect on other vxlan devices. */ - if (family == AF_INET && dev->vn4_sock && - atomic_read(&dev->vn4_sock->refcnt) == 1) + if (family == AF_INET && sock4 && atomic_read(&sock4->refcnt) == 1) return false; #if IS_ENABLED(CONFIG_IPV6) - if (family == AF_INET6 && dev->vn6_sock && - atomic_read(&dev->vn6_sock->refcnt) == 1) + sock6 = rtnl_dereference(dev->vn6_sock); + if (family == AF_INET6 && sock6 && atomic_read(&sock6->refcnt) == 1) return false; #endif
@@@ -961,10 -964,12 +964,12 @@@ if (!netif_running(vxlan->dev) || vxlan == dev) continue;
- if (family == AF_INET && vxlan->vn4_sock != dev->vn4_sock) + if (family == AF_INET && + rtnl_dereference(vxlan->vn4_sock) != sock4) continue; #if IS_ENABLED(CONFIG_IPV6) - if (family == AF_INET6 && vxlan->vn6_sock != dev->vn6_sock) + if (family == AF_INET6 && + rtnl_dereference(vxlan->vn6_sock) != sock6) continue; #endif
@@@ -1005,22 -1010,25 +1010,25 @@@ static bool __vxlan_sock_release_prep(s
static void vxlan_sock_release(struct vxlan_dev *vxlan) { - bool ipv4 = __vxlan_sock_release_prep(vxlan->vn4_sock); + struct vxlan_sock *sock4 = rtnl_dereference(vxlan->vn4_sock); #if IS_ENABLED(CONFIG_IPV6) - bool ipv6 = __vxlan_sock_release_prep(vxlan->vn6_sock); + struct vxlan_sock *sock6 = rtnl_dereference(vxlan->vn6_sock); + + rcu_assign_pointer(vxlan->vn6_sock, NULL); #endif
+ rcu_assign_pointer(vxlan->vn4_sock, NULL); synchronize_net();
- if (ipv4) { - udp_tunnel_sock_release(vxlan->vn4_sock->sock); - kfree(vxlan->vn4_sock); + if (__vxlan_sock_release_prep(sock4)) { + udp_tunnel_sock_release(sock4->sock); + kfree(sock4); }
#if IS_ENABLED(CONFIG_IPV6) - if (ipv6) { - udp_tunnel_sock_release(vxlan->vn6_sock->sock); - kfree(vxlan->vn6_sock); + if (__vxlan_sock_release_prep(sock6)) { + udp_tunnel_sock_release(sock6->sock); + kfree(sock6); } #endif } @@@ -1036,18 -1044,21 +1044,21 @@@ static int vxlan_igmp_join(struct vxlan int ret = -EINVAL;
if (ip->sa.sa_family == AF_INET) { + struct vxlan_sock *sock4 = rtnl_dereference(vxlan->vn4_sock); struct ip_mreqn mreq = { .imr_multiaddr.s_addr = ip->sin.sin_addr.s_addr, .imr_ifindex = ifindex, };
- sk = vxlan->vn4_sock->sock->sk; + sk = sock4->sock->sk; lock_sock(sk); ret = ip_mc_join_group(sk, &mreq); release_sock(sk); #if IS_ENABLED(CONFIG_IPV6) } else { - sk = vxlan->vn6_sock->sock->sk; + struct vxlan_sock *sock6 = rtnl_dereference(vxlan->vn6_sock); + + sk = sock6->sock->sk; lock_sock(sk); ret = ipv6_stub->ipv6_sock_mc_join(sk, ifindex, &ip->sin6.sin6_addr); @@@ -1067,18 -1078,21 +1078,21 @@@ static int vxlan_igmp_leave(struct vxla int ret = -EINVAL;
if (ip->sa.sa_family == AF_INET) { + struct vxlan_sock *sock4 = rtnl_dereference(vxlan->vn4_sock); struct ip_mreqn mreq = { .imr_multiaddr.s_addr = ip->sin.sin_addr.s_addr, .imr_ifindex = ifindex, };
- sk = vxlan->vn4_sock->sock->sk; + sk = sock4->sock->sk; lock_sock(sk); ret = ip_mc_leave_group(sk, &mreq); release_sock(sk); #if IS_ENABLED(CONFIG_IPV6) } else { - sk = vxlan->vn6_sock->sock->sk; + struct vxlan_sock *sock6 = rtnl_dereference(vxlan->vn6_sock); + + sk = sock6->sock->sk; lock_sock(sk); ret = ipv6_stub->ipv6_sock_mc_drop(sk, ifindex, &ip->sin6.sin6_addr); @@@ -1828,11 -1842,15 +1842,15 @@@ static struct dst_entry *vxlan6_get_rou struct dst_cache *dst_cache, const struct ip_tunnel_info *info) { + struct vxlan_sock *sock6 = rcu_dereference(vxlan->vn6_sock); bool use_cache = ip_tunnel_dst_cache_usable(skb, info); struct dst_entry *ndst; struct flowi6 fl6; int err;
+ if (!sock6) + return ERR_PTR(-EIO); + if (tos && !info) use_cache = false; if (use_cache) { @@@ -1850,7 -1868,7 +1868,7 @@@ fl6.flowi6_proto = IPPROTO_UDP;
err = ipv6_stub->ipv6_dst_lookup(vxlan->net, - vxlan->vn6_sock->sock->sk, + sock6->sock->sk, &ndst, &fl6); if (err < 0) return ERR_PTR(err); @@@ -1995,9 -2013,11 +2013,11 @@@ static void vxlan_xmit_one(struct sk_bu }
if (dst->sa.sa_family == AF_INET) { - if (!vxlan->vn4_sock) + struct vxlan_sock *sock4 = rcu_dereference(vxlan->vn4_sock); + + if (!sock4) goto drop; - sk = vxlan->vn4_sock->sock->sk; + sk = sock4->sock->sk;
rt = vxlan_get_route(vxlan, skb, rdst ? rdst->remote_ifindex : 0, tos, @@@ -2050,12 -2070,13 +2070,13 @@@ src_port, dst_port, xnet, !udp_sum); #if IS_ENABLED(CONFIG_IPV6) } else { + struct vxlan_sock *sock6 = rcu_dereference(vxlan->vn6_sock); struct dst_entry *ndst; u32 rt6i_flags;
- if (!vxlan->vn6_sock) + if (!sock6) goto drop; - sk = vxlan->vn6_sock->sock->sk; + sk = sock6->sock->sk;
ndst = vxlan6_get_route(vxlan, skb, rdst ? rdst->remote_ifindex : 0, tos, @@@ -2367,31 -2388,43 +2388,31 @@@ static void vxlan_set_multicast_list(st { }
-static int __vxlan_change_mtu(struct net_device *dev, - struct net_device *lowerdev, - struct vxlan_rdst *dst, int new_mtu, bool strict) +static int vxlan_change_mtu(struct net_device *dev, int new_mtu) { - int max_mtu = IP_MAX_MTU; - - if (lowerdev) - max_mtu = lowerdev->mtu; + struct vxlan_dev *vxlan = netdev_priv(dev); + struct vxlan_rdst *dst = &vxlan->default_dst; + struct net_device *lowerdev = __dev_get_by_index(vxlan->net, + dst->remote_ifindex); + bool use_ipv6 = false;
if (dst->remote_ip.sa.sa_family == AF_INET6) - max_mtu -= VXLAN6_HEADROOM; - else - max_mtu -= VXLAN_HEADROOM; - - if (new_mtu < 68) - return -EINVAL; + use_ipv6 = true;
- if (new_mtu > max_mtu) { - if (strict) + /* This check is different than dev->max_mtu, because it looks at + * the lowerdev->mtu, rather than the static dev->max_mtu + */ + if (lowerdev) { + int max_mtu = lowerdev->mtu - + (use_ipv6 ? VXLAN6_HEADROOM : VXLAN_HEADROOM); + if (new_mtu > max_mtu) return -EINVAL; - - new_mtu = max_mtu; }
dev->mtu = new_mtu; return 0; }
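The rewritten vxlan_change_mtu() checks against the lower device's current MTU minus the encapsulation overhead, rather than against the static dev->max_mtu published at setup. A worked example with illustrative numbers:

	/* With a 1500-byte lower device:
	 *   VXLAN_HEADROOM  = 20 (IPv4) + 8 (UDP) + 8 (VXLAN) + 14 (Eth) = 50
	 *   VXLAN6_HEADROOM = 40 (IPv6) + 8 (UDP) + 8 (VXLAN) + 14 (Eth) = 70
	 */
	int lower_mtu = 1500;
	int max_v4 = lower_mtu - 50;	/* 1450: largest new_mtu over IPv4 */
	int max_v6 = lower_mtu - 70;	/* 1430: largest new_mtu over IPv6 */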
-static int vxlan_change_mtu(struct net_device *dev, int new_mtu) -{ - struct vxlan_dev *vxlan = netdev_priv(dev); - struct vxlan_rdst *dst = &vxlan->default_dst; - struct net_device *lowerdev = __dev_get_by_index(vxlan->net, - dst->remote_ifindex); - return __vxlan_change_mtu(dev, lowerdev, dst, new_mtu, true); -} - static int vxlan_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb) { struct vxlan_dev *vxlan = netdev_priv(dev); @@@ -2403,9 -2436,10 +2424,10 @@@ dport = info->key.tp_dst ? : vxlan->cfg.dst_port;
if (ip_tunnel_info_af(info) == AF_INET) { + struct vxlan_sock *sock4 = rcu_dereference(vxlan->vn4_sock); struct rtable *rt;
- if (!vxlan->vn4_sock) + if (!sock4) return -EINVAL; rt = vxlan_get_route(vxlan, skb, 0, info->key.tos, info->key.u.ipv4.dst, @@@ -2417,8 -2451,6 +2439,6 @@@ #if IS_ENABLED(CONFIG_IPV6) struct dst_entry *ndst;
- if (!vxlan->vn6_sock) - return -EINVAL; ndst = vxlan6_get_route(vxlan, skb, 0, info->key.tos, info->key.label, &info->key.u.ipv6.dst, &info->key.u.ipv6.src, NULL, info); @@@ -2728,10 -2760,10 +2748,10 @@@ static int __vxlan_sock_add(struct vxla return PTR_ERR(vs); #if IS_ENABLED(CONFIG_IPV6) if (ipv6) - vxlan->vn6_sock = vs; + rcu_assign_pointer(vxlan->vn6_sock, vs); else #endif - vxlan->vn4_sock = vs; + rcu_assign_pointer(vxlan->vn4_sock, vs); vxlan_vs_add_dev(vs, vxlan); return 0; } @@@ -2742,9 -2774,9 +2762,9 @@@ static int vxlan_sock_add(struct vxlan_ bool metadata = vxlan->flags & VXLAN_F_COLLECT_METADATA; int ret = 0;
- vxlan->vn4_sock = NULL; + RCU_INIT_POINTER(vxlan->vn4_sock, NULL); #if IS_ENABLED(CONFIG_IPV6) - vxlan->vn6_sock = NULL; + RCU_INIT_POINTER(vxlan->vn6_sock, NULL); if (ipv6 || metadata) ret = __vxlan_sock_add(vxlan, true); #endif @@@ -2783,10 -2815,6 +2803,10 @@@ static int vxlan_dev_configure(struct n vxlan_ether_setup(dev); }
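With vn4_sock/vn6_sock now RCU-managed, writers publish through rcu_assign_pointer() (or RCU_INIT_POINTER() when storing NULL during setup), and the data path must read through rcu_dereference() and tolerate NULL, since the socket may be torn down concurrently. A minimal sketch of the reader side, assuming the caller already holds rcu_read_lock(); the helper name is hypothetical:

/* Hypothetical reader mirroring the rcu_dereference(vxlan->vn4_sock)
 * pattern used in vxlan_xmit_one(); must run inside an RCU read-side
 * critical section.
 */
static struct sock *example_vxlan_v4_sk(struct vxlan_dev *vxlan)
{
        struct vxlan_sock *sock4 = rcu_dereference(vxlan->vn4_sock);

        return sock4 ? sock4->sock->sk : NULL;   /* NULL: already gone */
}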
+ /* MTU range: 68 - 65535 */ + dev->min_mtu = ETH_MIN_MTU; + dev->max_mtu = ETH_MAX_MTU; + vxlan->net = src_net;
dst->remote_vni = conf->vni; @@@ -2830,8 -2858,7 +2850,8 @@@ #endif
if (!conf->mtu) - dev->mtu = lowerdev->mtu - (use_ipv6 ? VXLAN6_HEADROOM : VXLAN_HEADROOM); + dev->mtu = lowerdev->mtu - + (use_ipv6 ? VXLAN6_HEADROOM : VXLAN_HEADROOM);
needed_headroom = lowerdev->hard_header_len; } else if (vxlan_addr_multicast(&dst->remote_ip)) { @@@ -2840,20 -2867,9 +2860,20 @@@ }
if (conf->mtu) { - err = __vxlan_change_mtu(dev, lowerdev, dst, conf->mtu, false); - if (err) - return err; + int max_mtu = ETH_MAX_MTU; + + if (lowerdev) + max_mtu = lowerdev->mtu; + + max_mtu -= (use_ipv6 ? VXLAN6_HEADROOM : VXLAN_HEADROOM); + + if (conf->mtu < dev->min_mtu || conf->mtu > dev->max_mtu) + return -EINVAL; + + dev->mtu = conf->mtu; + + if (conf->mtu > max_mtu) + dev->mtu = max_mtu; }
if (use_ipv6 || conf->flags & VXLAN_F_COLLECT_METADATA) diff --combined drivers/target/target_core_user.c index 0f173bf,4756250..2b3c856 --- a/drivers/target/target_core_user.c +++ b/drivers/target/target_core_user.c @@@ -96,7 -96,7 +96,7 @@@ struct tcmu_dev size_t dev_size; u32 cmdr_size; u32 cmdr_last_cleaned; - /* Offset of data ring from start of mb */ + /* Offset of data area from start of mb */ /* Must add data_off and mb_addr to get the address */ size_t data_off; size_t data_size; @@@ -147,8 -147,8 +147,8 @@@ static const struct genl_multicast_grou };
/* Our generic netlink family */ -static struct genl_family tcmu_genl_family = { - .id = GENL_ID_GENERATE, +static struct genl_family tcmu_genl_family __ro_after_init = { + .module = THIS_MODULE, .hdrsize = 0, .name = "TCM-USER", .version = 1, @@@ -349,7 -349,7 +349,7 @@@ static inline size_t spc_bitmap_free(un
/* * We can't queue a command until we have space available on the cmd ring *and* - * space available on the data ring. + * space available on the data area. * * Called with ring lock held. */ @@@ -389,7 -389,8 +389,8 @@@ static bool is_ring_space_avail(struct return true; }
- static int tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd) + static sense_reason_t + tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd) { struct tcmu_dev *udev = tcmu_cmd->tcmu_dev; struct se_cmd *se_cmd = tcmu_cmd->se_cmd; @@@ -405,7 -406,7 +406,7 @@@ DECLARE_BITMAP(old_bitmap, DATA_BLOCK_BITS);
if (test_bit(TCMU_DEV_BIT_BROKEN, &udev->flags)) - return -EINVAL; + return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
/* * Must be a certain minimum size for response sense info, but @@@ -432,11 -433,14 +433,14 @@@ BUG_ON(!(se_cmd->t_bidi_data_sg && se_cmd->t_bidi_data_nents)); data_length += se_cmd->t_bidi_data_sg->length; } - if ((command_size > (udev->cmdr_size / 2)) - || data_length > udev->data_size) - pr_warn("TCMU: Request of size %zu/%zu may be too big for %u/%zu " - "cmd/data ring buffers\n", command_size, data_length, + if ((command_size > (udev->cmdr_size / 2)) || + data_length > udev->data_size) { + pr_warn("TCMU: Request of size %zu/%zu is too big for %u/%zu " + "cmd ring/data area\n", command_size, data_length, udev->cmdr_size, udev->data_size); + spin_unlock_irq(&udev->cmdr_lock); + return TCM_INVALID_CDB_FIELD; + }
while (!is_ring_space_avail(udev, command_size, data_length)) { int ret; @@@ -450,7 -454,7 +454,7 @@@ finish_wait(&udev->wait_cmdr, &__wait); if (!ret) { pr_warn("tcmu: command timed out\n"); - return -ETIMEDOUT; + return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; }
spin_lock_irq(&udev->cmdr_lock); @@@ -487,9 -491,7 +491,7 @@@
bitmap_copy(old_bitmap, udev->data_bitmap, DATA_BLOCK_BITS);
- /* - * Fix up iovecs, and handle if allocation in data ring wrapped. - */ + /* Handle allocating space from the data area */ iov = &entry->req.iov[0]; iov_cnt = 0; copy_to_data_area = (se_cmd->data_direction == DMA_TO_DEVICE @@@ -526,10 -528,11 +528,11 @@@ mod_timer(&udev->timeout, round_jiffies_up(jiffies + msecs_to_jiffies(TCMU_TIME_OUT)));
- return 0; + return TCM_NO_SENSE; }
- static int tcmu_queue_cmd(struct se_cmd *se_cmd) + static sense_reason_t + tcmu_queue_cmd(struct se_cmd *se_cmd) { struct se_device *se_dev = se_cmd->se_dev; struct tcmu_dev *udev = TCMU_DEV(se_dev); @@@ -538,10 -541,10 +541,10 @@@
tcmu_cmd = tcmu_alloc_cmd(se_cmd); if (!tcmu_cmd) - return -ENOMEM; + return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
ret = tcmu_queue_cmd_ring(tcmu_cmd); - if (ret < 0) { + if (ret != TCM_NO_SENSE) { pr_err("TCMU: Could not queue command\n"); spin_lock_irq(&udev->commands_lock); idr_remove(&udev->commands, tcmu_cmd->cmd_id); @@@ -561,7 -564,7 +564,7 @@@ static void tcmu_handle_completion(stru if (test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags)) { /* * cmd has been completed already from timeout, just reclaim - * data ring space and free cmd + * data area space and free cmd */ free_data_area(udev, cmd);
@@@ -1129,20 -1132,9 +1132,9 @@@ static sector_t tcmu_get_blocks(struct }
static sense_reason_t - tcmu_pass_op(struct se_cmd *se_cmd) - { - int ret = tcmu_queue_cmd(se_cmd); - - if (ret != 0) - return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; - else - return TCM_NO_SENSE; - } - - static sense_reason_t tcmu_parse_cdb(struct se_cmd *cmd) { - return passthrough_parse_cdb(cmd, tcmu_pass_op); + return passthrough_parse_cdb(cmd, tcmu_queue_cmd); }
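The tcmu conversion above has the queueing path return a sense_reason_t directly, which lets passthrough_parse_cdb() take tcmu_queue_cmd() itself instead of going through the tcmu_pass_op() shim that translated errnos into sense codes. A minimal sketch of the resulting convention; every name except the TCM_* constants is hypothetical:

/* Hypothetical backend op: failures are reported as SCSI sense reasons
 * (TCM_*), never as negative errnos, so callers need no translation.
 */
static sense_reason_t example_queue_cmd(struct se_cmd *se_cmd)
{
        struct example_cmd *cmd = example_alloc_cmd(se_cmd);

        if (!cmd)
                return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;

        /* ... place the command on the ring ... */

        return TCM_NO_SENSE;
}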
static const struct target_backend_ops tcmu_ops = { diff --combined drivers/usb/gadget/function/u_ether.c index 39a6df1,fe18116..686067d --- a/drivers/usb/gadget/function/u_ether.c +++ b/drivers/usb/gadget/function/u_ether.c @@@ -142,6 -142,15 +142,6 @@@ static inline int qlen(struct usb_gadge
/* NETWORK DRIVER HOOKUP (to the layer above this driver) */
-static int ueth_change_mtu(struct net_device *net, int new_mtu) -{ - if (new_mtu <= ETH_HLEN || new_mtu > GETHER_MAX_ETH_FRAME_LEN) - return -ERANGE; - net->mtu = new_mtu; - - return 0; -} - static void eth_get_drvinfo(struct net_device *net, struct ethtool_drvinfo *p) { struct eth_dev *dev = netdev_priv(net); @@@ -581,8 -590,9 +581,9 @@@ static netdev_tx_t eth_start_xmit(struc
/* throttle high/super speed IRQ rate back slightly */ if (gadget_is_dualspeed(dev->gadget)) - req->no_interrupt = (dev->gadget->speed == USB_SPEED_HIGH || - dev->gadget->speed == USB_SPEED_SUPER) + req->no_interrupt = (((dev->gadget->speed == USB_SPEED_HIGH || + dev->gadget->speed == USB_SPEED_SUPER)) && + !list_empty(&dev->tx_reqs)) ? ((atomic_read(&dev->tx_qlen) % dev->qmult) != 0) : 0;
@@@ -727,6 -737,7 +728,6 @@@ static const struct net_device_ops eth_ .ndo_open = eth_open, .ndo_stop = eth_stop, .ndo_start_xmit = eth_start_xmit, - .ndo_change_mtu = ueth_change_mtu, .ndo_set_mac_address = eth_mac_addr, .ndo_validate_addr = eth_validate_addr, }; @@@ -789,10 -800,6 +790,10 @@@ struct eth_dev *gether_setup_name(struc
net->ethtool_ops = &ops;
+ /* MTU range: 14 - 15412 */ + net->min_mtu = ETH_HLEN; + net->max_mtu = GETHER_MAX_ETH_FRAME_LEN; + dev->gadget = g; SET_NETDEV_DEV(net, &g->dev); SET_NETDEV_DEVTYPE(net, &gadget_type); diff --combined include/linux/netdevice.h index 458c876,91ee364..20ce8df --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@@ -1456,6 -1456,7 +1456,6 @@@ enum netdev_priv_flags * @ptype_specific: Device-specific, protocol-specific packet handlers * * @adj_list: Directly linked devices, like slaves for bonding - * @all_adj_list: All linked devices, *including* neighbours * @features: Currently active device features * @hw_features: User-changeable features * @@@ -1505,8 -1506,6 +1505,8 @@@ * @if_port: Selectable AUI, TP, ... * @dma: DMA channel * @mtu: Interface MTU value + * @min_mtu: Interface Minimum MTU value + * @max_mtu: Interface Maximum MTU value * @type: Interface hardware type * @hard_header_len: Maximum hardware header length. * @@@ -1674,6 -1673,11 +1674,6 @@@ struct net_device struct list_head lower; } adj_list;
- struct { - struct list_head upper; - struct list_head lower; - } all_adj_list; - netdev_features_t features; netdev_features_t hw_features; netdev_features_t wanted_features; @@@ -1722,8 -1726,6 +1722,8 @@@ unsigned char dma;
unsigned int mtu; + unsigned int min_mtu; + unsigned int max_mtu; unsigned short type; unsigned short hard_header_len;
@@@ -2167,7 -2169,10 +2167,10 @@@ struct napi_gro_cb /* Used to determine if flush_id can be ignored */ u8 is_atomic:1;
- /* 5 bit hole */ + /* Number of gro_receive callbacks this packet already went through */ + u8 recursion_counter:4; + + /* 1 bit hole */
/* used to support CHECKSUM_COMPLETE for tunneling protocols */ __wsum csum; @@@ -2178,6 -2183,40 +2181,40 @@@
#define NAPI_GRO_CB(skb) ((struct napi_gro_cb *)(skb)->cb)
+ #define GRO_RECURSION_LIMIT 15 + static inline int gro_recursion_inc_test(struct sk_buff *skb) + { + return ++NAPI_GRO_CB(skb)->recursion_counter == GRO_RECURSION_LIMIT; + } + + typedef struct sk_buff **(*gro_receive_t)(struct sk_buff **, struct sk_buff *); + static inline struct sk_buff **call_gro_receive(gro_receive_t cb, + struct sk_buff **head, + struct sk_buff *skb) + { + if (unlikely(gro_recursion_inc_test(skb))) { + NAPI_GRO_CB(skb)->flush |= 1; + return NULL; + } + + return cb(head, skb); + } + + typedef struct sk_buff **(*gro_receive_sk_t)(struct sock *, struct sk_buff **, + struct sk_buff *); + static inline struct sk_buff **call_gro_receive_sk(gro_receive_sk_t cb, + struct sock *sk, + struct sk_buff **head, + struct sk_buff *skb) + { + if (unlikely(gro_recursion_inc_test(skb))) { + NAPI_GRO_CB(skb)->flush |= 1; + return NULL; + } + + return cb(sk, head, skb); + } + struct packet_type { __be16 type; /* This is really htons(ether_type). */ struct net_device *dev; /* NULL is wildcarded here */ @@@ -2647,6 -2686,71 +2684,6 @@@ static inline void skb_gro_remcsum_clea remcsum_unadjust((__sum16 *)ptr, grc->delta); }
-struct skb_csum_offl_spec { - __u16 ipv4_okay:1, - ipv6_okay:1, - encap_okay:1, - ip_options_okay:1, - ext_hdrs_okay:1, - tcp_okay:1, - udp_okay:1, - sctp_okay:1, - vlan_okay:1, - no_encapped_ipv6:1, - no_not_encapped:1; -}; - -bool __skb_csum_offload_chk(struct sk_buff *skb, - const struct skb_csum_offl_spec *spec, - bool *csum_encapped, - bool csum_help); - -static inline bool skb_csum_offload_chk(struct sk_buff *skb, - const struct skb_csum_offl_spec *spec, - bool *csum_encapped, - bool csum_help) -{ - if (skb->ip_summed != CHECKSUM_PARTIAL) - return false; - - return __skb_csum_offload_chk(skb, spec, csum_encapped, csum_help); -} - -static inline bool skb_csum_offload_chk_help(struct sk_buff *skb, - const struct skb_csum_offl_spec *spec) -{ - bool csum_encapped; - - return skb_csum_offload_chk(skb, spec, &csum_encapped, true); -} - -static inline bool skb_csum_off_chk_help_cmn(struct sk_buff *skb) -{ - static const struct skb_csum_offl_spec csum_offl_spec = { - .ipv4_okay = 1, - .ip_options_okay = 1, - .ipv6_okay = 1, - .vlan_okay = 1, - .tcp_okay = 1, - .udp_okay = 1, - }; - - return skb_csum_offload_chk_help(skb, &csum_offl_spec); -} - -static inline bool skb_csum_off_chk_help_cmn_v4_only(struct sk_buff *skb) -{ - static const struct skb_csum_offl_spec csum_offl_spec = { - .ipv4_okay = 1, - .ip_options_okay = 1, - .tcp_okay = 1, - .udp_okay = 1, - .vlan_okay = 1, - }; - - return skb_csum_offload_chk_help(skb, &csum_offl_spec); -} - static inline int dev_hard_header(struct sk_buff *skb, struct net_device *dev, unsigned short type, const void *daddr, const void *saddr, @@@ -3765,13 -3869,12 +3802,13 @@@ struct net_device *netdev_all_upper_get updev; \ updev = netdev_upper_get_next_dev_rcu(dev, &(iter)))
-/* iterate through upper list, must be called under RCU read lock */ -#define netdev_for_each_all_upper_dev_rcu(dev, updev, iter) \ - for (iter = &(dev)->all_adj_list.upper, \ - updev = netdev_all_upper_get_next_dev_rcu(dev, &(iter)); \ - updev; \ - updev = netdev_all_upper_get_next_dev_rcu(dev, &(iter))) +int netdev_walk_all_upper_dev_rcu(struct net_device *dev, + int (*fn)(struct net_device *upper_dev, + void *data), + void *data); + +bool netdev_has_upper_dev_all_rcu(struct net_device *dev, + struct net_device *upper_dev);
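netdev_walk_all_upper_dev_rcu() replaces the removed netdev_for_each_all_upper_dev_rcu() macro with a callback-driven walk of the entire upper chain. A minimal usage sketch under RCU; the counting callback is hypothetical:

/* Hypothetical callback: a non-zero return stops the walk early. */
static int example_count_upper(struct net_device *upper_dev, void *data)
{
        int *count = data;

        (*count)++;
        return 0;
}

static int example_upper_count(struct net_device *dev)
{
        int count = 0;

        rcu_read_lock();
        netdev_walk_all_upper_dev_rcu(dev, example_count_upper, &count);
        rcu_read_unlock();

        return count;
}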
void *netdev_lower_get_next_private(struct net_device *dev, struct list_head **iter); @@@ -3804,14 -3907,17 +3841,14 @@@ struct net_device *netdev_all_lower_get struct net_device *netdev_all_lower_get_next_rcu(struct net_device *dev, struct list_head **iter);
-#define netdev_for_each_all_lower_dev(dev, ldev, iter) \ - for (iter = (dev)->all_adj_list.lower.next, \ - ldev = netdev_all_lower_get_next(dev, &(iter)); \ - ldev; \ - ldev = netdev_all_lower_get_next(dev, &(iter))) - -#define netdev_for_each_all_lower_dev_rcu(dev, ldev, iter) \ - for (iter = &(dev)->all_adj_list.lower, \ - ldev = netdev_all_lower_get_next_rcu(dev, &(iter)); \ - ldev; \ - ldev = netdev_all_lower_get_next_rcu(dev, &(iter))) +int netdev_walk_all_lower_dev(struct net_device *dev, + int (*fn)(struct net_device *lower_dev, + void *data), + void *data); +int netdev_walk_all_lower_dev_rcu(struct net_device *dev, + int (*fn)(struct net_device *lower_dev, + void *data), + void *data);
void *netdev_adjacent_get_private(struct list_head *adj_list); void *netdev_lower_get_first_private_rcu(struct net_device *dev); @@@ -3888,6 -3994,19 +3925,6 @@@ static inline bool can_checksum_protoco } }
-/* Map an ethertype into IP protocol if possible */ -static inline int eproto_to_ipproto(int eproto) -{ - switch (eproto) { - case htons(ETH_P_IP): - return IPPROTO_IP; - case htons(ETH_P_IPV6): - return IPPROTO_IPV6; - default: - return -1; - } -} - #ifdef CONFIG_BUG void netdev_rx_csum_fault(struct net_device *dev); #else diff --combined include/linux/skbuff.h index 663fda2,32810f2..cc6e23e --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@@ -936,6 -936,7 +936,7 @@@ struct sk_buff_fclones
/** * skb_fclone_busy - check if fclone is busy + * @sk: socket * @skb: buffer * * Returns true if skb is a fast clone, and its clone is not freed. @@@ -1086,7 -1087,7 +1087,7 @@@ __skb_set_sw_hash(struct sk_buff *skb, }
void __skb_get_hash(struct sk_buff *skb); -u32 __skb_get_hash_symmetric(struct sk_buff *skb); +u32 __skb_get_hash_symmetric(const struct sk_buff *skb); u32 skb_get_poff(const struct sk_buff *skb); u32 __skb_get_poff(const struct sk_buff *skb, void *data, const struct flow_keys *keys, int hlen); diff --combined include/net/cfg80211.h index c575583,14b51d7..2019310 --- a/include/net/cfg80211.h +++ b/include/net/cfg80211.h @@@ -772,30 -772,6 +772,30 @@@ struct cfg80211_csa_settings };
/** + * struct iface_combination_params - input parameters for interface combinations + * + * Used to pass interface combination parameters + * + * @num_different_channels: the number of different channels we want + * to use for verification + * @radar_detect: a bitmap where each bit corresponds to a channel + * width where radar detection is needed, as in the definition of + * &struct ieee80211_iface_combination.@radar_detect_widths + * @iftype_num: array with the number of interfaces of each interface + * type. The index is the interface type as specified in &enum + * nl80211_iftype. + * @new_beacon_int: set this to the beacon interval of a new interface + * that's not operating yet, if such is to be checked as part of + * the verification + */ +struct iface_combination_params { + int num_different_channels; + u8 radar_detect; + int iftype_num[NUM_NL80211_IFTYPES]; + u32 new_beacon_int; +}; + +/** * enum station_parameters_apply_mask - station parameter values to apply * @STATION_PARAM_APPLY_UAPSD: apply new uAPSD parameters (uapsd_queues, max_sp) * @STATION_PARAM_APPLY_CAPABILITY: apply new capability @@@ -820,9 -796,9 +820,9 @@@ enum station_parameters_apply_mask * (or NULL for no change) * @supported_rates_len: number of supported rates * @sta_flags_mask: station flags that changed - * (bitmask of BIT(NL80211_STA_FLAG_...)) + * (bitmask of BIT(%NL80211_STA_FLAG_...)) * @sta_flags_set: station flags values - * (bitmask of BIT(NL80211_STA_FLAG_...)) + * (bitmask of BIT(%NL80211_STA_FLAG_...)) * @listen_interval: listen interval or -1 for no change * @aid: AID or zero for no change * @peer_aid: mesh peer AID or zero for no change @@@ -1785,11 -1761,9 +1785,11 @@@ const u8 *ieee80211_bss_get_ie(struct c * @key_len: length of WEP key for shared key authentication * @key_idx: index of WEP key for shared key authentication * @key: WEP key for shared key authentication - * @sae_data: Non-IE data to use with SAE or %NULL. This starts with - * Authentication transaction sequence number field. - * @sae_data_len: Length of sae_data buffer in octets + * @auth_data: Fields and elements in Authentication frames. This contains + * the authentication frame body (non-IE and IE data), excluding the + * Authentication algorithm number, i.e., starting at the Authentication + * transaction sequence number field. + * @auth_data_len: Length of auth_data buffer in octets */ struct cfg80211_auth_request { struct cfg80211_bss *bss; @@@ -1798,8 -1772,8 +1798,8 @@@ enum nl80211_auth_type auth_type; const u8 *key; u8 key_len, key_idx; - const u8 *sae_data; - size_t sae_data_len; + const u8 *auth_data; + size_t auth_data_len; };
/** @@@ -1840,12 -1814,6 +1840,12 @@@ enum cfg80211_assoc_req_flags * @ht_capa_mask: The bits of ht_capa which are to be used. * @vht_capa: VHT capability override * @vht_capa_mask: VHT capability mask indicating which fields to use + * @fils_kek: FILS KEK for protecting (Re)Association Request/Response frame or + * %NULL if FILS is not used. + * @fils_kek_len: Length of fils_kek in octets + * @fils_nonces: FILS nonces (part of AAD) for protecting (Re)Association + * Request/Response frame or %NULL if FILS is not used. This field starts + * with 16 octets of STA Nonce followed by 16 octets of AP Nonce. */ struct cfg80211_assoc_request { struct cfg80211_bss *bss; @@@ -1857,9 -1825,6 +1857,9 @@@ struct ieee80211_ht_cap ht_capa; struct ieee80211_ht_cap ht_capa_mask; struct ieee80211_vht_cap vht_capa, vht_capa_mask; + const u8 *fils_kek; + size_t fils_kek_len; + const u8 *fils_nonces; };
/** @@@ -2051,18 -2016,6 +2051,18 @@@ struct cfg80211_connect_params };
/** + * enum cfg80211_connect_params_changed - Connection parameters being updated + * + * This enum provides information of all connect parameters that + * have to be updated as part of update_connect_params() call. + * + * @UPDATE_ASSOC_IES: Indicates whether association request IEs are updated + */ +enum cfg80211_connect_params_changed { + UPDATE_ASSOC_IES = BIT(0), +}; + +/** * enum wiphy_params_flags - set_wiphy_params bitfield values * @WIPHY_PARAM_RETRY_SHORT: wiphy->retry_short has changed * @WIPHY_PARAM_RETRY_LONG: wiphy->retry_long has changed @@@ -2583,18 -2536,9 +2583,18 @@@ struct cfg80211_nan_func * cases, the result of roaming is indicated with a call to * cfg80211_roamed() or cfg80211_roamed_bss(). * (invoked with the wireless_dev mutex held) - * @disconnect: Disconnect from the BSS/ESS. Once done, call - * cfg80211_disconnected(). + * @update_connect_params: Update the connect parameters while connected to a + * BSS. The updated parameters can be used by driver/firmware for + * subsequent BSS selection (roaming) decisions and to form the + * Authentication/(Re)Association Request frames. This call does not + * request an immediate disassociation or reassociation with the current + * BSS, i.e., this impacts only subsequent (re)associations. The bits in + * changed are defined in &enum cfg80211_connect_params_changed. * (invoked with the wireless_dev mutex held) + * @disconnect: Disconnect from the BSS/ESS or stop connection attempts if + * connection is in progress. Once done, call cfg80211_disconnected() in + * case connection was already established (invoked with the + * wireless_dev mutex held), otherwise call cfg80211_connect_timeout(). * * @join_ibss: Join the specified IBSS (or create if necessary). Once done, call * cfg80211_ibss_joined(), also call that function when changing BSSID due @@@ -2762,8 -2706,6 +2762,8 @@@ * @nan_change_conf: changes NAN configuration. The changed parameters must * be specified in @changes (using &enum cfg80211_nan_conf_changes); * All other parameters must be ignored. + * + * @set_multicast_to_unicast: configure multicast to unicast conversion for BSS */ struct cfg80211_ops { int (*suspend)(struct wiphy *wiphy, struct cfg80211_wowlan *wow); @@@ -2878,10 -2820,6 +2878,10 @@@
int (*connect)(struct wiphy *wiphy, struct net_device *dev, struct cfg80211_connect_params *sme); + int (*update_connect_params)(struct wiphy *wiphy, + struct net_device *dev, + struct cfg80211_connect_params *sme, + u32 changed); int (*disconnect)(struct wiphy *wiphy, struct net_device *dev, u16 reason_code);
@@@ -3044,10 -2982,6 +3044,10 @@@ struct wireless_dev *wdev, struct cfg80211_nan_conf *conf, u32 changes); + + int (*set_multicast_to_unicast)(struct wiphy *wiphy, + struct net_device *dev, + const bool enabled); };
/* @@@ -3146,12 -3080,6 +3146,12 @@@ struct ieee80211_iface_limit * only in special cases. * @radar_detect_widths: bitmap of channel widths supported for radar detection * @radar_detect_regions: bitmap of regions supported for radar detection + * @beacon_int_min_gcd: This interface combination supports different + * beacon intervals. + * = 0 - all beacon intervals for different interface must be same. + * > 0 - any beacon interval for the interface part of this combination AND + * *GCD* of all beacon intervals from beaconing interfaces of this + * combination must be greater or equal to this value. * * With this structure the driver can describe which interface * combinations it supports concurrently. @@@ -3160,47 -3088,54 +3160,54 @@@ * * 1. Allow #STA <= 1, #AP <= 1, matching BI, channels = 1, 2 total: * - * struct ieee80211_iface_limit limits1[] = { - * { .max = 1, .types = BIT(NL80211_IFTYPE_STATION), }, - * { .max = 1, .types = BIT(NL80211_IFTYPE_AP}, }, - * }; - * struct ieee80211_iface_combination combination1 = { - * .limits = limits1, - * .n_limits = ARRAY_SIZE(limits1), - * .max_interfaces = 2, - * .beacon_int_infra_match = true, - * }; + * .. code-block:: c + * + * struct ieee80211_iface_limit limits1[] = { + * { .max = 1, .types = BIT(NL80211_IFTYPE_STATION), }, + * { .max = 1, .types = BIT(NL80211_IFTYPE_AP}, }, + * }; + * struct ieee80211_iface_combination combination1 = { + * .limits = limits1, + * .n_limits = ARRAY_SIZE(limits1), + * .max_interfaces = 2, + * .beacon_int_infra_match = true, + * }; * * * 2. Allow #{AP, P2P-GO} <= 8, channels = 1, 8 total: * - * struct ieee80211_iface_limit limits2[] = { - * { .max = 8, .types = BIT(NL80211_IFTYPE_AP) | - * BIT(NL80211_IFTYPE_P2P_GO), }, - * }; - * struct ieee80211_iface_combination combination2 = { - * .limits = limits2, - * .n_limits = ARRAY_SIZE(limits2), - * .max_interfaces = 8, - * .num_different_channels = 1, - * }; + * .. code-block:: c + * + * struct ieee80211_iface_limit limits2[] = { + * { .max = 8, .types = BIT(NL80211_IFTYPE_AP) | + * BIT(NL80211_IFTYPE_P2P_GO), }, + * }; + * struct ieee80211_iface_combination combination2 = { + * .limits = limits2, + * .n_limits = ARRAY_SIZE(limits2), + * .max_interfaces = 8, + * .num_different_channels = 1, + * }; * * * 3. Allow #STA <= 1, #{P2P-client,P2P-GO} <= 3 on two channels, 4 total. * - * This allows for an infrastructure connection and three P2P connections. + * This allows for an infrastructure connection and three P2P connections. + * + * .. code-block:: c + * + * struct ieee80211_iface_limit limits3[] = { + * { .max = 1, .types = BIT(NL80211_IFTYPE_STATION), }, + * { .max = 3, .types = BIT(NL80211_IFTYPE_P2P_GO) | + * BIT(NL80211_IFTYPE_P2P_CLIENT), }, + * }; + * struct ieee80211_iface_combination combination3 = { + * .limits = limits3, + * .n_limits = ARRAY_SIZE(limits3), + * .max_interfaces = 4, + * .num_different_channels = 2, + * }; * - * struct ieee80211_iface_limit limits3[] = { - * { .max = 1, .types = BIT(NL80211_IFTYPE_STATION), }, - * { .max = 3, .types = BIT(NL80211_IFTYPE_P2P_GO) | - * BIT(NL80211_IFTYPE_P2P_CLIENT), }, - * }; - * struct ieee80211_iface_combination combination3 = { - * .limits = limits3, - * .n_limits = ARRAY_SIZE(limits3), - * .max_interfaces = 4, - * .num_different_channels = 2, - * }; */ struct ieee80211_iface_combination { const struct ieee80211_iface_limit *limits; @@@ -3210,7 -3145,6 +3217,7 @@@ bool beacon_int_infra_match; u8 radar_detect_widths; u8 radar_detect_regions; + u32 beacon_int_min_gcd; };
struct ieee80211_txrx_stypes { @@@ -3818,8 -3752,8 +3825,8 @@@ struct cfg80211_cached_keys * @beacon_interval: beacon interval used on this device for transmitting * beacons, 0 when not valid * @address: The address for this device, valid only if @netdev is %NULL - * @p2p_started: true if this is a P2P Device that has been started - * @nan_started: true if this is a NAN interface that has been started + * @is_running: true if this is a non-netdev device that has been started, e.g. + * the P2P Device. * @cac_started: true if DFS channel availability check has been started * @cac_start_time: timestamp (jiffies) when the dfs state was entered. * @cac_time_ms: CAC time in ms @@@ -3851,7 -3785,7 +3858,7 @@@ struct wireless_dev
struct mutex mtx;
- bool use_4addr, p2p_started, nan_started; + bool use_4addr, is_running;
u8 address[ETH_ALEN] __aligned(sizeof(u16));
@@@ -3908,13 -3842,6 +3915,13 @@@ static inline u8 *wdev_address(struct w return wdev->address; }
+static inline bool wdev_running(struct wireless_dev *wdev) +{ + if (wdev->netdev) + return netif_running(wdev->netdev); + return wdev->is_running; +} + /** * wdev_priv - return wiphy priv from wireless_dev * @@@ -4120,14 -4047,29 +4127,29 @@@ unsigned int ieee80211_get_mesh_hdrlen( */
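wdev_running() gives cfg80211 a single predicate covering both netdev-backed interfaces (via netif_running()) and non-netdev ones such as the P2P Device, now that the separate p2p_started/nan_started flags are folded into is_running. A minimal sketch of a hypothetical op body using it:

/* Hypothetical cfg80211 op: refuse work unless the interface is up,
 * whether it is a running netdev or a started non-netdev device.
 */
static int example_op(struct wiphy *wiphy, struct wireless_dev *wdev)
{
        if (!wdev_running(wdev))
                return -ENETDOWN;

        /* ... do the actual work ... */

        return 0;
}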
/** + * ieee80211_data_to_8023_exthdr - convert an 802.11 data frame to 802.3 + * @skb: the 802.11 data frame + * @ehdr: pointer to a &struct ethhdr that will get the header, instead + * of it being pushed into the SKB + * @addr: the device MAC address + * @iftype: the virtual interface type + * Return: 0 on success. Non-zero on error. + */ + int ieee80211_data_to_8023_exthdr(struct sk_buff *skb, struct ethhdr *ehdr, + const u8 *addr, enum nl80211_iftype iftype); + + /** * ieee80211_data_to_8023 - convert an 802.11 data frame to 802.3 * @skb: the 802.11 data frame * @addr: the device MAC address * @iftype: the virtual interface type * Return: 0 on success. Non-zero on error. */ - int ieee80211_data_to_8023(struct sk_buff *skb, const u8 *addr, - enum nl80211_iftype iftype); + static inline int ieee80211_data_to_8023(struct sk_buff *skb, const u8 *addr, + enum nl80211_iftype iftype) + { + return ieee80211_data_to_8023_exthdr(skb, NULL, addr, iftype); + }
/** * ieee80211_data_from_8023 - convert an 802.3 frame to 802.11 @@@ -4145,22 -4087,23 +4167,23 @@@ int ieee80211_data_from_8023(struct sk_ /** * ieee80211_amsdu_to_8023s - decode an IEEE 802.11n A-MSDU frame * - * Decode an IEEE 802.11n A-MSDU frame and convert it to a list of - * 802.3 frames. The @list will be empty if the decode fails. The - * @skb is consumed after the function returns. + * Decode an IEEE 802.11 A-MSDU and convert it to a list of 802.3 frames. + * The @list will be empty if the decode fails. The @skb must be fully + * header-less before being passed in here; it is freed in this function. * - * @skb: The input IEEE 802.11n A-MSDU frame. + * @skb: The input A-MSDU frame without any headers. * @list: The output list of 802.3 frames. It must be allocated and * initialized by the caller. * @addr: The device MAC address. * @iftype: The device interface type. * @extra_headroom: The hardware extra headroom for SKBs in the @list. - * @has_80211_header: Set it true if SKB is with IEEE 802.11 header. + * @check_da: DA to check in the inner ethernet header, or NULL + * @check_sa: SA to check in the inner ethernet header, or NULL */ void ieee80211_amsdu_to_8023s(struct sk_buff *skb, struct sk_buff_head *list, const u8 *addr, enum nl80211_iftype iftype, const unsigned int extra_headroom, - bool has_80211_header); + const u8 *check_da, const u8 *check_sa);
/** * cfg80211_classify8021d - determine the 802.1p/1d tag for a data frame @@@ -4220,27 -4163,6 +4243,27 @@@ static inline const u8 *cfg80211_find_i }
/** + * cfg80211_find_ext_ie - find information element with EID Extension in data + * + * @ext_eid: element ID Extension + * @ies: data consisting of IEs + * @len: length of data + * + * Return: %NULL if the extended element ID could not be found or if + * the element is invalid (claims to be longer than the given + * data), or a pointer to the first byte of the requested + * element, that is the byte containing the element ID. + * + * Note: There are no checks on the element length other than + * having to fit into the given data. + */ +static inline const u8 *cfg80211_find_ext_ie(u8 ext_eid, const u8 *ies, int len) +{ + return cfg80211_find_ie_match(WLAN_EID_EXTENSION, ies, len, + &ext_eid, 1, 2); +} + +/** * cfg80211_find_vendor_ie - find vendor specific information element in data * * @oui: vendor OUI @@@ -4640,8 -4562,7 +4663,8 @@@ void cfg80211_auth_timeout(struct net_d * moves to cfg80211 in this call * @buf: authentication frame (header + body) * @len: length of the frame data - * @uapsd_queues: bitmap of ACs configured to uapsd. -1 if n/a. + * @uapsd_queues: bitmap of queues configured for uapsd. Same format + * as the AC bitmap in the QoS info field * * After being asked to associate via cfg80211_ops::assoc() the driver must * call either this function or cfg80211_auth_timeout(). @@@ -5677,20 -5598,36 +5700,20 @@@ unsigned int ieee80211_get_num_supporte * cfg80211_check_combinations - check interface combinations * * @wiphy: the wiphy - * @num_different_channels: the number of different channels we want - * to use for verification - * @radar_detect: a bitmap where each bit corresponds to a channel - * width where radar detection is needed, as in the definition of - * &struct ieee80211_iface_combination.@radar_detect_widths - * @iftype_num: array with the numbers of interfaces of each interface - * type. The index is the interface type as specified in &enum - * nl80211_iftype. + * @params: the interface combinations parameter * * This function can be called by the driver to check whether a * combination of interfaces and their types are allowed according to * the interface combinations. */ int cfg80211_check_combinations(struct wiphy *wiphy, - const int num_different_channels, - const u8 radar_detect, - const int iftype_num[NUM_NL80211_IFTYPES]); + struct iface_combination_params *params);
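cfg80211_check_combinations() now takes its inputs bundled in struct iface_combination_params instead of a growing positional argument list. A minimal caller sketch, assuming a hypothetical driver check for one station plus one AP sharing a single channel:

/* Hypothetical check: 1 STA + 1 AP on one channel, no radar detection
 * and no new beacon interval to validate.
 */
static int example_check_sta_ap(struct wiphy *wiphy)
{
        struct iface_combination_params params = {
                .num_different_channels = 1,
        };

        params.iftype_num[NL80211_IFTYPE_STATION] = 1;
        params.iftype_num[NL80211_IFTYPE_AP] = 1;

        return cfg80211_check_combinations(wiphy, &params);
}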
/** * cfg80211_iter_combinations - iterate over matching combinations * * @wiphy: the wiphy - * @num_different_channels: the number of different channels we want - * to use for verification - * @radar_detect: a bitmap where each bit corresponds to a channel - * width where radar detection is needed, as in the definition of - * &struct ieee80211_iface_combination.@radar_detect_widths - * @iftype_num: array with the numbers of interfaces of each interface - * type. The index is the interface type as specified in &enum - * nl80211_iftype. + * @params: the interface combinations parameter * @iter: function to call for each matching combination * @data: pointer to pass to iter function * @@@ -5699,7 -5636,9 +5722,7 @@@ * purposes. */ int cfg80211_iter_combinations(struct wiphy *wiphy, - const int num_different_channels, - const u8 radar_detect, - const int iftype_num[NUM_NL80211_IFTYPES], + struct iface_combination_params *params, void (*iter)(const struct ieee80211_iface_combination *c, void *data), void *data); diff --combined include/net/mac80211.h index b9b24ab,e2dba93..5345d35 --- a/include/net/mac80211.h +++ b/include/net/mac80211.h @@@ -811,14 -811,18 +811,18 @@@ enum mac80211_rate_control_flags * in the control information, and it will be filled by the rate * control algorithm according to what should be sent. For example, * if this array contains, in the format { <idx>, <count> } the - * information + * information:: + * + * { 3, 2 }, { 2, 2 }, { 1, 4 }, { -1, 0 }, { -1, 0 } + * + * then this means that the frame should be transmitted * up to twice at rate 3, up to twice at rate 2, and up to four * times at rate 1 if it doesn't get acknowledged. Say it gets * acknowledged by the peer after the fifth attempt, the status - * information should then contain + * information should then contain:: + * + * { 3, 2 }, { 2, 2 }, { 1, 1 }, { -1, 0 } ... + * + * since it was transmitted twice at rate 3, twice at rate 2 * and once at rate 1 after which we received an acknowledgement. */ @@@ -1168,8 -1172,8 +1172,8 @@@ enum mac80211_rx_vht_flags * @rate_idx: index of data rate into band's supported rates or MCS index if * HT or VHT is used (%RX_FLAG_HT/%RX_FLAG_VHT) * @vht_nss: number of streams (VHT only) - * @flag: %RX_FLAG_* - * @vht_flag: %RX_VHT_FLAG_* + * @flag: %RX_FLAG_* + * @vht_flag: %RX_VHT_FLAG_* * @rx_flags: internal RX flags for mac80211 * @ampdu_reference: A-MPDU reference number, must be a different value for * each A-MPDU but the same for each subframe within one A-MPDU @@@ -1432,13 -1436,13 +1436,13 @@@ enum ieee80211_vif_flags * @probe_req_reg: probe requests should be reported to mac80211 for this * interface. * @drv_priv: data area for driver use, will always be aligned to - * sizeof(void *). + * sizeof(void *). * @txq: the multicast data TX queue (if driver uses the TXQ abstraction) */ struct ieee80211_vif { enum nl80211_iftype type; struct ieee80211_bss_conf bss_conf; - u8 addr[ETH_ALEN]; + u8 addr[ETH_ALEN] __aligned(2); bool p2p; bool csa_active; bool mu_mimo_owner; @@@ -1743,10 -1747,9 +1747,10 @@@ struct ieee80211_sta_rates * @wme: indicates whether the STA supports QoS/WME (if local devices does, * otherwise always false) * @drv_priv: data area for driver use, will always be aligned to - * sizeof(void *), size is determined in hw information. + * sizeof(void *), size is determined in hw information. * @uapsd_queues: bitmap of queues configured for uapsd. Only valid - * if wme is supported. + * if wme is supported. The bits order is like in + * IEEE80211_WMM_IE_STA_QOSINFO_AC_*.
* @max_sp: max Service Period. Only valid if wme is supported. * @bandwidth: current bandwidth the station can receive with * @rx_nss: in HT/VHT, the maximum number of spatial streams the @@@ -2026,10 -2029,6 +2030,10 @@@ struct ieee80211_txq * drivers, mac80211 packet loss mechanism will not be triggered and driver * is completely depending on firmware event for station kickout. * + * @IEEE80211_HW_SUPPORTS_TX_FRAG: Hardware does fragmentation by itself. + * The stack will not do fragmentation. + * The callback for @set_frag_threshold should be set as well. + * * @NUM_IEEE80211_HW_FLAGS: number of hardware flags, used for sizing arrays */ enum ieee80211_hw_flags { @@@ -2071,7 -2070,6 +2075,7 @@@ IEEE80211_HW_TX_AMSDU, IEEE80211_HW_TX_FRAG_LIST, IEEE80211_HW_REPORTS_LOW_ACK, + IEEE80211_HW_SUPPORTS_TX_FRAG,
/* keep last, obviously */ NUM_IEEE80211_HW_FLAGS @@@ -2152,12 -2150,12 +2156,12 @@@ * * @radiotap_mcs_details: lists which MCS information can the HW * reports, by default it is set to _MCS, _GI and _BW but doesn't - * include _FMT. Use %IEEE80211_RADIOTAP_MCS_HAVE_* values, only + * include _FMT. Use %IEEE80211_RADIOTAP_MCS_HAVE_* values, only * adding _BW is supported today. * * @radiotap_vht_details: lists which VHT MCS information the HW reports, * the default is _GI | _BANDWIDTH. - * Use the %IEEE80211_RADIOTAP_VHT_KNOWN_* values. + * Use the %IEEE80211_RADIOTAP_VHT_KNOWN_* values. * * @radiotap_timestamp: Information for the radiotap timestamp field; if the * 'units_pos' member is set to a non-negative value it must be set to @@@ -2492,6 -2490,7 +2496,7 @@@ void ieee80211_free_txskb(struct ieee80 * in the software stack cares about, we will, in the future, have mac80211 * tell the driver which information elements are interesting in the sense * that we want to see changes in them. This will include + * * - a list of information element IDs * - a list of OUIs for the vendor information element * @@@ -3099,9 -3098,8 +3104,9 @@@ enum ieee80211_reconfig_type * The callback must be atomic. * * @set_frag_threshold: Configuration of fragmentation threshold. Assign this - * if the device does fragmentation by itself; if this callback is - * implemented then the stack will not do fragmentation. + * if the device does fragmentation by itself. Note that to prevent the + * stack from doing fragmentation IEEE80211_HW_SUPPORTS_TX_FRAG + * should be set as well. * The callback can sleep. * * @set_rts_threshold: Configuration of RTS threshold (if device needs it) @@@ -4094,10 -4092,6 +4099,10 @@@ void ieee80211_sta_pspoll(struct ieee80 * This must be used in conjunction with ieee80211_sta_ps_transition() * and possibly ieee80211_sta_pspoll(); calls to all three must be * serialized. + * %IEEE80211_NUM_TIDS can be passed as the tid if the tid is unknown. + * In this case, mac80211 will not check that this tid maps to an AC + * that is trigger enabled and assume that the caller did the proper + * checks. */ void ieee80211_sta_uapsd_trigger(struct ieee80211_sta *sta, u8 tid);
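Per the kernel-doc above, a driver whose hardware fragments by itself should now advertise IEEE80211_HW_SUPPORTS_TX_FRAG in addition to implementing set_frag_threshold, so mac80211 knows to skip software fragmentation. A minimal sketch of the setup-time side, assuming mac80211's existing ieee80211_hw_set() helper; the function name is hypothetical:

/* Hypothetical probe-path snippet: claim hardware TX fragmentation. */
static void example_setup_hw(struct ieee80211_hw *hw)
{
        ieee80211_hw_set(hw, SUPPORTS_TX_FRAG);

        /* the driver's ieee80211_ops must also provide set_frag_threshold */
}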
diff --combined include/net/sock.h index 2764895,73c6b00..f13ac87 --- a/include/net/sock.h +++ b/include/net/sock.h @@@ -252,6 -252,7 +252,7 @@@ struct sock_common * @sk_pacing_rate: Pacing rate (if supported by transport/packet scheduler) * @sk_max_pacing_rate: Maximum pacing rate (%SO_MAX_PACING_RATE) * @sk_sndbuf: size of send buffer in bytes + * @sk_padding: unused element for alignment * @sk_no_check_tx: %SO_NO_CHECK setting, set checksum in TX packets * @sk_no_check_rx: allow zero checksum in RX packets * @sk_route_caps: route capabilities (e.g. %NETIF_F_TSO) @@@ -302,7 -303,8 +303,8 @@@ * @sk_backlog_rcv: callback to process the backlog * @sk_destruct: called at sock freeing time, i.e. when all refcnt == 0 * @sk_reuseport_cb: reuseport group container - */ + * @sk_rcu: used during RCU grace period + */ struct sock { /* * Now struct inet_timewait_sock also uses sock_common, so please just @@@ -1274,9 -1276,7 +1276,9 @@@ static inline struct inode *SOCK_INODE( /* * Functions for memory accounting */ +int __sk_mem_raise_allocated(struct sock *sk, int size, int amt, int kind); int __sk_mem_schedule(struct sock *sk, int size, int kind); +void __sk_mem_reduce_allocated(struct sock *sk, int amount); void __sk_mem_reclaim(struct sock *sk, int amount);
#define SK_MEM_QUANTUM ((int)PAGE_SIZE) @@@ -1952,8 -1952,6 +1954,8 @@@ void sk_reset_timer(struct sock *sk, st
void sk_stop_timer(struct sock *sk, struct timer_list *timer);
+int __sk_queue_drop_skb(struct sock *sk, struct sk_buff *skb, + unsigned int flags); int __sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb); int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb);
diff --combined include/net/udp.h index 18f1e6b,4948790..6134f37 --- a/include/net/udp.h +++ b/include/net/udp.h @@@ -246,9 -246,6 +246,9 @@@ static inline __be16 udp_flow_src_port( }
/* net/ipv4/udp.c */ +void skb_consume_udp(struct sock *sk, struct sk_buff *skb, int len); +int __udp_enqueue_schedule_skb(struct sock *sk, struct sk_buff *skb); + void udp_v4_early_demux(struct sk_buff *skb); int udp_get_port(struct sock *sk, unsigned short snum, int (*saddr_cmp)(const struct sock *, @@@ -261,7 -258,7 +261,8 @@@ void udp_flush_pending_frames(struct so void udp4_hwcsum(struct sk_buff *skb, __be32 src, __be32 dst); int udp_rcv(struct sk_buff *skb); int udp_ioctl(struct sock *sk, int cmd, unsigned long arg); +int udp_init_sock(struct sock *sk); + int __udp_disconnect(struct sock *sk, int flags); int udp_disconnect(struct sock *sk, int flags); unsigned int udp_poll(struct file *file, struct socket *sock, poll_table *wait); struct sk_buff *skb_udp_tunnel_segment(struct sk_buff *skb, diff --combined net/8021q/vlan.c index 9accde3,f2531ad..a793655 --- a/net/8021q/vlan.c +++ b/net/8021q/vlan.c @@@ -515,8 -515,8 +515,8 @@@ static int vlan_ioctl_handler(struct ne return -EFAULT;
/* Null terminate this sucker, just in case. */ - args.device1[23] = 0; - args.u.device2[23] = 0; + args.device1[sizeof(args.device1) - 1] = 0; + args.u.device2[sizeof(args.u.device2) - 1] = 0;
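Indexing with sizeof(buf) - 1 instead of the hard-coded 23 keeps the forced termination pointing at the true last byte even if the ioctl structure's array sizes ever change. A tiny sketch of the general pattern; the function and buffer are hypothetical:

/* Hypothetical illustration: strncpy() may leave the buffer without a
 * terminator, and sizeof(name) - 1 names the last byte with no magic
 * number to keep in sync with the array declaration.
 */
static void example_copy_name(const char *untrusted)
{
        char name[24];

        strncpy(name, untrusted, sizeof(name));
        name[sizeof(name) - 1] = '\0';

        /* ... use name ... */
}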
rtnl_lock();
@@@ -571,7 -571,8 +571,7 @@@ err = -EPERM; if (!ns_capable(net->user_ns, CAP_NET_ADMIN)) break; - if ((args.u.name_type >= 0) && - (args.u.name_type < VLAN_NAME_TYPE_HIGHEST)) { + if (args.u.name_type < VLAN_NAME_TYPE_HIGHEST) { struct vlan_net *vn;
vn = net_generic(net, vlan_net_id); @@@ -663,7 -664,7 +663,7 @@@ static struct sk_buff **vlan_gro_receiv
skb_gro_pull(skb, sizeof(*vhdr)); skb_gro_postpull_rcsum(skb, vhdr, sizeof(*vhdr)); - pp = ptype->callbacks.gro_receive(head, skb); + pp = call_gro_receive(ptype->callbacks.gro_receive, head, skb);
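vlan_gro_receive() now recurses into the next handler through call_gro_receive(), which increments the per-packet recursion_counter (reset in dev_gro_receive()) and flushes once GRO_RECURSION_LIMIT is reached, so stacked encapsulations cannot recurse without bound. A minimal sketch of the shape an encapsulation's gro_receive takes; inner_gro_receive is a hypothetical stand-in for the looked-up inner callback:

/* Hypothetical encapsulation handler built on the guarded helper. */
static struct sk_buff **example_encap_gro_receive(struct sk_buff **head,
                                                  struct sk_buff *skb)
{
        /* ... validate and pull the outer header here ... */

        /* bounded recursion: bumps recursion_counter, flushes at limit */
        return call_gro_receive(inner_gro_receive, head, skb);
}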
out_unlock: rcu_read_unlock(); diff --combined net/batman-adv/log.h index ab47acf,d2905a8..3284a7b --- a/net/batman-adv/log.h +++ b/net/batman-adv/log.h @@@ -63,7 -63,7 +63,7 @@@ enum batadv_dbg_level BATADV_DBG_NC = BIT(5), BATADV_DBG_MCAST = BIT(6), BATADV_DBG_TP_METER = BIT(7), - BATADV_DBG_ALL = 127, + BATADV_DBG_ALL = 255, };
#ifdef CONFIG_BATMAN_ADV_DEBUG @@@ -71,12 -71,12 +71,12 @@@ int batadv_debug_log(struct batadv_pri __printf(2, 3);
/* possibly ratelimited debug output */ -#define _batadv_dbg(type, bat_priv, ratelimited, fmt, arg...) \ - do { \ - if (atomic_read(&bat_priv->log_level) & type && \ - (!ratelimited || net_ratelimit())) \ - batadv_debug_log(bat_priv, fmt, ## arg);\ - } \ +#define _batadv_dbg(type, bat_priv, ratelimited, fmt, arg...) \ + do { \ + if (atomic_read(&(bat_priv)->log_level) & (type) && \ + (!(ratelimited) || net_ratelimit())) \ + batadv_debug_log(bat_priv, fmt, ## arg); \ + } \ while (0) #else /* !CONFIG_BATMAN_ADV_DEBUG */ __printf(4, 5) diff --combined net/batman-adv/originator.c index 518b1ed,7c8d160..c6e7e1e --- a/net/batman-adv/originator.c +++ b/net/batman-adv/originator.c @@@ -364,7 -364,7 +364,7 @@@ struct batadv_orig_ifinfo batadv_orig_ifinfo_new(struct batadv_orig_node *orig_node, struct batadv_hard_iface *if_outgoing) { - struct batadv_orig_ifinfo *orig_ifinfo = NULL; + struct batadv_orig_ifinfo *orig_ifinfo; unsigned long reset_time;
spin_lock_bh(&orig_node->neigh_list_lock); @@@ -520,7 -520,7 +520,7 @@@ batadv_hardif_neigh_create(struct batad const u8 *neigh_addr) { struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface); - struct batadv_hardif_neigh_node *hardif_neigh = NULL; + struct batadv_hardif_neigh_node *hardif_neigh;
spin_lock_bh(&hard_iface->neigh_list_lock);
@@@ -544,7 -544,7 +544,7 @@@ if (bat_priv->algo_ops->neigh.hardif_init) bat_priv->algo_ops->neigh.hardif_init(hardif_neigh);
- hlist_add_head(&hardif_neigh->list, &hard_iface->neigh_list); + hlist_add_head_rcu(&hardif_neigh->list, &hard_iface->neigh_list);
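Publishing with hlist_add_head_rcu() pairs the writers, still serialized by neigh_list_lock, with lock-free readers: a traversal can then use hlist_for_each_entry_rcu() under rcu_read_lock() without taking the spinlock. A minimal reader sketch; the visitor function is hypothetical:

/* Hypothetical lock-free walk over the list published above. */
static void example_visit_neighs(struct batadv_hard_iface *hard_iface)
{
        struct batadv_hardif_neigh_node *neigh;

        rcu_read_lock();
        hlist_for_each_entry_rcu(neigh, &hard_iface->neigh_list, list) {
                /* ... inspect neigh; must not sleep or drop the lock ... */
        }
        rcu_read_unlock();
}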
out: spin_unlock_bh(&hard_iface->neigh_list_lock); @@@ -563,7 -563,7 +563,7 @@@ static struct batadv_hardif_neigh_node batadv_hardif_neigh_get_or_create(struct batadv_hard_iface *hard_iface, const u8 *neigh_addr) { - struct batadv_hardif_neigh_node *hardif_neigh = NULL; + struct batadv_hardif_neigh_node *hardif_neigh;
/* first check without locking to avoid the overhead */ hardif_neigh = batadv_hardif_neigh_get(hard_iface, neigh_addr); @@@ -683,7 -683,7 +683,7 @@@ batadv_neigh_node_get_or_create(struct struct batadv_hard_iface *hard_iface, const u8 *neigh_addr) { - struct batadv_neigh_node *neigh_node = NULL; + struct batadv_neigh_node *neigh_node;
/* first check without locking to avoid the overhead */ neigh_node = batadv_neigh_node_get(orig_node, hard_iface, neigh_addr); @@@ -1021,7 -1021,7 +1021,7 @@@ struct batadv_orig_node *batadv_orig_no batadv_orig_node_vlan_put(vlan);
for (i = 0; i < BATADV_FRAG_BUFFER_COUNT; i++) { - INIT_HLIST_HEAD(&orig_node->fragments[i].head); + INIT_HLIST_HEAD(&orig_node->fragments[i].fragment_list); spin_lock_init(&orig_node->fragments[i].lock); orig_node->fragments[i].size = 0; } diff --combined net/core/dev.c index 6aa43cd,820bac2..8341dad --- a/net/core/dev.c +++ b/net/core/dev.c @@@ -139,6 -139,7 +139,6 @@@ #include <linux/errqueue.h> #include <linux/hrtimer.h> #include <linux/netfilter_ingress.h> -#include <linux/sctp.h> #include <linux/crash_dump.h>
#include "net-sysfs.h" @@@ -2491,6 -2492,141 +2491,6 @@@ out } EXPORT_SYMBOL(skb_checksum_help);
-/* skb_csum_offload_check - Driver helper function to determine if a device - * with limited checksum offload capabilities is able to offload the checksum - * for a given packet. - * - * Arguments: - * skb - sk_buff for the packet in question - * spec - contains the description of what device can offload - * csum_encapped - returns true if the checksum being offloaded is - * encpasulated. That is it is checksum for the transport header - * in the inner headers. - * checksum_help - when set indicates that helper function should - * call skb_checksum_help if offload checks fail - * - * Returns: - * true: Packet has passed the checksum checks and should be offloadable to - * the device (a driver may still need to check for additional - * restrictions of its device) - * false: Checksum is not offloadable. If checksum_help was set then - * skb_checksum_help was called to resolve checksum for non-GSO - * packets and when IP protocol is not SCTP - */ -bool __skb_csum_offload_chk(struct sk_buff *skb, - const struct skb_csum_offl_spec *spec, - bool *csum_encapped, - bool csum_help) -{ - struct iphdr *iph; - struct ipv6hdr *ipv6; - void *nhdr; - int protocol; - u8 ip_proto; - - if (skb->protocol == htons(ETH_P_8021Q) || - skb->protocol == htons(ETH_P_8021AD)) { - if (!spec->vlan_okay) - goto need_help; - } - - /* We check whether the checksum refers to a transport layer checksum in - * the outermost header or an encapsulated transport layer checksum that - * corresponds to the inner headers of the skb. If the checksum is for - * something else in the packet we need help. - */ - if (skb_checksum_start_offset(skb) == skb_transport_offset(skb)) { - /* Non-encapsulated checksum */ - protocol = eproto_to_ipproto(vlan_get_protocol(skb)); - nhdr = skb_network_header(skb); - *csum_encapped = false; - if (spec->no_not_encapped) - goto need_help; - } else if (skb->encapsulation && spec->encap_okay && - skb_checksum_start_offset(skb) == - skb_inner_transport_offset(skb)) { - /* Encapsulated checksum */ - *csum_encapped = true; - switch (skb->inner_protocol_type) { - case ENCAP_TYPE_ETHER: - protocol = eproto_to_ipproto(skb->inner_protocol); - break; - case ENCAP_TYPE_IPPROTO: - protocol = skb->inner_protocol; - break; - } - nhdr = skb_inner_network_header(skb); - } else { - goto need_help; - } - - switch (protocol) { - case IPPROTO_IP: - if (!spec->ipv4_okay) - goto need_help; - iph = nhdr; - ip_proto = iph->protocol; - if (iph->ihl != 5 && !spec->ip_options_okay) - goto need_help; - break; - case IPPROTO_IPV6: - if (!spec->ipv6_okay) - goto need_help; - if (spec->no_encapped_ipv6 && *csum_encapped) - goto need_help; - ipv6 = nhdr; - nhdr += sizeof(*ipv6); - ip_proto = ipv6->nexthdr; - break; - default: - goto need_help; - } - -ip_proto_again: - switch (ip_proto) { - case IPPROTO_TCP: - if (!spec->tcp_okay || - skb->csum_offset != offsetof(struct tcphdr, check)) - goto need_help; - break; - case IPPROTO_UDP: - if (!spec->udp_okay || - skb->csum_offset != offsetof(struct udphdr, check)) - goto need_help; - break; - case IPPROTO_SCTP: - if (!spec->sctp_okay || - skb->csum_offset != offsetof(struct sctphdr, checksum)) - goto cant_help; - break; - case NEXTHDR_HOP: - case NEXTHDR_ROUTING: - case NEXTHDR_DEST: { - u8 *opthdr = nhdr; - - if (protocol != IPPROTO_IPV6 || !spec->ext_hdrs_okay) - goto need_help; - - ip_proto = opthdr[0]; - nhdr += (opthdr[1] + 1) << 3; - - goto ip_proto_again; - } - default: - goto need_help; - } - - /* Passed the tests for offloading checksum */ - return true; - -need_help: - if 
(csum_help && !skb_shinfo(skb)->gso_size) - skb_checksum_help(skb); -cant_help: - return false; -} -EXPORT_SYMBOL(__skb_csum_offload_chk); - __be16 skb_network_protocol(struct sk_buff *skb, int *depth) { __be16 type = skb->protocol; @@@ -2899,6 -3035,7 +2899,7 @@@ struct sk_buff *validate_xmit_skb_list( } return head; } + EXPORT_SYMBOL_GPL(validate_xmit_skb_list);
static void qdisc_pkt_len_init(struct sk_buff *skb) { @@@ -3709,7 -3846,7 +3710,7 @@@ int netif_rx_ni(struct sk_buff *skb } EXPORT_SYMBOL(netif_rx_ni);
- static void net_tx_action(struct softirq_action *h) + static __latent_entropy void net_tx_action(struct softirq_action *h) { struct softnet_data *sd = this_cpu_ptr(&softnet_data);
@@@ -4375,6 -4512,7 +4376,7 @@@ static enum gro_result dev_gro_receive( NAPI_GRO_CB(skb)->flush = 0; NAPI_GRO_CB(skb)->free = 0; NAPI_GRO_CB(skb)->encap_mark = 0; + NAPI_GRO_CB(skb)->recursion_counter = 0; NAPI_GRO_CB(skb)->is_fou = 0; NAPI_GRO_CB(skb)->is_atomic = 1; NAPI_GRO_CB(skb)->gro_remcsum_start = 0; @@@ -5062,7 -5200,7 +5064,7 @@@ out_unlock return work; }
- static void net_rx_action(struct softirq_action *h) + static __latent_entropy void net_rx_action(struct softirq_action *h) { struct softnet_data *sd = this_cpu_ptr(&softnet_data); unsigned long time_limit = jiffies + 2; @@@ -5137,13 -5275,6 +5139,13 @@@ static struct netdev_adjacent *__netdev return NULL; }
+static int __netdev_has_upper_dev(struct net_device *upper_dev, void *data) +{ + struct net_device *dev = data; + + return upper_dev == dev; +} + /** * netdev_has_upper_dev - Check if device is linked to an upper device * @dev: device @@@ -5158,30 -5289,11 +5160,30 @@@ bool netdev_has_upper_dev(struct net_de { ASSERT_RTNL();
- return __netdev_find_adj(upper_dev, &dev->all_adj_list.upper); + return netdev_walk_all_upper_dev_rcu(dev, __netdev_has_upper_dev, + upper_dev); } EXPORT_SYMBOL(netdev_has_upper_dev);
/** + * netdev_has_upper_dev_all_rcu - Check if device is linked to an upper device + * @dev: device + * @upper_dev: upper device to check + * + * Find out if a device is linked to the specified upper device and return true + * in case it is. Note that this checks the entire upper device chain. + * The caller must hold the RCU read lock. + */ + +bool netdev_has_upper_dev_all_rcu(struct net_device *dev, + struct net_device *upper_dev) +{ + return !!netdev_walk_all_upper_dev_rcu(dev, __netdev_has_upper_dev, + upper_dev); +} +EXPORT_SYMBOL(netdev_has_upper_dev_all_rcu); + +/** * netdev_has_any_upper_dev - Check if device is linked to some device * @dev: device * * Find out if a device is linked to an upper device and return true in case * it is. The caller must hold the RTNL lock. */ @@@ -5192,7 -5304,7 +5194,7 @@@ static bool netdev_has_any_upper_dev(st { ASSERT_RTNL();
- return !list_empty(&dev->all_adj_list.upper); + return !list_empty(&dev->adj_list.upper); }
/** @@@ -5219,20 -5331,6 +5221,20 @@@ struct net_device *netdev_master_upper_ } EXPORT_SYMBOL(netdev_master_upper_dev_get);
+/** + * netdev_has_any_lower_dev - Check if device is linked to some device + * @dev: device + * + * Find out if a device is linked to a lower device and return true in case + * it is. The caller must hold the RTNL lock. + */ +static bool netdev_has_any_lower_dev(struct net_device *dev) +{ + ASSERT_RTNL(); + + return !list_empty(&dev->adj_list.lower); +} + void *netdev_adjacent_get_private(struct list_head *adj_list) { struct netdev_adjacent *adj; @@@ -5269,8 -5367,16 +5271,8 @@@ struct net_device *netdev_upper_get_nex } EXPORT_SYMBOL(netdev_upper_get_next_dev_rcu);
-/** - * netdev_all_upper_get_next_dev_rcu - Get the next dev from upper list - * @dev: device - * @iter: list_head ** of the current position - * - * Gets the next device from the dev's upper list, starting from iter - * position. The caller must hold RCU read lock. - */ -struct net_device *netdev_all_upper_get_next_dev_rcu(struct net_device *dev, - struct list_head **iter) +static struct net_device *netdev_next_upper_dev_rcu(struct net_device *dev, + struct list_head **iter) { struct netdev_adjacent *upper;
@@@ -5278,41 -5384,14 +5280,41 @@@
upper = list_entry_rcu((*iter)->next, struct netdev_adjacent, list);
- if (&upper->list == &dev->all_adj_list.upper) + if (&upper->list == &dev->adj_list.upper) return NULL;
*iter = &upper->list;
return upper->dev; } -EXPORT_SYMBOL(netdev_all_upper_get_next_dev_rcu); + +int netdev_walk_all_upper_dev_rcu(struct net_device *dev, + int (*fn)(struct net_device *dev, + void *data), + void *data) +{ + struct net_device *udev; + struct list_head *iter; + int ret; + + for (iter = &dev->adj_list.upper, + udev = netdev_next_upper_dev_rcu(dev, &iter); + udev; + udev = netdev_next_upper_dev_rcu(dev, &iter)) { + /* first is the upper device itself */ + ret = fn(udev, data); + if (ret) + return ret; + + /* then look at all of its upper devices */ + ret = netdev_walk_all_upper_dev_rcu(udev, fn, data); + if (ret) + return ret; + } + + return 0; +} +EXPORT_SYMBOL_GPL(netdev_walk_all_upper_dev_rcu);
/** * netdev_lower_get_next_private - Get the next ->private from the @@@ -5395,90 -5474,55 +5397,90 @@@ void *netdev_lower_get_next(struct net_ } EXPORT_SYMBOL(netdev_lower_get_next);
-/** - * netdev_all_lower_get_next - Get the next device from all lower neighbour list - * @dev: device - * @iter: list_head ** of the current position - * - * Gets the next netdev_adjacent from the dev's all lower neighbour - * list, starting from iter position. The caller must hold RTNL lock or - * its own locking that guarantees that the neighbour all lower - * list will remain unchanged. - */ -struct net_device *netdev_all_lower_get_next(struct net_device *dev, struct list_head **iter) +static struct net_device *netdev_next_lower_dev(struct net_device *dev, + struct list_head **iter) { struct netdev_adjacent *lower;
- lower = list_entry(*iter, struct netdev_adjacent, list); + lower = list_entry((*iter)->next, struct netdev_adjacent, list);
- if (&lower->list == &dev->all_adj_list.lower) + if (&lower->list == &dev->adj_list.lower) return NULL;
- *iter = lower->list.next; + *iter = &lower->list;
return lower->dev; } -EXPORT_SYMBOL(netdev_all_lower_get_next);
-/** - * netdev_all_lower_get_next_rcu - Get the next device from all - * lower neighbour list, RCU variant - * @dev: device - * @iter: list_head ** of the current position - * - * Gets the next netdev_adjacent from the dev's all lower neighbour - * list, starting from iter position. The caller must hold RCU read lock. - */ -struct net_device *netdev_all_lower_get_next_rcu(struct net_device *dev, - struct list_head **iter) +int netdev_walk_all_lower_dev(struct net_device *dev, + int (*fn)(struct net_device *dev, + void *data), + void *data) +{ + struct net_device *ldev; + struct list_head *iter; + int ret; + + for (iter = &dev->adj_list.lower, + ldev = netdev_next_lower_dev(dev, &iter); + ldev; + ldev = netdev_next_lower_dev(dev, &iter)) { + /* first is the lower device itself */ + ret = fn(ldev, data); + if (ret) + return ret; + + /* then look at all of its lower devices */ + ret = netdev_walk_all_lower_dev(ldev, fn, data); + if (ret) + return ret; + } + + return 0; +} +EXPORT_SYMBOL_GPL(netdev_walk_all_lower_dev); + +static struct net_device *netdev_next_lower_dev_rcu(struct net_device *dev, + struct list_head **iter) { struct netdev_adjacent *lower;
lower = list_entry_rcu((*iter)->next, struct netdev_adjacent, list); - - if (&lower->list == &dev->all_adj_list.lower) + if (&lower->list == &dev->adj_list.lower) return NULL;
*iter = &lower->list;
return lower->dev; } -EXPORT_SYMBOL(netdev_all_lower_get_next_rcu); + +int netdev_walk_all_lower_dev_rcu(struct net_device *dev, + int (*fn)(struct net_device *dev, + void *data), + void *data) +{ + struct net_device *ldev; + struct list_head *iter; + int ret; + + for (iter = &dev->adj_list.lower, + ldev = netdev_next_lower_dev_rcu(dev, &iter); + ldev; + ldev = netdev_next_lower_dev_rcu(dev, &iter)) { + /* first is the lower device itself */ + ret = fn(ldev, data); + if (ret) + return ret; + + /* then look at all of its lower devices */ + ret = netdev_walk_all_lower_dev_rcu(ldev, fn, data); + if (ret) + return ret; + } + + return 0; +} +EXPORT_SYMBOL_GPL(netdev_walk_all_lower_dev_rcu);
 /**
  * netdev_lower_get_first_private_rcu - Get the first ->private from the
@@@ -5551,6 -5595,7 +5553,6 @@@ static inline bool netdev_adjacent_is_n

 static int __netdev_adjacent_dev_insert(struct net_device *dev,
 					struct net_device *adj_dev,
-					u16 ref_nr,
 					struct list_head *dev_list,
 					void *private, bool master)
 {
@@@ -5560,10 -5605,7 +5562,10 @@@
 	adj = __netdev_find_adj(adj_dev, dev_list);

 	if (adj) {
-		adj->ref_nr += ref_nr;
+		adj->ref_nr += 1;
+		pr_debug("Insert adjacency: dev %s adj_dev %s adj->ref_nr %d\n",
+			 dev->name, adj_dev->name, adj->ref_nr);
+
 		return 0;
 	}

@@@ -5573,12 -5615,12 +5575,12 @@@

 	adj->dev = adj_dev;
 	adj->master = master;
-	adj->ref_nr = ref_nr;
+	adj->ref_nr = 1;
 	adj->private = private;
 	dev_hold(adj_dev);

-	pr_debug("dev_hold for %s, because of link added from %s to %s\n",
-		 adj_dev->name, dev->name, adj_dev->name);
+	pr_debug("Insert adjacency: dev %s adj_dev %s adj->ref_nr %d; dev_hold on %s\n",
+		 dev->name, adj_dev->name, adj->ref_nr, adj_dev->name);

 	if (netdev_adjacent_is_neigh_list(dev, adj_dev, dev_list)) {
 		ret = netdev_adjacent_sysfs_add(dev, adj_dev, dev_list);
@@@ -5617,22 -5659,17 +5619,22 @@@ static void __netdev_adjacent_dev_remov
 {
 	struct netdev_adjacent *adj;

+	pr_debug("Remove adjacency: dev %s adj_dev %s ref_nr %d\n",
+		 dev->name, adj_dev->name, ref_nr);
+
 	adj = __netdev_find_adj(adj_dev, dev_list);

 	if (!adj) {
-		pr_err("tried to remove device %s from %s\n",
+		pr_err("Adjacency does not exist for device %s from %s\n",
 		       dev->name, adj_dev->name);
-		BUG();
+		WARN_ON(1);
+		return;
 	}

 	if (adj->ref_nr > ref_nr) {
-		pr_debug("%s to %s ref_nr-%d = %d\n", dev->name, adj_dev->name,
-			 ref_nr, adj->ref_nr-ref_nr);
+		pr_debug("adjacency: %s to %s ref_nr - %d = %d\n",
+			 dev->name, adj_dev->name, ref_nr,
+			 adj->ref_nr - ref_nr);
 		adj->ref_nr -= ref_nr;
 		return;
 	}
@@@ -5644,7 -5681,7 +5646,7 @@@
 		netdev_adjacent_sysfs_del(dev, adj_dev->name, dev_list);

 	list_del_rcu(&adj->list);
-	pr_debug("dev_put for %s, because link removed from %s to %s\n",
+	pr_debug("adjacency: dev_put for %s, because link removed from %s to %s\n",
 		 adj_dev->name, dev->name, adj_dev->name);
 	dev_put(adj_dev);
 	kfree_rcu(adj, rcu);
@@@ -5652,27 -5689,38 +5654,27 @@@

 static int __netdev_adjacent_dev_link_lists(struct net_device *dev,
 					    struct net_device *upper_dev,
-					    u16 ref_nr,
 					    struct list_head *up_list,
 					    struct list_head *down_list,
 					    void *private, bool master)
 {
 	int ret;

-	ret = __netdev_adjacent_dev_insert(dev, upper_dev, ref_nr, up_list,
+	ret = __netdev_adjacent_dev_insert(dev, upper_dev, up_list,
 					   private, master);
 	if (ret)
 		return ret;

-	ret = __netdev_adjacent_dev_insert(upper_dev, dev, ref_nr, down_list,
+	ret = __netdev_adjacent_dev_insert(upper_dev, dev, down_list,
 					   private, false);
 	if (ret) {
-		__netdev_adjacent_dev_remove(dev, upper_dev, ref_nr, up_list);
+		__netdev_adjacent_dev_remove(dev, upper_dev, 1, up_list);
 		return ret;
 	}

 	return 0;
 }

-static int __netdev_adjacent_dev_link(struct net_device *dev,
-				      struct net_device *upper_dev,
-				      u16 ref_nr)
-{
-	return __netdev_adjacent_dev_link_lists(dev, upper_dev, ref_nr,
-						&dev->all_adj_list.upper,
-						&upper_dev->all_adj_list.lower,
-						NULL, false);
-}
-
 static void __netdev_adjacent_dev_unlink_lists(struct net_device *dev,
 					       struct net_device *upper_dev,
 					       u16 ref_nr,
@@@ -5683,19 -5731,40 +5685,19 @@@
 	__netdev_adjacent_dev_remove(upper_dev, dev, ref_nr, down_list);
 }

-static void __netdev_adjacent_dev_unlink(struct net_device *dev,
-					 struct net_device *upper_dev,
-					 u16 ref_nr)
-{
-	__netdev_adjacent_dev_unlink_lists(dev, upper_dev, ref_nr,
-					   &dev->all_adj_list.upper,
-					   &upper_dev->all_adj_list.lower);
-}
-
 static int __netdev_adjacent_dev_link_neighbour(struct net_device *dev,
 						struct net_device *upper_dev,
 						void *private, bool master)
 {
-	int ret = __netdev_adjacent_dev_link(dev, upper_dev, 1);
-
-	if (ret)
-		return ret;
-
-	ret = __netdev_adjacent_dev_link_lists(dev, upper_dev, 1,
-					       &dev->adj_list.upper,
-					       &upper_dev->adj_list.lower,
-					       private, master);
-	if (ret) {
-		__netdev_adjacent_dev_unlink(dev, upper_dev, 1);
-		return ret;
-	}
-
-	return 0;
+	return __netdev_adjacent_dev_link_lists(dev, upper_dev,
+						&dev->adj_list.upper,
+						&upper_dev->adj_list.lower,
+						private, master);
 }

 static void __netdev_adjacent_dev_unlink_neighbour(struct net_device *dev,
 						   struct net_device *upper_dev)
 {
-	__netdev_adjacent_dev_unlink(dev, upper_dev, 1);
 	__netdev_adjacent_dev_unlink_lists(dev, upper_dev, 1,
 					   &dev->adj_list.upper,
 					   &upper_dev->adj_list.lower);
@@@ -5706,6 -5775,7 +5708,6 @@@ static int __netdev_upper_dev_link(stru
 				   void *upper_priv, void *upper_info)
 {
 	struct netdev_notifier_changeupper_info changeupper_info;
-	struct netdev_adjacent *i, *j, *to_i, *to_j;
 	int ret = 0;
 	ASSERT_RTNL();
@@@ -5714,10 -5784,10 +5716,10 @@@
 		return -EBUSY;

 	/* To prevent loops, check if dev is not upper device to upper_dev. */
-	if (__netdev_find_adj(dev, &upper_dev->all_adj_list.upper))
+	if (netdev_has_upper_dev(upper_dev, dev))
 		return -EBUSY;

-	if (__netdev_find_adj(upper_dev, &dev->adj_list.upper))
+	if (netdev_has_upper_dev(dev, upper_dev))
 		return -EEXIST;

 	if (master && netdev_master_upper_dev_get(dev))
@@@ -5739,15 -5809,80 +5741,15 @@@
 	if (ret)
 		return ret;

-	/* Now that we linked these devs, make all the upper_dev's
-	 * all_adj_list.upper visible to every dev's all_adj_list.lower an
-	 * versa, and don't forget the devices itself. All of these
-	 * links are non-neighbours.
-	 */
-	list_for_each_entry(i, &dev->all_adj_list.lower, list) {
-		list_for_each_entry(j, &upper_dev->all_adj_list.upper, list) {
-			pr_debug("Interlinking %s with %s, non-neighbour\n",
-				 i->dev->name, j->dev->name);
-			ret = __netdev_adjacent_dev_link(i->dev, j->dev, i->ref_nr);
-			if (ret)
-				goto rollback_mesh;
-		}
-	}
-
-	/* add dev to every upper_dev's upper device */
-	list_for_each_entry(i, &upper_dev->all_adj_list.upper, list) {
-		pr_debug("linking %s's upper device %s with %s\n",
-			 upper_dev->name, i->dev->name, dev->name);
-		ret = __netdev_adjacent_dev_link(dev, i->dev, i->ref_nr);
-		if (ret)
-			goto rollback_upper_mesh;
-	}
-
-	/* add upper_dev to every dev's lower device */
-	list_for_each_entry(i, &dev->all_adj_list.lower, list) {
-		pr_debug("linking %s's lower device %s with %s\n", dev->name,
-			 i->dev->name, upper_dev->name);
-		ret = __netdev_adjacent_dev_link(i->dev, upper_dev, i->ref_nr);
-		if (ret)
-			goto rollback_lower_mesh;
-	}
-
 	ret = call_netdevice_notifiers_info(NETDEV_CHANGEUPPER, dev,
 					    &changeupper_info.info);
 	ret = notifier_to_errno(ret);
 	if (ret)
-		goto rollback_lower_mesh;
+		goto rollback;

 	return 0;

-rollback_lower_mesh:
-	to_i = i;
-	list_for_each_entry(i, &dev->all_adj_list.lower, list) {
-		if (i == to_i)
-			break;
-		__netdev_adjacent_dev_unlink(i->dev, upper_dev, i->ref_nr);
-	}
-
-	i = NULL;
-
-rollback_upper_mesh:
-	to_i = i;
-	list_for_each_entry(i, &upper_dev->all_adj_list.upper, list) {
-		if (i == to_i)
-			break;
-		__netdev_adjacent_dev_unlink(dev, i->dev, i->ref_nr);
-	}
-
-	i = j = NULL;
-
-rollback_mesh:
-	to_i = i;
-	to_j = j;
-	list_for_each_entry(i, &dev->all_adj_list.lower, list) {
-		list_for_each_entry(j, &upper_dev->all_adj_list.upper, list) {
-			if (i == to_i && j == to_j)
-				break;
-			__netdev_adjacent_dev_unlink(i->dev, j->dev, i->ref_nr);
-		}
-		if (i == to_i)
-			break;
-	}
-
+rollback:
 	__netdev_adjacent_dev_unlink_neighbour(dev, upper_dev);

 	return ret;
@@@ -5804,6 -5939,7 +5806,6 @@@ void netdev_upper_dev_unlink(struct net
 				     struct net_device *upper_dev)
 {
 	struct netdev_notifier_changeupper_info changeupper_info;
-	struct netdev_adjacent *i, *j;

 	ASSERT_RTNL();

 	changeupper_info.upper_dev = upper_dev;
@@@ -5815,6 -5951,23 +5817,6 @@@

 	__netdev_adjacent_dev_unlink_neighbour(dev, upper_dev);

-	/* Here is the tricky part. We must remove all dev's lower
-	 * devices from all upper_dev's upper devices and vice
-	 * versa, to maintain the graph relationship.
-	 */
-	list_for_each_entry(i, &dev->all_adj_list.lower, list)
-		list_for_each_entry(j, &upper_dev->all_adj_list.upper, list)
-			__netdev_adjacent_dev_unlink(i->dev, j->dev, i->ref_nr);
-
-	/* remove also the devices itself from lower/upper device
-	 * list
-	 */
-	list_for_each_entry(i, &dev->all_adj_list.lower, list)
-		__netdev_adjacent_dev_unlink(i->dev, upper_dev, i->ref_nr);
-
-	list_for_each_entry(i, &upper_dev->all_adj_list.upper, list)
-		__netdev_adjacent_dev_unlink(dev, i->dev, i->ref_nr);
-
 	call_netdevice_notifiers_info(NETDEV_CHANGEUPPER, dev,
 				      &changeupper_info.info);
 }
@@@ -6352,18 -6505,9 +6354,18 @@@ int dev_set_mtu(struct net_device *dev
 	if (new_mtu == dev->mtu)
 		return 0;

-	/* MTU must be positive. */
-	if (new_mtu < 0)
+	/* MTU must be positive, and in range */
+	if (new_mtu < 0 || new_mtu < dev->min_mtu) {
+		net_err_ratelimited("%s: Invalid MTU %d requested, hw min %d\n",
+				    dev->name, new_mtu, dev->min_mtu);
 		return -EINVAL;
+	}
+
+	if (dev->max_mtu > 0 && new_mtu > dev->max_mtu) {
+		net_err_ratelimited("%s: Invalid MTU %d requested, hw max %d\n",
+				    dev->name, new_mtu, dev->max_mtu);
+		return -EINVAL;
+	}
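The range check above only bites when a driver publishes its limits; a sketch of what that looks like on the driver side, under the new min_mtu/max_mtu fields (the driver name and the 9000-byte jumbo limit are invented for illustration, the defaults come from the ether_setup() change further down):

/* hypothetical driver setup: publish MTU bounds and let dev_set_mtu()
 * reject out-of-range values instead of checking in ndo_change_mtu */
static void foo_setup(struct net_device *dev)
{
	ether_setup(dev);	/* sets min_mtu = ETH_MIN_MTU (68), max_mtu = ETH_DATA_LEN */
	dev->max_mtu = 9000;	/* this device supports jumbo frames */
}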
 	if (!netif_device_present(dev))
 		return -ENODEV;
@@@ -6638,7 -6782,6 +6640,7 @@@ static void rollback_registered_many(st

 		/* Notifier chain MUST detach us all upper devices. */
 		WARN_ON(netdev_has_any_upper_dev(dev));
+		WARN_ON(netdev_has_any_lower_dev(dev));

 		/* Remove entries from kobject tree */
 		netdev_unregister_kobject(dev);
@@@ -7517,6 -7660,8 +7519,6 @@@ struct net_device *alloc_netdev_mqs(in
 	INIT_LIST_HEAD(&dev->link_watch_list);
 	INIT_LIST_HEAD(&dev->adj_list.upper);
 	INIT_LIST_HEAD(&dev->adj_list.lower);
-	INIT_LIST_HEAD(&dev->all_adj_list.upper);
-	INIT_LIST_HEAD(&dev->all_adj_list.lower);
 	INIT_LIST_HEAD(&dev->ptype_all);
 	INIT_LIST_HEAD(&dev->ptype_specific);
 #ifdef CONFIG_NET_SCHED
diff --combined net/core/flow_dissector.c
index 0cc607d,ab193e5..87e0181
--- a/net/core/flow_dissector.c
+++ b/net/core/flow_dissector.c
@@@ -246,15 -246,13 +246,13 @@@ ipv6
 	case htons(ETH_P_8021AD):
 	case htons(ETH_P_8021Q): {
 		const struct vlan_hdr *vlan;
+		struct vlan_hdr _vlan;
+		bool vlan_tag_present = skb && skb_vlan_tag_present(skb);
-		if (skb_vlan_tag_present(skb))
+		if (vlan_tag_present)
 			proto = skb->protocol;

-		if (!skb_vlan_tag_present(skb) ||
-		    proto == cpu_to_be16(ETH_P_8021Q) ||
-		    proto == cpu_to_be16(ETH_P_8021AD)) {
-			struct vlan_hdr _vlan;
-
+		if (!vlan_tag_present || eth_type_vlan(skb->protocol)) {
 			vlan = __skb_header_pointer(skb, nhoff, sizeof(_vlan),
 						    data, hlen, &_vlan);
 			if (!vlan)
@@@ -272,7 -270,7 +270,7 @@@
 						     FLOW_DISSECTOR_KEY_VLAN,
 						     target_container);

-		if (skb_vlan_tag_present(skb)) {
+		if (vlan_tag_present) {
 			key_vlan->vlan_id = skb_vlan_tag_get_id(skb);
 			key_vlan->vlan_priority =
 				(skb_vlan_tag_get_prio(skb) >> VLAN_PRIO_SHIFT);
@@@ -723,7 -721,7 +721,7 @@@ EXPORT_SYMBOL(make_flow_keys_digest)

 static struct flow_dissector flow_keys_dissector_symmetric __read_mostly;

-u32 __skb_get_hash_symmetric(struct sk_buff *skb)
+u32 __skb_get_hash_symmetric(const struct sk_buff *skb)
 {
 	struct flow_keys keys;

diff --combined net/core/net_namespace.c
index b9243b1,f61c0e0..1309d78
--- a/net/core/net_namespace.c
+++ b/net/core/net_namespace.c
@@@ -215,13 -215,14 +215,14 @@@ static void rtnl_net_notifyid(struct ne
  */
 int peernet2id_alloc(struct net *net, struct net *peer)
 {
+	unsigned long flags;
 	bool alloc;
 	int id;
-	spin_lock_bh(&net->nsid_lock);
+	spin_lock_irqsave(&net->nsid_lock, flags);
 	alloc = atomic_read(&peer->count) == 0 ? false : true;
 	id = __peernet2id_alloc(net, peer, &alloc);
-	spin_unlock_bh(&net->nsid_lock);
+	spin_unlock_irqrestore(&net->nsid_lock, flags);
 	if (alloc && id >= 0)
 		rtnl_net_notifyid(net, RTM_NEWNSID, id);
 	return id;
@@@ -230,11 -231,12 +231,12 @@@
 /* This function returns, if assigned, the id of a peer netns. */
 int peernet2id(struct net *net, struct net *peer)
 {
+	unsigned long flags;
 	int id;

-	spin_lock_bh(&net->nsid_lock);
+	spin_lock_irqsave(&net->nsid_lock, flags);
 	id = __peernet2id(net, peer);
-	spin_unlock_bh(&net->nsid_lock);
+	spin_unlock_irqrestore(&net->nsid_lock, flags);
 	return id;
 }
 EXPORT_SYMBOL(peernet2id);
@@@ -249,17 -251,18 +251,18 @@@ bool peernet_has_id(struct net *net, st

 struct net *get_net_ns_by_id(struct net *net, int id)
 {
+	unsigned long flags;
 	struct net *peer;

 	if (id < 0)
 		return NULL;

 	rcu_read_lock();
-	spin_lock_bh(&net->nsid_lock);
+	spin_lock_irqsave(&net->nsid_lock, flags);
 	peer = idr_find(&net->netns_ids, id);
 	if (peer)
 		get_net(peer);
-	spin_unlock_bh(&net->nsid_lock);
+	spin_unlock_irqrestore(&net->nsid_lock, flags);
 	rcu_read_unlock();

 	return peer;
@@@ -379,14 -382,7 +382,14 @@@ struct net *copy_net_ns(unsigned long f

 	get_user_ns(user_ns);

-	mutex_lock(&net_mutex);
+	rv = mutex_lock_killable(&net_mutex);
+	if (rv < 0) {
+		net_free(net);
+		dec_net_namespaces(ucounts);
+		put_user_ns(user_ns);
+		return ERR_PTR(rv);
+	}
+
 	net->ucounts = ucounts;
 	rv = setup_net(net, user_ns);
 	if (rv == 0) {
@@@ -429,17 -425,17 +432,17 @@@ static void cleanup_net(struct work_str
 		for_each_net(tmp) {
 			int id;

-			spin_lock_bh(&tmp->nsid_lock);
+			spin_lock_irq(&tmp->nsid_lock);
 			id = __peernet2id(tmp, net);
 			if (id >= 0)
 				idr_remove(&tmp->netns_ids, id);
-			spin_unlock_bh(&tmp->nsid_lock);
+			spin_unlock_irq(&tmp->nsid_lock);
 			if (id >= 0)
 				rtnl_net_notifyid(tmp, RTM_DELNSID, id);
 		}
-		spin_lock_bh(&net->nsid_lock);
+		spin_lock_irq(&net->nsid_lock);
 		idr_destroy(&net->netns_ids);
-		spin_unlock_bh(&net->nsid_lock);
+		spin_unlock_irq(&net->nsid_lock);

 	}
 	rtnl_unlock();
@@@ -568,6 -564,7 +571,7 @@@ static int rtnl_net_newid(struct sk_buf
 {
 	struct net *net = sock_net(skb->sk);
 	struct nlattr *tb[NETNSA_MAX + 1];
+	unsigned long flags;
 	struct net *peer;
 	int nsid, err;

@@@ -588,15 -585,15 +592,15 @@@
 	if (IS_ERR(peer))
 		return PTR_ERR(peer);

-	spin_lock_bh(&net->nsid_lock);
+	spin_lock_irqsave(&net->nsid_lock, flags);
 	if (__peernet2id(net, peer) >= 0) {
-		spin_unlock_bh(&net->nsid_lock);
+		spin_unlock_irqrestore(&net->nsid_lock, flags);
 		err = -EEXIST;
 		goto out;
 	}

 	err = alloc_netid(net, peer, nsid);
-	spin_unlock_bh(&net->nsid_lock);
+	spin_unlock_irqrestore(&net->nsid_lock, flags);
 	if (err >= 0) {
 		rtnl_net_notifyid(net, RTM_NEWNSID, err);
 		err = 0;
@@@ -718,10 -715,11 +722,11 @@@ static int rtnl_net_dumpid(struct sk_bu
 		.idx = 0,
 		.s_idx = cb->args[0],
 	};
+	unsigned long flags;

-	spin_lock_bh(&net->nsid_lock);
+	spin_lock_irqsave(&net->nsid_lock, flags);
 	idr_for_each(&net->netns_ids, rtnl_net_dumpid_one, &net_cb);
-	spin_unlock_bh(&net->nsid_lock);
+	spin_unlock_irqrestore(&net->nsid_lock, flags);
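All of the nsid_lock conversions in this file follow one pattern, spelled out generically below (a sketch, not from the patch): _bh locking only masks softirqs, so a lock that can now be taken from hardirq context has to save and restore the interrupt state instead.

	unsigned long flags;

	spin_lock_irqsave(&lock, flags);	/* also disables hard irqs */
	/* ... touch the idr ... */
	spin_unlock_irqrestore(&lock, flags);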
 	cb->args[0] = net_cb.idx;
 	return skb->len;
diff --combined net/ethernet/eth.c
index f983c10,02acfff..d9e2fe1
--- a/net/ethernet/eth.c
+++ b/net/ethernet/eth.c
@@@ -322,7 -322,8 +322,7 @@@ EXPORT_SYMBOL(eth_mac_addr)
  */
 int eth_change_mtu(struct net_device *dev, int new_mtu)
 {
-	if (new_mtu < 68 || new_mtu > ETH_DATA_LEN)
-		return -EINVAL;
+	netdev_warn(dev, "%s is deprecated\n", __func__);
 	dev->mtu = new_mtu;
 	return 0;
 }
@@@ -356,8 -357,6 +356,8 @@@ void ether_setup(struct net_device *dev
 	dev->type		= ARPHRD_ETHER;
 	dev->hard_header_len	= ETH_HLEN;
 	dev->mtu		= ETH_DATA_LEN;
+	dev->min_mtu		= ETH_MIN_MTU;
+	dev->max_mtu		= ETH_DATA_LEN;
 	dev->addr_len		= ETH_ALEN;
 	dev->tx_queue_len	= 1000;	/* Ethernet wants good queues */
 	dev->flags		= IFF_BROADCAST|IFF_MULTICAST;
@@@ -440,7 -439,7 +440,7 @@@ struct sk_buff **eth_gro_receive(struc

 	skb_gro_pull(skb, sizeof(*eh));
 	skb_gro_postpull_rcsum(skb, eh, sizeof(*eh));
-	pp = ptype->callbacks.gro_receive(head, skb);
+	pp = call_gro_receive(ptype->callbacks.gro_receive, head, skb);

 out_unlock:
 	rcu_read_unlock();
diff --combined net/ipv4/fou.c
index 6cb57bb,030d153..805f660
--- a/net/ipv4/fou.c
+++ b/net/ipv4/fou.c
@@@ -249,7 -249,7 +249,7 @@@ static struct sk_buff **fou_gro_receive
 	if (!ops || !ops->callbacks.gro_receive)
 		goto out_unlock;

-	pp = ops->callbacks.gro_receive(head, skb);
+	pp = call_gro_receive(ops->callbacks.gro_receive, head, skb);

 out_unlock:
 	rcu_read_unlock();
@@@ -441,7 -441,7 +441,7 @@@ next_proto
 	if (WARN_ON_ONCE(!ops || !ops->callbacks.gro_receive))
 		goto out_unlock;

-	pp = ops->callbacks.gro_receive(head, skb);
+	pp = call_gro_receive(ops->callbacks.gro_receive, head, skb);
 	flush = 0;

 out_unlock:
@@@ -622,7 -622,14 +622,7 @@@ static int fou_destroy(struct net *net
 	return err;
 }

-static struct genl_family fou_nl_family = {
-	.id		= GENL_ID_GENERATE,
-	.hdrsize	= 0,
-	.name		= FOU_GENL_NAME,
-	.version	= FOU_GENL_VERSION,
-	.maxattr	= FOU_ATTR_MAX,
-	.netnsok	= true,
-};
+static struct genl_family fou_nl_family;

 static const struct nla_policy fou_nl_policy[FOU_ATTR_MAX + 1] = {
 	[FOU_ATTR_PORT] = { .type = NLA_U16, },
@@@ -824,17 -831,6 +824,17 @@@ static const struct genl_ops fou_nl_ops
 	},
 };

+static struct genl_family fou_nl_family __ro_after_init = {
+	.hdrsize	= 0,
+	.name		= FOU_GENL_NAME,
+	.version	= FOU_GENL_VERSION,
+	.maxattr	= FOU_ATTR_MAX,
+	.netnsok	= true,
+	.module		= THIS_MODULE,
+	.ops		= fou_nl_ops,
+	.n_ops		= ARRAY_SIZE(fou_nl_ops),
+};
+
 size_t fou_encap_hlen(struct ip_tunnel_encap *e)
 {
 	return sizeof(struct udphdr);
@@@ -1090,7 -1086,8 +1090,7 @@@ static int __init fou_init(void
 	if (ret)
 		goto exit;

-	ret = genl_register_family_with_ops(&fou_nl_family,
-					    fou_nl_ops);
+	ret = genl_register_family(&fou_nl_family);
 	if (ret < 0)
 		goto unregister;
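fou is converted to the then-new genetlink style: no GENL_ID_GENERATE, the ops carried in the family itself, and one-step registration. The pattern, condensed into a sketch (the family and op names here are hypothetical):

static const struct genl_ops demo_nl_ops[] = {
	/* .cmd / .doit / .policy entries, as in fou_nl_ops above */
};

static struct genl_family demo_nl_family __ro_after_init = {
	.name		= "demo",
	.version	= 1,
	.module		= THIS_MODULE,
	.ops		= demo_nl_ops,
	.n_ops		= ARRAY_SIZE(demo_nl_ops),
};

static int __init demo_init(void)
{
	return genl_register_family(&demo_nl_family);	/* id assigned here */
}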
diff --combined net/ipv4/raw.c
index 03618ed,ecbe5a7..d78d738
--- a/net/ipv4/raw.c
+++ b/net/ipv4/raw.c
@@@ -89,10 -89,9 +89,10 @@@ struct raw_frag_vec
 	int hlen;
 };

-static struct raw_hashinfo raw_v4_hashinfo = {
+struct raw_hashinfo raw_v4_hashinfo = {
 	.lock = __RW_LOCK_UNLOCKED(raw_v4_hashinfo.lock),
 };
+EXPORT_SYMBOL_GPL(raw_v4_hashinfo);

 int raw_hash_sk(struct sock *sk)
 {
@@@ -121,7 -120,7 +121,7 @@@ void raw_unhash_sk(struct sock *sk
 }
 EXPORT_SYMBOL_GPL(raw_unhash_sk);

-static struct sock *__raw_v4_lookup(struct net *net, struct sock *sk,
+struct sock *__raw_v4_lookup(struct net *net, struct sock *sk,
 		unsigned short num, __be32 raddr, __be32 laddr, int dif)
 {
 	sk_for_each_from(sk) {
@@@ -137,7 -136,6 +137,7 @@@
 found:
 	return sk;
 }
+EXPORT_SYMBOL_GPL(__raw_v4_lookup);

 /*
  *	0 - deliver
@@@ -914,27 -912,13 +914,27 @@@ static int compat_raw_ioctl(struct soc
 }
 #endif

+int raw_abort(struct sock *sk, int err)
+{
+	lock_sock(sk);
+
+	sk->sk_err = err;
+	sk->sk_error_report(sk);
+	udp_disconnect(sk, 0);
+
+	release_sock(sk);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(raw_abort);
+
 struct proto raw_prot = {
 	.name		   = "RAW",
 	.owner		   = THIS_MODULE,
 	.close		   = raw_close,
 	.destroy	   = raw_destroy,
 	.connect	   = ip4_datagram_connect,
-	.disconnect	   = udp_disconnect,
+	.disconnect	   = __udp_disconnect,
 	.ioctl		   = raw_ioctl,
 	.init		   = raw_init,
 	.setsockopt	   = raw_setsockopt,
@@@ -953,7 -937,6 +953,7 @@@
 	.compat_getsockopt = compat_raw_getsockopt,
 	.compat_ioctl	   = compat_raw_ioctl,
 #endif
+	.diag_destroy	   = raw_abort,
 };
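For orientation, the new .diag_destroy hook is what a SOCK_DESTROY netlink request ends up invoking; roughly (a simplified sketch of the sock_diag dispatch, with the capability check omitted and the function name invented):

/* simplified sketch of the dispatch path behind SOCK_DESTROY */
static int demo_sock_destroy(struct sock *sk, int err)
{
	if (!sk->sk_prot->diag_destroy)
		return -EOPNOTSUPP;

	return sk->sk_prot->diag_destroy(sk, err);	/* raw_abort() here */
}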
 #ifdef CONFIG_PROC_FS
diff --combined net/ipv4/tcp_ipv4.c
index 83b3d0b,61b7be3..b9b82826
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@@ -86,7 -86,6 +86,7 @@@

 int sysctl_tcp_tw_reuse __read_mostly;
 int sysctl_tcp_low_latency __read_mostly;
-EXPORT_SYMBOL(sysctl_tcp_low_latency);

 #ifdef CONFIG_TCP_MD5SIG
 static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
@@@ -1887,13 -1886,12 +1886,12 @@@ static void *listening_get_next(struct
 	struct tcp_iter_state *st = seq->private;
 	struct net *net = seq_file_net(seq);
 	struct inet_listen_hashbucket *ilb;
-	struct inet_connection_sock *icsk;
 	struct sock *sk = cur;

 	if (!sk) {
 get_head:
 		ilb = &tcp_hashinfo.listening_hash[st->bucket];
-		spin_lock_bh(&ilb->lock);
+		spin_lock(&ilb->lock);
 		sk = sk_head(&ilb->head);
 		st->offset = 0;
 		goto get_sk;
@@@ -1909,9 -1907,8 +1907,8 @@@ get_sk
 			continue;
 		if (sk->sk_family == st->family)
 			return sk;
-		icsk = inet_csk(sk);
 	}
-	spin_unlock_bh(&ilb->lock);
+	spin_unlock(&ilb->lock);
 	st->offset = 0;
 	if (++st->bucket < INET_LHTABLE_SIZE)
 		goto get_head;
@@@ -2119,7 -2116,7 +2116,7 @@@ static void tcp_seq_stop(struct seq_fil
 	switch (st->state) {
 	case TCP_SEQ_STATE_LISTENING:
 		if (v != SEQ_START_TOKEN)
-			spin_unlock_bh(&tcp_hashinfo.listening_hash[st->bucket].lock);
+			spin_unlock(&tcp_hashinfo.listening_hash[st->bucket].lock);
 		break;
 	case TCP_SEQ_STATE_ESTABLISHED:
 		if (v)
diff --combined net/ipv4/udp.c
index c833271,d123d68..195992e
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@@ -1172,112 -1172,6 +1172,112 @@@ out
 	return ret;
 }

+static void udp_rmem_release(struct sock *sk, int size, int partial)
+{
+	int amt;
+
+	atomic_sub(size, &sk->sk_rmem_alloc);
+
+	spin_lock_bh(&sk->sk_receive_queue.lock);
+	sk->sk_forward_alloc += size;
+	amt = (sk->sk_forward_alloc - partial) & ~(SK_MEM_QUANTUM - 1);
+	sk->sk_forward_alloc -= amt;
+	spin_unlock_bh(&sk->sk_receive_queue.lock);
+
+	if (amt)
+		__sk_mem_reduce_allocated(sk, amt >> SK_MEM_QUANTUM_SHIFT);
+}
+
+static void udp_rmem_free(struct sk_buff *skb)
+{
+	udp_rmem_release(skb->sk, skb->truesize, 1);
+}
+
+int __udp_enqueue_schedule_skb(struct sock *sk, struct sk_buff *skb)
+{
+	struct sk_buff_head *list = &sk->sk_receive_queue;
+	int rmem, delta, amt, err = -ENOMEM;
+	int size = skb->truesize;
+
+	/* try to avoid the costly atomic add/sub pair when the receive
+	 * queue is full; always allow at least a packet
+	 */
+	rmem = atomic_read(&sk->sk_rmem_alloc);
+	if (rmem && (rmem + size > sk->sk_rcvbuf))
+		goto drop;
+
+	/* we drop only if the receive buf is full and the receive
+	 * queue contains some other skb
+	 */
+	rmem = atomic_add_return(size, &sk->sk_rmem_alloc);
+	if ((rmem > sk->sk_rcvbuf) && (rmem > size))
+		goto uncharge_drop;
+
+	spin_lock(&list->lock);
+	if (size >= sk->sk_forward_alloc) {
+		amt = sk_mem_pages(size);
+		delta = amt << SK_MEM_QUANTUM_SHIFT;
+		if (!__sk_mem_raise_allocated(sk, delta, amt, SK_MEM_RECV)) {
+			err = -ENOBUFS;
+			spin_unlock(&list->lock);
+			goto uncharge_drop;
+		}
+
+		sk->sk_forward_alloc += delta;
+	}
+
+	sk->sk_forward_alloc -= size;
+
+	/* the skb owner is now the udp socket */
+	skb->sk = sk;
+	skb->destructor = udp_rmem_free;
+	skb->dev = NULL;
+	sock_skb_set_dropcount(sk, skb);
+
+	__skb_queue_tail(list, skb);
+	spin_unlock(&list->lock);
+
+	if (!sock_flag(sk, SOCK_DEAD))
+		sk->sk_data_ready(sk);
+
+	return 0;
+
+uncharge_drop:
+	atomic_sub(skb->truesize, &sk->sk_rmem_alloc);
+
+drop:
+	atomic_inc(&sk->sk_drops);
+	return err;
+}
+EXPORT_SYMBOL_GPL(__udp_enqueue_schedule_skb);
+
+static void udp_destruct_sock(struct sock *sk)
+{
+	/* reclaim completely the forward allocated memory */
+	__skb_queue_purge(&sk->sk_receive_queue);
+	udp_rmem_release(sk, 0, 0);
+	inet_sock_destruct(sk);
+}
+
+int udp_init_sock(struct sock *sk)
+{
+	sk->sk_destruct = udp_destruct_sock;
+	return 0;
+}
+EXPORT_SYMBOL_GPL(udp_init_sock);
+
+void skb_consume_udp(struct sock *sk, struct sk_buff *skb, int len)
+{
+	if (unlikely(READ_ONCE(sk->sk_peek_off) >= 0)) {
+		bool slow = lock_sock_fast(sk);
+
+		sk_peek_offset_bwd(sk, len);
+		unlock_sock_fast(sk, slow);
+	}
+	consume_skb(skb);
+}
+EXPORT_SYMBOL_GPL(skb_consume_udp);
+
 /**
  *	first_packet_length	- return length of first packet in receive queue
  *	@sk: socket
@@@ -1307,7 -1201,13 +1307,7 @@@ static int first_packet_length(struct s
 	res = skb ? skb->len : -1;
 	spin_unlock_bh(&rcvq->lock);
-	if (!skb_queue_empty(&list_kill)) {
-		bool slow = lock_sock_fast(sk);
-
-		__skb_queue_purge(&list_kill);
-		sk_mem_reclaim_partial(sk);
-		unlock_sock_fast(sk, slow);
-	}
+	__skb_queue_purge(&list_kill);
 	return res;
 }

@@@ -1356,6 -1256,7 +1356,6 @@@ int udp_recvmsg(struct sock *sk, struc
 	int err;
 	int is_udplite = IS_UDPLITE(sk);
 	bool checksum_valid = false;
-	bool slow;

 	if (flags & MSG_ERRQUEUE)
 		return ip_recv_error(sk, msg, len, addr_len);
@@@ -1396,12 -1297,13 +1396,12 @@@ try_again
 	}

 	if (unlikely(err)) {
-		trace_kfree_skb(skb, udp_recvmsg);
 		if (!peeked) {
 			atomic_inc(&sk->sk_drops);
 			UDP_INC_STATS(sock_net(sk),
 				      UDP_MIB_INERRORS, is_udplite);
 		}
-		skb_free_datagram_locked(sk, skb);
+		kfree_skb(skb);
 		return err;
 	}

@@@ -1420,21 -1322,22 +1420,21 @@@
 		*addr_len = sizeof(*sin);
 	}
 	if (inet->cmsg_flags)
-		ip_cmsg_recv_offset(msg, skb, sizeof(struct udphdr) + off);
+		ip_cmsg_recv_offset(msg, skb, sizeof(struct udphdr), off);

 	err = copied;
 	if (flags & MSG_TRUNC)
 		err = ulen;

-	__skb_free_datagram_locked(sk, skb, peeking ? -err : err);
+	skb_consume_udp(sk, skb, peeking ? -err : err);
 	return err;

 csum_copy_err:
-	slow = lock_sock_fast(sk);
-	if (!skb_kill_datagram(sk, skb, flags)) {
+	if (!__sk_queue_drop_skb(sk, skb, flags)) {
 		UDP_INC_STATS(sock_net(sk), UDP_MIB_CSUMERRORS, is_udplite);
 		UDP_INC_STATS(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
 	}
-	unlock_sock_fast(sk, slow);
+	kfree_skb(skb);

 	/* starting over for a new packet, but check if we need to yield */
 	cond_resched();
@@@ -1442,7 -1345,7 +1442,7 @@@
 	goto try_again;
 }

-int udp_disconnect(struct sock *sk, int flags)
+int __udp_disconnect(struct sock *sk, int flags)
 {
 	struct inet_sock *inet = inet_sk(sk);
 	/*
@@@ -1464,6 -1367,15 +1464,15 @@@
 	sk_dst_reset(sk);
 	return 0;
 }
+EXPORT_SYMBOL(__udp_disconnect);
+
+int udp_disconnect(struct sock *sk, int flags)
+{
+	lock_sock(sk);
+	__udp_disconnect(sk, flags);
+	release_sock(sk);
+	return 0;
+}
 EXPORT_SYMBOL(udp_disconnect);
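The split follows the usual locked/unlocked kernel convention: __udp_disconnect() assumes the caller already holds the socket lock, while udp_disconnect() takes it itself. A sketch of a caller on the locked path (modelled on the udp_abort() hunk further down; the function name is invented):

static void demo_abort(struct sock *sk)
{
	lock_sock(sk);

	sk->sk_err = ECONNABORTED;
	sk->sk_error_report(sk);
	__udp_disconnect(sk, 0);	/* lock already held, skip the wrapper */

	release_sock(sk);
}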
 void udp_lib_unhash(struct sock *sk)
@@@ -1553,7 -1465,7 +1562,7 @@@ static int __udp_queue_rcv_skb(struct s
 		sk_incoming_cpu_update(sk);
 	}

-	rc = __sock_queue_rcv_skb(sk, skb);
+	rc = __udp_enqueue_schedule_skb(sk, skb);
 	if (rc < 0) {
 		int is_udplite = IS_UDPLITE(sk);

@@@ -1568,6 -1480,7 +1577,6 @@@
 	}

 	return 0;
-
 }
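The enqueue above relies on the udp_rmem_release() accounting added earlier in this file, which rounds the reclaimable forward allocation down to SK_MEM_QUANTUM. A worked example with made-up numbers:

/* SK_MEM_QUANTUM is one page, e.g. 4096:
 *   sk_forward_alloc = 5000, partial = 1
 *   amt = (5000 - 1) & ~(4096 - 1) = 4096
 * one full quantum goes back to the global memory accounting, and the
 * remaining 904 bytes stay cached as this socket's forward allocation.
 */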
 static struct static_key udp_encap_needed __read_mostly;
@@@ -1589,6 -1502,7 +1598,6 @@@ EXPORT_SYMBOL(udp_encap_enable)
 int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
 {
 	struct udp_sock *up = udp_sk(sk);
-	int rc;
 	int is_udplite = IS_UDPLITE(sk);

 	/*
@@@ -1675,9 -1589,25 +1684,9 @@@
 		goto drop;

 	udp_csum_pull_header(skb);
-	if (sk_rcvqueues_full(sk, sk->sk_rcvbuf)) {
-		__UDP_INC_STATS(sock_net(sk), UDP_MIB_RCVBUFERRORS,
-				is_udplite);
-		goto drop;
-	}
-
-	rc = 0;

 	ipv4_pktinfo_prepare(sk, skb);
-	bh_lock_sock(sk);
-	if (!sock_owned_by_user(sk))
-		rc = __udp_queue_rcv_skb(sk, skb);
-	else if (sk_add_backlog(sk, skb, sk->sk_rcvbuf)) {
-		bh_unlock_sock(sk);
-		goto drop;
-	}
-	bh_unlock_sock(sk);
-
-	return rc;
+	return __udp_queue_rcv_skb(sk, skb);

 csum_error:
 	__UDP_INC_STATS(sock_net(sk), UDP_MIB_CSUMERRORS, is_udplite);
@@@ -2272,7 -2202,7 +2281,7 @@@ int udp_abort(struct sock *sk, int err

 	sk->sk_err = err;
 	sk->sk_error_report(sk);
-	udp_disconnect(sk, 0);
+	__udp_disconnect(sk, 0);

 	release_sock(sk);

@@@ -2287,13 -2217,13 +2296,13 @@@ struct proto udp_prot = 
 	.connect	   = ip4_datagram_connect,
 	.disconnect	   = udp_disconnect,
 	.ioctl		   = udp_ioctl,
+	.init		   = udp_init_sock,
 	.destroy	   = udp_destroy_sock,
 	.setsockopt	   = udp_setsockopt,
 	.getsockopt	   = udp_getsockopt,
 	.sendmsg	   = udp_sendmsg,
 	.recvmsg	   = udp_recvmsg,
 	.sendpage	   = udp_sendpage,
-	.backlog_rcv	   = __udp_queue_rcv_skb,
 	.release_cb	   = ip4_datagram_release_cb,
 	.hash		   = udp_lib_hash,
 	.unhash		   = udp_lib_unhash,
diff --combined net/ipv6/ip6_tunnel.c
index 3a70567,8778456..03e050d
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@@ -157,6 -157,7 +157,7 @@@ ip6_tnl_lookup(struct net *net, const s
 	hash = HASH(&any, local);
 	for_each_ip6_tunnel_rcu(ip6n->tnls_r_l[hash]) {
 		if (ipv6_addr_equal(local, &t->parms.laddr) &&
+		    ipv6_addr_any(&t->parms.raddr) &&
 		    (t->dev->flags & IFF_UP))
 			return t;
 	}
@@@ -164,6 -165,7 +165,7 @@@
 	hash = HASH(remote, &any);
 	for_each_ip6_tunnel_rcu(ip6n->tnls_r_l[hash]) {
 		if (ipv6_addr_equal(remote, &t->parms.raddr) &&
+		    ipv6_addr_any(&t->parms.laddr) &&
 		    (t->dev->flags & IFF_UP))
 			return t;
 	}
@@@ -1170,6 -1172,7 +1172,7 @@@ route_lookup
 	if (err)
 		return err;

+	skb->protocol = htons(ETH_P_IPV6);
 	skb_push(skb, sizeof(struct ipv6hdr));
 	skb_reset_network_header(skb);
 	ipv6h = ipv6_hdr(skb);
@@@ -1634,7 -1637,7 +1637,7 @@@ int ip6_tnl_change_mtu(struct net_devic
 	struct ip6_tnl *tnl = netdev_priv(dev);

 	if (tnl->parms.proto == IPPROTO_IPIP) {
-		if (new_mtu < 68)
+		if (new_mtu < ETH_MIN_MTU)
 			return -EINVAL;
 	} else {
 		if (new_mtu < IPV6_MIN_MTU)
@@@ -1787,8 -1790,6 +1790,8 @@@ ip6_tnl_dev_init_gen(struct net_device
 	dev->mtu = ETH_DATA_LEN - t_hlen;
 	if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
 		dev->mtu -= 8;
+	dev->min_mtu = ETH_MIN_MTU;
+	dev->max_mtu = 0xFFF8 - dev->hard_header_len;

 	return 0;

diff --combined net/ipv6/raw.c
index d7e8b95,054a1d8..610e093
--- a/net/ipv6/raw.c
+++ b/net/ipv6/raw.c
@@@ -65,12 -65,11 +65,12 @@@
 #define ICMPV6_HDRLEN	4	/* ICMPv6 header, RFC 4443 Section 2.1 */

-static struct raw_hashinfo raw_v6_hashinfo = {
+struct raw_hashinfo raw_v6_hashinfo = {
 	.lock = __RW_LOCK_UNLOCKED(raw_v6_hashinfo.lock),
 };
+EXPORT_SYMBOL_GPL(raw_v6_hashinfo);

-static struct sock *__raw_v6_lookup(struct net *net, struct sock *sk,
+struct sock *__raw_v6_lookup(struct net *net, struct sock *sk,
 		unsigned short num, const struct in6_addr *loc_addr,
 		const struct in6_addr *rmt_addr, int dif)
 {
@@@ -103,7 -102,6 +103,7 @@@
 found:
 	return sk;
 }
+EXPORT_SYMBOL_GPL(__raw_v6_lookup);

 /*
  *	0 - deliver
@@@ -1243,7 -1241,7 +1243,7 @@@ struct proto rawv6_prot = 
 	.close		   = rawv6_close,
 	.destroy	   = raw6_destroy,
 	.connect	   = ip6_datagram_connect_v6_only,
-	.disconnect	   = udp_disconnect,
+	.disconnect	   = __udp_disconnect,
 	.ioctl		   = rawv6_ioctl,
 	.init		   = rawv6_init_sk,
 	.setsockopt	   = rawv6_setsockopt,
@@@ -1261,7 -1259,6 +1261,7 @@@
 	.compat_getsockopt = compat_rawv6_getsockopt,
 	.compat_ioctl	   = compat_rawv6_ioctl,
 #endif
+	.diag_destroy	   = raw_abort,
 };

 #ifdef CONFIG_PROC_FS
diff --combined net/ipv6/udp.c
index 71963b2,b2ef061..a7700bb
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@@ -334,6 -334,7 +334,6 @@@ int udpv6_recvmsg(struct sock *sk, stru
 	int is_udplite = IS_UDPLITE(sk);
 	bool checksum_valid = false;
 	int is_udp4;
-	bool slow;

 	if (flags & MSG_ERRQUEUE)
 		return ipv6_recv_error(sk, msg, len, addr_len);
@@@ -377,6 -378,7 +377,6 @@@ try_again
 		goto csum_copy_err;
 	}
 	if (unlikely(err)) {
-		trace_kfree_skb(skb, udpv6_recvmsg);
 		if (!peeked) {
 			atomic_inc(&sk->sk_drops);
 			if (is_udp4)
@@@ -386,7 -388,7 +386,7 @@@
 				UDP6_INC_STATS(sock_net(sk),
 					       UDP_MIB_INERRORS, is_udplite);
 		}
-		skb_free_datagram_locked(sk, skb);
+		kfree_skb(skb);
 		return err;
 	}
 	if (!peeked) {
@@@ -425,7 -427,8 +425,8 @@@

 	if (is_udp4) {
 		if (inet->cmsg_flags)
-			ip_cmsg_recv(msg, skb);
+			ip_cmsg_recv_offset(msg, skb,
+					    sizeof(struct udphdr), off);
 	} else {
 		if (np->rxopt.all)
 			ip6_datagram_recv_specific_ctl(sk, msg, skb);
@@@ -435,11 -438,12 +436,11 @@@
 	if (flags & MSG_TRUNC)
 		err = ulen;

-	__skb_free_datagram_locked(sk, skb, peeking ? -err : err);
+	skb_consume_udp(sk, skb, peeking ? -err : err);
 	return err;

 csum_copy_err:
-	slow = lock_sock_fast(sk);
-	if (!skb_kill_datagram(sk, skb, flags)) {
+	if (!__sk_queue_drop_skb(sk, skb, flags)) {
 		if (is_udp4) {
 			UDP_INC_STATS(sock_net(sk),
 				      UDP_MIB_CSUMERRORS, is_udplite);
@@@ -452,7 -456,7 +453,7 @@@
 				       UDP_MIB_INERRORS, is_udplite);
 		}
 	}
-	unlock_sock_fast(sk, slow);
+	kfree_skb(skb);

 	/* starting over for a new packet, but check if we need to yield */
 	cond_resched();
@@@ -520,7 -524,7 +521,7 @@@ static int __udpv6_queue_rcv_skb(struc
 		sk_incoming_cpu_update(sk);
 	}

-	rc = __sock_queue_rcv_skb(sk, skb);
+	rc = __udp_enqueue_schedule_skb(sk, skb);
 	if (rc < 0) {
 		int is_udplite = IS_UDPLITE(sk);

@@@ -532,7 -536,6 +533,7 @@@
 		kfree_skb(skb);
 		return -1;
 	}
+
 	return 0;
 }

@@@ -554,6 -557,7 +555,6 @@@ EXPORT_SYMBOL(udpv6_encap_enable)
 int udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
 {
 	struct udp_sock *up = udp_sk(sk);
-	int rc;
 	int is_udplite = IS_UDPLITE(sk);

 	if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
@@@ -619,10 -623,25 +620,10 @@@
 		goto drop;

 	udp_csum_pull_header(skb);
-	if (sk_rcvqueues_full(sk, sk->sk_rcvbuf)) {
-		__UDP6_INC_STATS(sock_net(sk),
-				 UDP_MIB_RCVBUFERRORS, is_udplite);
-		goto drop;
-	}

 	skb_dst_drop(skb);

-	bh_lock_sock(sk);
-	rc = 0;
-	if (!sock_owned_by_user(sk))
-		rc = __udpv6_queue_rcv_skb(sk, skb);
-	else if (sk_add_backlog(sk, skb, sk->sk_rcvbuf)) {
-		bh_unlock_sock(sk);
-		goto drop;
-	}
-	bh_unlock_sock(sk);
-
-	return rc;
+	return __udpv6_queue_rcv_skb(sk, skb);

 csum_error:
 	__UDP6_INC_STATS(sock_net(sk), UDP_MIB_CSUMERRORS, is_udplite);
@@@ -1415,12 -1434,12 +1416,12 @@@ struct proto udpv6_prot = 
 	.connect	   = ip6_datagram_connect,
 	.disconnect	   = udp_disconnect,
 	.ioctl		   = udp_ioctl,
+	.init		   = udp_init_sock,
 	.destroy	   = udpv6_destroy_sock,
 	.setsockopt	   = udpv6_setsockopt,
 	.getsockopt	   = udpv6_getsockopt,
 	.sendmsg	   = udpv6_sendmsg,
 	.recvmsg	   = udpv6_recvmsg,
-	.backlog_rcv	   = __udpv6_queue_rcv_skb,
 	.release_cb	   = ip6_datagram_release_cb,
 	.hash		   = udp_lib_hash,
 	.unhash		   = udp_lib_unhash,
diff --combined net/mac80211/rx.c
index 21a8947,a47bbc9..eeab725
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@@ -1394,15 -1394,13 +1394,15 @@@ void ieee80211_sta_uapsd_trigger(struc
 	u8 ac = ieee802_1d_to_ac[tid & 7];
 	/*
-	 * If this AC is not trigger-enabled do nothing.
+	 * If this AC is not trigger-enabled do nothing unless the
+	 * driver is calling us after it already checked.
 	 *
 	 * NB: This could/should check a separate bitmap of trigger-
 	 * enabled queues, but for now we only implement uAPSD w/o
 	 * TSPEC changes to the ACs, so they're always the same.
 	 */
-	if (!(sta->sta.uapsd_queues & BIT(ac)))
+	if (!(sta->sta.uapsd_queues & ieee80211_ac_to_qos_mask[ac]) &&
+	    tid != IEEE80211_NUM_TIDS)
 		return;

 	/* if we are in a service period, do nothing */
@@@ -2217,8 -2215,7 +2217,8 @@@ ieee80211_deliver_skb(struct ieee80211_
 	     sdata->vif.type == NL80211_IFTYPE_AP_VLAN) &&
 	    !(sdata->flags & IEEE80211_SDATA_DONT_BRIDGE_PACKETS) &&
 	    (sdata->vif.type != NL80211_IFTYPE_AP_VLAN || !sdata->u.vlan.sta)) {
-		if (is_multicast_ether_addr(ehdr->h_dest)) {
+		if (is_multicast_ether_addr(ehdr->h_dest) &&
+		    ieee80211_vif_get_num_mcast_if(sdata) != 0) {
 			/*
 			 * send multicast frames both to higher layers in
 			 * local net stack and back to the wireless medium
@@@ -2227,7 -2224,7 +2227,7 @@@
 			if (!xmit_skb)
 				net_info_ratelimited("%s: failed to clone multicast frame\n",
 						     dev->name);
-		} else {
+		} else if (!is_multicast_ether_addr(ehdr->h_dest)) {
 			dsta = sta_info_get(sdata, skb->data);
 			if (dsta) {
 				/*
@@@ -2301,6 -2298,8 +2301,6 @@@ ieee80211_rx_h_amsdu(struct ieee80211_r
 	__le16 fc = hdr->frame_control;
 	struct sk_buff_head frame_list;
 	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
+	struct ethhdr ethhdr;
+	const u8 *check_da = ethhdr.h_dest, *check_sa = ethhdr.h_source;

 	if (unlikely(!ieee80211_is_data(fc)))
 		return RX_CONTINUE;
@@@ -2311,24 -2310,53 +2313,53 @@@
 	if (!(status->rx_flags & IEEE80211_RX_AMSDU))
 		return RX_CONTINUE;

-	if (ieee80211_has_a4(hdr->frame_control) &&
-	    rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN &&
-	    !rx->sdata->u.vlan.sta)
-		return RX_DROP_UNUSABLE;
+	if (unlikely(ieee80211_has_a4(hdr->frame_control))) {
+		switch (rx->sdata->vif.type) {
+		case NL80211_IFTYPE_AP_VLAN:
+			if (!rx->sdata->u.vlan.sta)
+				return RX_DROP_UNUSABLE;
+			break;
+		case NL80211_IFTYPE_STATION:
+			if (!rx->sdata->u.mgd.use_4addr)
+				return RX_DROP_UNUSABLE;
+			break;
+		default:
+			return RX_DROP_UNUSABLE;
+		}
+		check_da = NULL;
+		check_sa = NULL;
+	} else switch (rx->sdata->vif.type) {
+		case NL80211_IFTYPE_AP:
+		case NL80211_IFTYPE_AP_VLAN:
+			check_da = NULL;
+			break;
+		case NL80211_IFTYPE_STATION:
+			if (!rx->sta ||
+			    !test_sta_flag(rx->sta, WLAN_STA_TDLS_PEER))
+				check_sa = NULL;
+			break;
+		case NL80211_IFTYPE_MESH_POINT:
+			check_sa = NULL;
+			break;
+		default:
+			break;
+	}

-	if (is_multicast_ether_addr(hdr->addr1) &&
-	    ((rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN &&
-	      rx->sdata->u.vlan.sta) ||
-	     (rx->sdata->vif.type == NL80211_IFTYPE_STATION &&
-	      rx->sdata->u.mgd.use_4addr)))
+	if (is_multicast_ether_addr(hdr->addr1))
 		return RX_DROP_UNUSABLE;

 	skb->dev = dev;
 	__skb_queue_head_init(&frame_list);

+	if (ieee80211_data_to_8023_exthdr(skb, &ethhdr,
+					  rx->sdata->vif.addr,
+					  rx->sdata->vif.type))
+		return RX_DROP_UNUSABLE;
+
 	ieee80211_amsdu_to_8023s(skb, &frame_list, dev->dev_addr,
 				 rx->sdata->vif.type,
-				 rx->local->hw.extra_tx_headroom, true);
+				 rx->local->hw.extra_tx_headroom,
+				 check_da, check_sa);

 	while (!skb_queue_empty(&frame_list)) {
 		rx->skb = __skb_dequeue(&frame_list);
diff --combined net/mac80211/wpa.c
index c249345,42ce9bd..8af6dd3
--- a/net/mac80211/wpa.c
+++ b/net/mac80211/wpa.c
@@@ -57,7 -57,7 +57,7 @@@ ieee80211_tx_h_michael_mic_add(struct i
 	if (info->control.hw_key &&
 	    (info->flags & IEEE80211_TX_CTL_DONTFRAG ||
-	     tx->local->ops->set_frag_threshold) &&
+	     ieee80211_hw_check(&tx->local->hw, SUPPORTS_TX_FRAG)) &&
 	    !(tx->key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_MMIC)) {
 		/* hwaccel - with no need for SW-generated MMIC */
 		return TX_CONTINUE;
@@@ -405,7 -405,7 +405,7 @@@ static int ccmp_encrypt_skb(struct ieee
 	u8 *pos;
 	u8 pn[6];
 	u64 pn64;
-	u8 aad[2 * AES_BLOCK_SIZE];
+	u8 aad[CCM_AAD_LEN];
 	u8 b_0[AES_BLOCK_SIZE];

 	if (info->control.hw_key &&
@@@ -461,10 -461,8 +461,8 @@@

 	pos += IEEE80211_CCMP_HDR_LEN;
 	ccmp_special_blocks(skb, pn, b_0, aad);
-	ieee80211_aes_ccm_encrypt(key->u.ccmp.tfm, b_0, aad, pos, len,
-				  skb_put(skb, mic_len), mic_len);
-
-	return 0;
+	return ieee80211_aes_ccm_encrypt(key->u.ccmp.tfm, b_0, aad, pos, len,
+					 skb_put(skb, mic_len), mic_len);
 }

@@@ -639,7 -637,7 +637,7 @@@ static int gcmp_encrypt_skb(struct ieee
 	u8 *pos;
 	u8 pn[6];
 	u64 pn64;
-	u8 aad[2 * AES_BLOCK_SIZE];
+	u8 aad[GCM_AAD_LEN];
 	u8 j_0[AES_BLOCK_SIZE];

 	if (info->control.hw_key &&
@@@ -696,10 -694,8 +694,8 @@@

 	pos += IEEE80211_GCMP_HDR_LEN;
 	gcmp_special_blocks(skb, pn, j_0, aad);
-	ieee80211_aes_gcm_encrypt(key->u.gcmp.tfm, j_0, aad, pos, len,
-				  skb_put(skb, IEEE80211_GCMP_MIC_LEN));
-
-	return 0;
+	return ieee80211_aes_gcm_encrypt(key->u.gcmp.tfm, j_0, aad, pos, len,
+					 skb_put(skb, IEEE80211_GCMP_MIC_LEN));
 }

 ieee80211_tx_result
@@@ -1123,9 -1119,9 +1119,9 @@@ ieee80211_crypto_aes_gmac_encrypt(struc
 	struct ieee80211_key *key = tx->key;
 	struct ieee80211_mmie_16 *mmie;
 	struct ieee80211_hdr *hdr;
-	u8 aad[20];
+	u8 aad[GMAC_AAD_LEN];
 	u64 pn64;
-	u8 nonce[12];
+	u8 nonce[GMAC_NONCE_LEN];

 	if (WARN_ON(skb_queue_len(&tx->skbs) != 1))
 		return TX_DROP;
@@@ -1171,7 -1167,7 +1167,7 @@@ ieee80211_crypto_aes_gmac_decrypt(struc
 	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
 	struct ieee80211_key *key = rx->key;
 	struct ieee80211_mmie_16 *mmie;
-	u8 aad[20], mic[16], ipn[6], nonce[12];
+	u8 aad[GMAC_AAD_LEN], mic[GMAC_MIC_LEN], ipn[6], nonce[GMAC_NONCE_LEN];
 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;

 	if (!ieee80211_is_mgmt(hdr->frame_control))
diff --combined net/rds/rds.h
index 25532a4,67ba67c..4121e18
--- a/net/rds/rds.h
+++ b/net/rds/rds.h
@@@ -33,7 -33,7 +33,7 @@@
 #define KERNEL_HAS_ATOMIC64
 #endif

-#ifdef DEBUG
+#ifdef RDS_DEBUG
 #define rdsdebug(fmt, args...) pr_debug("%s(): " fmt, __func__ , ##args)
 #else
 /* sigh, pr_debug() causes unused variable warnings */
@@@ -683,6 -683,10 +683,6 @@@ void rds_for_each_conn_info(struct sock
 			    struct rds_info_lengths *lens,
 			    int (*visitor)(struct rds_connection *, void *),
 			    size_t item_len);
-__printf(2, 3)
-void __rds_conn_error(struct rds_connection *conn, const char *, ...);
-#define rds_conn_error(conn, fmt...) \
-	__rds_conn_error(conn, KERN_WARNING "RDS: " fmt)

 __printf(2, 3)
 void __rds_conn_path_error(struct rds_conn_path *cp, const char *, ...);
diff --combined net/sched/act_mirred.c
index 2d93be6,6b07fba..6073a11
--- a/net/sched/act_mirred.c
+++ b/net/sched/act_mirred.c
@@@ -33,25 -33,6 +33,25 @@@
 static LIST_HEAD(mirred_list);
 static DEFINE_SPINLOCK(mirred_list_lock);
+static bool tcf_mirred_is_act_redirect(int action)
+{
+	return action == TCA_EGRESS_REDIR || action == TCA_INGRESS_REDIR;
+}
+
+static u32 tcf_mirred_act_direction(int action)
+{
+	switch (action) {
+	case TCA_EGRESS_REDIR:
+	case TCA_EGRESS_MIRROR:
+		return AT_EGRESS;
+	case TCA_INGRESS_REDIR:
+	case TCA_INGRESS_MIRROR:
+		return AT_INGRESS;
+	default:
+		BUG();
+	}
+}
+
 static void tcf_mirred_release(struct tc_action *a, int bind)
 {
 	struct tcf_mirred *m = to_mirred(a);
@@@ -73,32 -54,17 +73,32 @@@ static const struct nla_policy mirred_p
 static int mirred_net_id;
 static struct tc_action_ops act_mirred_ops;

+static bool dev_is_mac_header_xmit(const struct net_device *dev)
+{
+	switch (dev->type) {
+	case ARPHRD_TUNNEL:
+	case ARPHRD_TUNNEL6:
+	case ARPHRD_SIT:
+	case ARPHRD_IPGRE:
+	case ARPHRD_VOID:
+	case ARPHRD_NONE:
+		return false;
+	}
+	return true;
+}
+
 static int tcf_mirred_init(struct net *net, struct nlattr *nla,
 			   struct nlattr *est, struct tc_action **a, int ovr,
 			   int bind)
 {
 	struct tc_action_net *tn = net_generic(net, mirred_net_id);
 	struct nlattr *tb[TCA_MIRRED_MAX + 1];
+	bool mac_header_xmit = false;
 	struct tc_mirred *parm;
 	struct tcf_mirred *m;
 	struct net_device *dev;
-	int ret, ok_push = 0;
 	bool exists = false;
+	int ret;

 	if (nla == NULL)
 		return -EINVAL;
@@@ -116,8 -82,6 +116,8 @@@
 	switch (parm->eaction) {
 	case TCA_EGRESS_MIRROR:
 	case TCA_EGRESS_REDIR:
+	case TCA_INGRESS_REDIR:
+	case TCA_INGRESS_MIRROR:
 		break;
 	default:
 		if (exists)
@@@ -131,7 -95,19 +131,7 @@@
 			tcf_hash_release(*a, bind);
 			return -ENODEV;
 		}
-		switch (dev->type) {
-		case ARPHRD_TUNNEL:
-		case ARPHRD_TUNNEL6:
-		case ARPHRD_SIT:
-		case ARPHRD_IPGRE:
-		case ARPHRD_VOID:
-		case ARPHRD_NONE:
-			ok_push = 0;
-			break;
-		default:
-			ok_push = 1;
-			break;
-		}
+		mac_header_xmit = dev_is_mac_header_xmit(dev);
 	} else {
 		dev = NULL;
 	}
@@@ -160,7 -136,7 +160,7 @@@
 		dev_put(rcu_dereference_protected(m->tcfm_dev, 1));
 		dev_hold(dev);
 		rcu_assign_pointer(m->tcfm_dev, dev);
-		m->tcfm_ok_push = ok_push;
+		m->tcfm_mac_header_xmit = mac_header_xmit;
 	}

 	if (ret == ACT_P_CREATED) {
@@@ -177,20 -153,15 +177,20 @@@ static int tcf_mirred(struct sk_buff *s
 		      struct tcf_result *res)
 {
 	struct tcf_mirred *m = to_mirred(a);
+	bool m_mac_header_xmit;
 	struct net_device *dev;
 	struct sk_buff *skb2;
-	int retval, err;
+	int retval, err = 0;
+	int m_eaction;
+	int mac_len;
 	u32 at;

 	tcf_lastuse_update(&m->tcf_tm);
 	bstats_cpu_update(this_cpu_ptr(m->common.cpu_bstats), skb);

 	rcu_read_lock();
+	m_mac_header_xmit = READ_ONCE(m->tcfm_mac_header_xmit);
+	m_eaction = READ_ONCE(m->tcfm_eaction);
 	retval = READ_ONCE(m->tcf_action);
 	dev = rcu_dereference(m->tcfm_dev);
 	if (unlikely(!dev)) {
@@@ -209,36 -180,23 +209,36 @@@
 	if (!skb2)
 		goto out;

-	if (!(at & AT_EGRESS)) {
-		if (m->tcfm_ok_push)
+	/* If the action's target direction differs from the filter's
+	 * direction, and the devices expect a mac header on xmit, then
+	 * mac push/pull is needed.
+	 */
+	if (at != tcf_mirred_act_direction(m_eaction) && m_mac_header_xmit) {
+		if (at & AT_EGRESS) {
+			/* caught at egress, act ingress: pull mac */
+			mac_len = skb_network_header(skb) - skb_mac_header(skb);
+			skb_pull_rcsum(skb2, mac_len);
+		} else {
+			/* caught at ingress, act egress: push mac */
 			skb_push_rcsum(skb2, skb->mac_len);
+		}
 	}

 	/* mirror is always swallowed */
-	if (m->tcfm_eaction != TCA_EGRESS_MIRROR)
+	if (tcf_mirred_is_act_redirect(m_eaction))
 		skb2->tc_verd = SET_TC_FROM(skb2->tc_verd, at);

 	skb2->skb_iif = skb->dev->ifindex;
 	skb2->dev = dev;
-	err = dev_queue_xmit(skb2);
+	if (tcf_mirred_act_direction(m_eaction) & AT_EGRESS)
+		err = dev_queue_xmit(skb2);
+	else
+		err = netif_receive_skb(skb2);

 	if (err) {
 out:
 		qstats_overlimit_inc(this_cpu_ptr(m->common.cpu_qstats));
-		if (m->tcfm_eaction != TCA_EGRESS_MIRROR)
+		if (tcf_mirred_is_act_redirect(m_eaction))
 			retval = TC_ACT_SHOT;
 	}
 	rcu_read_unlock();
@@@ -249,8 -207,11 +249,11 @@@
 static void tcf_stats_update(struct tc_action *a, u64 bytes, u32 packets,
 			     u64 lastuse)
 {
-	tcf_lastuse_update(&a->tcfa_tm);
+	struct tcf_mirred *m = to_mirred(a);
+	struct tcf_t *tm = &m->tcf_tm;
+
 	_bstats_cpu_update(this_cpu_ptr(a->cpu_bstats), bytes, packets);
+	tm->lastuse = lastuse;
 }
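Summarising the direction handling in tcf_mirred() above, since the four combinations are easy to mix up (AT_* is where the filter saw the packet, the TCA_*_{REDIR,MIRROR} action says where the clone is injected):

/*   seen at egress,  act egress  -> no mac work,     dev_queue_xmit()
 *   seen at egress,  act ingress -> pull mac header, netif_receive_skb()
 *   seen at ingress, act egress  -> push mac header, dev_queue_xmit()
 *   seen at ingress, act ingress -> no mac work,     netif_receive_skb()
 *
 * the mac push/pull only happens when the target device expects a mac
 * header on xmit, per dev_is_mac_header_xmit() above.
 */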
 static int tcf_mirred_dump(struct sk_buff *skb, struct tc_action *a, int bind,
diff --combined net/sctp/output.c
index 4282b48,6cb0df8..7b50e43
--- a/net/sctp/output.c
+++ b/net/sctp/output.c
@@@ -418,6 -418,7 +418,7 @@@ int sctp_packet_transmit(struct sctp_pa
 	__u8 has_data = 0;
 	int gso = 0;
 	int pktcount = 0;
+	int auth_len = 0;
 	struct dst_entry *dst;
 	unsigned char *auth = NULL;	/* pointer to auth in skb data */

@@@ -510,7 -511,12 +511,12 @@@
 	list_for_each_entry(chunk, &packet->chunk_list, list) {
 		int padded = SCTP_PAD4(chunk->skb->len);

-		if (pkt_size + padded > tp->pathmtu)
+		if (chunk == packet->auth)
+			auth_len = padded;
+		else if (auth_len + padded + packet->overhead >
+			 tp->pathmtu)
+			goto nomem;
+		else if (pkt_size + padded > tp->pathmtu)
 			break;
 		pkt_size += padded;
 	}
@@@ -552,8 -558,7 +558,8 @@@
 		 * for a given destination transport address.
 		 */

-		if (!chunk->resent && !tp->rto_pending) {
+		if (!sctp_chunk_retransmitted(chunk) &&
+		    !tp->rto_pending) {
 			chunk->rtt_in_progress = 1;
 			tp->rto_pending = 1;
 		}
@@@ -866,6 -871,9 +872,6 @@@ static void sctp_packet_append_data(str
 		rwnd = 0;

 	asoc->peer.rwnd = rwnd;
-	/* Has been accepted for transmission. */
-	if (!asoc->peer.prsctp_capable)
-		chunk->msg->can_abandon = 0;
 	sctp_chunk_assign_tsn(chunk);
 	sctp_chunk_assign_ssn(chunk);
 }
diff --combined net/switchdev/switchdev.c
index 6f145b5,3b95fe9..017801f
--- a/net/switchdev/switchdev.c
+++ b/net/switchdev/switchdev.c
@@@ -624,10 -624,13 +624,10 @@@ EXPORT_SYMBOL_GPL(unregister_switchdev_
 int call_switchdev_notifiers(unsigned long val, struct net_device *dev,
 			     struct switchdev_notifier_info *info)
 {
-	int err;
-
 	ASSERT_RTNL();
 	info->dev = dev;
-	err = raw_notifier_call_chain(&switchdev_notif_chain, val, info);
-	return err;
+	return raw_notifier_call_chain(&switchdev_notif_chain, val, info);
 }
 EXPORT_SYMBOL_GPL(call_switchdev_notifiers);

@@@ -768,6 -771,9 +768,9 @@@ int switchdev_port_bridge_getlink(struc
 	u32 mask = BR_LEARNING | BR_LEARNING_SYNC | BR_FLOOD;
 	int err;

+	if (!netif_is_bridge_port(dev))
+		return -EOPNOTSUPP;
+
 	err = switchdev_port_attr_get(dev, &attr);
 	if (err && err != -EOPNOTSUPP)
 		return err;
@@@ -923,6 -929,9 +926,9 @@@ int switchdev_port_bridge_setlink(struc
 	struct nlattr *afspec;
 	int err = 0;

+	if (!netif_is_bridge_port(dev))
+		return -EOPNOTSUPP;
+
 	protinfo = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg),
 				   IFLA_PROTINFO);
 	if (protinfo) {
@@@ -956,6 -965,9 +962,9 @@@ int switchdev_port_bridge_dellink(struc
 {
 	struct nlattr *afspec;

+	if (!netif_is_bridge_port(dev))
+		return -EOPNOTSUPP;
+
 	afspec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg),
 				 IFLA_AF_SPEC);
 	if (afspec)
diff --combined net/wireless/util.c
index 32060f8,5ea12af..88725f8
--- a/net/wireless/util.c
+++ b/net/wireless/util.c
@@@ -13,7 -13,6 +13,7 @@@
 #include <net/dsfield.h>
 #include <linux/if_vlan.h>
 #include <linux/mpls.h>
+#include <linux/gcd.h>
 #include "core.h"
 #include "rdev-ops.h"
@@@ -421,8 -420,8 +421,8 @@@ unsigned int ieee80211_get_mesh_hdrlen(
 }
 EXPORT_SYMBOL(ieee80211_get_mesh_hdrlen);

-static int __ieee80211_data_to_8023(struct sk_buff *skb, struct ethhdr *ehdr,
-				    const u8 *addr, enum nl80211_iftype iftype)
+int ieee80211_data_to_8023_exthdr(struct sk_buff *skb, struct ethhdr *ehdr,
+				  const u8 *addr, enum nl80211_iftype iftype)
 {
 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
 	struct {
@@@ -526,13 -525,7 +526,7 @@@

 	return 0;
 }
-
-int ieee80211_data_to_8023(struct sk_buff *skb, const u8 *addr,
-			   enum nl80211_iftype iftype)
-{
-	return __ieee80211_data_to_8023(skb, NULL, addr, iftype);
-}
-EXPORT_SYMBOL(ieee80211_data_to_8023);
+EXPORT_SYMBOL(ieee80211_data_to_8023_exthdr);

 int ieee80211_data_from_8023(struct sk_buff *skb, const u8 *addr,
 			     enum nl80211_iftype iftype,
@@@ -747,24 -740,18 +741,18 @@@ __ieee80211_amsdu_copy(struct sk_buff *
 void ieee80211_amsdu_to_8023s(struct sk_buff *skb, struct sk_buff_head *list,
 			      const u8 *addr, enum nl80211_iftype iftype,
 			      const unsigned int extra_headroom,
-			      bool has_80211_header)
+			      const u8 *check_da, const u8 *check_sa)
 {
 	unsigned int hlen = ALIGN(extra_headroom, 4);
 	struct sk_buff *frame = NULL;
 	u16 ethertype;
 	u8 *payload;
-	int offset = 0, remaining, err;
+	int offset = 0, remaining;
 	struct ethhdr eth;
 	bool reuse_frag = skb->head_frag && !skb_has_frag_list(skb);
 	bool reuse_skb = false;
 	bool last = false;

-	if (has_80211_header) {
-		err = __ieee80211_data_to_8023(skb, &eth, addr, iftype);
-		if (err)
-			goto out;
-	}
-
 	while (!last) {
 		unsigned int subframe_len;
 		int len;
@@@ -781,8 -768,17 +769,17 @@@
 			goto purge;

 		offset += sizeof(struct ethhdr);
-		/* reuse skb for the last subframe */
 		last = remaining <= subframe_len + padding;
+
+		/* FIXME: should we really accept multicast DA? */
+		if ((check_da && !is_multicast_ether_addr(eth.h_dest) &&
+		     !ether_addr_equal(check_da, eth.h_dest)) ||
+		    (check_sa && !ether_addr_equal(check_sa, eth.h_source))) {
+			offset += len + padding;
+			continue;
+		}
+
+		/* reuse skb for the last subframe */
 		if (!skb_is_nonlinear(skb) && !reuse_frag && last) {
 			skb_pull(skb, offset);
 			frame = skb;

 purge:
 	__skb_queue_purge(list);
-out:
 	dev_kfree_skb(skb);
 }
 EXPORT_SYMBOL(ieee80211_amsdu_to_8023s);
@@@ -1382,25 -1377,6 +1378,25 @@@ static bool ieee80211_id_in_list(const
 	return false;
 }

+static size_t skip_ie(const u8 *ies, size_t ielen, size_t pos)
+{
+	/* we assume a validly formed IEs buffer */
+	u8 len = ies[pos + 1];
+
+	pos += 2 + len;
+
+	/* the IE itself must have 255 bytes for fragments to follow */
+	if (len < 255)
+		return pos;
+
+	while (pos < ielen && ies[pos] == WLAN_EID_FRAGMENT) {
+		len = ies[pos + 1];
+		pos += 2 + len;
+	}
+
+	return pos;
+}
+
 size_t ieee80211_ie_split_ric(const u8 *ies, size_t ielen,
 			      const u8 *ids, int n_ids,
 			      const u8 *after_ric, int n_after_ric,
@@@ -1410,14 -1386,14 +1406,14 @@@

 	while (pos < ielen && ieee80211_id_in_list(ids, n_ids, ies[pos])) {
 		if (ies[pos] == WLAN_EID_RIC_DATA && n_after_ric) {
-			pos += 2 + ies[pos + 1];
+			pos = skip_ie(ies, ielen, pos);

 			while (pos < ielen &&
 			       !ieee80211_id_in_list(after_ric, n_after_ric,
 						     ies[pos]))
-				pos += 2 + ies[pos + 1];
+				pos = skip_ie(ies, ielen, pos);
 		} else {
-			pos += 2 + ies[pos + 1];
+			pos = skip_ie(ies, ielen, pos);
 		}
 	}

@@@ -1578,57 -1554,31 +1574,57 @@@ bool ieee80211_chandef_to_operating_cla
 }
 EXPORT_SYMBOL(ieee80211_chandef_to_operating_class);
-int cfg80211_validate_beacon_int(struct cfg80211_registered_device *rdev,
-				 u32 beacon_int)
+static void cfg80211_calculate_bi_data(struct wiphy *wiphy, u32 new_beacon_int,
+				       u32 *beacon_int_gcd,
+				       bool *beacon_int_different)
 {
 	struct wireless_dev *wdev;
-	int res = 0;

-	if (beacon_int < 10 || beacon_int > 10000)
-		return -EINVAL;
+	*beacon_int_gcd = 0;
+	*beacon_int_different = false;

-	list_for_each_entry(wdev, &rdev->wiphy.wdev_list, list) {
+	list_for_each_entry(wdev, &wiphy->wdev_list, list) {
 		if (!wdev->beacon_interval)
 			continue;
-		if (wdev->beacon_interval != beacon_int) {
-			res = -EINVAL;
-			break;
+
+		if (!*beacon_int_gcd) {
+			*beacon_int_gcd = wdev->beacon_interval;
+			continue;
 		}
+
+		if (wdev->beacon_interval == *beacon_int_gcd)
+			continue;
+
+		*beacon_int_different = true;
+		*beacon_int_gcd = gcd(*beacon_int_gcd, wdev->beacon_interval);
 	}

-	return res;
+	if (new_beacon_int && *beacon_int_gcd != new_beacon_int) {
+		if (*beacon_int_gcd)
+			*beacon_int_different = true;
+		*beacon_int_gcd = gcd(*beacon_int_gcd, new_beacon_int);
+	}
+}
+
+int cfg80211_validate_beacon_int(struct cfg80211_registered_device *rdev,
+				 enum nl80211_iftype iftype, u32 beacon_int)
+{
+	/*
+	 * This is just a basic pre-condition check; if interface combinations
+	 * are possible the driver must already be checking those with a call
+	 * to cfg80211_check_combinations(), in which case we'll validate more
+	 * through the cfg80211_calculate_bi_data() call and code in
+	 * cfg80211_iter_combinations().
+	 */
+
+	if (beacon_int < 10 || beacon_int > 10000)
+		return -EINVAL;
+
+	return 0;
 }
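A worked example of what cfg80211_calculate_bi_data() computes, with invented values:

/* wdev A beacons at 100 TU, wdev B at 200 TU:
 *   beacon_int_gcd       = gcd(100, 200) = 100
 *   beacon_int_different = true
 * cfg80211_iter_combinations() below then only accepts combinations
 * advertising beacon_int_min_gcd <= 100, and rejects combinations that
 * declare no beacon_int_min_gcd at all, because the intervals differ.
 */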
 int cfg80211_iter_combinations(struct wiphy *wiphy,
-			       const int num_different_channels,
-			       const u8 radar_detect,
-			       const int iftype_num[NUM_NL80211_IFTYPES],
+			       struct iface_combination_params *params,
 			       void (*iter)(const struct ieee80211_iface_combination *c,
 					    void *data),
 			       void *data)
@@@ -1638,23 -1588,8 +1634,23 @@@
 	int i, j, iftype;
 	int num_interfaces = 0;
 	u32 used_iftypes = 0;
+	u32 beacon_int_gcd;
+	bool beacon_int_different;
+
+	/*
+	 * This is a bit strange, since the iteration used to rely only on
+	 * the data given by the driver, but here it now relies on context,
+	 * in form of the currently operating interfaces.
+	 * This is OK for all current users, and saves us from having to
+	 * push the GCD calculations into all the drivers.
+	 * In the future, this should probably rely more on data that's in
+	 * cfg80211 already - the only things it would not cover are any new
+	 * interfaces (while being brought up) and channel/radar data.
+	 */
+	cfg80211_calculate_bi_data(wiphy, params->new_beacon_int,
+				   &beacon_int_gcd, &beacon_int_different);

-	if (radar_detect) {
+	if (params->radar_detect) {
 		rcu_read_lock();
 		regdom = rcu_dereference(cfg80211_regdomain);
 		if (regdom)
@@@ -1663,8 -1598,8 +1659,8 @@@
 	}

 	for (iftype = 0; iftype < NUM_NL80211_IFTYPES; iftype++) {
-		num_interfaces += iftype_num[iftype];
-		if (iftype_num[iftype] > 0 &&
+		num_interfaces += params->iftype_num[iftype];
+		if (params->iftype_num[iftype] > 0 &&
 		    !(wiphy->software_iftypes & BIT(iftype)))
 			used_iftypes |= BIT(iftype);
 	}
@@@ -1678,7 -1613,7 +1674,7 @@@

 		if (num_interfaces > c->max_interfaces)
 			continue;
-		if (num_different_channels > c->num_different_channels)
+		if (params->num_different_channels > c->num_different_channels)
 			continue;

 		limits = kmemdup(c->limits, sizeof(limits[0]) * c->n_limits,
@@@ -1693,17 -1628,16 +1689,17 @@@
 				all_iftypes |= limits[j].types;
 				if (!(limits[j].types & BIT(iftype)))
 					continue;
-				if (limits[j].max < iftype_num[iftype])
+				if (limits[j].max < params->iftype_num[iftype])
 					goto cont;
-				limits[j].max -= iftype_num[iftype];
+				limits[j].max -= params->iftype_num[iftype];
 			}
 		}

-		if (radar_detect != (c->radar_detect_widths & radar_detect))
+		if (params->radar_detect !=
+		    (c->radar_detect_widths & params->radar_detect))
 			goto cont;

-		if (radar_detect && c->radar_detect_regions &&
+		if (params->radar_detect && c->radar_detect_regions &&
 		    !(c->radar_detect_regions & BIT(region)))
 			goto cont;

@@@ -1715,14 -1649,6 +1711,14 @@@
 		if ((all_iftypes & used_iftypes) != used_iftypes)
 			goto cont;

+		if (beacon_int_gcd) {
+			if (c->beacon_int_min_gcd &&
+			    beacon_int_gcd < c->beacon_int_min_gcd)
+				goto cont;
+			if (!c->beacon_int_min_gcd && beacon_int_different)
+				goto cont;
+		}
+
 		/* This combination covered all interface types and
 		 * supported the requested numbers, so we're good.
 		 */
@@@ -1745,11 -1671,14 +1741,11 @@@ cfg80211_iter_sum_ifcombs(const struct
 }

 int cfg80211_check_combinations(struct wiphy *wiphy,
-				const int num_different_channels,
-				const u8 radar_detect,
-				const int iftype_num[NUM_NL80211_IFTYPES])
+				struct iface_combination_params *params)
 {
 	int err, num = 0;

-	err = cfg80211_iter_combinations(wiphy, num_different_channels,
-					 radar_detect, iftype_num,
+	err = cfg80211_iter_combinations(wiphy, params,
 					 cfg80211_iter_sum_ifcombs, &num);
 	if (err)
 		return err;