The following commit has been merged in the master branch:
commit c4c5551df136a7c4edd7c2f433d9a296b39826a2
Merge: 40999f11ce677ce3c5d0e8f5f76c40192a26b479 48e5aee81f320da8abd1f09c8410f584315f59b0
Author: David S. Miller <davem@davemloft.net>
Date:   Fri Jul 20 14:45:10 2018 -0700
Merge ra.kernel.org:/pub/scm/linux/kernel/git/torvalds/linux
All conflicts were trivial overlapping changes, so reasonably easy to resolve.
Signed-off-by: David S. Miller <davem@davemloft.net>
diff --combined Documentation/devicetree/bindings/net/fsl-fman.txt
index 74603dd0789e,f8c33890bc29..299c0dcd67db
--- a/Documentation/devicetree/bindings/net/fsl-fman.txt
+++ b/Documentation/devicetree/bindings/net/fsl-fman.txt
@@@ -238,7 -238,7 +238,7 @@@ PROPERTIE
  		Must include one of the following:
  		- "fsl,fman-dtsec" for dTSEC MAC
  		- "fsl,fman-xgec" for XGEC MAC
- 		- "fsl,fman-memac for mEMAC MAC
+ 		- "fsl,fman-memac" for mEMAC MAC
  
  - cell-index
  	Usage: required
@@@ -356,7 -356,30 +356,7 @@@ ethernet@e0000
  ============================================================================
  FMan IEEE 1588 Node
 -DESCRIPTION
 -
 -The FMan interface to support IEEE 1588
 -
 -
 -PROPERTIES
 -
 -- compatible
 -	Usage: required
 -	Value type: <stringlist>
 -	Definition: A standard property.
 -		Must include "fsl,fman-ptp-timer".
 -
 -- reg
 -	Usage: required
 -	Value type: <prop-encoded-array>
 -	Definition: A standard property.
 -
 -EXAMPLE
 -
 -ptp-timer@fe000 {
 -	compatible = "fsl,fman-ptp-timer";
 -	reg = <0xfe000 0x1000>;
 -};
 +Refer to Documentation/devicetree/bindings/ptp/ptp-qoriq.txt
  =============================================================================
  FMan MDIO Node
diff --combined MAINTAINERS
index 130ff2f4d33a,1505c8ea8e7b..fce04d747a1a
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@@ -581,7 -581,7 +581,7 @@@ W: https://www.infradead.org/~dhowells/
  AGPGART DRIVER
  M:	David Airlie <airlied@linux.ie>
- T:	git git://people.freedesktop.org/~airlied/linux (part of drm maint)
+ T:	git git://anongit.freedesktop.org/drm/drm
  S:	Maintained
  F:	drivers/char/agp/
  F:	include/linux/agp*
@@@ -2523,7 -2523,7 +2523,7 @@@ S: Supporte
  F:	drivers/scsi/esas2r
  
  ATUSB IEEE 802.15.4 RADIO DRIVER
- M:	Stefan Schmidt <stefan@osg.samsung.com>
+ M:	Stefan Schmidt <stefan@datenfreihafen.org>
  L:	linux-wpan@vger.kernel.org
  S:	Maintained
  F:	drivers/net/ieee802154/atusb.c
@@@ -4460,6 -4460,7 +4460,7 @@@ F: Documentation/blockdev/drbd
  
  DRIVER CORE, KOBJECTS, DEBUGFS AND SYSFS
  M:	Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+ R:	"Rafael J. Wysocki" <rafael@kernel.org>
  T:	git git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/driver-core.git
  S:	Supported
  F:	Documentation/kobject.txt
@@@ -4630,7 -4631,7 +4631,7 @@@ F: include/uapi/drm/vmwgfx_drm.
  DRM DRIVERS
  M:	David Airlie <airlied@linux.ie>
  L:	dri-devel@lists.freedesktop.org
- T:	git git://people.freedesktop.org/~airlied/linux
+ T:	git git://anongit.freedesktop.org/drm/drm
  B:	https://bugs.freedesktop.org/
  C:	irc://chat.freenode.net/dri-devel
  S:	Maintained
@@@ -5789,7 -5790,6 +5790,6 @@@ F: include/linux/fsl
  
  FREESCALE SOC FS_ENET DRIVER
  M:	Pantelis Antoniou <pantelis.antoniou@gmail.com>
- M:	Vitaly Bordug <vbordug@ru.mvista.com>
  L:	linuxppc-dev@lists.ozlabs.org
  L:	netdev@vger.kernel.org
  S:	Maintained
@@@ -6908,7 -6908,7 +6908,7 @@@ F: drivers/clk/clk-versaclock5.
  
  IEEE 802.15.4 SUBSYSTEM
  M:	Alexander Aring <alex.aring@gmail.com>
- M:	Stefan Schmidt <stefan@osg.samsung.com>
+ M:	Stefan Schmidt <stefan@datenfreihafen.org>
  L:	linux-wpan@vger.kernel.org
  W:	http://wpan.cakelab.org/
  T:	git git://git.kernel.org/pub/scm/linux/kernel/git/sschmidt/wpan.git
@@@ -8628,7 -8628,7 +8628,7 @@@ MARVELL MWIFIEX WIRELESS DRIVE
  M:	Amitkumar Karwar <amitkarwar@gmail.com>
  M:	Nishant Sarmukadam <nishants@marvell.com>
  M:	Ganapathi Bhat <gbhat@marvell.com>
- M:	Xinming Hu <huxm@marvell.com>
+ M:	Xinming Hu <huxinming820@gmail.com>
  L:	linux-wireless@vger.kernel.org
  S:	Maintained
  F:	drivers/net/wireless/marvell/mwifiex/
@@@ -9158,7 -9158,6 +9158,7 @@@ S: Supporte
  W:	http://www.mellanox.com
  Q:	http://patchwork.ozlabs.org/project/netdev/list/
  F:	drivers/net/ethernet/mellanox/mlxsw/
 +F:	tools/testing/selftests/drivers/net/mlxsw/
  
  MELLANOX FIRMWARE FLASH LIBRARY (mlxfw)
  M:	mlxsw@mellanox.com
@@@ -10214,11 -10213,13 +10214,13 @@@ F: sound/soc/codecs/sgtl5000
  
  NXP TDA998X DRM DRIVER
  M:	Russell King <linux@armlinux.org.uk>
- S:	Supported
+ S:	Maintained
  T:	git git://git.armlinux.org.uk/~rmk/linux-arm.git drm-tda998x-devel
  T:	git git://git.armlinux.org.uk/~rmk/linux-arm.git drm-tda998x-fixes
  F:	drivers/gpu/drm/i2c/tda998x_drv.c
  F:	include/drm/i2c/tda998x.h
+ F:	include/dt-bindings/display/tda998x.h
+ K:	"nxp,tda998x"
  
  NXP TFA9879 DRIVER
  M:	Peter Rosin <peda@axentia.se>
@@@ -11836,7 -11837,7 +11838,7 @@@ S: Supporte
  F:	arch/hexagon/
  
  QUALCOMM HIDMA DRIVER
- M:	Sinan Kaya <okaya@codeaurora.org>
+ M:	Sinan Kaya <okaya@kernel.org>
  L:	linux-arm-kernel@lists.infradead.org
  L:	linux-arm-msm@vger.kernel.org
  L:	dmaengine@vger.kernel.org
@@@ -12063,13 -12064,6 +12065,13 @@@ S: Maintaine
  F:	sound/soc/codecs/rt*
  F:	include/sound/rt*.h
  
 +REALTEK RTL83xx SMI DSA ROUTER CHIPS
 +M:	Linus Walleij <linus.walleij@linaro.org>
 +S:	Maintained
 +F:	Documentation/devicetree/bindings/net/dsa/realtek-smi.txt
 +F:	drivers/net/dsa/realtek-smi*
 +F:	drivers/net/dsa/rtl83*
 +
  REGISTER MAP ABSTRACTION
  M:	Mark Brown <broonie@kernel.org>
  L:	linux-kernel@vger.kernel.org
@@@ -12177,8 -12171,6 +12179,8 @@@ S: Maintaine
  F:	Documentation/rfkill.txt
  F:	Documentation/ABI/stable/sysfs-class-rfkill
  F:	net/rfkill/
 +F:	include/linux/rfkill.h
 +F:	include/uapi/linux/rfkill.h
  
  RHASHTABLE
  M:	Thomas Graf <tgraf@suug.ch>
@@@ -12186,9 -12178,7 +12188,9 @@@ M: Herbert Xu <herbert@gondor.apana.org
  L:	netdev@vger.kernel.org
  S:	Maintained
  F:	lib/rhashtable.c
 +F:	lib/test_rhashtable.c
  F:	include/linux/rhashtable.h
 +F:	include/linux/rhashtable-types.h
  RICOH R5C592 MEMORYSTICK DRIVER
  M:	Maxim Levitsky <maximlevitsky@gmail.com>
diff --combined drivers/net/ethernet/aquantia/atlantic/aq_hw.h
index 1a51152029c3,2c6ebd91a9f2..5c00671f248d
--- a/drivers/net/ethernet/aquantia/atlantic/aq_hw.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_hw.h
@@@ -24,10 -24,8 +24,10 @@@ struct aq_hw_caps_s
  	u64 link_speed_msk;
  	unsigned int hw_priv_flags;
  	u32 media_type;
 -	u32 rxds;
 -	u32 txds;
 +	u32 rxds_max;
 +	u32 txds_max;
 +	u32 rxds_min;
 +	u32 txds_min;
  	u32 txhwb_alignment;
  	u32 irq_mask;
  	u32 vecs;
@@@ -100,9 -98,8 +100,11 @@@ struct aq_stats_s
  #define AQ_HW_MEDIA_TYPE_TP    1U
  #define AQ_HW_MEDIA_TYPE_FIBRE 2U
  
 +#define AQ_HW_TXD_MULTIPLE 8U
 +#define AQ_HW_RXD_MULTIPLE 8U
 +
+ #define AQ_HW_MULTICAST_ADDRESS_MAX     32U
+ 
  struct aq_hw_s {
  	atomic_t flags;
  	u8 rbl_enabled:1;
@@@ -182,7 -179,7 +184,7 @@@ struct aq_hw_ops
  				     unsigned int packet_filter);
  
  	int (*hw_multicast_list_set)(struct aq_hw_s *self,
- 				     u8 ar_mac[AQ_CFG_MULTICAST_ADDRESS_MAX]
+ 				     u8 ar_mac[AQ_HW_MULTICAST_ADDRESS_MAX]
  				     [ETH_ALEN],
  				     u32 count);
  
@@@ -202,30 -199,25 +204,30 @@@
  	int (*hw_get_fw_version)(struct aq_hw_s *self, u32 *fw_version);
  
 -	int (*hw_deinit)(struct aq_hw_s *self);
 -
  	int (*hw_set_power)(struct aq_hw_s *self, unsigned int power_state);
  };
  
  struct aq_fw_ops {
  	int (*init)(struct aq_hw_s *self);
  
 +	int (*deinit)(struct aq_hw_s *self);
 +
  	int (*reset)(struct aq_hw_s *self);
  
 +	int (*renegotiate)(struct aq_hw_s *self);
 +
  	int (*get_mac_permanent)(struct aq_hw_s *self, u8 *mac);
  
  	int (*set_link_speed)(struct aq_hw_s *self, u32 speed);
  
 -	int (*set_state)(struct aq_hw_s *self, enum hal_atl_utils_fw_state_e state);
 +	int (*set_state)(struct aq_hw_s *self,
 +			 enum hal_atl_utils_fw_state_e state);
  
  	int (*update_link_status)(struct aq_hw_s *self);
  
  	int (*update_stats)(struct aq_hw_s *self);
 +
 +	int (*set_flow_control)(struct aq_hw_s *self);
  };
  #endif /* AQ_HW_H */
diff --combined drivers/net/ethernet/aquantia/atlantic/aq_nic.c
index 21cfb327d791,7a22d0257e04..26dc6782b475
--- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
@@@ -89,8 -89,8 +89,8 @@@ void aq_nic_cfg_start(struct aq_nic_s *
  	aq_nic_rss_init(self, cfg->num_rss_queues);
  
  	/*descriptors */
 -	cfg->rxds = min(cfg->aq_hw_caps->rxds, AQ_CFG_RXDS_DEF);
 -	cfg->txds = min(cfg->aq_hw_caps->txds, AQ_CFG_TXDS_DEF);
 +	cfg->rxds = min(cfg->aq_hw_caps->rxds_max, AQ_CFG_RXDS_DEF);
 +	cfg->txds = min(cfg->aq_hw_caps->txds_max, AQ_CFG_TXDS_DEF);
  
  	/*rss rings */
  	cfg->vecs = min(cfg->aq_hw_caps->vecs, AQ_CFG_VECS_DEF);
@@@ -563,34 -563,41 +563,41 @@@ err_exit
  
  int aq_nic_set_multicast_list(struct aq_nic_s *self, struct net_device *ndev)
  {
+ 	unsigned int packet_filter = self->packet_filter;
  	struct netdev_hw_addr *ha = NULL;
  	unsigned int i = 0U;
  
 -	self->mc_list.count = 0U;
 -
 -	netdev_for_each_mc_addr(ha, ndev) {
 -		ether_addr_copy(self->mc_list.ar[i++], ha->addr);
 -		++self->mc_list.count;
+ 	self->mc_list.count = 0;
+ 	if (netdev_uc_count(ndev) > AQ_HW_MULTICAST_ADDRESS_MAX) {
+ 		packet_filter |= IFF_PROMISC;
+ 	} else {
+ 		netdev_for_each_uc_addr(ha, ndev) {
+ 			ether_addr_copy(self->mc_list.ar[i++], ha->addr);
  
 -		if (i >= AQ_CFG_MULTICAST_ADDRESS_MAX)
 -			break;
+ 			if (i >= AQ_HW_MULTICAST_ADDRESS_MAX)
+ 				break;
+ 		}
  	}
  
 -	if (i >= AQ_CFG_MULTICAST_ADDRESS_MAX) {
 -		/* Number of filters is too big: atlantic does not support this.
 -		 * Force all multi filter to support this.
 -		 * With this we disable all UC filters and setup "all pass"
 -		 * multicast mask
 -		 */
 -		self->packet_filter |= IFF_ALLMULTI;
 -		self->aq_nic_cfg.mc_list_count = 0;
 -		return self->aq_hw_ops->hw_packet_filter_set(self->aq_hw,
 -							self->packet_filter);
+ 	if (i + netdev_mc_count(ndev) > AQ_HW_MULTICAST_ADDRESS_MAX) {
+ 		packet_filter |= IFF_ALLMULTI;
  	} else {
 -		return self->aq_hw_ops->hw_multicast_list_set(self->aq_hw,
 -						    self->mc_list.ar,
 -						    self->mc_list.count);
+ 		netdev_for_each_mc_addr(ha, ndev) {
+ 			ether_addr_copy(self->mc_list.ar[i++], ha->addr);
+ 
+ 			if (i >= AQ_HW_MULTICAST_ADDRESS_MAX)
+ 				break;
+ 		}
+ 	}
+ 
+ 	if (i > 0 && i < AQ_HW_MULTICAST_ADDRESS_MAX) {
+ 		packet_filter |= IFF_MULTICAST;
+ 		self->mc_list.count = i;
+ 		self->aq_hw_ops->hw_multicast_list_set(self->aq_hw,
+ 						       self->mc_list.ar,
+ 						       self->mc_list.count);
  	}
+ 	return aq_nic_set_packet_filter(self, packet_filter);
  }
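The rewritten aq_nic_set_multicast_list() above funnels both the unicast and multicast lists into the single 32-entry hardware table, escalating to promiscuous mode when the unicast list alone cannot fit and to all-multicast when the combined list overflows. Here is a minimal userspace sketch of that decision, assuming the kernel's AQ_HW_MULTICAST_ADDRESS_MAX and IFF_* values; pick_filter_mode() is purely illustrative and not part of the driver:

```c
#include <stdio.h>

#define AQ_HW_MULTICAST_ADDRESS_MAX 32U
#define IFF_PROMISC   0x100
#define IFF_ALLMULTI  0x200
#define IFF_MULTICAST 0x1000

/* Mirror of the overflow policy: unicast overflow -> promiscuous,
 * unicast+multicast overflow -> all-multicast, otherwise program the
 * exact entries and enable multicast filtering.
 */
static unsigned int pick_filter_mode(unsigned int uc_count,
				     unsigned int mc_count)
{
	unsigned int filter = 0;
	unsigned int programmed = 0;

	if (uc_count > AQ_HW_MULTICAST_ADDRESS_MAX)
		filter |= IFF_PROMISC;		/* UC list cannot fit */
	else
		programmed = uc_count;		/* exact UC entries */

	if (programmed + mc_count > AQ_HW_MULTICAST_ADDRESS_MAX)
		filter |= IFF_ALLMULTI;		/* pass all multicast */
	else
		programmed += mc_count;		/* exact MC entries */

	if (programmed > 0 && programmed < AQ_HW_MULTICAST_ADDRESS_MAX)
		filter |= IFF_MULTICAST;	/* program the table */

	return filter;
}

int main(void)
{
	printf("8 UC + 10 MC -> 0x%x\n", pick_filter_mode(8, 10));  /* exact */
	printf("8 UC + 30 MC -> 0x%x\n", pick_filter_mode(8, 30));  /* ALLMULTI */
	printf("40 UC + 5 MC -> 0x%x\n", pick_filter_mode(40, 5));  /* PROMISC */
	return 0;
}
```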
  int aq_nic_set_mtu(struct aq_nic_s *self, int new_mtu)
@@@ -761,14 -768,10 +768,14 @@@ void aq_nic_get_link_ksettings(struct a
  		ethtool_link_ksettings_add_link_mode(cmd, advertising,
  						     100baseT_Full);
  
 -	if (self->aq_nic_cfg.flow_control)
 +	if (self->aq_nic_cfg.flow_control & AQ_NIC_FC_RX)
  		ethtool_link_ksettings_add_link_mode(cmd, advertising,
  						     Pause);
  
 +	if (self->aq_nic_cfg.flow_control & AQ_NIC_FC_TX)
 +		ethtool_link_ksettings_add_link_mode(cmd, advertising,
 +						     Asym_Pause);
 +
  	if (self->aq_nic_cfg.aq_hw_caps->media_type == AQ_HW_MEDIA_TYPE_FIBRE)
  		ethtool_link_ksettings_add_link_mode(cmd, advertising,
  						     FIBRE);
  	else
@@@ -883,7 -886,7 +890,7 @@@ void aq_nic_deinit(struct aq_nic_s *sel
  		aq_vec_deinit(aq_vec);
  
  	if (self->power_state == AQ_HW_POWER_STATE_D0) {
 -		(void)self->aq_hw_ops->hw_deinit(self->aq_hw);
 +		(void)self->aq_fw_ops->deinit(self->aq_hw);
  	} else {
  		(void)self->aq_hw_ops->hw_set_power(self->aq_hw,
  						    self->power_state);
diff --combined drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c
index ed7fe6f2e360,8cc6abadc03b..97addfa6f895
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c
@@@ -19,31 -19,29 +19,31 @@@
  #include "hw_atl_a0_internal.h"
#define DEFAULT_A0_BOARD_BASIC_CAPABILITIES \ - .is_64_dma = true, \ - .msix_irqs = 4U, \ - .irq_mask = ~0U, \ - .vecs = HW_ATL_A0_RSS_MAX, \ - .tcs = HW_ATL_A0_TC_MAX, \ - .rxd_alignment = 1U, \ - .rxd_size = HW_ATL_A0_RXD_SIZE, \ - .rxds = 248U, \ - .txd_alignment = 1U, \ - .txd_size = HW_ATL_A0_TXD_SIZE, \ - .txds = 8U * 1024U, \ - .txhwb_alignment = 4096U, \ - .tx_rings = HW_ATL_A0_TX_RINGS, \ - .rx_rings = HW_ATL_A0_RX_RINGS, \ - .hw_features = NETIF_F_HW_CSUM | \ - NETIF_F_RXHASH | \ - NETIF_F_RXCSUM | \ - NETIF_F_SG | \ - NETIF_F_TSO, \ + .is_64_dma = true, \ + .msix_irqs = 4U, \ + .irq_mask = ~0U, \ + .vecs = HW_ATL_A0_RSS_MAX, \ + .tcs = HW_ATL_A0_TC_MAX, \ + .rxd_alignment = 1U, \ + .rxd_size = HW_ATL_A0_RXD_SIZE, \ + .rxds_max = HW_ATL_A0_MAX_RXD, \ + .rxds_min = HW_ATL_A0_MIN_RXD, \ + .txd_alignment = 1U, \ + .txd_size = HW_ATL_A0_TXD_SIZE, \ + .txds_max = HW_ATL_A0_MAX_TXD, \ + .txds_min = HW_ATL_A0_MIN_RXD, \ + .txhwb_alignment = 4096U, \ + .tx_rings = HW_ATL_A0_TX_RINGS, \ + .rx_rings = HW_ATL_A0_RX_RINGS, \ + .hw_features = NETIF_F_HW_CSUM | \ + NETIF_F_RXHASH | \ + NETIF_F_RXCSUM | \ + NETIF_F_SG | \ + NETIF_F_TSO, \ .hw_priv_flags = IFF_UNICAST_FLT, \ - .flow_control = true, \ - .mtu = HW_ATL_A0_MTU_JUMBO, \ - .mac_regs_count = 88, \ + .flow_control = true, \ + .mtu = HW_ATL_A0_MTU_JUMBO, \ + .mac_regs_count = 88, \ .hw_alive_check_addr = 0x10U
const struct aq_hw_caps_s hw_atl_a0_caps_aqc100 = { @@@ -767,7 -765,7 +767,7 @@@ static int hw_atl_a0_hw_packet_filter_s
static int hw_atl_a0_hw_multicast_list_set(struct aq_hw_s *self, u8 ar_mac - [AQ_CFG_MULTICAST_ADDRESS_MAX] + [AQ_HW_MULTICAST_ADDRESS_MAX] [ETH_ALEN], u32 count) { @@@ -877,6 -875,7 +877,6 @@@ static int hw_atl_a0_hw_ring_rx_stop(st const struct aq_hw_ops hw_atl_ops_a0 = { .hw_set_mac_address = hw_atl_a0_hw_mac_addr_set, .hw_init = hw_atl_a0_hw_init, - .hw_deinit = hw_atl_utils_hw_deinit, .hw_set_power = hw_atl_utils_hw_set_power, .hw_reset = hw_atl_a0_hw_reset, .hw_start = hw_atl_a0_hw_start, diff --combined drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c index 9dd4f497676c,956860a69797..4809bf4baa34 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c @@@ -20,32 -20,30 +20,32 @@@ #include "hw_atl_llh_internal.h"
#define DEFAULT_B0_BOARD_BASIC_CAPABILITIES \ - .is_64_dma = true, \ - .msix_irqs = 4U, \ - .irq_mask = ~0U, \ - .vecs = HW_ATL_B0_RSS_MAX, \ - .tcs = HW_ATL_B0_TC_MAX, \ - .rxd_alignment = 1U, \ - .rxd_size = HW_ATL_B0_RXD_SIZE, \ - .rxds = 4U * 1024U, \ - .txd_alignment = 1U, \ - .txd_size = HW_ATL_B0_TXD_SIZE, \ - .txds = 8U * 1024U, \ - .txhwb_alignment = 4096U, \ - .tx_rings = HW_ATL_B0_TX_RINGS, \ - .rx_rings = HW_ATL_B0_RX_RINGS, \ - .hw_features = NETIF_F_HW_CSUM | \ - NETIF_F_RXCSUM | \ - NETIF_F_RXHASH | \ - NETIF_F_SG | \ - NETIF_F_TSO | \ - NETIF_F_LRO, \ - .hw_priv_flags = IFF_UNICAST_FLT, \ - .flow_control = true, \ - .mtu = HW_ATL_B0_MTU_JUMBO, \ - .mac_regs_count = 88, \ + .is_64_dma = true, \ + .msix_irqs = 4U, \ + .irq_mask = ~0U, \ + .vecs = HW_ATL_B0_RSS_MAX, \ + .tcs = HW_ATL_B0_TC_MAX, \ + .rxd_alignment = 1U, \ + .rxd_size = HW_ATL_B0_RXD_SIZE, \ + .rxds_max = HW_ATL_B0_MAX_RXD, \ + .rxds_min = HW_ATL_B0_MIN_RXD, \ + .txd_alignment = 1U, \ + .txd_size = HW_ATL_B0_TXD_SIZE, \ + .txds_max = HW_ATL_B0_MAX_TXD, \ + .txds_min = HW_ATL_B0_MIN_TXD, \ + .txhwb_alignment = 4096U, \ + .tx_rings = HW_ATL_B0_TX_RINGS, \ + .rx_rings = HW_ATL_B0_RX_RINGS, \ + .hw_features = NETIF_F_HW_CSUM | \ + NETIF_F_RXCSUM | \ + NETIF_F_RXHASH | \ + NETIF_F_SG | \ + NETIF_F_TSO | \ + NETIF_F_LRO, \ + .hw_priv_flags = IFF_UNICAST_FLT, \ + .flow_control = true, \ + .mtu = HW_ATL_B0_MTU_JUMBO, \ + .mac_regs_count = 88, \ .hw_alive_check_addr = 0x10U
const struct aq_hw_caps_s hw_atl_b0_caps_aqc100 = { @@@ -786,7 -784,7 +786,7 @@@ static int hw_atl_b0_hw_packet_filter_s
static int hw_atl_b0_hw_multicast_list_set(struct aq_hw_s *self, u8 ar_mac - [AQ_CFG_MULTICAST_ADDRESS_MAX] + [AQ_HW_MULTICAST_ADDRESS_MAX] [ETH_ALEN], u32 count) { @@@ -814,7 -812,7 +814,7 @@@
hw_atl_rpfl2_uc_flr_en_set(self, (self->aq_nic_cfg->is_mc_list_enabled), - HW_ATL_B0_MAC_MIN + i); + HW_ATL_B0_MAC_MIN + i); }
err = aq_hw_err_from_flags(self); @@@ -935,6 -933,7 +935,6 @@@ static int hw_atl_b0_hw_ring_rx_stop(st const struct aq_hw_ops hw_atl_ops_b0 = { .hw_set_mac_address = hw_atl_b0_hw_mac_addr_set, .hw_init = hw_atl_b0_hw_init, - .hw_deinit = hw_atl_utils_hw_deinit, .hw_set_power = hw_atl_utils_hw_set_power, .hw_reset = hw_atl_b0_hw_reset, .hw_start = hw_atl_b0_hw_start, diff --combined drivers/net/ethernet/broadcom/bcmsysport.c index eb890c4b3b2d,a1f60f89e059..631617d95769 --- a/drivers/net/ethernet/broadcom/bcmsysport.c +++ b/drivers/net/ethernet/broadcom/bcmsysport.c @@@ -1946,8 -1946,8 +1946,8 @@@ static int bcm_sysport_open(struct net_ if (!priv->is_lite) priv->crc_fwd = !!(umac_readl(priv, UMAC_CMD) & CMD_CRC_FWD); else - priv->crc_fwd = !!(gib_readl(priv, GIB_CONTROL) & - GIB_FCS_STRIP); + priv->crc_fwd = !((gib_readl(priv, GIB_CONTROL) & + GIB_FCS_STRIP) >> GIB_FCS_STRIP_SHIFT);
phydev = of_phy_connect(dev, priv->phy_dn, bcm_sysport_adj_link, 0, priv->phy_interface); @@@ -2107,7 -2107,7 +2107,7 @@@ static const struct ethtool_ops bcm_sys };
static u16 bcm_sysport_select_queue(struct net_device *dev, struct sk_buff *skb, - void *accel_priv, + struct net_device *sb_dev, select_queue_fallback_t fallback) { struct bcm_sysport_priv *priv = netdev_priv(dev); @@@ -2116,7 -2116,7 +2116,7 @@@ unsigned int q, port;
if (!netdev_uses_dsa(dev)) - return fallback(dev, skb); + return fallback(dev, skb, NULL);
/* DSA tagging layer will have configured the correct queue */ q = BRCM_TAG_GET_QUEUE(queue); @@@ -2124,7 -2124,7 +2124,7 @@@ tx_ring = priv->ring_map[q + port * priv->per_port_num_tx_queues];
if (unlikely(!tx_ring)) - return fallback(dev, skb); + return fallback(dev, skb, NULL);
return tx_ring->index; } diff --combined drivers/net/ethernet/broadcom/bnxt/bnxt.c index 2cf726e31461,4394c1162be4..c612d74451a7 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@@ -1727,7 -1727,7 +1727,7 @@@ static int bnxt_async_event_process(str speed); } set_bit(BNXT_LINK_SPEED_CHNG_SP_EVENT, &bp->sp_event); - /* fall thru */ + /* fall through */ } case ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE: set_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event); @@@ -3012,6 -3012,13 +3012,6 @@@ static void bnxt_free_hwrm_resources(st bp->hwrm_cmd_resp_dma_addr);
bp->hwrm_cmd_resp_addr = NULL; - if (bp->hwrm_dbg_resp_addr) { - dma_free_coherent(&pdev->dev, HWRM_DBG_REG_BUF_SIZE, - bp->hwrm_dbg_resp_addr, - bp->hwrm_dbg_resp_dma_addr); - - bp->hwrm_dbg_resp_addr = NULL; - } }
static int bnxt_alloc_hwrm_resources(struct bnxt *bp) @@@ -3023,6 -3030,12 +3023,6 @@@ GFP_KERNEL); if (!bp->hwrm_cmd_resp_addr) return -ENOMEM; - bp->hwrm_dbg_resp_addr = dma_alloc_coherent(&pdev->dev, - HWRM_DBG_REG_BUF_SIZE, - &bp->hwrm_dbg_resp_dma_addr, - GFP_KERNEL); - if (!bp->hwrm_dbg_resp_addr) - netdev_warn(bp->dev, "fail to alloc debug register dma mem\n");
return 0; } @@@ -5699,7 -5712,9 +5699,9 @@@ static int bnxt_init_chip(struct bnxt * } vnic->uc_filter_count = 1;
- vnic->rx_mask = CFA_L2_SET_RX_MASK_REQ_MASK_BCAST; + vnic->rx_mask = 0; + if (bp->dev->flags & IFF_BROADCAST) + vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_BCAST;
if ((bp->dev->flags & IFF_PROMISC) && bnxt_promisc_ok(bp)) vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS; @@@ -5904,7 -5919,7 +5906,7 @@@ unsigned int bnxt_get_max_func_irqs(str return min_t(unsigned int, hw_resc->max_irqs, hw_resc->max_cp_rings); }
- void bnxt_set_max_func_irqs(struct bnxt *bp, unsigned int max_irqs) + static void bnxt_set_max_func_irqs(struct bnxt *bp, unsigned int max_irqs) { bp->hw_resc.max_irqs = max_irqs; } @@@ -6875,7 -6890,7 +6877,7 @@@ static int __bnxt_open_nic(struct bnxt rc = bnxt_request_irq(bp); if (rc) { netdev_err(bp->dev, "bnxt_request_irq err: %x\n", rc); - goto open_err; + goto open_err_irq; } }
@@@ -6915,6 -6930,8 +6917,8 @@@ open_err: bnxt_debug_dev_exit(bp); bnxt_disable_napi(bp); + + open_err_irq: bnxt_del_napi(bp);
open_err_free_mem: @@@ -7201,13 -7218,16 +7205,16 @@@ static void bnxt_set_rx_mode(struct net
mask &= ~(CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS | CFA_L2_SET_RX_MASK_REQ_MASK_MCAST | - CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST); + CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST | + CFA_L2_SET_RX_MASK_REQ_MASK_BCAST);
if ((dev->flags & IFF_PROMISC) && bnxt_promisc_ok(bp)) mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
uc_update = bnxt_uc_list_updated(bp);
+ if (dev->flags & IFF_BROADCAST) + mask |= CFA_L2_SET_RX_MASK_REQ_MASK_BCAST; if (dev->flags & IFF_ALLMULTI) { mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST; vnic->mc_list_count = 0; @@@ -7971,7 -7991,7 +7978,7 @@@ static int bnxt_setup_tc_block(struct n switch (f->command) { case TC_BLOCK_BIND: return tcf_block_cb_register(f->block, bnxt_setup_tc_block_cb, - bp, bp); + bp, bp, f->extack); case TC_BLOCK_UNBIND: tcf_block_cb_unregister(f->block, bnxt_setup_tc_block_cb, bp); return 0; @@@ -8489,11 -8509,11 +8496,11 @@@ int bnxt_get_max_rings(struct bnxt *bp int rx, tx, cp;
_bnxt_get_max_rings(bp, &rx, &tx, &cp); + *max_rx = rx; + *max_tx = tx; if (!rx || !tx || !cp) return -ENOMEM;
- *max_rx = rx; - *max_tx = tx; return bnxt_trim_rings(bp, max_rx, max_tx, cp, shared); }
@@@ -8507,8 -8527,11 +8514,11 @@@ static int bnxt_get_dflt_rings(struct b /* Not enough rings, try disabling agg rings. */ bp->flags &= ~BNXT_FLAG_AGG_RINGS; rc = bnxt_get_max_rings(bp, max_rx, max_tx, shared); - if (rc) + if (rc) { + /* set BNXT_FLAG_AGG_RINGS back for consistency */ + bp->flags |= BNXT_FLAG_AGG_RINGS; return rc; + } bp->flags |= BNXT_FLAG_NO_AGG_RINGS; bp->dev->hw_features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW); bp->dev->features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW); diff --combined drivers/net/ethernet/broadcom/bnxt/bnxt.h index 709ba86d3a02,91575ef97c8c..934aa11c82eb --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h @@@ -1287,6 -1287,9 +1287,6 @@@ struct bnxt dma_addr_t hwrm_short_cmd_req_dma_addr; void *hwrm_cmd_resp_addr; dma_addr_t hwrm_cmd_resp_dma_addr; - void *hwrm_dbg_resp_addr; - dma_addr_t hwrm_dbg_resp_dma_addr; -#define HWRM_DBG_REG_BUF_SIZE 128
struct rx_port_stats *hw_rx_port_stats; struct tx_port_stats *hw_tx_port_stats; @@@ -1467,7 -1470,6 +1467,6 @@@ void bnxt_set_max_func_stat_ctxs(struc unsigned int bnxt_get_max_func_cp_rings(struct bnxt *bp); void bnxt_set_max_func_cp_rings(struct bnxt *bp, unsigned int max); unsigned int bnxt_get_max_func_irqs(struct bnxt *bp); - void bnxt_set_max_func_irqs(struct bnxt *bp, unsigned int max); int bnxt_get_avail_msix(struct bnxt *bp, int num); int bnxt_reserve_rings(struct bnxt *bp); void bnxt_tx_disable(struct bnxt *bp); diff --combined drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c index d0699f39ba34,491bd40a254d..139d96c5a023 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c @@@ -27,6 -27,15 +27,15 @@@ #define BNXT_FID_INVALID 0xffff #define VLAN_TCI(vid, prio) ((vid) | ((prio) << VLAN_PRIO_SHIFT))
+ #define is_vlan_pcp_wildcarded(vlan_tci_mask) \ + ((ntohs(vlan_tci_mask) & VLAN_PRIO_MASK) == 0x0000) + #define is_vlan_pcp_exactmatch(vlan_tci_mask) \ + ((ntohs(vlan_tci_mask) & VLAN_PRIO_MASK) == VLAN_PRIO_MASK) + #define is_vlan_pcp_zero(vlan_tci) \ + ((ntohs(vlan_tci) & VLAN_PRIO_MASK) == 0x0000) + #define is_vid_exactmatch(vlan_tci_mask) \ + ((ntohs(vlan_tci_mask) & VLAN_VID_MASK) == VLAN_VID_MASK) + /* Return the dst fid of the func for flow forwarding * For PFs: src_fid is the fid of the PF * For VF-reps: src_fid the fid of the VF @@@ -389,6 -398,21 +398,21 @@@ static bool is_exactmatch(void *mask, i return true; }
+ static bool is_vlan_tci_allowed(__be16 vlan_tci_mask, + __be16 vlan_tci) + { + /* VLAN priority must be either exactly zero or fully wildcarded and + * VLAN id must be exact match. + */ + if (is_vid_exactmatch(vlan_tci_mask) && + ((is_vlan_pcp_exactmatch(vlan_tci_mask) && + is_vlan_pcp_zero(vlan_tci)) || + is_vlan_pcp_wildcarded(vlan_tci_mask))) + return true; + + return false; + } + static bool bits_set(void *key, int len) { const u8 *p = key; @@@ -803,9 -827,9 +827,9 @@@ static bool bnxt_tc_can_offload(struct /* Currently VLAN fields cannot be partial wildcard */ if (bits_set(&flow->l2_key.inner_vlan_tci, sizeof(flow->l2_key.inner_vlan_tci)) && - !is_exactmatch(&flow->l2_mask.inner_vlan_tci, - sizeof(flow->l2_mask.inner_vlan_tci))) { - netdev_info(bp->dev, "Wildcard match unsupported for VLAN TCI\n"); + !is_vlan_tci_allowed(flow->l2_mask.inner_vlan_tci, + flow->l2_key.inner_vlan_tci)) { + netdev_info(bp->dev, "Unsupported VLAN TCI\n"); return false; } if (bits_set(&flow->l2_key.inner_vlan_tpid, @@@ -1544,16 -1568,22 +1568,16 @@@ void bnxt_tc_flow_stats_work(struct bnx int bnxt_tc_setup_flower(struct bnxt *bp, u16 src_fid, struct tc_cls_flower_offload *cls_flower) { - int rc = 0; - switch (cls_flower->command) { case TC_CLSFLOWER_REPLACE: - rc = bnxt_tc_add_flow(bp, src_fid, cls_flower); - break; - + return bnxt_tc_add_flow(bp, src_fid, cls_flower); case TC_CLSFLOWER_DESTROY: - rc = bnxt_tc_del_flow(bp, cls_flower); - break; - + return bnxt_tc_del_flow(bp, cls_flower); case TC_CLSFLOWER_STATS: - rc = bnxt_tc_get_flow_stats(bp, cls_flower); - break; + return bnxt_tc_get_flow_stats(bp, cls_flower); + default: + return -EOPNOTSUPP; } - return rc; }
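To make the new acceptance rule concrete: is_vlan_tci_allowed() above offloads a flower VLAN match only when the VLAN ID mask is exact and the priority bits are either fully wildcarded or an exact match on priority zero. A small host-order sketch follows (the kernel compares __be16 TCI values via ntohs(); this standalone helper is illustrative only):

```c
#include <stdio.h>
#include <stdint.h>

#define VLAN_PRIO_MASK 0xe000	/* top 3 bits: PCP */
#define VLAN_VID_MASK  0x0fff	/* low 12 bits: VID */

static int is_vlan_tci_allowed(uint16_t tci_mask, uint16_t tci)
{
	int vid_exact = (tci_mask & VLAN_VID_MASK) == VLAN_VID_MASK;
	int pcp_wild  = (tci_mask & VLAN_PRIO_MASK) == 0;
	int pcp_exact = (tci_mask & VLAN_PRIO_MASK) == VLAN_PRIO_MASK;
	int pcp_zero  = (tci & VLAN_PRIO_MASK) == 0;

	/* VID must match exactly; PCP is all-or-nothing, and if exact
	 * it must be priority zero.
	 */
	return vid_exact && (pcp_wild || (pcp_exact && pcp_zero));
}

int main(void)
{
	/* exact VID 100, priority wildcarded: allowed (1) */
	printf("%d\n", is_vlan_tci_allowed(0x0fff, 100));
	/* exact VID 100, exact priority 0: allowed (1) */
	printf("%d\n", is_vlan_tci_allowed(0xefff, 100));
	/* exact VID 100, exact nonzero priority: rejected (0) */
	printf("%d\n", is_vlan_tci_allowed(0xefff, (3 << 13) | 100));
	return 0;
}
```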
static const struct rhashtable_params bnxt_tc_flow_ht_params = { diff --combined drivers/net/ethernet/broadcom/tg3.c index 0a796d5ec893,aa1374d0af93..d8dad07f826a --- a/drivers/net/ethernet/broadcom/tg3.c +++ b/drivers/net/ethernet/broadcom/tg3.c @@@ -6,11 -6,15 +6,15 @@@ * Copyright (C) 2004 Sun Microsystems Inc. * Copyright (C) 2005-2016 Broadcom Corporation. * Copyright (C) 2016-2017 Broadcom Limited. + * Copyright (C) 2018 Broadcom. All Rights Reserved. The term "Broadcom" + * refers to Broadcom Inc. and/or its subsidiaries. * * Firmware is: * Derived from proprietary unpublished source code, * Copyright (C) 2000-2016 Broadcom Corporation. * Copyright (C) 2016-2017 Broadcom Ltd. + * Copyright (C) 2018 Broadcom. All Rights Reserved. The term "Broadcom" + * refers to Broadcom Inc. and/or its subsidiaries. * * Permission is hereby granted for the distribution of this firmware * data in hexadecimal or equivalent format, provided this copyright @@@ -721,7 -725,6 +725,7 @@@ static int tg3_ape_lock(struct tg3 *tp case TG3_APE_LOCK_GPIO: if (tg3_asic_rev(tp) == ASIC_REV_5761) return 0; + /* else: fall through */ case TG3_APE_LOCK_GRC: case TG3_APE_LOCK_MEM: if (!tp->pci_fn) @@@ -782,7 -785,6 +786,7 @@@ static void tg3_ape_unlock(struct tg3 * case TG3_APE_LOCK_GPIO: if (tg3_asic_rev(tp) == ASIC_REV_5761) return; + /* else: fall through */ case TG3_APE_LOCK_GRC: case TG3_APE_LOCK_MEM: if (!tp->pci_fn) @@@ -9292,6 -9294,15 +9296,15 @@@ static int tg3_chip_reset(struct tg3 *t
tg3_restore_clk(tp);
+ /* Increase the core clock speed to fix tx timeout issue for 5762 + * with 100Mbps link speed. + */ + if (tg3_asic_rev(tp) == ASIC_REV_5762) { + val = tr32(TG3_CPMU_CLCK_ORIDE_ENABLE); + tw32(TG3_CPMU_CLCK_ORIDE_ENABLE, val | + TG3_CPMU_MAC_ORIDE_ENABLE); + } + /* Reprobe ASF enable state. */ tg3_flag_clear(tp, ENABLE_ASF); tp->phy_flags &= ~(TG3_PHYFLG_1G_ON_VAUX_OK | @@@ -10708,40 -10719,28 +10721,40 @@@ static int tg3_reset_hw(struct tg3 *tp switch (limit) { case 16: tw32(MAC_RCV_RULE_15, 0); tw32(MAC_RCV_VALUE_15, 0); + /* fall through */ case 15: tw32(MAC_RCV_RULE_14, 0); tw32(MAC_RCV_VALUE_14, 0); + /* fall through */ case 14: tw32(MAC_RCV_RULE_13, 0); tw32(MAC_RCV_VALUE_13, 0); + /* fall through */ case 13: tw32(MAC_RCV_RULE_12, 0); tw32(MAC_RCV_VALUE_12, 0); + /* fall through */ case 12: tw32(MAC_RCV_RULE_11, 0); tw32(MAC_RCV_VALUE_11, 0); + /* fall through */ case 11: tw32(MAC_RCV_RULE_10, 0); tw32(MAC_RCV_VALUE_10, 0); + /* fall through */ case 10: tw32(MAC_RCV_RULE_9, 0); tw32(MAC_RCV_VALUE_9, 0); + /* fall through */ case 9: tw32(MAC_RCV_RULE_8, 0); tw32(MAC_RCV_VALUE_8, 0); + /* fall through */ case 8: tw32(MAC_RCV_RULE_7, 0); tw32(MAC_RCV_VALUE_7, 0); + /* fall through */ case 7: tw32(MAC_RCV_RULE_6, 0); tw32(MAC_RCV_VALUE_6, 0); + /* fall through */ case 6: tw32(MAC_RCV_RULE_5, 0); tw32(MAC_RCV_VALUE_5, 0); + /* fall through */ case 5: tw32(MAC_RCV_RULE_4, 0); tw32(MAC_RCV_VALUE_4, 0); + /* fall through */ case 4: /* tw32(MAC_RCV_RULE_3, 0); tw32(MAC_RCV_VALUE_3, 0); */ case 3: diff --combined drivers/net/ethernet/cavium/Kconfig index 80e2e93e4aad,92d88c5f76fb..5f03199a3acf --- a/drivers/net/ethernet/cavium/Kconfig +++ b/drivers/net/ethernet/cavium/Kconfig @@@ -4,6 -4,7 +4,6 @@@
config NET_VENDOR_CAVIUM bool "Cavium ethernet drivers" - depends on PCI default y ---help--- Select this option if you want enable Cavium network support. @@@ -33,9 -34,9 +33,9 @@@ config THUNDER_NIC_V
config THUNDER_NIC_BGX tristate "Thunder MAC interface driver (BGX)" - depends on 64BIT + depends on 64BIT && PCI select PHYLIB - select MDIO_THUNDER + select MDIO_THUNDER if PCI select THUNDER_NIC_RGX ---help--- This driver supports programming and controlling of MAC @@@ -43,17 -44,16 +43,16 @@@
config THUNDER_NIC_RGX tristate "Thunder MAC interface driver (RGX)" - depends on 64BIT + depends on 64BIT && PCI select PHYLIB - select MDIO_THUNDER + select MDIO_THUNDER if PCI ---help--- This driver supports configuring XCV block of RGX interface present on CN81XX chip.
config CAVIUM_PTP tristate "Cavium PTP coprocessor as PTP clock" - depends on 64BIT - depends on PCI + depends on 64BIT && PCI imply PTP_1588_CLOCK default y ---help--- @@@ -65,9 -65,8 +64,9 @@@
config LIQUIDIO tristate "Cavium LiquidIO support" - depends on 64BIT + depends on 64BIT && PCI depends on MAY_USE_DEVLINK + depends on PCI imply PTP_1588_CLOCK select FW_LOADER select LIBCRC32C diff --combined drivers/net/ethernet/cavium/liquidio/lio_main.c index 4980eca87667,7e8454d3b1ad..8ef87a76692b --- a/drivers/net/ethernet/cavium/liquidio/lio_main.c +++ b/drivers/net/ethernet/cavium/liquidio/lio_main.c @@@ -91,6 -91,9 +91,9 @@@ static int octeon_console_debug_enabled */ #define LIO_SYNC_OCTEON_TIME_INTERVAL_MS 60000
+ /* time to wait for possible in-flight requests in milliseconds */ + #define WAIT_INFLIGHT_REQUEST msecs_to_jiffies(1000) + struct lio_trusted_vf_ctx { struct completion complete; int status; @@@ -259,7 -262,7 +262,7 @@@ static inline void pcierror_quiesce_dev force_io_queues_off(oct);
/* To allow for in-flight requests */ - schedule_timeout_uninterruptible(100); + schedule_timeout_uninterruptible(WAIT_INFLIGHT_REQUEST);
if (wait_for_pending_requests(oct)) dev_err(&oct->pci_dev->dev, "There were pending requests\n"); @@@ -684,7 -687,7 +687,7 @@@ static void lio_sync_octeon_time(struc lt = (struct lio_time *)sc->virtdptr;
/* Get time of the day */ - getnstimeofday64(&ts); + ktime_get_real_ts64(&ts); lt->sec = ts.tv_sec; lt->nsec = ts.tv_nsec; octeon_swap_8B_data((u64 *)lt, (sizeof(struct lio_time)) / 8); @@@ -2628,7 -2631,7 +2631,7 @@@ static int liquidio_vlan_rx_kill_vid(st
ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl); if (ret < 0) { - dev_err(&oct->pci_dev->dev, "Add VLAN filter failed in core (ret: 0x%x)\n", + dev_err(&oct->pci_dev->dev, "Del VLAN filter failed in core (ret: 0x%x)\n", ret); } return ret; @@@ -2906,7 -2909,7 +2909,7 @@@ static int liquidio_set_vf_vlan(struct vfidx + 1; /* vfidx is 0 based, but vf_num (param2) is 1 based */ nctrl.ncmd.s.more = 0; nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; - nctrl.cb_fn = 0; + nctrl.cb_fn = NULL; nctrl.wait_time = LIO_CMD_WAIT_TM;
octnet_send_nic_ctrl_pkt(oct, &nctrl); @@@ -3065,7 -3068,7 +3068,7 @@@ static int liquidio_set_vf_link_state(s nctrl.ncmd.s.param2 = linkstate; nctrl.ncmd.s.more = 0; nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; - nctrl.cb_fn = 0; + nctrl.cb_fn = NULL; nctrl.wait_time = LIO_CMD_WAIT_TM;
octnet_send_nic_ctrl_pkt(oct, &nctrl); @@@ -3299,9 -3302,7 +3302,9 @@@ static int setup_nic_devices(struct oct { struct lio *lio = NULL; struct net_device *netdev; - u8 mac[6], i, j, *fw_ver; + u8 mac[6], i, j, *fw_ver, *micro_ver; + unsigned long micro; + u32 cur_ver; struct octeon_soft_command *sc; struct liquidio_if_cfg_context *ctx; struct liquidio_if_cfg_resp *resp; @@@ -3431,14 -3432,6 +3434,14 @@@ fw_ver); }
 +	/* extract micro version field; point past '<maj>.<min>.' */
 +	micro_ver = fw_ver + strlen(LIQUIDIO_BASE_VERSION) + 1;
 +	if (kstrtoul(micro_ver, 10, &micro) != 0)
 +		micro = 0;
 +	octeon_dev->fw_info.ver.maj = LIQUIDIO_BASE_MAJOR_VERSION;
 +	octeon_dev->fw_info.ver.min = LIQUIDIO_BASE_MINOR_VERSION;
 +	octeon_dev->fw_info.ver.rev = micro;
 +
  	octeon_swap_8B_data((u64 *)(&resp->cfg_info),
  			    (sizeof(struct liquidio_if_cfg_info)) >> 3);
@@@ -3579,8 -3572,9 +3582,8 @@@ for (j = 0; j < octeon_dev->sriov_info.max_vfs; j++) { u8 vfmac[ETH_ALEN];
- random_ether_addr(&vfmac[0]); - if (__liquidio_set_vf_mac(netdev, j, - &vfmac[0], false)) { + eth_random_addr(vfmac); + if (__liquidio_set_vf_mac(netdev, j, vfmac, false)) { dev_err(&octeon_dev->pci_dev->dev, "Error setting VF%d MAC address\n", j); @@@ -3681,19 -3675,7 +3684,19 @@@ OCTEON_CN2350_25GB_SUBSYS_ID || octeon_dev->subsystem_id == OCTEON_CN2360_25GB_SUBSYS_ID) { - liquidio_get_speed(lio); + cur_ver = OCT_FW_VER(octeon_dev->fw_info.ver.maj, + octeon_dev->fw_info.ver.min, + octeon_dev->fw_info.ver.rev); + + /* speed control unsupported in f/w older than 1.7.2 */ + if (cur_ver < OCT_FW_VER(1, 7, 2)) { + dev_info(&octeon_dev->pci_dev->dev, + "speed setting not supported by f/w."); + octeon_dev->speed_setting = 25; + octeon_dev->no_speed_setting = 1; + } else { + liquidio_get_speed(lio); + }
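The gate above works because OCT_FW_VER() packs major/minor/revision into one integer, so firmware versions compare with a plain less-than. A sketch of the idea; the packing shown here is an assumption for illustration, the real macro lives in the liquidio headers:

```c
#include <stdio.h>

/* hypothetical packing: 8 bits per component, monotonic in (maj, min, rev) */
#define OCT_FW_VER(maj, min, rev) \
	(((maj) << 16) | ((min) << 8) | (rev))

int main(void)
{
	unsigned int cur_ver = OCT_FW_VER(1, 6, 9);

	/* speed control unsupported in f/w older than 1.7.2 */
	if (cur_ver < OCT_FW_VER(1, 7, 2))
		printf("speed setting not supported by f/w.\n");
	else
		printf("speed setting supported\n");
	return 0;
}
```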
if (octeon_dev->speed_setting == 0) { octeon_dev->speed_setting = 25; diff --combined drivers/net/ethernet/chelsio/cxgb4/t4_hw.c index d266177aeef5,3720c3e11ebb..2d9943f90a75 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c @@@ -2882,57 -2882,6 +2882,57 @@@ int t4_get_vpd_params(struct adapter *a return 0; }
+/** + * t4_get_pfres - retrieve VF resource limits + * @adapter: the adapter + * + * Retrieves configured resource limits and capabilities for a physical + * function. The results are stored in @adapter->pfres. + */ +int t4_get_pfres(struct adapter *adapter) +{ + struct pf_resources *pfres = &adapter->params.pfres; + struct fw_pfvf_cmd cmd, rpl; + int v; + u32 word; + + /* Execute PFVF Read command to get VF resource limits; bail out early + * with error on command failure. + */ + memset(&cmd, 0, sizeof(cmd)); + cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_PFVF_CMD) | + FW_CMD_REQUEST_F | + FW_CMD_READ_F | + FW_PFVF_CMD_PFN_V(adapter->pf) | + FW_PFVF_CMD_VFN_V(0)); + cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd)); + v = t4_wr_mbox(adapter, adapter->mbox, &cmd, sizeof(cmd), &rpl); + if (v != FW_SUCCESS) + return v; + + /* Extract PF resource limits and return success. + */ + word = be32_to_cpu(rpl.niqflint_niq); + pfres->niqflint = FW_PFVF_CMD_NIQFLINT_G(word); + pfres->niq = FW_PFVF_CMD_NIQ_G(word); + + word = be32_to_cpu(rpl.type_to_neq); + pfres->neq = FW_PFVF_CMD_NEQ_G(word); + pfres->pmask = FW_PFVF_CMD_PMASK_G(word); + + word = be32_to_cpu(rpl.tc_to_nexactf); + pfres->tc = FW_PFVF_CMD_TC_G(word); + pfres->nvi = FW_PFVF_CMD_NVI_G(word); + pfres->nexactf = FW_PFVF_CMD_NEXACTF_G(word); + + word = be32_to_cpu(rpl.r_caps_to_nethctrl); + pfres->r_caps = FW_PFVF_CMD_R_CAPS_G(word); + pfres->wx_caps = FW_PFVF_CMD_WX_CAPS_G(word); + pfres->nethctrl = FW_PFVF_CMD_NETHCTRL_G(word); + + return 0; +} + /* serial flash and firmware constants */ enum { SF_ATTEMPTS = 10, /* max retries for SF operations */ @@@ -8753,7 -8702,7 +8753,7 @@@ static int t4_get_flash_params(struct a };
unsigned int part, manufacturer; - unsigned int density, size; + unsigned int density, size = 0; u32 flashid = 0; int ret;
@@@ -8823,11 -8772,6 +8823,6 @@@ case 0x22: /* 256MB */ size = 1 << 28; break; - - default: - dev_err(adap->pdev_dev, "Micron Flash Part has bad size, ID = %#x, Density code = %#x\n", - flashid, density); - return -EINVAL; } break; } @@@ -8843,10 -8787,6 +8838,6 @@@ case 0x17: /* 64MB */ size = 1 << 26; break; - default: - dev_err(adap->pdev_dev, "ISSI Flash Part has bad size, ID = %#x, Density code = %#x\n", - flashid, density); - return -EINVAL; } break; } @@@ -8862,10 -8802,6 +8853,6 @@@ case 0x18: /* 16MB */ size = 1 << 24; break; - default: - dev_err(adap->pdev_dev, "Macronix Flash Part has bad size, ID = %#x, Density code = %#x\n", - flashid, density); - return -EINVAL; } break; } @@@ -8881,17 -8817,21 +8868,21 @@@ case 0x18: /* 16MB */ size = 1 << 24; break; - default: - dev_err(adap->pdev_dev, "Winbond Flash Part has bad size, ID = %#x, Density code = %#x\n", - flashid, density); - return -EINVAL; } break; } - default: - dev_err(adap->pdev_dev, "Unsupported Flash Part, ID = %#x\n", - flashid); - return -EINVAL; + } + + /* If we didn't recognize the FLASH part, that's no real issue: the + * Hardware/Software contract says that Hardware will _*ALWAYS*_ + * use a FLASH part which is at least 4MB in size and has 64KB + * sectors. The unrecognized FLASH part is likely to be much larger + * than 4MB, but that's all we really need. + */ + if (size == 0) { + dev_warn(adap->pdev_dev, "Unknown Flash Part, ID = %#x, assuming 4MB\n", + flashid); + size = 1 << 22; }
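Dropping the per-vendor default: error paths above changes the policy from rejecting unknown flash parts to warning and assuming the guaranteed minimum of a 4MB part with 64KB sectors. A compact sketch of the resulting decode-with-fallback shape; the density-to-size cases below are a small illustrative subset, not the full t4_hw.c tables:

```c
#include <stdio.h>

/* decode a density code into a byte size; 0 means "unrecognized" */
static unsigned int flash_size_from_density(unsigned int density)
{
	switch (density) {
	case 0x17: return 1 << 26;	/* 64MB  */
	case 0x18: return 1 << 24;	/* 16MB  */
	case 0x22: return 1 << 28;	/* 256MB */
	default:   return 0;
	}
}

int main(void)
{
	unsigned int codes[] = { 0x17, 0x22, 0x99 };

	for (unsigned int i = 0; i < 3; i++) {
		unsigned int size = flash_size_from_density(codes[i]);

		if (size == 0) {
			/* unknown part: warn and assume the 4MB minimum */
			fprintf(stderr,
				"Unknown Flash Part, density %#x, assuming 4MB\n",
				codes[i]);
			size = 1 << 22;
		}
		printf("density %#x -> %u bytes\n", codes[i], size);
	}
	return 0;
}
```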
/* Store decoded Flash size and fall through into vetting code. */ diff --combined drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c index e51c8dc52f37,77b2adb29341..8d67f0123699 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c @@@ -48,7 -48,6 +48,7 @@@ #include <linux/route.h> #include <linux/gcd.h> #include <linux/random.h> +#include <linux/if_macvlan.h> #include <net/netevent.h> #include <net/neighbour.h> #include <net/arp.h> @@@ -61,7 -60,6 +61,7 @@@ #include <net/ndisc.h> #include <net/ipv6.h> #include <net/fib_notifier.h> +#include <net/switchdev.h>
#include "spectrum.h" #include "core.h" @@@ -165,9 -163,7 +165,9 @@@ struct mlxsw_sp_rif_ops const struct mlxsw_sp_rif_params *params); int (*configure)(struct mlxsw_sp_rif *rif); void (*deconfigure)(struct mlxsw_sp_rif *rif); - struct mlxsw_sp_fid * (*fid_get)(struct mlxsw_sp_rif *rif); + struct mlxsw_sp_fid * (*fid_get)(struct mlxsw_sp_rif *rif, + struct netlink_ext_ack *extack); + void (*fdb_del)(struct mlxsw_sp_rif *rif, const char *mac); };
static void mlxsw_sp_lpm_tree_hold(struct mlxsw_sp_lpm_tree *lpm_tree); @@@ -346,6 -342,10 +346,6 @@@ static void mlxsw_sp_rif_counters_free( mlxsw_sp_rif_counter_free(mlxsw_sp, rif, MLXSW_SP_RIF_COUNTER_EGRESS); }
-static struct mlxsw_sp_rif * -mlxsw_sp_rif_find_by_dev(const struct mlxsw_sp *mlxsw_sp, - const struct net_device *dev); - #define MLXSW_SP_PREFIX_COUNT (sizeof(struct in6_addr) * BITS_PER_BYTE + 1)
struct mlxsw_sp_prefix_usage { @@@ -1109,8 -1109,7 +1109,8 @@@ mlxsw_sp_fib_entry_decap_init(struct ml u32 tunnel_index; int err;
- err = mlxsw_sp_kvdl_alloc(mlxsw_sp, 1, &tunnel_index); + err = mlxsw_sp_kvdl_alloc(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ, + 1, &tunnel_index); if (err) return err;
@@@ -1126,8 -1125,7 +1126,8 @@@ static void mlxsw_sp_fib_entry_decap_fi /* Unlink this node from the IPIP entry that it's the decap entry of. */ fib_entry->decap.ipip_entry->decap_fib_entry = NULL; fib_entry->decap.ipip_entry = NULL; - mlxsw_sp_kvdl_free(mlxsw_sp, fib_entry->decap.tunnel_index); + mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ, + 1, fib_entry->decap.tunnel_index); }
static struct mlxsw_sp_fib_node * @@@ -3167,9 -3165,8 +3167,9 @@@ static int mlxsw_sp_fix_adj_grp_size(st * by the device and make sure the request can be satisfied. */ mlxsw_sp_adj_grp_size_round_up(p_adj_grp_size); - err = mlxsw_sp_kvdl_alloc_size_query(mlxsw_sp, *p_adj_grp_size, - &alloc_size); + err = mlxsw_sp_kvdl_alloc_count_query(mlxsw_sp, + MLXSW_SP_KVDL_ENTRY_TYPE_ADJ, + *p_adj_grp_size, &alloc_size); if (err) return err; /* It is possible the allocation results in more allocated @@@ -3281,8 -3278,7 +3281,8 @@@ mlxsw_sp_nexthop_group_refresh(struct m /* No valid allocation size available. */ goto set_trap;
- err = mlxsw_sp_kvdl_alloc(mlxsw_sp, ecmp_size, &adj_index); + err = mlxsw_sp_kvdl_alloc(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ, + ecmp_size, &adj_index); if (err) { /* We ran out of KVD linear space, just set the * trap and let everything flow through kernel. @@@ -3317,8 -3313,7 +3317,8 @@@
err = mlxsw_sp_adj_index_mass_update(mlxsw_sp, nh_grp, old_adj_index, old_ecmp_size); - mlxsw_sp_kvdl_free(mlxsw_sp, old_adj_index); + mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ, + old_ecmp_size, old_adj_index); if (err) { dev_warn(mlxsw_sp->bus_info->dev, "Failed to mass-update adjacency index for nexthop group.\n"); goto set_trap; @@@ -3340,8 -3335,7 +3340,8 @@@ set_trap if (err) dev_warn(mlxsw_sp->bus_info->dev, "Failed to set traps for fib entries.\n"); if (old_adj_index_valid) - mlxsw_sp_kvdl_free(mlxsw_sp, nh_grp->adj_index); + mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ, + nh_grp->ecmp_size, nh_grp->adj_index); }
static void __mlxsw_sp_nexthop_neigh_update(struct mlxsw_sp_nexthop *nh, @@@ -4762,6 -4756,12 +4762,12 @@@ static void mlxsw_sp_rt6_destroy(struc kfree(mlxsw_sp_rt6); }
+ static bool mlxsw_sp_fib6_rt_can_mp(const struct fib6_info *rt) + { + /* RTF_CACHE routes are ignored */ + return (rt->fib6_flags & (RTF_GATEWAY | RTF_ADDRCONF)) == RTF_GATEWAY; + } + static struct fib6_info * mlxsw_sp_fib6_entry_rt(const struct mlxsw_sp_fib6_entry *fib6_entry) { @@@ -4771,11 -4771,11 +4777,11 @@@
static struct mlxsw_sp_fib6_entry * mlxsw_sp_fib6_node_mp_entry_find(const struct mlxsw_sp_fib_node *fib_node, - const struct fib6_info *nrt, bool append) + const struct fib6_info *nrt, bool replace) { struct mlxsw_sp_fib6_entry *fib6_entry;
- if (!append) + if (!mlxsw_sp_fib6_rt_can_mp(nrt) || replace) return NULL;
list_for_each_entry(fib6_entry, &fib_node->entry_list, common.list) { @@@ -4790,7 -4790,8 +4796,8 @@@ break; if (rt->fib6_metric < nrt->fib6_metric) continue; - if (rt->fib6_metric == nrt->fib6_metric) + if (rt->fib6_metric == nrt->fib6_metric && + mlxsw_sp_fib6_rt_can_mp(rt)) return fib6_entry; if (rt->fib6_metric > nrt->fib6_metric) break; @@@ -5169,7 -5170,7 +5176,7 @@@ static struct mlxsw_sp_fib6_entry mlxsw_sp_fib6_node_entry_find(const struct mlxsw_sp_fib_node *fib_node, const struct fib6_info *nrt, bool replace) { - struct mlxsw_sp_fib6_entry *fib6_entry; + struct mlxsw_sp_fib6_entry *fib6_entry, *fallback = NULL;
list_for_each_entry(fib6_entry, &fib_node->entry_list, common.list) { struct fib6_info *rt = mlxsw_sp_fib6_entry_rt(fib6_entry); @@@ -5178,13 -5179,18 +5185,18 @@@ continue; if (rt->fib6_table->tb6_id != nrt->fib6_table->tb6_id) break; - if (replace && rt->fib6_metric == nrt->fib6_metric) - return fib6_entry; + if (replace && rt->fib6_metric == nrt->fib6_metric) { + if (mlxsw_sp_fib6_rt_can_mp(rt) == + mlxsw_sp_fib6_rt_can_mp(nrt)) + return fib6_entry; + if (mlxsw_sp_fib6_rt_can_mp(nrt)) + fallback = fallback ?: fib6_entry; + } if (rt->fib6_metric > nrt->fib6_metric) - return fib6_entry; + return fallback ?: fib6_entry; }
- return NULL; + return fallback; }
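The lookup above now distinguishes multipath-capable routes on replace: it prefers an existing entry whose capability matches the new route and remembers the first acceptable entry as a fallback. A reduced sketch of that selection policy, with route and table details stripped and illustrative types; it mirrors the shape of mlxsw_sp_fib6_node_entry_find(), not its full logic:

```c
#include <stdio.h>
#include <stdbool.h>

struct entry {
	int metric;
	bool can_mp;	/* eligible for multipath grouping */
};

/* Return the index of the entry a new route (metric, can_mp) should
 * target, preferring an exact can_mp match at the same metric and
 * remembering the first same-metric candidate as a fallback when the
 * new route is multipath-capable. Returns -1 when no slot is found.
 */
static int find_replace_slot(const struct entry *e, int n,
			     int metric, bool can_mp)
{
	int fallback = -1;

	for (int i = 0; i < n; i++) {
		if (e[i].metric == metric) {
			if (e[i].can_mp == can_mp)
				return i;
			if (can_mp && fallback < 0)
				fallback = i;
		}
		if (e[i].metric > metric)
			return fallback;	/* may be -1 */
	}
	return fallback;
}

int main(void)
{
	struct entry list[] = {
		{ .metric = 100, .can_mp = false },
		{ .metric = 100, .can_mp = true  },
		{ .metric = 200, .can_mp = true  },
	};

	printf("%d\n", find_replace_slot(list, 3, 100, true));   /* 1 */
	printf("%d\n", find_replace_slot(list, 3, 100, false));  /* 0 */
	printf("%d\n", find_replace_slot(list, 3, 150, true));   /* -1 */
	return 0;
}
```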
static int @@@ -5310,8 -5316,7 +5322,7 @@@ static void mlxsw_sp_fib6_entry_replace }
static int mlxsw_sp_router_fib6_add(struct mlxsw_sp *mlxsw_sp, - struct fib6_info *rt, bool replace, - bool append) + struct fib6_info *rt, bool replace) { struct mlxsw_sp_fib6_entry *fib6_entry; struct mlxsw_sp_fib_node *fib_node; @@@ -5337,7 -5342,7 +5348,7 @@@ /* Before creating a new entry, try to append route to an existing * multipath entry. */ - fib6_entry = mlxsw_sp_fib6_node_mp_entry_find(fib_node, rt, append); + fib6_entry = mlxsw_sp_fib6_node_mp_entry_find(fib_node, rt, replace); if (fib6_entry) { err = mlxsw_sp_fib6_entry_nexthop_add(mlxsw_sp, fib6_entry, rt); if (err) @@@ -5345,14 -5350,6 +5356,6 @@@ return 0; }
- /* We received an append event, yet did not find any route to - * append to. - */ - if (WARN_ON(append)) { - err = -EINVAL; - goto err_fib6_entry_append; - } - fib6_entry = mlxsw_sp_fib6_entry_create(mlxsw_sp, fib_node, rt); if (IS_ERR(fib6_entry)) { err = PTR_ERR(fib6_entry); @@@ -5370,7 -5367,6 +5373,6 @@@ err_fib6_node_entry_link: mlxsw_sp_fib6_entry_destroy(mlxsw_sp, fib6_entry); err_fib6_entry_create: - err_fib6_entry_append: err_fib6_entry_nexthop_add: mlxsw_sp_fib_node_put(mlxsw_sp, fib_node); return err; @@@ -5721,7 -5717,7 +5723,7 @@@ static void mlxsw_sp_router_fib6_event_ struct mlxsw_sp_fib_event_work *fib_work = container_of(work, struct mlxsw_sp_fib_event_work, work); struct mlxsw_sp *mlxsw_sp = fib_work->mlxsw_sp; - bool replace, append; + bool replace; int err;
rtnl_lock(); @@@ -5732,10 -5728,8 +5734,8 @@@ case FIB_EVENT_ENTRY_APPEND: /* fall through */ case FIB_EVENT_ENTRY_ADD: replace = fib_work->event == FIB_EVENT_ENTRY_REPLACE; - append = fib_work->event == FIB_EVENT_ENTRY_APPEND; err = mlxsw_sp_router_fib6_add(mlxsw_sp, - fib_work->fen6_info.rt, replace, - append); + fib_work->fen6_info.rt, replace); if (err) mlxsw_sp_router_fib_abort(mlxsw_sp); mlxsw_sp_rt6_release(fib_work->fen6_info.rt); @@@ -5973,7 -5967,7 +5973,7 @@@ static int mlxsw_sp_router_fib_event(st return NOTIFY_DONE; }
-static struct mlxsw_sp_rif * +struct mlxsw_sp_rif * mlxsw_sp_rif_find_by_dev(const struct mlxsw_sp *mlxsw_sp, const struct net_device *dev) { @@@ -6030,12 -6024,6 +6030,12 @@@ mlxsw_sp_rif_should_config(struct mlxsw !list_empty(&inet6_dev->addr_list)) addr_list_empty = false;
+ /* macvlans do not have a RIF, but rather piggy back on the + * RIF of their lower device. + */ + if (netif_is_macvlan(dev) && addr_list_empty) + return true; + if (rif && addr_list_empty && !netif_is_l3_slave(rif->dev)) return true; @@@ -6137,11 -6125,6 +6137,11 @@@ const struct net_device *mlxsw_sp_rif_d return rif->dev; }
+struct mlxsw_sp_fid *mlxsw_sp_rif_fid(const struct mlxsw_sp_rif *rif) +{ + return rif->fid; +} + static struct mlxsw_sp_rif * mlxsw_sp_rif_create(struct mlxsw_sp *mlxsw_sp, const struct mlxsw_sp_rif_params *params, @@@ -6179,7 -6162,7 +6179,7 @@@ rif->ops = ops;
if (ops->fid_get) { - fid = ops->fid_get(rif); + fid = ops->fid_get(rif, extack); if (IS_ERR(fid)) { err = PTR_ERR(fid); goto err_fid_get; @@@ -6284,7 -6267,7 +6284,7 @@@ mlxsw_sp_port_vlan_router_join(struct m }
/* FID was already created, just take a reference */ - fid = rif->ops->fid_get(rif); + fid = rif->ops->fid_get(rif, extack); err = mlxsw_sp_fid_port_vid_map(fid, mlxsw_sp_port, vid); if (err) goto err_fid_port_vid_map; @@@ -6449,123 -6432,6 +6449,123 @@@ static int mlxsw_sp_inetaddr_vlan_event return 0; }
+static bool mlxsw_sp_rif_macvlan_is_vrrp4(const u8 *mac) +{ + u8 vrrp4[ETH_ALEN] = { 0x00, 0x00, 0x5e, 0x00, 0x01, 0x00 }; + u8 mask[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0x00 }; + + return ether_addr_equal_masked(mac, vrrp4, mask); +} + +static bool mlxsw_sp_rif_macvlan_is_vrrp6(const u8 *mac) +{ + u8 vrrp6[ETH_ALEN] = { 0x00, 0x00, 0x5e, 0x00, 0x02, 0x00 }; + u8 mask[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0x00 }; + + return ether_addr_equal_masked(mac, vrrp6, mask); +} + +static int mlxsw_sp_rif_vrrp_op(struct mlxsw_sp *mlxsw_sp, u16 rif_index, + const u8 *mac, bool adding) +{ + char ritr_pl[MLXSW_REG_RITR_LEN]; + u8 vrrp_id = adding ? mac[5] : 0; + int err; + + if (!mlxsw_sp_rif_macvlan_is_vrrp4(mac) && + !mlxsw_sp_rif_macvlan_is_vrrp6(mac)) + return 0; + + mlxsw_reg_ritr_rif_pack(ritr_pl, rif_index); + err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl); + if (err) + return err; + + if (mlxsw_sp_rif_macvlan_is_vrrp4(mac)) + mlxsw_reg_ritr_if_vrrp_id_ipv4_set(ritr_pl, vrrp_id); + else + mlxsw_reg_ritr_if_vrrp_id_ipv6_set(ritr_pl, vrrp_id); + + return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl); +} + +static int mlxsw_sp_rif_macvlan_add(struct mlxsw_sp *mlxsw_sp, + const struct net_device *macvlan_dev, + struct netlink_ext_ack *extack) +{ + struct macvlan_dev *vlan = netdev_priv(macvlan_dev); + struct mlxsw_sp_rif *rif; + int err; + + rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, vlan->lowerdev); + if (!rif) { + NL_SET_ERR_MSG_MOD(extack, "macvlan is only supported on top of router interfaces"); + return -EOPNOTSUPP; + } + + err = mlxsw_sp_rif_fdb_op(mlxsw_sp, macvlan_dev->dev_addr, + mlxsw_sp_fid_index(rif->fid), true); + if (err) + return err; + + err = mlxsw_sp_rif_vrrp_op(mlxsw_sp, rif->rif_index, + macvlan_dev->dev_addr, true); + if (err) + goto err_rif_vrrp_add; + + /* Make sure the bridge driver does not have this MAC pointing at + * some other port. + */ + if (rif->ops->fdb_del) + rif->ops->fdb_del(rif, macvlan_dev->dev_addr); + + return 0; + +err_rif_vrrp_add: + mlxsw_sp_rif_fdb_op(mlxsw_sp, macvlan_dev->dev_addr, + mlxsw_sp_fid_index(rif->fid), false); + return err; +} + +void mlxsw_sp_rif_macvlan_del(struct mlxsw_sp *mlxsw_sp, + const struct net_device *macvlan_dev) +{ + struct macvlan_dev *vlan = netdev_priv(macvlan_dev); + struct mlxsw_sp_rif *rif; + + rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, vlan->lowerdev); + /* If we do not have a RIF, then we already took care of + * removing the macvlan's MAC during RIF deletion. 
+ */ + if (!rif) + return; + mlxsw_sp_rif_vrrp_op(mlxsw_sp, rif->rif_index, macvlan_dev->dev_addr, + false); + mlxsw_sp_rif_fdb_op(mlxsw_sp, macvlan_dev->dev_addr, + mlxsw_sp_fid_index(rif->fid), false); +} + +static int mlxsw_sp_inetaddr_macvlan_event(struct net_device *macvlan_dev, + unsigned long event, + struct netlink_ext_ack *extack) +{ + struct mlxsw_sp *mlxsw_sp; + + mlxsw_sp = mlxsw_sp_lower_get(macvlan_dev); + if (!mlxsw_sp) + return 0; + + switch (event) { + case NETDEV_UP: + return mlxsw_sp_rif_macvlan_add(mlxsw_sp, macvlan_dev, extack); + case NETDEV_DOWN: + mlxsw_sp_rif_macvlan_del(mlxsw_sp, macvlan_dev); + break; + } + + return 0; +} + static int __mlxsw_sp_inetaddr_event(struct net_device *dev, unsigned long event, struct netlink_ext_ack *extack) @@@ -6578,8 -6444,6 +6578,8 @@@ return mlxsw_sp_inetaddr_bridge_event(dev, event, extack); else if (is_vlan_dev(dev)) return mlxsw_sp_inetaddr_vlan_event(dev, event, extack); + else if (netif_is_macvlan(dev)) + return mlxsw_sp_inetaddr_macvlan_event(dev, event, extack); else return 0; } @@@ -6820,10 -6684,7 +6820,10 @@@ int mlxsw_sp_netdevice_vrf_event(struc struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(l3_dev); int err = 0;
- if (!mlxsw_sp) + /* We do not create a RIF for a macvlan, but only use it to + * direct more MAC addresses to the router. + */ + if (!mlxsw_sp || netif_is_macvlan(l3_dev)) return 0;
switch (event) { @@@ -6844,27 -6705,6 +6844,27 @@@ return err; }
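The macvlan support added in the hunks above special-cases VRRP virtual router MACs, 00:00:5e:00:01:XX for IPv4 and 00:00:5e:00:02:XX for IPv6, where XX is the VRID, so the comparison masks off the final octet. A standalone sketch; ether_addr_equal_masked() is reimplemented here for illustration:

```c
#include <stdio.h>
#include <stdint.h>

/* compare two MACs under a per-octet mask, like the kernel helper */
static int ether_addr_equal_masked(const uint8_t *a, const uint8_t *b,
				   const uint8_t *mask)
{
	for (int i = 0; i < 6; i++)
		if ((a[i] ^ b[i]) & mask[i])
			return 0;
	return 1;
}

static int is_vrrp4(const uint8_t *mac)
{
	static const uint8_t vrrp4[6] = { 0x00, 0x00, 0x5e, 0x00, 0x01, 0x00 };
	static const uint8_t mask[6]  = { 0xff, 0xff, 0xff, 0xff, 0xff, 0x00 };

	/* last octet is the VRID and therefore ignored */
	return ether_addr_equal_masked(mac, vrrp4, mask);
}

int main(void)
{
	uint8_t vrid7[6]  = { 0x00, 0x00, 0x5e, 0x00, 0x01, 0x07 };
	uint8_t random[6] = { 0x52, 0x54, 0x00, 0x12, 0x34, 0x56 };

	printf("vrid7:  %d\n", is_vrrp4(vrid7));   /* 1 */
	printf("random: %d\n", is_vrrp4(random));  /* 0 */
	return 0;
}
```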
+static int __mlxsw_sp_rif_macvlan_flush(struct net_device *dev, void *data) +{ + struct mlxsw_sp_rif *rif = data; + + if (!netif_is_macvlan(dev)) + return 0; + + return mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, dev->dev_addr, + mlxsw_sp_fid_index(rif->fid), false); +} + +static int mlxsw_sp_rif_macvlan_flush(struct mlxsw_sp_rif *rif) +{ + if (!netif_is_macvlan_port(rif->dev)) + return 0; + + netdev_warn(rif->dev, "Router interface is deleted. Upper macvlans will not work\n"); + return netdev_walk_all_upper_dev_rcu(rif->dev, + __mlxsw_sp_rif_macvlan_flush, rif); +} + static struct mlxsw_sp_rif_subport * mlxsw_sp_rif_subport_rif(const struct mlxsw_sp_rif *rif) { @@@ -6931,13 -6771,11 +6931,13 @@@ static void mlxsw_sp_rif_subport_deconf mlxsw_sp_fid_rif_set(fid, NULL); mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr, mlxsw_sp_fid_index(fid), false); + mlxsw_sp_rif_macvlan_flush(rif); mlxsw_sp_rif_subport_op(rif, false); }
static struct mlxsw_sp_fid * -mlxsw_sp_rif_subport_fid_get(struct mlxsw_sp_rif *rif) +mlxsw_sp_rif_subport_fid_get(struct mlxsw_sp_rif *rif, + struct netlink_ext_ack *extack) { return mlxsw_sp_fid_rfid_get(rif->mlxsw_sp, rif->rif_index); } @@@ -7019,7 -6857,6 +7019,7 @@@ static void mlxsw_sp_rif_vlan_deconfigu mlxsw_sp_fid_rif_set(fid, NULL); mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr, mlxsw_sp_fid_index(fid), false); + mlxsw_sp_rif_macvlan_flush(rif); mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC, mlxsw_sp_router_port(mlxsw_sp), false); mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC, @@@ -7028,49 -6865,19 +7028,49 @@@ }
static struct mlxsw_sp_fid * -mlxsw_sp_rif_vlan_fid_get(struct mlxsw_sp_rif *rif) +mlxsw_sp_rif_vlan_fid_get(struct mlxsw_sp_rif *rif, + struct netlink_ext_ack *extack) { - u16 vid = is_vlan_dev(rif->dev) ? vlan_dev_vlan_id(rif->dev) : 1; + u16 vid; + int err; + + if (is_vlan_dev(rif->dev)) { + vid = vlan_dev_vlan_id(rif->dev); + } else { + err = br_vlan_get_pvid(rif->dev, &vid); + if (err < 0 || !vid) { + NL_SET_ERR_MSG_MOD(extack, "Couldn't determine bridge PVID"); + return ERR_PTR(-EINVAL); + } + }
return mlxsw_sp_fid_8021q_get(rif->mlxsw_sp, vid); }
+static void mlxsw_sp_rif_vlan_fdb_del(struct mlxsw_sp_rif *rif, const char *mac) +{ + u16 vid = mlxsw_sp_fid_8021q_vid(rif->fid); + struct switchdev_notifier_fdb_info info; + struct net_device *br_dev; + struct net_device *dev; + + br_dev = is_vlan_dev(rif->dev) ? vlan_dev_real_dev(rif->dev) : rif->dev; + dev = br_fdb_find_port(br_dev, mac, vid); + if (!dev) + return; + + info.addr = mac; + info.vid = vid; + call_switchdev_notifiers(SWITCHDEV_FDB_DEL_TO_BRIDGE, dev, &info.info); +} + static const struct mlxsw_sp_rif_ops mlxsw_sp_rif_vlan_ops = { .type = MLXSW_SP_RIF_TYPE_VLAN, .rif_size = sizeof(struct mlxsw_sp_rif), .configure = mlxsw_sp_rif_vlan_configure, .deconfigure = mlxsw_sp_rif_vlan_deconfigure, .fid_get = mlxsw_sp_rif_vlan_fid_get, + .fdb_del = mlxsw_sp_rif_vlan_fdb_del, };
static int mlxsw_sp_rif_fid_configure(struct mlxsw_sp_rif *rif) @@@ -7122,7 -6929,6 +7122,7 @@@ static void mlxsw_sp_rif_fid_deconfigur mlxsw_sp_fid_rif_set(fid, NULL); mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr, mlxsw_sp_fid_index(fid), false); + mlxsw_sp_rif_macvlan_flush(rif); mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC, mlxsw_sp_router_port(mlxsw_sp), false); mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC, @@@ -7131,33 -6937,17 +7131,33 @@@ }
static struct mlxsw_sp_fid * -mlxsw_sp_rif_fid_fid_get(struct mlxsw_sp_rif *rif) +mlxsw_sp_rif_fid_fid_get(struct mlxsw_sp_rif *rif, + struct netlink_ext_ack *extack) { return mlxsw_sp_fid_8021d_get(rif->mlxsw_sp, rif->dev->ifindex); }
+static void mlxsw_sp_rif_fid_fdb_del(struct mlxsw_sp_rif *rif, const char *mac) +{ + struct switchdev_notifier_fdb_info info; + struct net_device *dev; + + dev = br_fdb_find_port(rif->dev, mac, 0); + if (!dev) + return; + + info.addr = mac; + info.vid = 0; + call_switchdev_notifiers(SWITCHDEV_FDB_DEL_TO_BRIDGE, dev, &info.info); +} + static const struct mlxsw_sp_rif_ops mlxsw_sp_rif_fid_ops = { .type = MLXSW_SP_RIF_TYPE_FID, .rif_size = sizeof(struct mlxsw_sp_rif), .configure = mlxsw_sp_rif_fid_configure, .deconfigure = mlxsw_sp_rif_fid_deconfigure, .fid_get = mlxsw_sp_rif_fid_fid_get, + .fdb_del = mlxsw_sp_rif_fid_fdb_del, };
static struct mlxsw_sp_rif_ipip_lb * diff --combined drivers/net/ethernet/qlogic/qed/qed_main.c index 158944aa6097,758a9a5127fa..dbe81310c0b6 --- a/drivers/net/ethernet/qlogic/qed/qed_main.c +++ b/drivers/net/ethernet/qlogic/qed/qed_main.c @@@ -371,7 -371,7 +371,7 @@@ static struct qed_dev *qed_probe(struc goto err2; }
 -	DP_INFO(cdev, "qed_probe completed successffuly\n");
 +	DP_INFO(cdev, "qed_probe completed successfully\n");
return cdev;
@@@ -2102,28 -2102,6 +2102,28 @@@ out return status; }
+static int qed_read_module_eeprom(struct qed_dev *cdev, char *buf, + u8 dev_addr, u32 offset, u32 len) +{ + struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); + struct qed_ptt *ptt; + int rc = 0; + + if (IS_VF(cdev)) + return 0; + + ptt = qed_ptt_acquire(hwfn); + if (!ptt) + return -EAGAIN; + + rc = qed_mcp_phy_sfp_read(hwfn, ptt, MFW_PORT(hwfn), dev_addr, + offset, len, buf); + + qed_ptt_release(hwfn, ptt); + + return rc; +} + static struct qed_selftest_ops qed_selftest_ops_pass = { .selftest_memory = &qed_selftest_memory, .selftest_interrupt = &qed_selftest_interrupt, @@@ -2166,7 -2144,6 +2166,7 @@@ const struct qed_common_ops qed_common_ .update_mac = &qed_update_mac, .update_mtu = &qed_update_mtu, .update_wol = &qed_update_wol, + .read_module_eeprom = &qed_read_module_eeprom, };
void qed_get_protocol_stats(struct qed_dev *cdev, diff --combined drivers/net/ethernet/qlogic/qed/qed_mcp.c index 62a220fce6e1,9d9e533bccdc..15f1b3294289 --- a/drivers/net/ethernet/qlogic/qed/qed_mcp.c +++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.c @@@ -592,6 -592,9 +592,9 @@@ int qed_mcp_nvm_wr_cmd(struct qed_hwfn *o_mcp_resp = mb_params.mcp_resp; *o_mcp_param = mb_params.mcp_param;
+ /* nvm_info needs to be updated */ + p_hwfn->nvm_info.valid = false; + return 0; }
@@@ -2463,55 -2466,6 +2466,55 @@@ out return rc; }
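The qed_mcp_phy_sfp_read() helper added just below reads transceiver EEPROM data through the management firmware in MAX_I2C_TRANSACTION_SIZE chunks, advancing by however many bytes each mailbox command actually returned. A userspace sketch of that loop shape; read_chunk() is a stand-in for the mailbox command, not a real qed API, and the 16-byte limit is illustrative:

```c
#include <stdio.h>
#include <string.h>

#define MAX_I2C_TRANSACTION_SIZE 16

/* pretend device: returns up to `len` bytes, reports how many it copied */
static int read_chunk(unsigned int offset, unsigned int len,
		      unsigned char *dst, unsigned int *copied)
{
	static const unsigned char eeprom[64] = "SFP module EEPROM contents";

	if (offset >= sizeof(eeprom))
		return -1;
	if (offset + len > sizeof(eeprom))
		len = sizeof(eeprom) - offset;
	memcpy(dst, eeprom + offset, len);
	*copied = len;
	return 0;
}

static int sfp_read(unsigned int addr, unsigned int len, unsigned char *buf)
{
	unsigned int offset = 0, bytes_left = len;

	while (bytes_left > 0) {
		unsigned int want = bytes_left < MAX_I2C_TRANSACTION_SIZE ?
				    bytes_left : MAX_I2C_TRANSACTION_SIZE;
		unsigned int got;

		if (read_chunk(addr + offset, want, buf + offset, &got))
			return -1;
		offset += got;	/* advance by what was actually read */
		bytes_left -= got;
	}
	return 0;
}

int main(void)
{
	unsigned char buf[26];

	if (sfp_read(0, sizeof(buf), buf) == 0)
		printf("%.26s\n", buf);
	return 0;
}
```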
 +int qed_mcp_phy_sfp_read(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
 +			 u32 port, u32 addr, u32 offset, u32 len, u8 *p_buf)
 +{
 +	u32 bytes_left, bytes_to_copy, buf_size, nvm_offset = 0;
 +	u32 resp, param;
 +	int rc;
 +
 +	nvm_offset |= (port << DRV_MB_PARAM_TRANSCEIVER_PORT_OFFSET) &
 +		       DRV_MB_PARAM_TRANSCEIVER_PORT_MASK;
 +	nvm_offset |= (addr << DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_OFFSET) &
 +		       DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_MASK;
 +
 +	addr = offset;
 +	offset = 0;
 +	bytes_left = len;
 +	while (bytes_left > 0) {
 +		bytes_to_copy = min_t(u32, bytes_left,
 +				      MAX_I2C_TRANSACTION_SIZE);
 +		nvm_offset &= (DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_MASK |
 +			       DRV_MB_PARAM_TRANSCEIVER_PORT_MASK);
 +		nvm_offset |= ((addr + offset) <<
 +			       DRV_MB_PARAM_TRANSCEIVER_OFFSET_OFFSET) &
 +			       DRV_MB_PARAM_TRANSCEIVER_OFFSET_MASK;
 +		nvm_offset |= (bytes_to_copy <<
 +			       DRV_MB_PARAM_TRANSCEIVER_SIZE_OFFSET) &
 +			       DRV_MB_PARAM_TRANSCEIVER_SIZE_MASK;
 +		rc = qed_mcp_nvm_rd_cmd(p_hwfn, p_ptt,
 +					DRV_MSG_CODE_TRANSCEIVER_READ,
 +					nvm_offset, &resp, &param, &buf_size,
 +					(u32 *)(p_buf + offset));
 +		if (rc) {
 +			DP_NOTICE(p_hwfn,
 +				  "Failed to send a transceiver read command to the MFW. rc = %d.\n",
 +				  rc);
 +			return rc;
 +		}
 +
 +		if (resp == FW_MSG_CODE_TRANSCEIVER_NOT_PRESENT)
 +			return -ENODEV;
 +		else if (resp != FW_MSG_CODE_TRANSCEIVER_DIAG_OK)
 +			return -EINVAL;
 +
 +		offset += buf_size;
 +		bytes_left -= buf_size;
 +	}
 +
 +	return 0;
 +}
 +
  int qed_mcp_bist_register_test(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
  {
  	u32 drv_mb_param = 0, rsp, param;
@@@ -2604,11 -2558,14 +2607,14 @@@ int qed_mcp_bist_nvm_get_image_att(stru
 int qed_mcp_nvm_info_populate(struct qed_hwfn *p_hwfn)
 {
-	struct qed_nvm_image_info *nvm_info = &p_hwfn->nvm_info;
+	struct qed_nvm_image_info nvm_info;
 	struct qed_ptt *p_ptt;
 	int rc;
 	u32 i;

+	if (p_hwfn->nvm_info.valid)
+		return 0;
+
 	p_ptt = qed_ptt_acquire(p_hwfn);
 	if (!p_ptt) {
 		DP_ERR(p_hwfn, "failed to acquire ptt\n");
@@@ -2616,29 -2573,29 +2622,29 @@@
 	}

 	/* Acquire from MFW the amount of available images */
-	nvm_info->num_images = 0;
+	nvm_info.num_images = 0;
 	rc = qed_mcp_bist_nvm_get_num_images(p_hwfn,
-					     p_ptt, &nvm_info->num_images);
+					     p_ptt, &nvm_info.num_images);
 	if (rc == -EOPNOTSUPP) {
 		DP_INFO(p_hwfn, "DRV_MSG_CODE_BIST_TEST is not supported\n");
 		goto out;
-	} else if (rc || !nvm_info->num_images) {
+	} else if (rc || !nvm_info.num_images) {
 		DP_ERR(p_hwfn, "Failed getting number of images\n");
 		goto err0;
 	}

-	nvm_info->image_att = kmalloc_array(nvm_info->num_images,
-					    sizeof(struct bist_nvm_image_att),
-					    GFP_KERNEL);
-	if (!nvm_info->image_att) {
+	nvm_info.image_att = kmalloc_array(nvm_info.num_images,
+					   sizeof(struct bist_nvm_image_att),
+					   GFP_KERNEL);
+	if (!nvm_info.image_att) {
 		rc = -ENOMEM;
 		goto err0;
 	}

 	/* Iterate over images and get their attributes */
-	for (i = 0; i < nvm_info->num_images; i++) {
+	for (i = 0; i < nvm_info.num_images; i++) {
 		rc = qed_mcp_bist_nvm_get_image_att(p_hwfn, p_ptt,
-						    &nvm_info->image_att[i], i);
+						    &nvm_info.image_att[i], i);
 		if (rc) {
 			DP_ERR(p_hwfn,
 			       "Failed getting image index %d attributes\n", i);
@@@ -2646,14 -2603,22 +2652,22 @@@
 		}

 		DP_VERBOSE(p_hwfn, QED_MSG_SP, "image index %d, size %x\n", i,
-			   nvm_info->image_att[i].len);
+			   nvm_info.image_att[i].len);
 	}
 out:
+	/* Update hwfn's nvm_info */
+	if (nvm_info.num_images) {
+		p_hwfn->nvm_info.num_images = nvm_info.num_images;
+		kfree(p_hwfn->nvm_info.image_att);
+		p_hwfn->nvm_info.image_att = nvm_info.image_att;
+		p_hwfn->nvm_info.valid = true;
+	}
+
 	qed_ptt_release(p_hwfn, p_ptt);
 	return 0;

 err1:
-	kfree(nvm_info->image_att);
+	kfree(nvm_info.image_att);
 err0:
 	qed_ptt_release(p_hwfn, p_ptt);
 	return rc;
@@@ -2690,6 -2655,7 +2704,7 @@@ qed_mcp_get_nvm_image_att(struct qed_hw
 		return -EINVAL;
 	}
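Together with the qed_mcp_nvm_wr_cmd() hunk further up and the qed_mcp_nvm_info_populate() call added just below, this turns the per-hwfn NVM image table into a lazily refreshed cache: populate is a no-op while the cache is valid, and any NVM write clears the valid bit so the next reader refreshes it. The scheme reduced to a sketch, with stand-in names and no locking:

struct nvm_cache {
	bool valid;
	int num_images;			/* stand-in for the attribute table */
};

static int nvm_cache_populate(struct nvm_cache *c)
{
	if (c->valid)
		return 0;		/* fresh; skip the firmware round-trip */

	c->num_images = query_firmware();	/* hypothetical helper */
	c->valid = true;
	return 0;
}

static void nvm_cache_invalidate(struct nvm_cache *c)
{
	c->valid = false;		/* mirrors qed_mcp_nvm_wr_cmd() */
}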
+ qed_mcp_nvm_info_populate(p_hwfn); for (i = 0; i < p_hwfn->nvm_info.num_images; i++) if (type == p_hwfn->nvm_info.image_att[i].image_type) break; diff --combined drivers/net/ethernet/realtek/r8169.c index 3d50378a11d7,a3f69901ac87..49a6e25ddc2b --- a/drivers/net/ethernet/realtek/r8169.c +++ b/drivers/net/ethernet/realtek/r8169.c @@@ -15,7 -15,7 +15,7 @@@ #include <linux/etherdevice.h> #include <linux/delay.h> #include <linux/ethtool.h> -#include <linux/mii.h> +#include <linux/phy.h> #include <linux/if_vlan.h> #include <linux/crc32.h> #include <linux/in.h> @@@ -25,6 -25,7 +25,6 @@@ #include <linux/dma-mapping.h> #include <linux/pm_runtime.h> #include <linux/firmware.h> -#include <linux/pci-aspm.h> #include <linux/prefetch.h> #include <linux/ipv6.h> #include <net/ip6_checksum.h> @@@ -34,6 -35,7 +34,6 @@@
#define RTL8169_VERSION "2.3LK-NAPI" #define MODULENAME "r8169" -#define PFX MODULENAME ": "
#define FIRMWARE_8168D_1 "rtl_nic/rtl8168d-1.fw" #define FIRMWARE_8168D_2 "rtl_nic/rtl8168d-2.fw" @@@ -55,6 -57,19 +55,6 @@@ #define FIRMWARE_8107E_1 "rtl_nic/rtl8107e-1.fw" #define FIRMWARE_8107E_2 "rtl_nic/rtl8107e-2.fw"
-#ifdef RTL8169_DEBUG -#define assert(expr) \ - if (!(expr)) { \ - printk( "Assertion failed! %s,%s,%s,line=%d\n", \ - #expr,__FILE__,__func__,__LINE__); \ - } -#define dprintk(fmt, args...) \ - do { printk(KERN_DEBUG PFX fmt, ## args); } while (0) -#else -#define assert(expr) do {} while (0) -#define dprintk(fmt, args...) do {} while (0) -#endif /* RTL8169_DEBUG */ - #define R8169_MSG_DEFAULT \ (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_IFUP | NETIF_MSG_IFDOWN)
@@@ -80,6 -95,7 +80,6 @@@ static const int multicast_filter_limi #define R8169_RX_RING_BYTES (NUM_RX_DESC * sizeof(struct RxDesc))
#define RTL8169_TX_TIMEOUT (6*HZ) -#define RTL8169_PHY_TIMEOUT (10*HZ)
/* write/read MMIO register */ #define RTL_W8(tp, reg, val8) writeb((val8), tp->mmio_addr + (reg)) @@@ -383,6 -399,12 +383,6 @@@ enum rtl_registers FuncForceEvent = 0xfc, };
-enum rtl8110_registers { - TBICSR = 0x64, - TBI_ANAR = 0x68, - TBI_LPAR = 0x6a, -}; - enum rtl8168_8101_registers { CSIDR = 0x64, CSIAR = 0x68, @@@ -549,6 -571,14 +549,6 @@@ enum rtl_register_content PMEStatus = (1 << 0), /* PME status can be reset by PCI RST# */ ASPM_en = (1 << 0), /* ASPM enable */
- /* TBICSR p.28 */ - TBIReset = 0x80000000, - TBILoopback = 0x40000000, - TBINwEnable = 0x20000000, - TBINwRestart = 0x10000000, - TBILinkOk = 0x02000000, - TBINwComplete = 0x01000000, - /* CPlusCmd p.31 */ EnableBist = (1 << 15), // 8168 8101 Mac_dbgo_oe = (1 << 14), // 8168 8101 @@@ -702,6 -732,7 +702,6 @@@ enum rtl_flag RTL_FLAG_TASK_ENABLED, RTL_FLAG_TASK_SLOW_PENDING, RTL_FLAG_TASK_RESET_PENDING, - RTL_FLAG_TASK_PHY_PENDING, RTL_FLAG_MAX };
@@@ -729,6 -760,7 +729,6 @@@ struct rtl8169_private dma_addr_t RxPhyAddr; void *Rx_databuff[NUM_RX_DESC]; /* Rx data buffers */ struct ring_info tx_skb[NUM_TX_DESC]; /* Tx data buffers */ - struct timer_list timer; u16 cp_cmd;
u16 event_slow; @@@ -744,7 -776,14 +744,7 @@@ void (*disable)(struct rtl8169_private *); } jumbo_ops;
- int (*set_speed)(struct net_device *, u8 aneg, u16 sp, u8 dpx, u32 adv); - int (*get_link_ksettings)(struct net_device *, - struct ethtool_link_ksettings *); - void (*phy_reset_enable)(struct rtl8169_private *tp); void (*hw_start)(struct rtl8169_private *tp); - unsigned int (*phy_reset_pending)(struct rtl8169_private *tp); - unsigned int (*link_ok)(struct rtl8169_private *tp); - int (*do_ioctl)(struct rtl8169_private *tp, struct mii_ioctl_data *data, int cmd); bool (*tso_csum)(struct rtl8169_private *, struct sk_buff *, u32 *);
struct { @@@ -753,8 -792,7 +753,8 @@@ struct work_struct work; } wk;
- struct mii_if_info mii; + unsigned supports_gmii:1; + struct mii_bus *mii_bus; dma_addr_t counters_phys_addr; struct rtl8169_counters *counters; struct rtl8169_tc_offsets tc_offset; @@@ -1105,6 -1143,21 +1105,6 @@@ static void rtl_w0w1_phy(struct rtl8169 rtl_writephy(tp, reg_addr, (val & ~m) | p); }
-static void rtl_mdio_write(struct net_device *dev, int phy_id, int location, - int val) -{ - struct rtl8169_private *tp = netdev_priv(dev); - - rtl_writephy(tp, location, val); -} - -static int rtl_mdio_read(struct net_device *dev, int phy_id, int location) -{ - struct rtl8169_private *tp = netdev_priv(dev); - - return rtl_readphy(tp, location); -} - DECLARE_RTL_COND(rtl_ephyar_cond) { return RTL_R32(tp, EPHYAR) & EPHYAR_FLAG; @@@ -1425,22 -1478,54 +1425,22 @@@ static void rtl8169_irq_mask_and_ack(st RTL_R8(tp, ChipCmd); }
-static unsigned int rtl8169_tbi_reset_pending(struct rtl8169_private *tp) -{ - return RTL_R32(tp, TBICSR) & TBIReset; -} - -static unsigned int rtl8169_xmii_reset_pending(struct rtl8169_private *tp) -{ - return rtl_readphy(tp, MII_BMCR) & BMCR_RESET; -} - -static unsigned int rtl8169_tbi_link_ok(struct rtl8169_private *tp) -{ - return RTL_R32(tp, TBICSR) & TBILinkOk; -} - -static unsigned int rtl8169_xmii_link_ok(struct rtl8169_private *tp) -{ - return RTL_R8(tp, PHYstatus) & LinkStatus; -} - -static void rtl8169_tbi_reset_enable(struct rtl8169_private *tp) -{ - RTL_W32(tp, TBICSR, RTL_R32(tp, TBICSR) | TBIReset); -} - -static void rtl8169_xmii_reset_enable(struct rtl8169_private *tp) -{ - unsigned int val; - - val = rtl_readphy(tp, MII_BMCR) | BMCR_RESET; - rtl_writephy(tp, MII_BMCR, val & 0xffff); -} - static void rtl_link_chg_patch(struct rtl8169_private *tp) { struct net_device *dev = tp->dev; + struct phy_device *phydev = dev->phydev;
if (!netif_running(dev)) return;
if (tp->mac_version == RTL_GIGA_MAC_VER_34 || tp->mac_version == RTL_GIGA_MAC_VER_38) { - if (RTL_R8(tp, PHYstatus) & _1000bpsF) { + if (phydev->speed == SPEED_1000) { rtl_eri_write(tp, 0x1bc, ERIAR_MASK_1111, 0x00000011, ERIAR_EXGMAC); rtl_eri_write(tp, 0x1dc, ERIAR_MASK_1111, 0x00000005, ERIAR_EXGMAC); - } else if (RTL_R8(tp, PHYstatus) & _100bps) { + } else if (phydev->speed == SPEED_100) { rtl_eri_write(tp, 0x1bc, ERIAR_MASK_1111, 0x0000001f, ERIAR_EXGMAC); rtl_eri_write(tp, 0x1dc, ERIAR_MASK_1111, 0x00000005, @@@ -1458,7 -1543,7 +1458,7 @@@ ERIAR_EXGMAC); } else if (tp->mac_version == RTL_GIGA_MAC_VER_35 || tp->mac_version == RTL_GIGA_MAC_VER_36) { - if (RTL_R8(tp, PHYstatus) & _1000bpsF) { + if (phydev->speed == SPEED_1000) { rtl_eri_write(tp, 0x1bc, ERIAR_MASK_1111, 0x00000011, ERIAR_EXGMAC); rtl_eri_write(tp, 0x1dc, ERIAR_MASK_1111, 0x00000005, @@@ -1470,7 -1555,7 +1470,7 @@@ ERIAR_EXGMAC); } } else if (tp->mac_version == RTL_GIGA_MAC_VER_37) { - if (RTL_R8(tp, PHYstatus) & _10bps) { + if (phydev->speed == SPEED_10) { rtl_eri_write(tp, 0x1d0, ERIAR_MASK_0011, 0x4d02, ERIAR_EXGMAC); rtl_eri_write(tp, 0x1dc, ERIAR_MASK_0011, 0x0060, @@@ -1482,6 -1567,25 +1482,6 @@@ } }
-static void rtl8169_check_link_status(struct net_device *dev, - struct rtl8169_private *tp) -{ - struct device *d = tp_to_dev(tp); - - if (tp->link_ok(tp)) { - rtl_link_chg_patch(tp); - /* This is to cancel a scheduled suspend if there's one. */ - pm_request_resume(d); - netif_carrier_on(dev); - if (net_ratelimit()) - netif_info(tp, ifup, dev, "link up\n"); - } else { - netif_carrier_off(dev); - netif_info(tp, ifdown, dev, "link down\n"); - pm_runtime_idle(d); - } -} - #define WAKE_ANY (WAKE_PHY | WAKE_MAGIC | WAKE_UCAST | WAKE_BCAST | WAKE_MCAST)
static u32 __rtl8169_get_wol(struct rtl8169_private *tp) @@@ -1522,11 -1626,21 +1522,11 @@@ static void rtl8169_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) { struct rtl8169_private *tp = netdev_priv(dev); - struct device *d = tp_to_dev(tp); - - pm_runtime_get_noresume(d);
rtl_lock_work(tp); - wol->supported = WAKE_ANY; - if (pm_runtime_active(d)) - wol->wolopts = __rtl8169_get_wol(tp); - else - wol->wolopts = tp->saved_wolopts; - + wol->wolopts = tp->saved_wolopts; rtl_unlock_work(tp); - - pm_runtime_put_noidle(d); }
static void __rtl8169_set_wol(struct rtl8169_private *tp, u32 wolopts) @@@ -1602,21 -1716,18 +1602,21 @@@ static int rtl8169_set_wol(struct net_d struct rtl8169_private *tp = netdev_priv(dev); struct device *d = tp_to_dev(tp);
+ if (wol->wolopts & ~WAKE_ANY) + return -EINVAL; + pm_runtime_get_noresume(d);
rtl_lock_work(tp);
+ tp->saved_wolopts = wol->wolopts; + if (pm_runtime_active(d)) - __rtl8169_set_wol(tp, wol->wolopts); - else - tp->saved_wolopts = wol->wolopts; + __rtl8169_set_wol(tp, tp->saved_wolopts);
rtl_unlock_work(tp);
- device_set_wakeup_enable(d, wol->wolopts); + device_set_wakeup_enable(d, tp->saved_wolopts);
pm_runtime_put_noidle(d);
@@@ -1648,6 -1759,124 +1648,6 @@@ static int rtl8169_get_regs_len(struct return R8169_REGS_SIZE; }
-static int rtl8169_set_speed_tbi(struct net_device *dev, - u8 autoneg, u16 speed, u8 duplex, u32 ignored) -{ - struct rtl8169_private *tp = netdev_priv(dev); - int ret = 0; - u32 reg; - - reg = RTL_R32(tp, TBICSR); - if ((autoneg == AUTONEG_DISABLE) && (speed == SPEED_1000) && - (duplex == DUPLEX_FULL)) { - RTL_W32(tp, TBICSR, reg & ~(TBINwEnable | TBINwRestart)); - } else if (autoneg == AUTONEG_ENABLE) - RTL_W32(tp, TBICSR, reg | TBINwEnable | TBINwRestart); - else { - netif_warn(tp, link, dev, - "incorrect speed setting refused in TBI mode\n"); - ret = -EOPNOTSUPP; - } - - return ret; -} - -static int rtl8169_set_speed_xmii(struct net_device *dev, - u8 autoneg, u16 speed, u8 duplex, u32 adv) -{ - struct rtl8169_private *tp = netdev_priv(dev); - int giga_ctrl, bmcr; - int rc = -EINVAL; - - rtl_writephy(tp, 0x1f, 0x0000); - - if (autoneg == AUTONEG_ENABLE) { - int auto_nego; - - auto_nego = rtl_readphy(tp, MII_ADVERTISE); - auto_nego &= ~(ADVERTISE_10HALF | ADVERTISE_10FULL | - ADVERTISE_100HALF | ADVERTISE_100FULL); - - if (adv & ADVERTISED_10baseT_Half) - auto_nego |= ADVERTISE_10HALF; - if (adv & ADVERTISED_10baseT_Full) - auto_nego |= ADVERTISE_10FULL; - if (adv & ADVERTISED_100baseT_Half) - auto_nego |= ADVERTISE_100HALF; - if (adv & ADVERTISED_100baseT_Full) - auto_nego |= ADVERTISE_100FULL; - - auto_nego |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM; - - giga_ctrl = rtl_readphy(tp, MII_CTRL1000); - giga_ctrl &= ~(ADVERTISE_1000FULL | ADVERTISE_1000HALF); - - /* The 8100e/8101e/8102e do Fast Ethernet only. */ - if (tp->mii.supports_gmii) { - if (adv & ADVERTISED_1000baseT_Half) - giga_ctrl |= ADVERTISE_1000HALF; - if (adv & ADVERTISED_1000baseT_Full) - giga_ctrl |= ADVERTISE_1000FULL; - } else if (adv & (ADVERTISED_1000baseT_Half | - ADVERTISED_1000baseT_Full)) { - netif_info(tp, link, dev, - "PHY does not support 1000Mbps\n"); - goto out; - } - - bmcr = BMCR_ANENABLE | BMCR_ANRESTART; - - rtl_writephy(tp, MII_ADVERTISE, auto_nego); - rtl_writephy(tp, MII_CTRL1000, giga_ctrl); - } else { - if (speed == SPEED_10) - bmcr = 0; - else if (speed == SPEED_100) - bmcr = BMCR_SPEED100; - else - goto out; - - if (duplex == DUPLEX_FULL) - bmcr |= BMCR_FULLDPLX; - } - - rtl_writephy(tp, MII_BMCR, bmcr); - - if (tp->mac_version == RTL_GIGA_MAC_VER_02 || - tp->mac_version == RTL_GIGA_MAC_VER_03) { - if ((speed == SPEED_100) && (autoneg != AUTONEG_ENABLE)) { - rtl_writephy(tp, 0x17, 0x2138); - rtl_writephy(tp, 0x0e, 0x0260); - } else { - rtl_writephy(tp, 0x17, 0x2108); - rtl_writephy(tp, 0x0e, 0x0000); - } - } - - rc = 0; -out: - return rc; -} - -static int rtl8169_set_speed(struct net_device *dev, - u8 autoneg, u16 speed, u8 duplex, u32 advertising) -{ - struct rtl8169_private *tp = netdev_priv(dev); - int ret; - - ret = tp->set_speed(dev, autoneg, speed, duplex, advertising); - if (ret < 0) - goto out; - - if (netif_running(dev) && (autoneg == AUTONEG_ENABLE) && - (advertising & ADVERTISED_1000baseT_Full) && - !pci_is_pcie(tp->pci_dev)) { - mod_timer(&tp->timer, jiffies + RTL8169_PHY_TIMEOUT); - } -out: - return ret; -} - static netdev_features_t rtl8169_fix_features(struct net_device *dev, netdev_features_t features) { @@@ -1711,6 -1940,76 +1711,6 @@@ static void rtl8169_rx_vlan_tag(struct __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), swab16(opts2 & 0xffff)); }
-static int rtl8169_get_link_ksettings_tbi(struct net_device *dev, - struct ethtool_link_ksettings *cmd) -{ - struct rtl8169_private *tp = netdev_priv(dev); - u32 status; - u32 supported, advertising; - - supported = - SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg | SUPPORTED_FIBRE; - cmd->base.port = PORT_FIBRE; - - status = RTL_R32(tp, TBICSR); - advertising = (status & TBINwEnable) ? ADVERTISED_Autoneg : 0; - cmd->base.autoneg = !!(status & TBINwEnable); - - cmd->base.speed = SPEED_1000; - cmd->base.duplex = DUPLEX_FULL; /* Always set */ - - ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported, - supported); - ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising, - advertising); - - return 0; -} - -static int rtl8169_get_link_ksettings_xmii(struct net_device *dev, - struct ethtool_link_ksettings *cmd) -{ - struct rtl8169_private *tp = netdev_priv(dev); - - mii_ethtool_get_link_ksettings(&tp->mii, cmd); - - return 0; -} - -static int rtl8169_get_link_ksettings(struct net_device *dev, - struct ethtool_link_ksettings *cmd) -{ - struct rtl8169_private *tp = netdev_priv(dev); - int rc; - - rtl_lock_work(tp); - rc = tp->get_link_ksettings(dev, cmd); - rtl_unlock_work(tp); - - return rc; -} - -static int rtl8169_set_link_ksettings(struct net_device *dev, - const struct ethtool_link_ksettings *cmd) -{ - struct rtl8169_private *tp = netdev_priv(dev); - int rc; - u32 advertising; - - if (!ethtool_convert_link_mode_to_legacy_u32(&advertising, - cmd->link_modes.advertising)) - return -EINVAL; - - del_timer_sync(&tp->timer); - - rtl_lock_work(tp); - rc = rtl8169_set_speed(dev, cmd->base.autoneg, cmd->base.speed, - cmd->base.duplex, advertising); - rtl_unlock_work(tp); - - return rc; -} - static void rtl8169_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *p) { @@@ -1886,6 -2185,13 +1886,6 @@@ static void rtl8169_get_strings(struct } }
-static int rtl8169_nway_reset(struct net_device *dev) -{ - struct rtl8169_private *tp = netdev_priv(dev); - - return mii_nway_restart(&tp->mii); -} - /* * Interrupt coalescing * @@@ -1958,7 -2264,7 +1958,7 @@@ static const struct rtl_coalesce_info * const struct rtl_coalesce_info *ci; int rc;
- rc = rtl8169_get_link_ksettings(dev, &ecmd); + rc = phy_ethtool_get_link_ksettings(dev, &ecmd); if (rc < 0) return ERR_PTR(rc);
@@@ -2116,9 -2422,9 +2116,9 @@@ static const struct ethtool_ops rtl8169 .get_sset_count = rtl8169_get_sset_count, .get_ethtool_stats = rtl8169_get_ethtool_stats, .get_ts_info = ethtool_op_get_ts_info, - .nway_reset = rtl8169_nway_reset, - .get_link_ksettings = rtl8169_get_link_ksettings, - .set_link_ksettings = rtl8169_set_link_ksettings, + .nway_reset = phy_ethtool_nway_reset, + .get_link_ksettings = phy_ethtool_get_link_ksettings, + .set_link_ksettings = phy_ethtool_set_link_ksettings, };
static void rtl8169_get_mac_version(struct rtl8169_private *tp, @@@ -2231,15 -2537,15 +2231,15 @@@ "unknown MAC, using family default\n"); tp->mac_version = default_version; } else if (tp->mac_version == RTL_GIGA_MAC_VER_42) { - tp->mac_version = tp->mii.supports_gmii ? + tp->mac_version = tp->supports_gmii ? RTL_GIGA_MAC_VER_42 : RTL_GIGA_MAC_VER_43; } else if (tp->mac_version == RTL_GIGA_MAC_VER_45) { - tp->mac_version = tp->mii.supports_gmii ? + tp->mac_version = tp->supports_gmii ? RTL_GIGA_MAC_VER_45 : RTL_GIGA_MAC_VER_47; } else if (tp->mac_version == RTL_GIGA_MAC_VER_46) { - tp->mac_version = tp->mii.supports_gmii ? + tp->mac_version = tp->supports_gmii ? RTL_GIGA_MAC_VER_46 : RTL_GIGA_MAC_VER_48; } @@@ -2247,7 -2553,7 +2247,7 @@@
static void rtl8169_print_mac_version(struct rtl8169_private *tp) { - dprintk("mac_version = 0x%02x\n", tp->mac_version); + netif_dbg(tp, drv, tp->dev, "mac_version = 0x%02x\n", tp->mac_version); }
struct phy_reg { @@@ -4099,16 -4405,62 +4099,16 @@@ static void rtl_hw_phy_config(struct ne } }
-static void rtl_phy_work(struct rtl8169_private *tp) -{ - struct timer_list *timer = &tp->timer; - unsigned long timeout = RTL8169_PHY_TIMEOUT; - - assert(tp->mac_version > RTL_GIGA_MAC_VER_01); - - if (tp->phy_reset_pending(tp)) { - /* - * A busy loop could burn quite a few cycles on nowadays CPU. - * Let's delay the execution of the timer for a few ticks. - */ - timeout = HZ/10; - goto out_mod_timer; - } - - if (tp->link_ok(tp)) - return; - - netif_dbg(tp, link, tp->dev, "PHY reset until link up\n"); - - tp->phy_reset_enable(tp); - -out_mod_timer: - mod_timer(timer, jiffies + timeout); -} - static void rtl_schedule_task(struct rtl8169_private *tp, enum rtl_flag flag) { if (!test_and_set_bit(flag, tp->wk.flags)) schedule_work(&tp->wk.work); }
-static void rtl8169_phy_timer(struct timer_list *t) -{ - struct rtl8169_private *tp = from_timer(tp, t, timer); - - rtl_schedule_task(tp, RTL_FLAG_TASK_PHY_PENDING); -} - -DECLARE_RTL_COND(rtl_phy_reset_cond) -{ - return tp->phy_reset_pending(tp); -} - -static void rtl8169_phy_reset(struct net_device *dev, - struct rtl8169_private *tp) -{ - tp->phy_reset_enable(tp); - rtl_msleep_loop_wait_low(tp, &rtl_phy_reset_cond, 1, 100); -} - static bool rtl_tbi_enabled(struct rtl8169_private *tp) { return (tp->mac_version == RTL_GIGA_MAC_VER_01) && - (RTL_R8(tp, PHYstatus) & TBI_Enable); + (RTL_R8(tp, PHYstatus) & TBI_Enable); }
static void rtl8169_init_phy(struct net_device *dev, struct rtl8169_private *tp) @@@ -4116,8 -4468,7 +4116,8 @@@ rtl_hw_phy_config(dev);
if (tp->mac_version <= RTL_GIGA_MAC_VER_06) { - dprintk("Set MAC Reg C+CR Offset 0x82h = 0x01h\n"); + netif_dbg(tp, drv, dev, + "Set MAC Reg C+CR Offset 0x82h = 0x01h\n"); RTL_W8(tp, 0x82, 0x01); }
@@@ -4127,18 -4478,23 +4127,18 @@@ pci_write_config_byte(tp->pci_dev, PCI_CACHE_LINE_SIZE, 0x08);
if (tp->mac_version == RTL_GIGA_MAC_VER_02) { - dprintk("Set MAC Reg C+CR Offset 0x82h = 0x01h\n"); + netif_dbg(tp, drv, dev, + "Set MAC Reg C+CR Offset 0x82h = 0x01h\n"); RTL_W8(tp, 0x82, 0x01); - dprintk("Set PHY Reg 0x0bh = 0x00h\n"); + netif_dbg(tp, drv, dev, + "Set PHY Reg 0x0bh = 0x00h\n"); rtl_writephy(tp, 0x0b, 0x0000); //w 0x0b 15 0 0 }
- rtl8169_phy_reset(dev, tp); + /* We may have called phy_speed_down before */ + phy_speed_up(dev->phydev);
- rtl8169_set_speed(dev, AUTONEG_ENABLE, SPEED_1000, DUPLEX_FULL, - ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | - ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | - (tp->mii.supports_gmii ? - ADVERTISED_1000baseT_Half | - ADVERTISED_1000baseT_Full : 0)); - - if (rtl_tbi_enabled(tp)) - netif_info(tp, link, dev, "TBI auto-negotiating\n"); + genphy_soft_reset(dev->phydev); }
static void rtl_rar_set(struct rtl8169_private *tp, u8 *addr) @@@ -4183,10 -4539,34 +4183,10 @@@ static int rtl_set_mac_address(struct n
static int rtl8169_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) { - struct rtl8169_private *tp = netdev_priv(dev); - struct mii_ioctl_data *data = if_mii(ifr); - - return netif_running(dev) ? tp->do_ioctl(tp, data, cmd) : -ENODEV; -} - -static int rtl_xmii_ioctl(struct rtl8169_private *tp, - struct mii_ioctl_data *data, int cmd) -{ - switch (cmd) { - case SIOCGMIIPHY: - data->phy_id = 32; /* Internal PHY */ - return 0; - - case SIOCGMIIREG: - data->val_out = rtl_readphy(tp, data->reg_num & 0x1f); - return 0; - - case SIOCSMIIREG: - rtl_writephy(tp, data->reg_num & 0x1f, data->val_in); - return 0; - } - return -EOPNOTSUPP; -} + if (!netif_running(dev)) + return -ENODEV;
-static int rtl_tbi_ioctl(struct rtl8169_private *tp, struct mii_ioctl_data *data, int cmd) -{ - return -EOPNOTSUPP; + return phy_mii_ioctl(dev->phydev, ifr, cmd); }
static void rtl_init_mdio_ops(struct rtl8169_private *tp) @@@ -4214,6 -4594,30 +4214,6 @@@ } }
-static void rtl_speed_down(struct rtl8169_private *tp) -{ - u32 adv; - int lpa; - - rtl_writephy(tp, 0x1f, 0x0000); - lpa = rtl_readphy(tp, MII_LPA); - - if (lpa & (LPA_10HALF | LPA_10FULL)) - adv = ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full; - else if (lpa & (LPA_100HALF | LPA_100FULL)) - adv = ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | - ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full; - else - adv = ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | - ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | - (tp->mii.supports_gmii ? - ADVERTISED_1000baseT_Half | - ADVERTISED_1000baseT_Full : 0); - - rtl8169_set_speed(tp->dev, AUTONEG_ENABLE, SPEED_1000, DUPLEX_FULL, - adv); -} - static void rtl_wol_suspend_quirk(struct rtl8169_private *tp) { switch (tp->mac_version) { @@@ -4235,15 -4639,56 +4235,15 @@@
static bool rtl_wol_pll_power_down(struct rtl8169_private *tp) { - if (!(__rtl8169_get_wol(tp) & WAKE_ANY)) + if (!netif_running(tp->dev) || !__rtl8169_get_wol(tp)) return false;
- rtl_speed_down(tp); + phy_speed_down(tp->dev->phydev, false); rtl_wol_suspend_quirk(tp);
return true; }
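rtl_speed_down()'s hand-rolled MII writes are replaced by phylib's phy_speed_down(), which negotiates the lowest speed both link partners advertise before the chip enters a wake-capable low-power state; phy_speed_up() in rtl8169_init_phy() undoes it on the way back. A fragment sketch of the pairing, assuming a phylib-attached netdev:

	/* suspend path: drop to the slowest common speed; 'false' means
	 * do not block waiting for autonegotiation to finish */
	phy_speed_down(dev->phydev, false);

	/* resume path: restore the previously advertised speeds */
	phy_speed_up(dev->phydev);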
-static void r8168_phy_power_up(struct rtl8169_private *tp) -{ - rtl_writephy(tp, 0x1f, 0x0000); - switch (tp->mac_version) { - case RTL_GIGA_MAC_VER_11: - case RTL_GIGA_MAC_VER_12: - case RTL_GIGA_MAC_VER_17 ... RTL_GIGA_MAC_VER_28: - case RTL_GIGA_MAC_VER_31: - rtl_writephy(tp, 0x0e, 0x0000); - break; - default: - break; - } - rtl_writephy(tp, MII_BMCR, BMCR_ANENABLE); - - /* give MAC/PHY some time to resume */ - msleep(20); -} - -static void r8168_phy_power_down(struct rtl8169_private *tp) -{ - rtl_writephy(tp, 0x1f, 0x0000); - switch (tp->mac_version) { - case RTL_GIGA_MAC_VER_32: - case RTL_GIGA_MAC_VER_33: - case RTL_GIGA_MAC_VER_40: - case RTL_GIGA_MAC_VER_41: - rtl_writephy(tp, MII_BMCR, BMCR_ANENABLE | BMCR_PDOWN); - break; - - case RTL_GIGA_MAC_VER_11: - case RTL_GIGA_MAC_VER_12: - case RTL_GIGA_MAC_VER_17 ... RTL_GIGA_MAC_VER_28: - case RTL_GIGA_MAC_VER_31: - rtl_writephy(tp, 0x0e, 0x0200); - default: - rtl_writephy(tp, MII_BMCR, BMCR_PDOWN); - break; - } -} - static void r8168_pll_power_down(struct rtl8169_private *tp) { if (r8168_check_dash(tp)) @@@ -4256,6 -4701,8 +4256,6 @@@ if (rtl_wol_pll_power_down(tp)) return;
- r8168_phy_power_down(tp); - switch (tp->mac_version) { case RTL_GIGA_MAC_VER_25 ... RTL_GIGA_MAC_VER_33: case RTL_GIGA_MAC_VER_37: @@@ -4307,9 -4754,7 +4307,9 @@@ static void r8168_pll_power_up(struct r break; }
- r8168_phy_power_up(tp); + phy_resume(tp->dev->phydev); + /* give MAC/PHY some time to resume */ + msleep(20); }
static void rtl_pll_power_down(struct rtl8169_private *tp) @@@ -4727,8 -5172,8 +4727,8 @@@ static void rtl_hw_start_8169(struct rt
if (tp->mac_version == RTL_GIGA_MAC_VER_02 || tp->mac_version == RTL_GIGA_MAC_VER_03) { - dprintk("Set MAC Reg C+CR Offset 0xe0. " - "Bit-3 and bit-14 MUST be 1\n"); + netif_dbg(tp, drv, tp->dev, + "Set MAC Reg C+CR Offset 0xe0. Bit 3 and Bit 14 MUST be 1\n"); tp->cp_cmd |= (1 << 14); }
@@@ -4791,7 -5236,12 +4791,7 @@@ static void rtl_csi_access_enable(struc rtl_csi_write(tp, 0x070c, csi | val << 24); }
-static void rtl_csi_access_enable_1(struct rtl8169_private *tp) -{ - rtl_csi_access_enable(tp, 0x17); -} - -static void rtl_csi_access_enable_2(struct rtl8169_private *tp) +static void rtl_set_def_aspm_entry_latency(struct rtl8169_private *tp) { rtl_csi_access_enable(tp, 0x27); } @@@ -4840,17 -5290,6 +4840,17 @@@ static void rtl_pcie_state_l2l3_enable( RTL_W8(tp, Config3, data); }
+static void rtl_hw_aspm_clkreq_enable(struct rtl8169_private *tp, bool enable)
+{
+	if (enable) {
+		RTL_W8(tp, Config2, RTL_R8(tp, Config2) | ClkReqEn);
+		RTL_W8(tp, Config5, RTL_R8(tp, Config5) | ASPM_en);
+	} else {
+		RTL_W8(tp, Config2, RTL_R8(tp, Config2) & ~ClkReqEn);
+		RTL_W8(tp, Config5, RTL_R8(tp, Config5) & ~ASPM_en);
+	}
+}
+
 static void rtl_hw_start_8168bb(struct rtl8169_private *tp)
 {
 	RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Beacon_en);
@@@ -4898,7 -5337,7 +4898,7 @@@ static void rtl_hw_start_8168cp_1(struc
 		{ 0x07, 0,	0x2000 }
 	};
- rtl_csi_access_enable_2(tp); + rtl_set_def_aspm_entry_latency(tp);
rtl_ephy_init(tp, e_info_8168cp, ARRAY_SIZE(e_info_8168cp));
@@@ -4907,7 -5346,7 +4907,7 @@@
static void rtl_hw_start_8168cp_2(struct rtl8169_private *tp) { - rtl_csi_access_enable_2(tp); + rtl_set_def_aspm_entry_latency(tp);
RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Beacon_en);
@@@ -4920,7 -5359,7 +4920,7 @@@
static void rtl_hw_start_8168cp_3(struct rtl8169_private *tp) { - rtl_csi_access_enable_2(tp); + rtl_set_def_aspm_entry_latency(tp);
RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Beacon_en);
@@@ -4944,7 -5383,7 +4944,7 @@@ static void rtl_hw_start_8168c_1(struc { 0x06, 0x0080, 0x0000 } };
- rtl_csi_access_enable_2(tp); + rtl_set_def_aspm_entry_latency(tp);
RTL_W8(tp, DBG_REG, 0x06 | FIX_NAK_1 | FIX_NAK_2);
@@@ -4960,7 -5399,7 +4960,7 @@@ static void rtl_hw_start_8168c_2(struc { 0x03, 0x0400, 0x0220 } };
- rtl_csi_access_enable_2(tp); + rtl_set_def_aspm_entry_latency(tp);
rtl_ephy_init(tp, e_info_8168c_2, ARRAY_SIZE(e_info_8168c_2));
@@@ -4974,14 -5413,14 +4974,14 @@@ static void rtl_hw_start_8168c_3(struc
static void rtl_hw_start_8168c_4(struct rtl8169_private *tp) { - rtl_csi_access_enable_2(tp); + rtl_set_def_aspm_entry_latency(tp);
__rtl_hw_start_8168cp(tp); }
static void rtl_hw_start_8168d(struct rtl8169_private *tp) { - rtl_csi_access_enable_2(tp); + rtl_set_def_aspm_entry_latency(tp);
rtl_disable_clock_request(tp);
@@@ -4996,7 -5435,7 +4996,7 @@@
static void rtl_hw_start_8168dp(struct rtl8169_private *tp) { - rtl_csi_access_enable_1(tp); + rtl_set_def_aspm_entry_latency(tp);
if (tp->dev->mtu <= ETH_DATA_LEN) rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B); @@@ -5014,7 -5453,7 +5014,7 @@@ static void rtl_hw_start_8168d_4(struc { 0x0c, 0x0100, 0x0020 } };
- rtl_csi_access_enable_1(tp); + rtl_set_def_aspm_entry_latency(tp);
rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B);
@@@ -5043,7 -5482,7 +5043,7 @@@ static void rtl_hw_start_8168e_1(struc { 0x0a, 0x0000, 0x0040 } };
- rtl_csi_access_enable_2(tp); + rtl_set_def_aspm_entry_latency(tp);
rtl_ephy_init(tp, e_info_8168e_1, ARRAY_SIZE(e_info_8168e_1));
@@@ -5068,7 -5507,7 +5068,7 @@@ static void rtl_hw_start_8168e_2(struc { 0x19, 0x0000, 0x0224 } };
- rtl_csi_access_enable_1(tp); + rtl_set_def_aspm_entry_latency(tp);
rtl_ephy_init(tp, e_info_8168e_2, ARRAY_SIZE(e_info_8168e_2));
@@@ -5097,13 -5536,11 +5097,13 @@@ RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) | PFM_EN); RTL_W32(tp, MISC, RTL_R32(tp, MISC) | PWM_EN); RTL_W8(tp, Config5, RTL_R8(tp, Config5) & ~Spi_en); + + rtl_hw_aspm_clkreq_enable(tp, true); }
static void rtl_hw_start_8168f(struct rtl8169_private *tp) { - rtl_csi_access_enable_2(tp); + rtl_set_def_aspm_entry_latency(tp);
rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B);
@@@ -5174,7 -5611,7 +5174,7 @@@ static void rtl_hw_start_8168g(struct r rtl_eri_write(tp, 0xd0, ERIAR_MASK_0001, 0x48, ERIAR_EXGMAC); rtl_eri_write(tp, 0xe8, ERIAR_MASK_1111, 0x00100006, ERIAR_EXGMAC);
- rtl_csi_access_enable_1(tp); + rtl_set_def_aspm_entry_latency(tp);
rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B);
@@@ -5209,9 -5646,9 +5209,9 @@@ static void rtl_hw_start_8168g_1(struc rtl_hw_start_8168g(tp);
/* disable aspm and clock request before access ephy */ - RTL_W8(tp, Config2, RTL_R8(tp, Config2) & ~ClkReqEn); - RTL_W8(tp, Config5, RTL_R8(tp, Config5) & ~ASPM_en); + rtl_hw_aspm_clkreq_enable(tp, false); rtl_ephy_init(tp, e_info_8168g_1, ARRAY_SIZE(e_info_8168g_1)); + rtl_hw_aspm_clkreq_enable(tp, true); }
static void rtl_hw_start_8168g_2(struct rtl8169_private *tp) @@@ -5244,9 -5681,9 +5244,9 @@@ static void rtl_hw_start_8411_2(struct rtl_hw_start_8168g(tp);
/* disable aspm and clock request before access ephy */ - RTL_W8(tp, Config2, RTL_R8(tp, Config2) & ~ClkReqEn); - RTL_W8(tp, Config5, RTL_R8(tp, Config5) & ~ASPM_en); + rtl_hw_aspm_clkreq_enable(tp, false); rtl_ephy_init(tp, e_info_8411_2, ARRAY_SIZE(e_info_8411_2)); + rtl_hw_aspm_clkreq_enable(tp, true); }
static void rtl_hw_start_8168h_1(struct rtl8169_private *tp) @@@ -5263,7 -5700,8 +5263,7 @@@ };
/* disable aspm and clock request before access ephy */ - RTL_W8(tp, Config2, RTL_R8(tp, Config2) & ~ClkReqEn); - RTL_W8(tp, Config5, RTL_R8(tp, Config5) & ~ASPM_en); + rtl_hw_aspm_clkreq_enable(tp, false); rtl_ephy_init(tp, e_info_8168h_1, ARRAY_SIZE(e_info_8168h_1));
RTL_W32(tp, TxConfig, RTL_R32(tp, TxConfig) | TXCFG_AUTO_FIFO); @@@ -5273,7 -5711,7 +5273,7 @@@ rtl_eri_write(tp, 0xd0, ERIAR_MASK_0001, 0x48, ERIAR_EXGMAC); rtl_eri_write(tp, 0xe8, ERIAR_MASK_1111, 0x00100006, ERIAR_EXGMAC);
- rtl_csi_access_enable_1(tp); + rtl_set_def_aspm_entry_latency(tp);
rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B);
@@@ -5342,8 -5780,6 +5342,8 @@@ r8168_mac_ocp_write(tp, 0xe63e, 0x0000); r8168_mac_ocp_write(tp, 0xc094, 0x0000); r8168_mac_ocp_write(tp, 0xc09e, 0x0000); + + rtl_hw_aspm_clkreq_enable(tp, true); }
static void rtl_hw_start_8168ep(struct rtl8169_private *tp) @@@ -5357,7 -5793,7 +5357,7 @@@ rtl_eri_write(tp, 0xd0, ERIAR_MASK_0001, 0x5f, ERIAR_EXGMAC); rtl_eri_write(tp, 0xe8, ERIAR_MASK_1111, 0x00100006, ERIAR_EXGMAC);
- rtl_csi_access_enable_1(tp); + rtl_set_def_aspm_entry_latency(tp);
rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B);
@@@ -5395,12 -5831,11 +5395,12 @@@ static void rtl_hw_start_8168ep_1(struc };
/* disable aspm and clock request before access ephy */ - RTL_W8(tp, Config2, RTL_R8(tp, Config2) & ~ClkReqEn); - RTL_W8(tp, Config5, RTL_R8(tp, Config5) & ~ASPM_en); + rtl_hw_aspm_clkreq_enable(tp, false); rtl_ephy_init(tp, e_info_8168ep_1, ARRAY_SIZE(e_info_8168ep_1));
rtl_hw_start_8168ep(tp); + + rtl_hw_aspm_clkreq_enable(tp, true); }
static void rtl_hw_start_8168ep_2(struct rtl8169_private *tp) @@@ -5412,15 -5847,14 +5412,15 @@@ };
/* disable aspm and clock request before access ephy */ - RTL_W8(tp, Config2, RTL_R8(tp, Config2) & ~ClkReqEn); - RTL_W8(tp, Config5, RTL_R8(tp, Config5) & ~ASPM_en); + rtl_hw_aspm_clkreq_enable(tp, false); rtl_ephy_init(tp, e_info_8168ep_2, ARRAY_SIZE(e_info_8168ep_2));
rtl_hw_start_8168ep(tp);
RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) & ~PFM_EN); RTL_W8(tp, MISC_1, RTL_R8(tp, MISC_1) & ~PFM_D3COLD_EN); + + rtl_hw_aspm_clkreq_enable(tp, true); }
static void rtl_hw_start_8168ep_3(struct rtl8169_private *tp) @@@ -5434,7 -5868,8 +5434,7 @@@ };
/* disable aspm and clock request before access ephy */ - RTL_W8(tp, Config2, RTL_R8(tp, Config2) & ~ClkReqEn); - RTL_W8(tp, Config5, RTL_R8(tp, Config5) & ~ASPM_en); + rtl_hw_aspm_clkreq_enable(tp, false); rtl_ephy_init(tp, e_info_8168ep_3, ARRAY_SIZE(e_info_8168ep_3));
rtl_hw_start_8168ep(tp); @@@ -5454,8 -5889,6 +5454,8 @@@ data = r8168_mac_ocp_read(tp, 0xe860); data |= 0x0080; r8168_mac_ocp_write(tp, 0xe860, data); + + rtl_hw_aspm_clkreq_enable(tp, true); }
static void rtl_hw_start_8168(struct rtl8169_private *tp) @@@ -5573,9 -6006,8 +5573,9 @@@ break;
default: - printk(KERN_ERR PFX "%s: unknown chipset (mac_version = %d).\n", - tp->dev->name, tp->mac_version); + netif_err(tp, drv, tp->dev, + "unknown chipset (mac_version = %d)\n", + tp->mac_version); break; } } @@@ -5594,7 -6026,7 +5594,7 @@@ static void rtl_hw_start_8102e_1(struc }; u8 cfg1;
- rtl_csi_access_enable_2(tp); + rtl_set_def_aspm_entry_latency(tp);
RTL_W8(tp, DBG_REG, FIX_NAK_1);
@@@ -5613,7 -6045,7 +5613,7 @@@
static void rtl_hw_start_8102e_2(struct rtl8169_private *tp) { - rtl_csi_access_enable_2(tp); + rtl_set_def_aspm_entry_latency(tp);
rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B);
@@@ -5668,7 -6100,7 +5668,7 @@@ static void rtl_hw_start_8402(struct rt { 0x1e, 0, 0x4000 } };
- rtl_csi_access_enable_2(tp); + rtl_set_def_aspm_entry_latency(tp);
/* Force LAN exit from ASPM if Rx/Tx are not idle */ RTL_W32(tp, FuncEvent, RTL_R32(tp, FuncEvent) | 0x002800); @@@ -5952,6 -6384,7 +5952,6 @@@ static void rtl_reset_work(struct rtl81 napi_enable(&tp->napi); rtl_hw_start(tp); netif_wake_queue(dev); - rtl8169_check_link_status(dev, tp); }
static void rtl8169_tx_timeout(struct net_device *dev) @@@ -6568,7 -7001,7 +6568,7 @@@ static void rtl_slow_event_work(struct rtl8169_pcierr_interrupt(dev);
if (status & LinkChg) - rtl8169_check_link_status(dev, tp); + phy_mac_interrupt(dev->phydev);
rtl_irq_enable_all(tp); } @@@ -6582,6 -7015,7 +6582,6 @@@ static void rtl_task(struct work_struc /* XXX - keep rtl_slow_event_work() as first element. */ { RTL_FLAG_TASK_SLOW_PENDING, rtl_slow_event_work }, { RTL_FLAG_TASK_RESET_PENDING, rtl_reset_work }, - { RTL_FLAG_TASK_PHY_PENDING, rtl_phy_work } }; struct rtl8169_private *tp = container_of(work, struct rtl8169_private, wk.work); @@@ -6650,51 -7084,11 +6650,51 @@@ static void rtl8169_rx_missed(struct ne RTL_W32(tp, RxMissed, 0); }
+static void r8169_phylink_handler(struct net_device *ndev)
+{
+	struct rtl8169_private *tp = netdev_priv(ndev);
+
+	if (netif_carrier_ok(ndev)) {
+		rtl_link_chg_patch(tp);
+		pm_request_resume(&tp->pci_dev->dev);
+	} else {
+		pm_runtime_idle(&tp->pci_dev->dev);
+	}
+
+	if (net_ratelimit())
+		phy_print_status(ndev->phydev);
+}
+
+static int r8169_phy_connect(struct rtl8169_private *tp)
+{
+	struct phy_device *phydev = mdiobus_get_phy(tp->mii_bus, 0);
+	phy_interface_t phy_mode;
+	int ret;
+
+	phy_mode = tp->supports_gmii ? PHY_INTERFACE_MODE_GMII :
+		   PHY_INTERFACE_MODE_MII;
+
+	ret = phy_connect_direct(tp->dev, phydev, r8169_phylink_handler,
+				 phy_mode);
+	if (ret)
+		return ret;
+
+	if (!tp->supports_gmii)
+		phy_set_max_speed(phydev, SPEED_100);
+
+	/* Ensure to advertise everything, incl. pause */
+	phydev->advertising = phydev->supported;
+
+	phy_attached_info(phydev);
+
+	return 0;
+}
+
 static void rtl8169_down(struct net_device *dev)
 {
 	struct rtl8169_private *tp = netdev_priv(dev);
- del_timer_sync(&tp->timer); + phy_stop(dev->phydev);
napi_disable(&tp->napi); netif_stop_queue(dev); @@@ -6735,8 -7129,6 +6735,8 @@@ static int rtl8169_close(struct net_dev
cancel_work_sync(&tp->wk.work);
+ phy_disconnect(dev->phydev); + pci_free_irq(pdev, 0, tp);
dma_free_coherent(&pdev->dev, R8169_RX_RING_BYTES, tp->RxDescArray, @@@ -6797,10 -7189,6 +6797,10 @@@ static int rtl_open(struct net_device * if (retval < 0) goto err_release_fw_2;
+ retval = r8169_phy_connect(tp); + if (retval) + goto err_free_irq; + rtl_lock_work(tp);
set_bit(RTL_FLAG_TASK_ENABLED, tp->wk.flags); @@@ -6816,17 -7204,17 +6816,17 @@@ if (!rtl8169_init_counter_offsets(tp)) netif_warn(tp, hw, dev, "counter reset/update failed\n");
+ phy_start(dev->phydev); netif_start_queue(dev);
rtl_unlock_work(tp);
- tp->saved_wolopts = 0; pm_runtime_put_sync(&pdev->dev); - - rtl8169_check_link_status(dev, tp); out: return retval;
+err_free_irq: + pci_free_irq(pdev, 0, tp); err_release_fw_2: rtl_release_firmware(tp); rtl8169_rx_clear(tp); @@@ -6905,7 -7293,6 +6905,7 @@@ static void rtl8169_net_suspend(struct if (!netif_running(dev)) return;
+ phy_stop(dev->phydev); netif_device_detach(dev); netif_stop_queue(dev);
@@@ -6936,9 -7323,6 +6936,9 @@@ static void __rtl8169_resume(struct net netif_device_attach(dev);
rtl_pll_power_up(tp); + rtl8169_init_phy(dev, tp); + + phy_start(tp->dev->phydev);
rtl_lock_work(tp); napi_enable(&tp->napi); @@@ -6952,6 -7336,9 +6952,6 @@@ static int rtl8169_resume(struct devic { struct pci_dev *pdev = to_pci_dev(device); struct net_device *dev = pci_get_drvdata(pdev); - struct rtl8169_private *tp = netdev_priv(dev); - - rtl8169_init_phy(dev, tp);
if (netif_running(dev)) __rtl8169_resume(dev); @@@ -6965,10 -7352,13 +6965,10 @@@ static int rtl8169_runtime_suspend(stru struct net_device *dev = pci_get_drvdata(pdev); struct rtl8169_private *tp = netdev_priv(dev);
- if (!tp->TxDescArray) { - rtl_pll_power_down(tp); + if (!tp->TxDescArray) return 0; - }
rtl_lock_work(tp); - tp->saved_wolopts = __rtl8169_get_wol(tp); __rtl8169_set_wol(tp, WAKE_ANY); rtl_unlock_work(tp);
@@@ -6993,8 -7383,11 +6993,8 @@@ static int rtl8169_runtime_resume(struc
rtl_lock_work(tp); __rtl8169_set_wol(tp, tp->saved_wolopts); - tp->saved_wolopts = 0; rtl_unlock_work(tp);
- rtl8169_init_phy(dev, tp); - __rtl8169_resume(dev);
return 0; @@@ -7062,7 -7455,7 +7062,7 @@@ static void rtl_shutdown(struct pci_de rtl8169_hw_reset(tp);
if (system_state == SYSTEM_POWER_OFF) { - if (__rtl8169_get_wol(tp) & WAKE_ANY) { + if (tp->saved_wolopts) { rtl_wol_suspend_quirk(tp); rtl_wol_shutdown_quirk(tp); } @@@ -7083,7 -7476,6 +7083,7 @@@ static void rtl_remove_one(struct pci_d netif_napi_del(&tp->napi);
unregister_netdev(dev); + mdiobus_unregister(tp->mii_bus);
rtl_release_firmware(tp);
@@@ -7169,68 -7561,6 +7169,68 @@@ DECLARE_RTL_COND(rtl_rxtx_empty_cond return (RTL_R8(tp, MCU) & RXTX_EMPTY) == RXTX_EMPTY; }
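The MDIO bus registration below is the last piece of the phylib conversion. Putting it together with the hunks above, the PHY lifecycle the driver ends up with looks roughly like this (sketch with illustrative example_* names, error handling elided):

/* probe: r8169_mdio_register() registers the one-PHY bus and leaves
 * the PHY suspended until the interface is opened */
static int example_open(struct net_device *dev)
{
	int ret = r8169_phy_connect(netdev_priv(dev));

	if (ret)
		return ret;
	phy_start(dev->phydev);		/* wakes the PHY suspended at probe */
	return 0;
}

static int example_close(struct net_device *dev)
{
	phy_stop(dev->phydev);		/* mirrored in rtl8169_down() */
	phy_disconnect(dev->phydev);	/* mirrored in rtl8169_close() */
	return 0;
}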
+static int r8169_mdio_read_reg(struct mii_bus *mii_bus, int phyaddr, int phyreg)
+{
+	struct rtl8169_private *tp = mii_bus->priv;
+
+	if (phyaddr > 0)
+		return -ENODEV;
+
+	return rtl_readphy(tp, phyreg);
+}
+
+static int r8169_mdio_write_reg(struct mii_bus *mii_bus, int phyaddr,
+				int phyreg, u16 val)
+{
+	struct rtl8169_private *tp = mii_bus->priv;
+
+	if (phyaddr > 0)
+		return -ENODEV;
+
+	rtl_writephy(tp, phyreg, val);
+
+	return 0;
+}
+
+static int r8169_mdio_register(struct rtl8169_private *tp)
+{
+	struct pci_dev *pdev = tp->pci_dev;
+	struct phy_device *phydev;
+	struct mii_bus *new_bus;
+	int ret;
+
+	new_bus = devm_mdiobus_alloc(&pdev->dev);
+	if (!new_bus)
+		return -ENOMEM;
+
+	new_bus->name = "r8169";
+	new_bus->priv = tp;
+	new_bus->parent = &pdev->dev;
+	new_bus->irq[0] = PHY_IGNORE_INTERRUPT;
+	snprintf(new_bus->id, MII_BUS_ID_SIZE, "r8169-%x",
+		 PCI_DEVID(pdev->bus->number, pdev->devfn));
+
+	new_bus->read = r8169_mdio_read_reg;
+	new_bus->write = r8169_mdio_write_reg;
+
+	ret = mdiobus_register(new_bus);
+	if (ret)
+		return ret;
+
+	phydev = mdiobus_get_phy(new_bus, 0);
+	if (!phydev) {
+		mdiobus_unregister(new_bus);
+		return -ENODEV;
+	}
+
+	/* PHY will be woken up in rtl_open() */
+	phy_suspend(phydev);
+
+	tp->mii_bus = new_bus;
+
+	return 0;
+}
+
 static void rtl_hw_init_8168g(struct rtl8169_private *tp)
 {
 	u32 data;
@@@ -7288,6 -7618,7 +7288,6 @@@ static int rtl_init_one(struct pci_dev
 {
 	const struct rtl_cfg_info *cfg = rtl_cfg_infos + ent->driver_data;
 	struct rtl8169_private *tp;
-	struct mii_if_info *mii;
 	struct net_device *dev;
 	int chipset, region, i;
 	int rc;
/* enable device (incl. PCI PM wakeup and hotplug setup) */ rc = pcim_enable_device(pdev); @@@ -7346,11 -7689,6 +7346,11 @@@ /* Identify chip attached to board */ rtl8169_get_mac_version(tp, cfg->default_ver);
+ if (rtl_tbi_enabled(tp)) { + dev_err(&pdev->dev, "TBI fiber mode not supported\n"); + return -ENODEV; + } + tp->cp_cmd = RTL_R16(tp, CPlusCmd);
if ((sizeof(dma_addr_t) > 4) && @@@ -7399,6 -7737,22 +7399,6 @@@ /* override BIOS settings, use userspace tools to enable WOL */ __rtl8169_set_wol(tp, 0);
- if (rtl_tbi_enabled(tp)) { - tp->set_speed = rtl8169_set_speed_tbi; - tp->get_link_ksettings = rtl8169_get_link_ksettings_tbi; - tp->phy_reset_enable = rtl8169_tbi_reset_enable; - tp->phy_reset_pending = rtl8169_tbi_reset_pending; - tp->link_ok = rtl8169_tbi_link_ok; - tp->do_ioctl = rtl_tbi_ioctl; - } else { - tp->set_speed = rtl8169_set_speed_xmii; - tp->get_link_ksettings = rtl8169_get_link_ksettings_xmii; - tp->phy_reset_enable = rtl8169_xmii_reset_enable; - tp->phy_reset_pending = rtl8169_xmii_reset_pending; - tp->link_ok = rtl8169_xmii_link_ok; - tp->do_ioctl = rtl_xmii_ioctl; - } - mutex_init(&tp->wk.mutex); u64_stats_init(&tp->rx_stats.syncp); u64_stats_init(&tp->tx_stats.syncp); @@@ -7435,6 -7789,7 +7435,7 @@@ NETIF_F_HW_VLAN_CTAG_RX; dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO | NETIF_F_HIGHDMA; + dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
tp->cp_cmd |= RxChkSum | RxVlan;
@@@ -7469,6 -7824,8 +7470,6 @@@ tp->event_slow = cfg->event_slow; tp->coalesce_info = cfg->coalesce_info;
- timer_setup(&tp->timer, rtl8169_phy_timer, 0); - tp->rtl_fw = RTL_FIRMWARE_UNKNOWN;
tp->counters = dmam_alloc_coherent (&pdev->dev, sizeof(*tp->counters), @@@ -7479,17 -7836,10 +7480,17 @@@
pci_set_drvdata(pdev, dev);
- rc = register_netdev(dev); - if (rc < 0) + rc = r8169_mdio_register(tp); + if (rc) return rc;
+ /* chip gets powered up in rtl_open() */ + rtl_pll_power_down(tp); + + rc = register_netdev(dev); + if (rc) + goto err_mdio_unregister; + netif_info(tp, probe, dev, "%s, %pM, XID %08x, IRQ %d\n", rtl_chip_infos[chipset].name, dev->dev_addr, (u32)(RTL_R32(tp, TxConfig) & 0xfcf0f8ff), @@@ -7504,14 -7854,12 +7505,14 @@@ if (r8168_check_dash(tp)) rtl8168_driver_start(tp);
- netif_carrier_off(dev); - if (pci_dev_run_wake(pdev)) pm_runtime_put_sync(&pdev->dev);
return 0; + +err_mdio_unregister: + mdiobus_unregister(tp->mii_bus); + return rc; }
static struct pci_driver rtl8169_pci_driver = { diff --combined drivers/net/ethernet/renesas/ravb_main.c index f7e649c831ad,0d811c02ff34..c06f2df895c2 --- a/drivers/net/ethernet/renesas/ravb_main.c +++ b/drivers/net/ethernet/renesas/ravb_main.c @@@ -980,6 -980,13 +980,13 @@@ static void ravb_adjust_link(struct net struct ravb_private *priv = netdev_priv(ndev); struct phy_device *phydev = ndev->phydev; bool new_state = false; + unsigned long flags; + + spin_lock_irqsave(&priv->lock, flags); + + /* Disable TX and RX right over here, if E-MAC change is ignored */ + if (priv->no_avb_link) + ravb_rcv_snd_disable(ndev);
if (phydev->link) { if (phydev->duplex != priv->duplex) { @@@ -997,18 -1004,21 +1004,21 @@@ ravb_modify(ndev, ECMR, ECMR_TXF, 0); new_state = true; priv->link = phydev->link; - if (priv->no_avb_link) - ravb_rcv_snd_enable(ndev); } } else if (priv->link) { new_state = true; priv->link = 0; priv->speed = 0; priv->duplex = -1; - if (priv->no_avb_link) - ravb_rcv_snd_disable(ndev); }
+ /* Enable TX and RX right over here, if E-MAC change is ignored */ + if (priv->no_avb_link && phydev->link) + ravb_rcv_snd_enable(ndev); + + mmiowb(); + spin_unlock_irqrestore(&priv->lock, flags); + if (new_state && netif_msg_link(priv)) phy_print_status(phydev); } @@@ -1096,75 -1106,6 +1106,6 @@@ static int ravb_phy_start(struct net_de return 0; }
- static int ravb_get_link_ksettings(struct net_device *ndev, - struct ethtool_link_ksettings *cmd) - { - struct ravb_private *priv = netdev_priv(ndev); - unsigned long flags; - - if (!ndev->phydev) - return -ENODEV; - - spin_lock_irqsave(&priv->lock, flags); - phy_ethtool_ksettings_get(ndev->phydev, cmd); - spin_unlock_irqrestore(&priv->lock, flags); - - return 0; - } - - static int ravb_set_link_ksettings(struct net_device *ndev, - const struct ethtool_link_ksettings *cmd) - { - struct ravb_private *priv = netdev_priv(ndev); - unsigned long flags; - int error; - - if (!ndev->phydev) - return -ENODEV; - - spin_lock_irqsave(&priv->lock, flags); - - /* Disable TX and RX */ - ravb_rcv_snd_disable(ndev); - - error = phy_ethtool_ksettings_set(ndev->phydev, cmd); - if (error) - goto error_exit; - - if (cmd->base.duplex == DUPLEX_FULL) - priv->duplex = 1; - else - priv->duplex = 0; - - ravb_set_duplex(ndev); - - error_exit: - mdelay(1); - - /* Enable TX and RX */ - ravb_rcv_snd_enable(ndev); - - mmiowb(); - spin_unlock_irqrestore(&priv->lock, flags); - - return error; - } - - static int ravb_nway_reset(struct net_device *ndev) - { - struct ravb_private *priv = netdev_priv(ndev); - int error = -ENODEV; - unsigned long flags; - - if (ndev->phydev) { - spin_lock_irqsave(&priv->lock, flags); - error = phy_start_aneg(ndev->phydev); - spin_unlock_irqrestore(&priv->lock, flags); - } - - return error; - } - static u32 ravb_get_msglevel(struct net_device *ndev) { struct ravb_private *priv = netdev_priv(ndev); @@@ -1226,7 -1167,7 +1167,7 @@@ static int ravb_get_sset_count(struct n }
static void ravb_get_ethtool_stats(struct net_device *ndev, - struct ethtool_stats *stats, u64 *data) + struct ethtool_stats *estats, u64 *data) { struct ravb_private *priv = netdev_priv(ndev); int i = 0; @@@ -1258,7 -1199,7 +1199,7 @@@ static void ravb_get_strings(struct net { switch (stringset) { case ETH_SS_STATS: - memcpy(data, *ravb_gstrings_stats, sizeof(ravb_gstrings_stats)); + memcpy(data, ravb_gstrings_stats, sizeof(ravb_gstrings_stats)); break; } } @@@ -1377,7 -1318,7 +1318,7 @@@ static int ravb_set_wol(struct net_devi }
static const struct ethtool_ops ravb_ethtool_ops = { - .nway_reset = ravb_nway_reset, + .nway_reset = phy_ethtool_nway_reset, .get_msglevel = ravb_get_msglevel, .set_msglevel = ravb_set_msglevel, .get_link = ethtool_op_get_link, @@@ -1387,8 -1328,8 +1328,8 @@@ .get_ringparam = ravb_get_ringparam, .set_ringparam = ravb_set_ringparam, .get_ts_info = ravb_get_ts_info, - .get_link_ksettings = ravb_get_link_ksettings, - .set_link_ksettings = ravb_set_link_ksettings, + .get_link_ksettings = phy_ethtool_get_link_ksettings, + .set_link_ksettings = phy_ethtool_set_link_ksettings, .get_wol = ravb_get_wol, .set_wol = ravb_set_wol, }; @@@ -1623,7 -1564,7 +1564,7 @@@ static netdev_tx_t ravb_start_xmit(stru /* TAG and timestamp required flag */ skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; desc->tagh_tsr = (ts_skb->tag >> 4) | TX_TSR; - desc->ds_tagl |= le16_to_cpu(ts_skb->tag << 12); + desc->ds_tagl |= cpu_to_le16(ts_skb->tag << 12); }
skb_tx_timestamp(skb); @@@ -1656,8 -1597,7 +1597,8 @@@ drop }
static u16 ravb_select_queue(struct net_device *ndev, struct sk_buff *skb, - void *accel_priv, select_queue_fallback_t fallback) + struct net_device *sb_dev, + select_queue_fallback_t fallback) { /* If skb needs TX timestamp, it is handled in network control queue */ return (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) ? RAVB_NC : diff --combined drivers/net/ethernet/renesas/sh_eth.c index 71651e47660a,5614fd231bbe..314aeb5dd921 --- a/drivers/net/ethernet/renesas/sh_eth.c +++ b/drivers/net/ethernet/renesas/sh_eth.c @@@ -622,6 -622,7 +622,6 @@@ static struct sh_eth_cpu_data r7s72100_ .tpauser = 1, .hw_swap = 1, .rpadir = 1, - .rpadir_value = 2 << 16, .no_trimd = 1, .no_ade = 1, .xdfar_rw = 1, @@@ -671,6 -672,7 +671,6 @@@ static struct sh_eth_cpu_data r8a7740_d .bculr = 1, .hw_swap = 1, .rpadir = 1, - .rpadir_value = 2 << 16, .no_trimd = 1, .no_ade = 1, .xdfar_rw = 1, @@@ -796,6 -798,7 +796,6 @@@ static struct sh_eth_cpu_data r8a77980_ .hw_swap = 1, .nbst = 1, .rpadir = 1, - .rpadir_value = 2 << 16, .no_trimd = 1, .no_ade = 1, .xdfar_rw = 1, @@@ -848,6 -851,7 +848,6 @@@ static struct sh_eth_cpu_data sh7724_da .tpauser = 1, .hw_swap = 1, .rpadir = 1, - .rpadir_value = 0x00020000, /* NET_IP_ALIGN assumed to be 2 */ };
static void sh_eth_set_rate_sh7757(struct net_device *ndev) @@@ -894,6 -898,7 +894,6 @@@ static struct sh_eth_cpu_data sh7757_da .hw_swap = 1, .no_ade = 1, .rpadir = 1, - .rpadir_value = 2 << 16, .rtrate = 1, .dual_port = 1, }; @@@ -973,6 -978,7 +973,6 @@@ static struct sh_eth_cpu_data sh7757_da .bculr = 1, .hw_swap = 1, .rpadir = 1, - .rpadir_value = 2 << 16, .no_trimd = 1, .no_ade = 1, .xdfar_rw = 1, @@@ -1461,7 -1467,7 +1461,7 @@@ static int sh_eth_dev_init(struct net_d /* Descriptor format */ sh_eth_ring_format(ndev); if (mdp->cd->rpadir) - sh_eth_write(ndev, mdp->cd->rpadir_value, RPADIR); + sh_eth_write(ndev, NET_IP_ALIGN << 16, RPADIR);
/* all sh_eth int mask */ sh_eth_write(ndev, 0, EESIPR); @@@ -1521,9 -1527,9 +1521,9 @@@
/* mask reset */ if (mdp->cd->apr) - sh_eth_write(ndev, APR_AP, APR); + sh_eth_write(ndev, 1, APR); if (mdp->cd->mpr) - sh_eth_write(ndev, MPR_MP, MPR); + sh_eth_write(ndev, 1, MPR); if (mdp->cd->tpauser) sh_eth_write(ndev, TPAUSER_UNLIMITED, TPAUSER);
@@@ -1921,8 -1927,15 +1921,15 @@@ static void sh_eth_adjust_link(struct n { struct sh_eth_private *mdp = netdev_priv(ndev); struct phy_device *phydev = ndev->phydev; + unsigned long flags; int new_state = 0;
+ spin_lock_irqsave(&mdp->lock, flags); + + /* Disable TX and RX right over here, if E-MAC change is ignored */ + if (mdp->cd->no_psr || mdp->no_ether_link) + sh_eth_rcv_snd_disable(ndev); + if (phydev->link) { if (phydev->duplex != mdp->duplex) { new_state = 1; @@@ -1941,18 -1954,21 +1948,21 @@@ sh_eth_modify(ndev, ECMR, ECMR_TXF, 0); new_state = 1; mdp->link = phydev->link; - if (mdp->cd->no_psr || mdp->no_ether_link) - sh_eth_rcv_snd_enable(ndev); } } else if (mdp->link) { new_state = 1; mdp->link = 0; mdp->speed = 0; mdp->duplex = -1; - if (mdp->cd->no_psr || mdp->no_ether_link) - sh_eth_rcv_snd_disable(ndev); }
+ /* Enable TX and RX right over here, if E-MAC change is ignored */ + if ((mdp->cd->no_psr || mdp->no_ether_link) && phydev->link) + sh_eth_rcv_snd_enable(ndev); + + mmiowb(); + spin_unlock_irqrestore(&mdp->lock, flags); + if (new_state && netif_msg_link(mdp)) phy_print_status(phydev); } @@@ -2024,60 -2040,6 +2034,6 @@@ static int sh_eth_phy_start(struct net_ return 0; }
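ravb and sh_eth receive the same race fix: instead of toggling the receiver/transmitter from inside the link-state bookkeeping, the adjust_link callback now quiesces the MAC unconditionally under the driver lock on entry and re-enables it at the end only if the PHY reports link up. Reduced to its shape, with stand-in names for the priv struct, flag, and enable/disable helpers:

static void example_adjust_link(struct net_device *ndev)
{
	struct example_priv *priv = netdev_priv(ndev);
	struct phy_device *phydev = ndev->phydev;
	unsigned long flags;

	spin_lock_irqsave(&priv->lock, flags);

	if (priv->no_link_signal)
		example_rcv_snd_disable(ndev);	/* always quiesce first */

	/* ... propagate phydev->duplex / phydev->speed to the MAC ... */

	if (priv->no_link_signal && phydev->link)
		example_rcv_snd_enable(ndev);	/* back on only with link up */

	spin_unlock_irqrestore(&priv->lock, flags);
}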
- static int sh_eth_get_link_ksettings(struct net_device *ndev, - struct ethtool_link_ksettings *cmd) - { - struct sh_eth_private *mdp = netdev_priv(ndev); - unsigned long flags; - - if (!ndev->phydev) - return -ENODEV; - - spin_lock_irqsave(&mdp->lock, flags); - phy_ethtool_ksettings_get(ndev->phydev, cmd); - spin_unlock_irqrestore(&mdp->lock, flags); - - return 0; - } - - static int sh_eth_set_link_ksettings(struct net_device *ndev, - const struct ethtool_link_ksettings *cmd) - { - struct sh_eth_private *mdp = netdev_priv(ndev); - unsigned long flags; - int ret; - - if (!ndev->phydev) - return -ENODEV; - - spin_lock_irqsave(&mdp->lock, flags); - - /* disable tx and rx */ - sh_eth_rcv_snd_disable(ndev); - - ret = phy_ethtool_ksettings_set(ndev->phydev, cmd); - if (ret) - goto error_exit; - - if (cmd->base.duplex == DUPLEX_FULL) - mdp->duplex = 1; - else - mdp->duplex = 0; - - if (mdp->cd->set_duplex) - mdp->cd->set_duplex(ndev); - - error_exit: - mdelay(1); - - /* enable tx and rx */ - sh_eth_rcv_snd_enable(ndev); - - spin_unlock_irqrestore(&mdp->lock, flags); - - return ret; - } - /* If it is ever necessary to increase SH_ETH_REG_DUMP_MAX_REGS, the * version must be bumped as well. Just adding registers up to that * limit is fine, as long as the existing register indices don't @@@ -2257,22 -2219,6 +2213,6 @@@ static void sh_eth_get_regs(struct net_ pm_runtime_put_sync(&mdp->pdev->dev); }
- static int sh_eth_nway_reset(struct net_device *ndev) - { - struct sh_eth_private *mdp = netdev_priv(ndev); - unsigned long flags; - int ret; - - if (!ndev->phydev) - return -ENODEV; - - spin_lock_irqsave(&mdp->lock, flags); - ret = phy_start_aneg(ndev->phydev); - spin_unlock_irqrestore(&mdp->lock, flags); - - return ret; - } - static u32 sh_eth_get_msglevel(struct net_device *ndev) { struct sh_eth_private *mdp = netdev_priv(ndev); @@@ -2423,7 -2369,7 +2363,7 @@@ static int sh_eth_set_wol(struct net_de static const struct ethtool_ops sh_eth_ethtool_ops = { .get_regs_len = sh_eth_get_regs_len, .get_regs = sh_eth_get_regs, - .nway_reset = sh_eth_nway_reset, + .nway_reset = phy_ethtool_nway_reset, .get_msglevel = sh_eth_get_msglevel, .set_msglevel = sh_eth_set_msglevel, .get_link = ethtool_op_get_link, @@@ -2432,8 -2378,8 +2372,8 @@@ .get_sset_count = sh_eth_get_sset_count, .get_ringparam = sh_eth_get_ringparam, .set_ringparam = sh_eth_set_ringparam, - .get_link_ksettings = sh_eth_get_link_ksettings, - .set_link_ksettings = sh_eth_set_link_ksettings, + .get_link_ksettings = phy_ethtool_get_link_ksettings, + .set_link_ksettings = phy_ethtool_set_link_ksettings, .get_wol = sh_eth_get_wol, .set_wol = sh_eth_set_wol, }; diff --combined drivers/net/ethernet/sfc/efx.c index b24c2e21db8e,ce3a177081a8..330233286e78 --- a/drivers/net/ethernet/sfc/efx.c +++ b/drivers/net/ethernet/sfc/efx.c @@@ -264,17 -264,11 +264,17 @@@ static int efx_check_disabled(struct ef static int efx_process_channel(struct efx_channel *channel, int budget) { struct efx_tx_queue *tx_queue; + struct list_head rx_list; int spent;
if (unlikely(!channel->enabled)) return 0;
+	/* Prepare the batch receive list */
+	EFX_WARN_ON_PARANOID(channel->rx_list != NULL);
+	INIT_LIST_HEAD(&rx_list);
+	channel->rx_list = &rx_list;
+
 	efx_for_each_channel_tx_queue(tx_queue, channel) {
 		tx_queue->pkts_compl = 0;
 		tx_queue->bytes_compl = 0;
 	}
@@@ -297,10 -291,6 +297,10 @@@
 		}
 	}

+	/* Receive any packets we queued up */
+	netif_receive_skb_list(channel->rx_list);
+	channel->rx_list = NULL;
+
 	return spent;
 }
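efx_process_channel() now collects received skbs on a per-poll list and hands the whole batch to the stack once per NAPI poll via netif_receive_skb_list(), amortising the per-packet stack entry cost. A minimal sketch of the pattern outside sfc; example_next_rx_skb() is a hypothetical helper standing in for the driver's descriptor-ring processing:

static struct sk_buff *example_next_rx_skb(void);	/* hypothetical */

static int example_poll(struct napi_struct *napi, int budget)
{
	LIST_HEAD(rx_list);
	struct sk_buff *skb;
	int spent = 0;

	while (spent < budget && (skb = example_next_rx_skb())) {
		list_add_tail(&skb->list, &rx_list);
		spent++;
	}

	netif_receive_skb_list(&rx_list);	/* one batched delivery */
	return spent;
}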
@@@ -565,8 -555,6 +565,8 @@@ static int efx_probe_channel(struct efx goto fail; }
+ channel->rx_list = NULL; + return 0;
fail: @@@ -1883,12 -1871,6 +1883,6 @@@ static void efx_remove_filters(struct e up_write(&efx->filter_sem); }
- static void efx_restore_filters(struct efx_nic *efx) - { - down_read(&efx->filter_sem); - efx->type->filter_table_restore(efx); - up_read(&efx->filter_sem); - }
/************************************************************************** * @@@ -2700,6 -2682,7 +2694,7 @@@ void efx_reset_down(struct efx_nic *efx efx_disable_interrupts(efx);
mutex_lock(&efx->mac_lock); + down_write(&efx->filter_sem); mutex_lock(&efx->rss_lock); if (efx->port_initialized && method != RESET_TYPE_INVISIBLE && method != RESET_TYPE_DATAPATH) @@@ -2757,9 -2740,8 +2752,8 @@@ int efx_reset_up(struct efx_nic *efx, e if (efx->type->rx_restore_rss_contexts) efx->type->rx_restore_rss_contexts(efx); mutex_unlock(&efx->rss_lock); - down_read(&efx->filter_sem); - efx_restore_filters(efx); - up_read(&efx->filter_sem); + efx->type->filter_table_restore(efx); + up_write(&efx->filter_sem); if (efx->type->sriov_reset) efx->type->sriov_reset(efx);
@@@ -2776,6 -2758,7 +2770,7 @@@ fail efx->port_initialized = false;
mutex_unlock(&efx->rss_lock); + up_write(&efx->filter_sem); mutex_unlock(&efx->mac_lock);
return rc; @@@ -3485,7 -3468,9 +3480,9 @@@ static int efx_pci_probe_main(struct ef
efx_init_napi(efx);
+ down_write(&efx->filter_sem); rc = efx->type->init(efx); + up_write(&efx->filter_sem); if (rc) { netif_err(efx, probe, efx->net_dev, "failed to initialise NIC\n"); @@@ -3777,7 -3762,9 +3774,9 @@@ static int efx_pm_resume(struct device rc = efx->type->reset(efx, RESET_TYPE_ALL); if (rc) return rc; + down_write(&efx->filter_sem); rc = efx->type->init(efx); + up_write(&efx->filter_sem); if (rc) return rc; rc = efx_pm_thaw(dev); diff --combined drivers/net/tun.c index 7b5748f86c9c,f5727baac84a..0a3134712652 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c @@@ -607,8 -607,7 +607,8 @@@ static u16 tun_ebpf_select_queue(struc }
static u16 tun_select_queue(struct net_device *dev, struct sk_buff *skb, - void *accel_priv, select_queue_fallback_t fallback) + struct net_device *sb_dev, + select_queue_fallback_t fallback) { struct tun_struct *tun = netdev_priv(dev); u16 ret; @@@ -1269,6 -1268,7 +1269,6 @@@ static int tun_xdp(struct net_device *d return tun_xdp_set(dev, xdp->prog, xdp->extack); case XDP_QUERY_PROG: xdp->prog_id = tun_xdp_query(dev); - xdp->prog_attached = !!xdp->prog_id; return 0; default: return -EINVAL; @@@ -1688,7 -1688,7 +1688,7 @@@ static struct sk_buff *tun_build_skb(st case XDP_TX: get_page(alloc_frag->page); alloc_frag->offset += buflen; - if (tun_xdp_tx(tun->dev, &xdp)) + if (tun_xdp_tx(tun->dev, &xdp) < 0) goto err_redirect; rcu_read_unlock(); local_bh_enable(); diff --combined drivers/net/usb/asix_devices.c index 8f41c6bda8e5,b1b3d8f7e67d..b654f05b2ccd --- a/drivers/net/usb/asix_devices.c +++ b/drivers/net/usb/asix_devices.c @@@ -642,10 -642,12 +642,12 @@@ static void ax88772_restore_phy(struct priv->presvd_phy_advertise);
/* Restore BMCR */ + if (priv->presvd_phy_bmcr & BMCR_ANENABLE) + priv->presvd_phy_bmcr |= BMCR_ANRESTART; + asix_mdio_write_nopm(dev->net, dev->mii.phy_id, MII_BMCR, priv->presvd_phy_bmcr);
- mii_nway_restart(&dev->mii); priv->presvd_phy_advertise = 0; priv->presvd_phy_bmcr = 0; } @@@ -691,32 -693,24 +693,32 @@@ static int ax88772_bind(struct usbnet * u32 phyid; struct asix_common_private *priv;
- usbnet_get_endpoints(dev,intf); + usbnet_get_endpoints(dev, intf);
- /* Get the MAC address */ - if (dev->driver_info->data & FLAG_EEPROM_MAC) { - for (i = 0; i < (ETH_ALEN >> 1); i++) { - ret = asix_read_cmd(dev, AX_CMD_READ_EEPROM, 0x04 + i, - 0, 2, buf + i * 2, 0); - if (ret < 0) - break; - } + /* Maybe the boot loader passed the MAC address via device tree */ + if (!eth_platform_get_mac_address(&dev->udev->dev, buf)) { + netif_dbg(dev, ifup, dev->net, + "MAC address read from device tree"); } else { - ret = asix_read_cmd(dev, AX_CMD_READ_NODE_ID, - 0, 0, ETH_ALEN, buf, 0); - } + /* Try getting the MAC address from EEPROM */ + if (dev->driver_info->data & FLAG_EEPROM_MAC) { + for (i = 0; i < (ETH_ALEN >> 1); i++) { + ret = asix_read_cmd(dev, AX_CMD_READ_EEPROM, + 0x04 + i, 0, 2, buf + i * 2, + 0); + if (ret < 0) + break; + } + } else { + ret = asix_read_cmd(dev, AX_CMD_READ_NODE_ID, + 0, 0, ETH_ALEN, buf, 0); + }
- if (ret < 0) { - netdev_dbg(dev->net, "Failed to read MAC address: %d\n", ret); - return ret; + if (ret < 0) { + netdev_dbg(dev->net, "Failed to read MAC address: %d\n", + ret); + return ret; + } }
asix_set_netdev_dev_addr(dev, buf); diff --combined drivers/net/usb/lan78xx.c index 6f2ea84bf0b2,ed10d49eb5e0..ac25981ee4d5 --- a/drivers/net/usb/lan78xx.c +++ b/drivers/net/usb/lan78xx.c @@@ -1721,7 -1721,7 +1721,7 @@@ static void lan78xx_init_mac_address(st "MAC address read from EEPROM"); } else { /* generate random MAC */ - random_ether_addr(addr); + eth_random_addr(addr); netif_dbg(dev, ifup, dev->net, "MAC address set to random addr"); } @@@ -3344,6 -3344,7 +3344,7 @@@ static void lan78xx_tx_bh(struct lan78x pkt_cnt = 0; count = 0; length = 0; + spin_lock_irqsave(&tqp->lock, flags); for (skb = tqp->next; pkt_cnt < tqp->qlen; skb = skb->next) { if (skb_is_gso(skb)) { if (pkt_cnt) { @@@ -3352,7 -3353,8 +3353,8 @@@ } count = 1; length = skb->len - TX_OVERHEAD; - skb2 = skb_dequeue(tqp); + __skb_unlink(skb, tqp); + spin_unlock_irqrestore(&tqp->lock, flags); goto gso_skb; }
@@@ -3361,6 -3363,7 +3363,7 @@@ skb_totallen = skb->len + roundup(skb_totallen, sizeof(u32)); pkt_cnt++; } + spin_unlock_irqrestore(&tqp->lock, flags);
/* copy to a single skb */ skb = alloc_skb(skb_totallen, GFP_ATOMIC); diff --combined drivers/net/usb/rtl8150.c index 0e81d4c441d9,48ba80a8ca5c..80373a9171dd --- a/drivers/net/usb/rtl8150.c +++ b/drivers/net/usb/rtl8150.c @@@ -391,7 -391,6 +391,7 @@@ static void read_bulk_callback(struct u u16 rx_stat; int status = urb->status; int result; + unsigned long flags;
dev = urb->context; if (!dev) @@@ -433,9 -432,9 +433,9 @@@ netdev->stats.rx_packets++; netdev->stats.rx_bytes += pkt_len;
- spin_lock(&dev->rx_pool_lock); + spin_lock_irqsave(&dev->rx_pool_lock, flags); skb = pull_skb(dev); - spin_unlock(&dev->rx_pool_lock); + spin_unlock_irqrestore(&dev->rx_pool_lock, flags); if (!skb) goto resched;
@@@ -682,7 -681,7 +682,7 @@@ static void rtl8150_set_multicast(struc (netdev->flags & IFF_ALLMULTI)) { rx_creg &= 0xfffe; rx_creg |= 0x0002; - dev_info(&netdev->dev, "%s: allmulti set\n", netdev->name); + dev_dbg(&netdev->dev, "%s: allmulti set\n", netdev->name); } else { /* ~RX_MULTICAST, ~RX_PROMISCUOUS */ rx_creg &= 0x00fc; diff --combined drivers/net/wireless/realtek/rtlwifi/base.c index 31bd6f714052,54c9f6ab0c8c..f4122c8fdd97 --- a/drivers/net/wireless/realtek/rtlwifi/base.c +++ b/drivers/net/wireless/realtek/rtlwifi/base.c @@@ -484,18 -484,21 +484,21 @@@ static void _rtl_init_deferred_work(str
}
- void rtl_deinit_deferred_work(struct ieee80211_hw *hw) + void rtl_deinit_deferred_work(struct ieee80211_hw *hw, bool ips_wq) { struct rtl_priv *rtlpriv = rtl_priv(hw);
del_timer_sync(&rtlpriv->works.watchdog_timer);
- cancel_delayed_work(&rtlpriv->works.watchdog_wq); - cancel_delayed_work(&rtlpriv->works.ips_nic_off_wq); - cancel_delayed_work(&rtlpriv->works.ps_work); - cancel_delayed_work(&rtlpriv->works.ps_rfon_wq); - cancel_delayed_work(&rtlpriv->works.fwevt_wq); - cancel_delayed_work(&rtlpriv->works.c2hcmd_wq); + cancel_delayed_work_sync(&rtlpriv->works.watchdog_wq); + if (ips_wq) + cancel_delayed_work(&rtlpriv->works.ips_nic_off_wq); + else + cancel_delayed_work_sync(&rtlpriv->works.ips_nic_off_wq); + cancel_delayed_work_sync(&rtlpriv->works.ps_work); + cancel_delayed_work_sync(&rtlpriv->works.ps_rfon_wq); + cancel_delayed_work_sync(&rtlpriv->works.fwevt_wq); + cancel_delayed_work_sync(&rtlpriv->works.c2hcmd_wq); } EXPORT_SYMBOL_GPL(rtl_deinit_deferred_work);
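The rtl_deinit_deferred_work() change above switches teardown to cancel_delayed_work_sync() so that no work item can still be running once deinit returns, with one deliberate exception: when the caller is the IPS work itself (ips_wq == true), a synchronous cancel would wait for itself and deadlock. A small userspace model of that conditional follows; the names are invented, and the real _sync variant additionally blocks until a running instance finishes, which this toy only notes in a comment.

    #include <stdbool.h>
    #include <stdio.h>

    struct work { bool pending; };

    /* Model only: the real cancel_delayed_work_sync() also waits for a
     * running instance to finish, which deadlocks if the caller *is*
     * that instance. */
    static void cancel_work(struct work *w)      { w->pending = false; }
    static void cancel_work_sync(struct work *w) { w->pending = false; }

    static void deinit_deferred_work(struct work *ips_work, bool called_from_ips_work)
    {
        if (called_from_ips_work)
            cancel_work(ips_work);      /* no wait: we are that work */
        else
            cancel_work_sync(ips_work); /* any other context may wait */
        printf("%s\n", called_from_ips_work ? "cancelled, no wait"
                                            : "cancelled and waited");
    }

    int main(void)
    {
        struct work ips = { .pending = true };

        deinit_deferred_work(&ips, true);   /* called from the IPS work */
        ips.pending = true;
        deinit_deferred_work(&ips, false);  /* called from anywhere else */
        return 0;
    }
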
@@@ -1904,7 -1907,7 +1907,7 @@@ void rtl_rx_ampdu_apply(struct rtl_pri reject_agg, ctrl_agg_size, agg_size);
rtlpriv->hw->max_rx_aggregation_subframes = - (ctrl_agg_size ? agg_size : IEEE80211_MAX_AMPDU_BUF); + (ctrl_agg_size ? agg_size : IEEE80211_MAX_AMPDU_BUF_HT); } EXPORT_SYMBOL(rtl_rx_ampdu_apply);
diff --combined include/linux/skbuff.h index 14bc9ebe30f2,610a201126ee..fd3cb1b247df --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@@ -630,6 -630,7 +630,7 @@@ typedef unsigned char *sk_buff_data_t * @hash: the packet hash * @queue_mapping: Queue mapping for multiqueue devices * @xmit_more: More SKBs are pending for this queue + * @pfmemalloc: skbuff was allocated from PFMEMALLOC reserves * @ndisc_nodetype: router type (from link layer) * @ooo_okay: allow the mapping of a socket to a queue to be changed * @l4_hash: indicate hash is a canonical 4-tuple hash over transport @@@ -640,7 -641,6 +641,7 @@@ * @no_fcs: Request NIC to treat last 4 bytes as Ethernet FCS * @csum_not_inet: use CRC32c to resolve CHECKSUM_PARTIAL * @dst_pending_confirm: need to confirm neighbour + * @decrypted: Decrypted SKB * @napi_id: id of the NAPI struct this skb came from * @secmark: security marking * @mark: Generic packet mark @@@ -678,8 -678,7 +679,8 @@@ struct sk_buff int ip_defrag_offset; }; }; - struct rb_node rbnode; /* used in netem & tcp stack */ + struct rb_node rbnode; /* used in netem & tcp stack */ + struct list_head list; }; struct sock *sk;
@@@ -737,7 -736,7 +738,7 @@@ peeked:1, head_frag:1, xmit_more:1, - __unused:1; + pfmemalloc:1;
/* fields enclosed in headers_start/headers_end are copied * using a single memcpy() in __copy_skb_header() @@@ -756,31 -755,30 +757,30 @@@
__u8 __pkt_type_offset[0]; __u8 pkt_type:3; - __u8 pfmemalloc:1; __u8 ignore_df:1; - __u8 nf_trace:1; __u8 ip_summed:2; __u8 ooo_okay:1; + __u8 l4_hash:1; __u8 sw_hash:1; __u8 wifi_acked_valid:1; __u8 wifi_acked:1; - __u8 no_fcs:1; /* Indicates the inner headers are valid in the skbuff. */ __u8 encapsulation:1; __u8 encap_hdr_csum:1; __u8 csum_valid:1; + __u8 csum_complete_sw:1; __u8 csum_level:2; __u8 csum_not_inet:1; - __u8 dst_pending_confirm:1; #ifdef CONFIG_IPV6_NDISC_NODETYPE __u8 ndisc_nodetype:2; #endif __u8 ipvs_property:1; + __u8 inner_protocol_type:1; __u8 remcsum_offload:1; #ifdef CONFIG_NET_SWITCHDEV @@@ -793,9 -791,6 +793,9 @@@ __u8 tc_redirected:1; __u8 tc_from_ingress:1; #endif +#ifdef CONFIG_TLS_DEVICE + __u8 decrypted:1; +#endif
#ifdef CONFIG_NET_SCHED __u16 tc_index; /* traffic control index */ diff --combined include/net/ipv6.h index aa6fd11a887c,8f73be494503..190015652168 --- a/include/net/ipv6.h +++ b/include/net/ipv6.h @@@ -294,7 -294,6 +294,7 @@@ struct ipv6_fl_socklist };
struct ipcm6_cookie { + struct sockcm_cookie sockc; __s16 hlimit; __s16 tclass; __s8 dontfrag; @@@ -302,25 -301,6 +302,25 @@@ __u16 gso_size; };
+static inline void ipcm6_init(struct ipcm6_cookie *ipc6) +{ + *ipc6 = (struct ipcm6_cookie) { + .hlimit = -1, + .tclass = -1, + .dontfrag = -1, + }; +} + +static inline void ipcm6_init_sk(struct ipcm6_cookie *ipc6, + const struct ipv6_pinfo *np) +{ + *ipc6 = (struct ipcm6_cookie) { + .hlimit = -1, + .tclass = np->tclass, + .dontfrag = np->dontfrag, + }; +} + static inline struct ipv6_txoptions *txopt_get(const struct ipv6_pinfo *np) { struct ipv6_txoptions *opt; @@@ -375,14 -355,7 +375,7 @@@ struct ipv6_txoptions *ipv6_dup_options struct ipv6_txoptions *ipv6_renew_options(struct sock *sk, struct ipv6_txoptions *opt, int newtype, - struct ipv6_opt_hdr __user *newopt, - int newoptlen); - struct ipv6_txoptions * - ipv6_renew_options_kern(struct sock *sk, - struct ipv6_txoptions *opt, - int newtype, - struct ipv6_opt_hdr *newopt, - int newoptlen); + struct ipv6_opt_hdr *newopt); struct ipv6_txoptions *ipv6_fixup_options(struct ipv6_txoptions *opt_space, struct ipv6_txoptions *opt);
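The new ipcm6_init()/ipcm6_init_sk() helpers above replace scattered open-coded setup of struct ipcm6_cookie with a single compound-literal assignment: every field not listed (including the embedded sockcm cookie and gso_size) is zeroed, and the listed fields get their "unset" sentinels. A self-contained C sketch of the same idiom, using an invented struct:

    #include <stdio.h>

    struct cookie {
        int hlimit;          /* -1 = "not set, use socket default" */
        int tclass;
        int dontfrag;
        unsigned gso_size;   /* implicitly zeroed by the assignment */
    };

    static void cookie_init(struct cookie *c)
    {
        /* Compound literal: unnamed members are zeroed, so adding a
         * field later can never leave it uninitialized here. */
        *c = (struct cookie) {
            .hlimit   = -1,
            .tclass   = -1,
            .dontfrag = -1,
        };
    }

    int main(void)
    {
        struct cookie c;

        cookie_init(&c);
        printf("hlimit=%d gso_size=%u\n", c.hlimit, c.gso_size); /* -1, 0 */
        return 0;
    }
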
@@@ -850,7 -823,7 +843,7 @@@ static inline __be32 ip6_make_flowlabel * to minimize possbility that any useful information to an * attacker is leaked. Only lower 20 bits are relevant. */ - rol32(hash, 16); + hash = rol32(hash, 16);
flowlabel = (__force __be32)hash & IPV6_FLOWLABEL_MASK;
@@@ -942,8 -915,6 +935,8 @@@ static inline __be32 flowi6_get_flowlab
int ipv6_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt, struct net_device *orig_dev); +void ipv6_list_rcv(struct list_head *head, struct packet_type *pt, + struct net_device *orig_dev);
int ip6_rcv_finish(struct net *net, struct sock *sk, struct sk_buff *skb);
@@@ -960,7 -931,8 +953,7 @@@ int ip6_append_data(struct sock *sk int odd, struct sk_buff *skb), void *from, int length, int transhdrlen, struct ipcm6_cookie *ipc6, struct flowi6 *fl6, - struct rt6_info *rt, unsigned int flags, - const struct sockcm_cookie *sockc); + struct rt6_info *rt, unsigned int flags);
int ip6_push_pending_frames(struct sock *sk);
@@@ -977,7 -949,8 +970,7 @@@ struct sk_buff *ip6_make_skb(struct soc void *from, int length, int transhdrlen, struct ipcm6_cookie *ipc6, struct flowi6 *fl6, struct rt6_info *rt, unsigned int flags, - struct inet_cork_full *cork, - const struct sockcm_cookie *sockc); + struct inet_cork_full *cork);
static inline struct sk_buff *ip6_finish_skb(struct sock *sk) { @@@ -1127,6 -1100,8 +1120,8 @@@ void ipv6_sysctl_unregister(void)
int ipv6_sock_mc_join(struct sock *sk, int ifindex, const struct in6_addr *addr); + int ipv6_sock_mc_join_ssm(struct sock *sk, int ifindex, + const struct in6_addr *addr, unsigned int mode); int ipv6_sock_mc_drop(struct sock *sk, int ifindex, const struct in6_addr *addr); #endif /* _NET_IPV6_H */ diff --combined include/net/tcp.h index 582304955087,3482d13d655b..f6e0a9b1dff3 --- a/include/net/tcp.h +++ b/include/net/tcp.h @@@ -472,20 -472,19 +472,20 @@@ struct sock *cookie_v4_check(struct soc */ static inline void tcp_synq_overflow(const struct sock *sk) { - unsigned long last_overflow = tcp_sk(sk)->rx_opt.ts_recent_stamp; - unsigned long now = jiffies; + unsigned int last_overflow = tcp_sk(sk)->rx_opt.ts_recent_stamp; + unsigned int now = jiffies;
- if (time_after(now, last_overflow + HZ)) + if (time_after32(now, last_overflow + HZ)) tcp_sk(sk)->rx_opt.ts_recent_stamp = now; }
/* syncookies: no recent synqueue overflow on this listening socket? */ static inline bool tcp_synq_no_recent_overflow(const struct sock *sk) { - unsigned long last_overflow = tcp_sk(sk)->rx_opt.ts_recent_stamp; + unsigned int last_overflow = tcp_sk(sk)->rx_opt.ts_recent_stamp; + unsigned int now = jiffies;
- return time_after(jiffies, last_overflow + TCP_SYNCOOKIE_VALID); + return time_after32(now, last_overflow + TCP_SYNCOOKIE_VALID); }
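Both syncookie hunks above switch the overflow stamp from unsigned long to unsigned int and compare with time_after32(), which stays correct when the 32-bit jiffies value wraps; a plain greater-than test inverts its answer right after a wrap. A standalone sketch of the trick behind time_after32() (compute the 32-bit difference first, then test its sign); this is illustrative userspace code, not the kernel macro itself:

    #include <stdint.h>
    #include <stdio.h>

    /* true iff a is later than b, even across a 32-bit wrap */
    static int after32(uint32_t a, uint32_t b)
    {
        return (int32_t)(b - a) < 0;
    }

    int main(void)
    {
        uint32_t before_wrap = 0xFFFFFFF0u;  /* just before the counter wraps */
        uint32_t after_wrap  = 0x00000010u;  /* shortly after the wrap */

        printf("%d\n", after32(after_wrap, before_wrap)); /* 1: correct */
        printf("%d\n", after_wrap > before_wrap);         /* 0: naive test fails */
        return 0;
    }
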
static inline u32 tcp_cookie_time(void) @@@ -829,6 -828,10 +829,10 @@@ struct tcp_skb_cb
#define TCP_SKB_CB(__skb) ((struct tcp_skb_cb *)&((__skb)->cb[0]))
+ static inline void bpf_compute_data_end_sk_skb(struct sk_buff *skb) + { + TCP_SKB_CB(skb)->bpf.data_end = skb->data + skb_headlen(skb); + }
#if IS_ENABLED(CONFIG_IPV6) /* This is the variant of inet6_iif() that must be used by TCP, @@@ -909,8 -912,6 +913,6 @@@ enum tcp_ca_event CA_EVENT_LOSS, /* loss timeout */ CA_EVENT_ECN_NO_CE, /* ECT set, but not CE marked */ CA_EVENT_ECN_IS_CE, /* received CE marked IP packet */ - CA_EVENT_DELAYED_ACK, /* Delayed ack is sent */ - CA_EVENT_NON_DELAYED_ACK, };
/* Information about inbound ACK, passed to cong_ops->in_ack_event() */ @@@ -955,8 -956,6 +957,8 @@@ struct rate_sample u32 prior_delivered; /* tp->delivered at "prior_mstamp" */ s32 delivered; /* number of packets delivered over interval */ long interval_us; /* time for tp->delivered to incr "delivered" */ + u32 snd_interval_us; /* snd interval for delivered packets */ + u32 rcv_interval_us; /* rcv interval for delivered packets */ long rtt_us; /* RTT of last (S)ACKed packet (or -1) */ int losses; /* number of packets marked lost upon ACK */ u32 acked_sacked; /* number of packets newly (S)ACKed upon ACK */ @@@ -1188,17 -1187,6 +1190,17 @@@ static inline bool tcp_is_cwnd_limited( return tp->is_cwnd_limited; }
+/* BBR congestion control needs pacing. + * Same remark for SO_MAX_PACING_RATE. + * sch_fq packet scheduler is efficiently handling pacing, + * but is not always installed/used. + * Return true if TCP stack should pace packets itself. + */ +static inline bool tcp_needs_internal_pacing(const struct sock *sk) +{ + return smp_load_acquire(&sk->sk_pacing_status) == SK_PACING_NEEDED; +} + /* Something is really bad, we could not queue an additional packet, * because qdisc is full or receiver sent a 0 window. * We do not want to add fuel to the fire, or abort too early, @@@ -1376,8 -1364,7 +1378,8 @@@ static inline bool tcp_paws_check(cons { if ((s32)(rx_opt->ts_recent - rx_opt->rcv_tsval) <= paws_win) return true; - if (unlikely(get_seconds() >= rx_opt->ts_recent_stamp + TCP_PAWS_24DAYS)) + if (unlikely(!time_before32(ktime_get_seconds(), + rx_opt->ts_recent_stamp + TCP_PAWS_24DAYS))) return true; /* * Some OSes send SYN and SYNACK messages with tsval=0 tsecr=0, @@@ -1407,8 -1394,7 +1409,8 @@@ static inline bool tcp_paws_reject(cons
However, we can relax time bounds for RST segments to MSL. */ - if (rst && get_seconds() >= rx_opt->ts_recent_stamp + TCP_PAWS_MSL) + if (rst && !time_before32(ktime_get_seconds(), + rx_opt->ts_recent_stamp + TCP_PAWS_MSL)) return false; return true; } @@@ -1794,7 -1780,7 +1796,7 @@@ void tcp_v4_destroy_sock(struct sock *s
struct sk_buff *tcp_gso_segment(struct sk_buff *skb, netdev_features_t features); -struct sk_buff **tcp_gro_receive(struct sk_buff **head, struct sk_buff *skb); +struct sk_buff *tcp_gro_receive(struct list_head *head, struct sk_buff *skb); int tcp_gro_complete(struct sk_buff *skb);
void __tcp_v4_send_check(struct sk_buff *skb, __be32 saddr, __be32 daddr); diff --combined lib/rhashtable.c index 0e04947b7e0c,e5c8586cf717..ae4223e0f5bc --- a/lib/rhashtable.c +++ b/lib/rhashtable.c @@@ -28,7 -28,6 +28,7 @@@ #include <linux/rhashtable.h> #include <linux/err.h> #include <linux/export.h> +#include <linux/rhashtable.h>
#define HASH_DEFAULT_SIZE 64UL #define HASH_MIN_SIZE 4U @@@ -116,7 -115,8 +116,7 @@@ static void bucket_table_free_rcu(struc
static union nested_table *nested_table_alloc(struct rhashtable *ht, union nested_table __rcu **prev, - unsigned int shifted, - unsigned int nhash) + bool leaf) { union nested_table *ntbl; int i; @@@ -127,9 -127,10 +127,9 @@@
ntbl = kzalloc(PAGE_SIZE, GFP_ATOMIC);
- if (ntbl && shifted) { - for (i = 0; i < PAGE_SIZE / sizeof(ntbl[0].bucket); i++) - INIT_RHT_NULLS_HEAD(ntbl[i].bucket, ht, - (i << shifted) | nhash); + if (ntbl && leaf) { + for (i = 0; i < PAGE_SIZE / sizeof(ntbl[0]); i++) + INIT_RHT_NULLS_HEAD(ntbl[i].bucket); }
rcu_assign_pointer(*prev, ntbl); @@@ -155,7 -156,7 +155,7 @@@ static struct bucket_table *nested_buck return NULL;
if (!nested_table_alloc(ht, (union nested_table __rcu **)tbl->buckets, - 0, 0)) { + false)) { kfree(tbl); return NULL; } @@@ -205,7 -206,7 +205,7 @@@ static struct bucket_table *bucket_tabl tbl->hash_rnd = get_random_u32();
for (i = 0; i < nbuckets; i++) - INIT_RHT_NULLS_HEAD(tbl->buckets[i], ht, i); + INIT_RHT_NULLS_HEAD(tbl->buckets[i]);
return tbl; } @@@ -226,7 -227,8 +226,7 @@@ static struct bucket_table *rhashtable_ static int rhashtable_rehash_one(struct rhashtable *ht, unsigned int old_hash) { struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht); - struct bucket_table *new_tbl = rhashtable_last_table(ht, - rht_dereference_rcu(old_tbl->future_tbl, ht)); + struct bucket_table *new_tbl = rhashtable_last_table(ht, old_tbl); struct rhash_head __rcu **pprev = rht_bucket_var(old_tbl, old_hash); int err = -EAGAIN; struct rhash_head *head, *next, *entry; @@@ -296,14 -298,21 +296,14 @@@ static int rhashtable_rehash_attach(str struct bucket_table *old_tbl, struct bucket_table *new_tbl) { - /* Protect future_tbl using the first bucket lock. */ - spin_lock_bh(old_tbl->locks); - - /* Did somebody beat us to it? */ - if (rcu_access_pointer(old_tbl->future_tbl)) { - spin_unlock_bh(old_tbl->locks); - return -EEXIST; - } - /* Make insertions go into the new, empty table right away. Deletions * and lookups will be attempted in both tables until we synchronize. + * As cmpxchg() provides strong barriers, we do not need + * rcu_assign_pointer(). */ - rcu_assign_pointer(old_tbl->future_tbl, new_tbl);
- spin_unlock_bh(old_tbl->locks); + if (cmpxchg(&old_tbl->future_tbl, NULL, new_tbl) != NULL) + return -EEXIST;
return 0; } @@@ -466,7 -475,7 +466,7 @@@ static int rhashtable_insert_rehash(str
fail: /* Do not fail the insert if someone else did a rehash. */ - if (likely(rcu_dereference_raw(tbl->future_tbl))) + if (likely(rcu_access_pointer(tbl->future_tbl))) return 0;
/* Schedule async rehash to retry allocation in process context. */ @@@ -539,7 -548,7 +539,7 @@@ static struct bucket_table *rhashtable_ if (PTR_ERR(data) != -EAGAIN && PTR_ERR(data) != -ENOENT) return ERR_CAST(data);
- new_tbl = rcu_dereference(tbl->future_tbl); + new_tbl = rht_dereference_rcu(tbl->future_tbl, ht); if (new_tbl) return new_tbl;
@@@ -598,7 -607,7 +598,7 @@@ static void *rhashtable_try_insert(stru break;
spin_unlock_bh(lock); - tbl = rcu_dereference(tbl->future_tbl); + tbl = rht_dereference_rcu(tbl->future_tbl, ht); }
data = rhashtable_lookup_one(ht, tbl, hash, key, obj); @@@ -765,7 -774,7 +765,7 @@@ int rhashtable_walk_start_check(struct skip++; if (list == iter->list) { iter->p = p; - skip = skip; + iter->skip = skip; goto found; } } @@@ -955,8 -964,16 +955,16 @@@ EXPORT_SYMBOL_GPL(rhashtable_walk_stop)
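In the rhashtable_rehash_attach() hunk earlier in this file, the bucket-lock-and-recheck dance is replaced by one cmpxchg(): whichever CPU swaps future_tbl from NULL to the new table wins, everyone else gets -EEXIST, and the full barrier implied by cmpxchg() provides the publish ordering that rcu_assign_pointer() supplied before. A userspace model of that claim-by-compare-and-swap pattern using C11 atomics (names invented):

    #include <stdatomic.h>
    #include <stdio.h>

    struct table { int size; };

    static _Atomic(struct table *) future_tbl;  /* starts out NULL */

    static int attach_future(struct table *new_tbl)
    {
        struct table *expected = NULL;

        /* The seq_cst success ordering also publishes *new_tbl's
         * contents, the role rcu_assign_pointer() played before. */
        if (!atomic_compare_exchange_strong(&future_tbl, &expected, new_tbl))
            return -1;   /* somebody else already attached a table */
        return 0;
    }

    int main(void)
    {
        struct table a = { 64 }, b = { 128 };

        printf("%d\n", attach_future(&a)); /*  0: we won the race */
        printf("%d\n", attach_future(&b)); /* -1: already claimed */
        return 0;
    }
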
static size_t rounded_hashtable_size(const struct rhashtable_params *params) { - return max(roundup_pow_of_two(params->nelem_hint * 4 / 3), - (unsigned long)params->min_size); + size_t retsize; + + if (params->nelem_hint) + retsize = max(roundup_pow_of_two(params->nelem_hint * 4 / 3), + (unsigned long)params->min_size); + else + retsize = max(HASH_DEFAULT_SIZE, + (unsigned long)params->min_size); + + return retsize; }
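The rounded_hashtable_size() rewrite above makes the sizing rule explicit: with a nelem_hint, take 4/3 of it (targeting a load factor of at most 0.75) rounded up to a power of two; without a hint, fall back to the default; in both cases never go below min_size. A standalone sketch with two worked values (HASH_DEFAULT_SIZE mirrors the 64UL defined in this file; the helper names are illustrative):

    #include <stdio.h>

    #define HASH_DEFAULT_SIZE 64UL

    static unsigned long roundup_pow_of_two(unsigned long n)
    {
        unsigned long r = 1;

        while (r < n)
            r <<= 1;
        return r;
    }

    static unsigned long table_size(unsigned long nelem_hint, unsigned long min_size)
    {
        unsigned long size;

        if (nelem_hint)
            size = roundup_pow_of_two(nelem_hint * 4 / 3);
        else
            size = HASH_DEFAULT_SIZE;
        return size > min_size ? size : min_size;
    }

    int main(void)
    {
        printf("%lu\n", table_size(100, 4)); /* 100*4/3 = 133 -> 256 */
        printf("%lu\n", table_size(0, 4));   /* no hint -> 64 */
        return 0;
    }
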
static u32 rhashtable_jhash2(const void *key, u32 length, u32 seed) @@@ -985,6 -1002,7 +993,6 @@@ * .key_offset = offsetof(struct test_obj, key), * .key_len = sizeof(int), * .hashfn = jhash, - * .nulls_base = (1U << RHT_BASE_SHIFT), * }; * * Configuration Example 2: Variable length keys @@@ -1012,12 -1030,13 +1020,10 @@@ int rhashtable_init(struct rhashtable * struct bucket_table *tbl; size_t size;
- size = HASH_DEFAULT_SIZE; - if ((!params->key_len && !params->obj_hashfn) || (params->obj_hashfn && !params->obj_cmpfn)) return -EINVAL;
- if (params->nulls_base && params->nulls_base < (1U << RHT_BASE_SHIFT)) - return -EINVAL; - memset(ht, 0, sizeof(*ht)); mutex_init(&ht->mutex); spin_lock_init(&ht->lock); @@@ -1037,8 -1056,7 +1043,7 @@@
ht->p.min_size = max_t(u16, ht->p.min_size, HASH_MIN_SIZE);
- if (params->nelem_hint) - size = rounded_hashtable_size(&ht->p); + size = rounded_hashtable_size(&ht->p);
if (params->locks_mul) ht->p.locks_mul = roundup_pow_of_two(params->locks_mul); @@@ -1082,6 -1100,10 +1087,6 @@@ int rhltable_init(struct rhltable *hlt { int err;
- /* No rhlist NULLs marking for now. */ - if (params->nulls_base) - return -EINVAL; - err = rhashtable_init(&hlt->ht, params); hlt->ht.rhlist = true; return err; @@@ -1126,13 -1148,14 +1131,14 @@@ void rhashtable_free_and_destroy(struc void (*free_fn)(void *ptr, void *arg), void *arg) { - struct bucket_table *tbl; + struct bucket_table *tbl, *next_tbl; unsigned int i;
cancel_work_sync(&ht->run_work);
mutex_lock(&ht->mutex); tbl = rht_dereference(ht->tbl, ht); + restart: if (free_fn) { for (i = 0; i < tbl->size; i++) { struct rhash_head *pos, *next; @@@ -1149,7 -1172,12 +1155,12 @@@ } }
+ next_tbl = rht_dereference(tbl->future_tbl, ht); bucket_table_free(tbl); + if (next_tbl) { + tbl = next_tbl; + goto restart; + } mutex_unlock(&ht->mutex); } EXPORT_SYMBOL_GPL(rhashtable_free_and_destroy); @@@ -1199,18 -1227,25 +1210,18 @@@ struct rhash_head __rcu **rht_bucket_ne unsigned int index = hash & ((1 << tbl->nest) - 1); unsigned int size = tbl->size >> tbl->nest; union nested_table *ntbl; - unsigned int shifted; - unsigned int nhash;
ntbl = (union nested_table *)rcu_dereference_raw(tbl->buckets[0]); hash >>= tbl->nest; - nhash = index; - shifted = tbl->nest; ntbl = nested_table_alloc(ht, &ntbl[index].table, - size <= (1 << shift) ? shifted : 0, nhash); + size <= (1 << shift));
while (ntbl && size > (1 << shift)) { index = hash & ((1 << shift) - 1); size >>= shift; hash >>= shift; - nhash |= index << shifted; - shifted += shift; ntbl = nested_table_alloc(ht, &ntbl[index].table, - size <= (1 << shift) ? shifted : 0, - nhash); + size <= (1 << shift)); }
if (!ntbl) diff --combined net/batman-adv/debugfs.c index 95a94160baf8,87479c60670e..3cb82378300b --- a/net/batman-adv/debugfs.c +++ b/net/batman-adv/debugfs.c @@@ -19,6 -19,7 +19,7 @@@ #include "debugfs.h" #include "main.h"
+ #include <linux/dcache.h> #include <linux/debugfs.h> #include <linux/err.h> #include <linux/errno.h> @@@ -117,7 -118,7 +118,7 @@@ static int batadv_bla_backbone_table_op
#ifdef CONFIG_BATMAN_ADV_DAT /** - * batadv_dat_cache_open() - Prepare file handler for reads from dat_chache + * batadv_dat_cache_open() - Prepare file handler for reads from dat_cache * @inode: inode which was opened * @file: file handle to be initialized * @@@ -344,6 -345,25 +345,25 @@@ out }
/** + * batadv_debugfs_rename_hardif() - Fix debugfs path for renamed hardif + * @hard_iface: hard interface which was renamed + */ + void batadv_debugfs_rename_hardif(struct batadv_hard_iface *hard_iface) + { + const char *name = hard_iface->net_dev->name; + struct dentry *dir; + struct dentry *d; + + dir = hard_iface->debug_dir; + if (!dir) + return; + + d = debugfs_rename(dir->d_parent, dir, dir->d_parent, name); + if (!d) + pr_err("Can't rename debugfs dir to %s\n", name); + } + + /** * batadv_debugfs_del_hardif() - delete the base directory for a hard interface * in debugfs. * @hard_iface: hard interface which is deleted. @@@ -414,6 -434,26 +434,26 @@@ out }
/** + * batadv_debugfs_rename_meshif() - Fix debugfs path for renamed softif + * @dev: net_device which was renamed + */ + void batadv_debugfs_rename_meshif(struct net_device *dev) + { + struct batadv_priv *bat_priv = netdev_priv(dev); + const char *name = dev->name; + struct dentry *dir; + struct dentry *d; + + dir = bat_priv->debug_dir; + if (!dir) + return; + + d = debugfs_rename(dir->d_parent, dir, dir->d_parent, name); + if (!d) + pr_err("Can't rename debugfs dir to %s\n", name); + } + + /** * batadv_debugfs_del_meshif() - Remove interface dependent debugfs entries * @dev: netdev struct of the soft interface */ diff --combined net/core/filter.c index b9ec916f4e3a,06da770f543f..104d560946da --- a/net/core/filter.c +++ b/net/core/filter.c @@@ -459,11 -459,21 +459,21 @@@ static bool convert_bpf_ld_abs(struct s (!unaligned_ok && offset >= 0 && offset + ip_align >= 0 && offset + ip_align % size == 0))) { + bool ldx_off_ok = offset <= S16_MAX; + *insn++ = BPF_MOV64_REG(BPF_REG_TMP, BPF_REG_H); *insn++ = BPF_ALU64_IMM(BPF_SUB, BPF_REG_TMP, offset); - *insn++ = BPF_JMP_IMM(BPF_JSLT, BPF_REG_TMP, size, 2 + endian); - *insn++ = BPF_LDX_MEM(BPF_SIZE(fp->code), BPF_REG_A, BPF_REG_D, - offset); + *insn++ = BPF_JMP_IMM(BPF_JSLT, BPF_REG_TMP, + size, 2 + endian + (!ldx_off_ok * 2)); + if (ldx_off_ok) { + *insn++ = BPF_LDX_MEM(BPF_SIZE(fp->code), BPF_REG_A, + BPF_REG_D, offset); + } else { + *insn++ = BPF_MOV64_REG(BPF_REG_TMP, BPF_REG_D); + *insn++ = BPF_ALU64_IMM(BPF_ADD, BPF_REG_TMP, offset); + *insn++ = BPF_LDX_MEM(BPF_SIZE(fp->code), BPF_REG_A, + BPF_REG_TMP, 0); + } if (endian) *insn++ = BPF_ENDIAN(BPF_FROM_BE, BPF_REG_A, size * 8); *insn++ = BPF_JMP_A(8); @@@ -1762,6 -1772,37 +1772,37 @@@ static const struct bpf_func_proto bpf_ .arg2_type = ARG_ANYTHING, };
+ static inline int sk_skb_try_make_writable(struct sk_buff *skb, + unsigned int write_len) + { + int err = __bpf_try_make_writable(skb, write_len); + + bpf_compute_data_end_sk_skb(skb); + return err; + } + + BPF_CALL_2(sk_skb_pull_data, struct sk_buff *, skb, u32, len) + { + /* Idea is the following: should the needed direct read/write + * test fail during runtime, we can pull in more data and redo + * again, since implicitly, we invalidate previous checks here. + * + * Or, since we know how much we need to make read/writeable, + * this can be done once at the program beginning for direct + * access case. By this we overcome limitations of only current + * headroom being accessible. + */ + return sk_skb_try_make_writable(skb, len ? : skb_headlen(skb)); + } + + static const struct bpf_func_proto sk_skb_pull_data_proto = { + .func = sk_skb_pull_data, + .gpl_only = false, + .ret_type = RET_INTEGER, + .arg1_type = ARG_PTR_TO_CTX, + .arg2_type = ARG_ANYTHING, + }; + BPF_CALL_5(bpf_l3_csum_replace, struct sk_buff *, skb, u32, offset, u64, from, u64, to, u64, flags) { @@@ -2779,7 -2820,8 +2820,8 @@@ static int bpf_skb_net_shrink(struct sk
static u32 __bpf_skb_max_len(const struct sk_buff *skb) { - return skb->dev->mtu + skb->dev->hard_header_len; + return skb->dev ? skb->dev->mtu + skb->dev->hard_header_len : + SKB_MAX_ALLOC; }
static int bpf_skb_adjust_net(struct sk_buff *skb, s32 len_diff) @@@ -2863,8 -2905,8 +2905,8 @@@ static int bpf_skb_trim_rcsum(struct sk return __skb_trim_rcsum(skb, new_len); }
- BPF_CALL_3(bpf_skb_change_tail, struct sk_buff *, skb, u32, new_len, - u64, flags) + static inline int __bpf_skb_change_tail(struct sk_buff *skb, u32 new_len, + u64 flags) { u32 max_len = __bpf_skb_max_len(skb); u32 min_len = __bpf_skb_min_len(skb); @@@ -2900,6 -2942,13 +2942,13 @@@ if (!ret && skb_is_gso(skb)) skb_gso_reset(skb); } + return ret; + } + + BPF_CALL_3(bpf_skb_change_tail, struct sk_buff *, skb, u32, new_len, + u64, flags) + { + int ret = __bpf_skb_change_tail(skb, new_len, flags);
bpf_compute_data_pointers(skb); return ret; @@@ -2914,9 -2963,27 +2963,27 @@@ static const struct bpf_func_proto bpf_ .arg3_type = ARG_ANYTHING, };
- BPF_CALL_3(bpf_skb_change_head, struct sk_buff *, skb, u32, head_room, + BPF_CALL_3(sk_skb_change_tail, struct sk_buff *, skb, u32, new_len, u64, flags) { + int ret = __bpf_skb_change_tail(skb, new_len, flags); + + bpf_compute_data_end_sk_skb(skb); + return ret; + } + + static const struct bpf_func_proto sk_skb_change_tail_proto = { + .func = sk_skb_change_tail, + .gpl_only = false, + .ret_type = RET_INTEGER, + .arg1_type = ARG_PTR_TO_CTX, + .arg2_type = ARG_ANYTHING, + .arg3_type = ARG_ANYTHING, + }; + + static inline int __bpf_skb_change_head(struct sk_buff *skb, u32 head_room, + u64 flags) + { u32 max_len = __bpf_skb_max_len(skb); u32 new_len = skb->len + head_room; int ret; @@@ -2941,8 -3008,16 +3008,16 @@@ skb_reset_mac_header(skb); }
+ return ret; + } + + BPF_CALL_3(bpf_skb_change_head, struct sk_buff *, skb, u32, head_room, + u64, flags) + { + int ret = __bpf_skb_change_head(skb, head_room, flags); + bpf_compute_data_pointers(skb); - return 0; + return ret; }
static const struct bpf_func_proto bpf_skb_change_head_proto = { @@@ -2954,6 -3029,23 +3029,23 @@@ .arg3_type = ARG_ANYTHING, };
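The change_tail/change_head hunks in this file all follow one refactoring, continued in the sk_skb_change_head hunk just below: the geometry change moves into a shared __bpf_skb_* core, and each program type gets a thin wrapper that redoes its own pointer bookkeeping afterwards (bpf_compute_data_pointers() for tc programs, bpf_compute_data_end_sk_skb() for sk_skb programs, whose data_end lives in the TCP cb area). A userspace model of that shared-core-plus-wrappers shape, with all names invented:

    #include <stdio.h>

    struct buf { int len; int cached_end_a; int cached_end_b; };

    static int core_change_tail(struct buf *b, int new_len)
    {
        b->len = new_len;   /* the shared geometry change */
        return 0;
    }

    static int tc_change_tail(struct buf *b, int new_len)
    {
        int ret = core_change_tail(b, new_len);

        b->cached_end_a = b->len;   /* "tc-style" pointer recompute */
        return ret;
    }

    static int sk_change_tail(struct buf *b, int new_len)
    {
        int ret = core_change_tail(b, new_len);

        b->cached_end_b = b->len;   /* "sk_skb-style" recompute */
        return ret;
    }

    int main(void)
    {
        struct buf b = { 100, 100, 100 };

        tc_change_tail(&b, 80);
        sk_change_tail(&b, 60);
        /* 60 80 60: each wrapper refreshed only its own cache, which is
         * exactly why each program type needs its own recompute step. */
        printf("%d %d %d\n", b.len, b.cached_end_a, b.cached_end_b);
        return 0;
    }
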
+ BPF_CALL_3(sk_skb_change_head, struct sk_buff *, skb, u32, head_room, + u64, flags) + { + int ret = __bpf_skb_change_head(skb, head_room, flags); + + bpf_compute_data_end_sk_skb(skb); + return ret; + } + + static const struct bpf_func_proto sk_skb_change_head_proto = { + .func = sk_skb_change_head, + .gpl_only = false, + .ret_type = RET_INTEGER, + .arg1_type = ARG_PTR_TO_CTX, + .arg2_type = ARG_ANYTHING, + .arg3_type = ARG_ANYTHING, + }; static unsigned long xdp_get_metalen(const struct xdp_buff *xdp) { return xdp_data_meta_unsupported(xdp) ? 0 : @@@ -3046,12 -3138,16 +3138,16 @@@ static int __bpf_tx_xdp(struct net_devi u32 index) { struct xdp_frame *xdpf; - int sent; + int err, sent;
if (!dev->netdev_ops->ndo_xdp_xmit) { return -EOPNOTSUPP; }
+ err = xdp_ok_fwd_dev(dev, xdp->data_end - xdp->data); + if (unlikely(err)) + return err; + xdpf = convert_to_xdp_frame(xdp); if (unlikely(!xdpf)) return -EOVERFLOW; @@@ -3285,7 -3381,8 +3381,8 @@@ int xdp_do_generic_redirect(struct net_ goto err; }
- if (unlikely((err = __xdp_generic_ok_fwd_dev(skb, fwd)))) + err = xdp_ok_fwd_dev(fwd, skb->len); + if (unlikely(err)) goto err;
skb->dev = fwd; @@@ -3582,7 -3679,7 +3679,7 @@@ BPF_CALL_3(bpf_skb_set_tunnel_opt, stru if (unlikely(size > IP_TUNNEL_OPTS_MAX)) return -ENOMEM;
- ip_tunnel_info_opts_set(info, from, size); + ip_tunnel_info_opts_set(info, from, size, TUNNEL_OPTIONS_PRESENT);
return 0; } @@@ -4439,10 -4536,10 +4536,10 @@@ static const struct bpf_func_proto bpf_ .arg4_type = ARG_CONST_SIZE };
+ #if IS_ENABLED(CONFIG_IPV6_SEG6_BPF) BPF_CALL_4(bpf_lwt_seg6_store_bytes, struct sk_buff *, skb, u32, offset, const void *, from, u32, len) { - #if IS_ENABLED(CONFIG_IPV6_SEG6_BPF) struct seg6_bpf_srh_state *srh_state = this_cpu_ptr(&seg6_bpf_srh_states); void *srh_tlvs, *srh_end, *ptr; @@@ -4468,9 -4565,6 +4565,6 @@@
memcpy(skb->data + offset, from, len); return 0; - #else /* CONFIG_IPV6_SEG6_BPF */ - return -EOPNOTSUPP; - #endif }
static const struct bpf_func_proto bpf_lwt_seg6_store_bytes_proto = { @@@ -4486,7 -4580,6 +4580,6 @@@ BPF_CALL_4(bpf_lwt_seg6_action, struct sk_buff *, skb, u32, action, void *, param, u32, param_len) { - #if IS_ENABLED(CONFIG_IPV6_SEG6_BPF) struct seg6_bpf_srh_state *srh_state = this_cpu_ptr(&seg6_bpf_srh_states); struct ipv6_sr_hdr *srh; @@@ -4534,9 -4627,6 +4627,6 @@@ default: return -EINVAL; } - #else /* CONFIG_IPV6_SEG6_BPF */ - return -EOPNOTSUPP; - #endif }
static const struct bpf_func_proto bpf_lwt_seg6_action_proto = { @@@ -4552,7 -4642,6 +4642,6 @@@ BPF_CALL_3(bpf_lwt_seg6_adjust_srh, struct sk_buff *, skb, u32, offset, s32, len) { - #if IS_ENABLED(CONFIG_IPV6_SEG6_BPF) struct seg6_bpf_srh_state *srh_state = this_cpu_ptr(&seg6_bpf_srh_states); void *srh_end, *srh_tlvs, *ptr; @@@ -4596,9 -4685,6 +4685,6 @@@ srh_state->hdrlen += len; srh_state->valid = 0; return 0; - #else /* CONFIG_IPV6_SEG6_BPF */ - return -EOPNOTSUPP; - #endif }
static const struct bpf_func_proto bpf_lwt_seg6_adjust_srh_proto = { @@@ -4609,6 -4695,7 +4695,7 @@@ .arg2_type = ARG_ANYTHING, .arg3_type = ARG_ANYTHING, }; + #endif /* CONFIG_IPV6_SEG6_BPF */
bool bpf_helper_changes_pkt_data(void *func) { @@@ -4617,9 -4704,12 +4704,12 @@@ func == bpf_skb_store_bytes || func == bpf_skb_change_proto || func == bpf_skb_change_head || + func == sk_skb_change_head || func == bpf_skb_change_tail || + func == sk_skb_change_tail || func == bpf_skb_adjust_room || func == bpf_skb_pull_data || + func == sk_skb_pull_data || func == bpf_clone_redirect || func == bpf_l3_csum_replace || func == bpf_l4_csum_replace || @@@ -4627,11 -4717,12 +4717,12 @@@ func == bpf_xdp_adjust_meta || func == bpf_msg_pull_data || func == bpf_xdp_adjust_tail || - func == bpf_lwt_push_encap || + #if IS_ENABLED(CONFIG_IPV6_SEG6_BPF) func == bpf_lwt_seg6_store_bytes || func == bpf_lwt_seg6_adjust_srh || - func == bpf_lwt_seg6_action - ) + func == bpf_lwt_seg6_action || + #endif + func == bpf_lwt_push_encap) return true;
return false; @@@ -4660,7 -4751,6 +4751,7 @@@ bpf_base_func_proto(enum bpf_func_id fu case BPF_FUNC_trace_printk: if (capable(CAP_SYS_ADMIN)) return bpf_get_trace_printk_proto(); + /* else: fall through */ default: return NULL; } @@@ -4872,11 -4962,11 +4963,11 @@@ sk_skb_func_proto(enum bpf_func_id func case BPF_FUNC_skb_load_bytes: return &bpf_skb_load_bytes_proto; case BPF_FUNC_skb_pull_data: - return &bpf_skb_pull_data_proto; + return &sk_skb_pull_data_proto; case BPF_FUNC_skb_change_tail: - return &bpf_skb_change_tail_proto; + return &sk_skb_change_tail_proto; case BPF_FUNC_skb_change_head: - return &bpf_skb_change_head_proto; + return &sk_skb_change_head_proto; case BPF_FUNC_get_socket_cookie: return &bpf_get_socket_cookie_proto; case BPF_FUNC_get_socket_uid: @@@ -4967,12 -5057,14 +5058,14 @@@ static const struct bpf_func_proto lwt_seg6local_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) { switch (func_id) { + #if IS_ENABLED(CONFIG_IPV6_SEG6_BPF) case BPF_FUNC_lwt_seg6_store_bytes: return &bpf_lwt_seg6_store_bytes_proto; case BPF_FUNC_lwt_seg6_action: return &bpf_lwt_seg6_action_proto; case BPF_FUNC_lwt_seg6_adjust_srh: return &bpf_lwt_seg6_adjust_srh_proto; + #endif default: return lwt_out_func_proto(func_id, prog); } diff --combined net/core/skbuff.c index c4e24ac27464,8e51f8555e11..0c1a00672ba9 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c @@@ -858,6 -858,7 +858,7 @@@ static struct sk_buff *__skb_clone(stru n->cloned = 1; n->nohdr = 0; n->peeked = 0; + C(pfmemalloc); n->destructor = NULL; C(tail); C(end); @@@ -3815,14 -3816,14 +3816,14 @@@ err } EXPORT_SYMBOL_GPL(skb_segment);
-int skb_gro_receive(struct sk_buff **head, struct sk_buff *skb) +int skb_gro_receive(struct sk_buff *p, struct sk_buff *skb) { struct skb_shared_info *pinfo, *skbinfo = skb_shinfo(skb); unsigned int offset = skb_gro_offset(skb); unsigned int headlen = skb_headlen(skb); unsigned int len = skb_gro_len(skb); - struct sk_buff *lp, *p = *head; unsigned int delta_truesize; + struct sk_buff *lp;
if (unlikely(p->len + len >= 65536)) return -E2BIG; @@@ -4898,6 -4899,7 +4899,6 @@@ EXPORT_SYMBOL(skb_try_coalesce) */ void skb_scrub_packet(struct sk_buff *skb, bool xnet) { - skb->tstamp = 0; skb->pkt_type = PACKET_HOST; skb->skb_iif = 0; skb->ignore_df = 0; @@@ -4910,8 -4912,8 +4911,8 @@@ return;
ipvs_reset(skb); - skb_orphan(skb); skb->mark = 0; + skb->tstamp = 0; } EXPORT_SYMBOL_GPL(skb_scrub_packet);
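In the skb_scrub_packet() hunks above, the tstamp clear moves out of the unconditional section into the xnet-only tail, and the skb_orphan() call is dropped, so a packet that stays inside one namespace keeps both its timestamp and its socket reference; only namespace-local state such as mark and tstamp is wiped when crossing namespaces. A toy model of scrub-only-across-the-boundary, with an invented struct and fields:

    #include <stdbool.h>
    #include <stdio.h>

    struct scrub_pkt { unsigned long tstamp; unsigned int mark; int pkt_type; };

    static void scrub_packet(struct scrub_pkt *p, bool xnet)
    {
        p->pkt_type = 0;   /* always normalised */
        if (!xnet)
            return;        /* same namespace: keep mark and tstamp */
        p->mark = 0;
        p->tstamp = 0;
    }

    int main(void)
    {
        struct scrub_pkt p = { .tstamp = 12345, .mark = 7, .pkt_type = 3 };

        scrub_packet(&p, false);
        printf("%lu %u\n", p.tstamp, p.mark);  /* 12345 7: preserved */
        scrub_packet(&p, true);
        printf("%lu %u\n", p.tstamp, p.mark);  /* 0 0: cleared cross-netns */
        return 0;
    }
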
diff --combined net/ipv4/inet_fragment.c index 316518f87294,1e4cf3ab560f..d3162baca9f1 --- a/net/ipv4/inet_fragment.c +++ b/net/ipv4/inet_fragment.c @@@ -20,7 -20,6 +20,7 @@@ #include <linux/skbuff.h> #include <linux/rtnetlink.h> #include <linux/slab.h> +#include <linux/rhashtable.h>
#include <net/sock.h> #include <net/inet_frag.h> @@@ -91,7 -90,7 +91,7 @@@ static void inet_frags_free_cb(void *pt
void inet_frags_exit_net(struct netns_frags *nf) { - nf->low_thresh = 0; /* prevent creation of new frags */ + nf->high_thresh = 0; /* prevent creation of new frags */
rhashtable_free_and_destroy(&nf->rhashtable, inet_frags_free_cb, NULL); } diff --combined net/ipv4/tcp.c index e3704a49164b,4491faf83f4f..bce53b1728a6 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c @@@ -817,7 -817,8 +817,7 @@@ ssize_t tcp_splice_read(struct socket * * This occurs when user tries to read * from never connected socket. */ - if (!sock_flag(sk, SOCK_DONE)) - ret = -ENOTCONN; + ret = -ENOTCONN; break; } if (!timeo) { @@@ -1240,7 -1241,7 +1240,7 @@@ int tcp_sendmsg_locked(struct sock *sk /* 'common' sending to sendq */ }
- sockc.tsflags = sk->sk_tsflags; + sockcm_init(&sockc, sk); if (msg->msg_controllen) { err = sock_cmsg_send(sk, msg, &sockc); if (unlikely(err)) { @@@ -1274,6 -1275,9 +1274,6 @@@ restart int linear;
new_segment: - /* Allocate new segment. If the interface is SG, - * allocate skb fitting to single page. - */ if (!sk_stream_memory_free(sk)) goto wait_for_sndbuf;
@@@ -1994,7 -1998,7 +1994,7 @@@ int tcp_recvmsg(struct sock *sk, struc * shouldn't happen. */ if (WARN(before(*seq, TCP_SKB_CB(skb)->seq), - "recvmsg bug: copied %X seq %X rcvnxt %X fl %X\n", + "TCP recvmsg seq # bug: copied %X, seq %X, rcvnxt %X, fl %X\n", *seq, TCP_SKB_CB(skb)->seq, tp->rcv_nxt, flags)) break; @@@ -2009,7 -2013,7 +2009,7 @@@ if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) goto found_fin_ok; WARN(!(flags & MSG_PEEK), - "recvmsg bug 2: copied %X seq %X rcvnxt %X fl %X\n", + "TCP recvmsg seq # bug 2: copied %X, seq %X, rcvnxt %X, fl %X\n", *seq, TCP_SKB_CB(skb)->seq, tp->rcv_nxt, flags); }
@@@ -2038,10 -2042,13 +2038,10 @@@ break;
if (sk->sk_state == TCP_CLOSE) { - if (!sock_flag(sk, SOCK_DONE)) { - /* This occurs when user tries to read - * from never connected socket. - */ - copied = -ENOTCONN; - break; - } + /* This occurs when user tries to read + * from never connected socket. + */ + copied = -ENOTCONN; break; }
@@@ -2555,6 -2562,8 +2555,8 @@@ int tcp_disconnect(struct sock *sk, in
tcp_clear_xmit_timers(sk); __skb_queue_purge(&sk->sk_receive_queue); + tp->copied_seq = tp->rcv_nxt; + tp->urg_data = 0; tcp_write_queue_purge(sk); tcp_fastopen_active_disable_ofo_check(sk); skb_rbtree_purge(&tp->out_of_order_queue); @@@ -2567,7 -2576,6 +2569,7 @@@ sk->sk_shutdown = 0; sock_reset_flag(sk, SOCK_DONE); tp->srtt_us = 0; + tp->rcv_rtt_last_tsecr = 0; tp->write_seq += tp->max_window + 2; if (tp->write_seq == 0) tp->write_seq = 1; @@@ -2815,14 -2823,17 +2817,17 @@@ static int do_tcp_setsockopt(struct soc case TCP_REPAIR: if (!tcp_can_repair_sock(sk)) err = -EPERM; - else if (val == 1) { + else if (val == TCP_REPAIR_ON) { tp->repair = 1; sk->sk_reuse = SK_FORCE_REUSE; tp->repair_queue = TCP_NO_QUEUE; - } else if (val == 0) { + } else if (val == TCP_REPAIR_OFF) { tp->repair = 0; sk->sk_reuse = SK_NO_REUSE; tcp_send_window_probe(sk); + } else if (val == TCP_REPAIR_OFF_NO_WP) { + tp->repair = 0; + sk->sk_reuse = SK_NO_REUSE; } else err = -EINVAL;
@@@ -3714,8 -3725,7 +3719,7 @@@ int tcp_abort(struct sock *sk, int err struct request_sock *req = inet_reqsk(sk);
local_bh_disable(); - inet_csk_reqsk_queue_drop_and_put(req->rsk_listener, - req); + inet_csk_reqsk_queue_drop(req->rsk_listener, req); local_bh_enable(); return 0; } diff --combined net/ipv4/tcp_ipv4.c index dc415c66a33a,3b2711e33e4c..9e041fa5c545 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c @@@ -155,13 -155,25 +155,26 @@@ int tcp_twsk_unique(struct sock *sk, st and use initial timestamp retrieved from peer table. */ if (tcptw->tw_ts_recent_stamp && - (!twp || (reuse && get_seconds() - tcptw->tw_ts_recent_stamp > 1))) { + (!twp || (reuse && time_after32(ktime_get_seconds(), + tcptw->tw_ts_recent_stamp)))) { - tp->write_seq = tcptw->tw_snd_nxt + 65535 + 2; - if (tp->write_seq == 0) - tp->write_seq = 1; - tp->rx_opt.ts_recent = tcptw->tw_ts_recent; - tp->rx_opt.ts_recent_stamp = tcptw->tw_ts_recent_stamp; + /* In case of repair and re-using TIME-WAIT sockets we still + * want to be sure that it is safe as above but honor the + * sequence numbers and time stamps set as part of the repair + * process. + * + * Without this check re-using a TIME-WAIT socket with TCP + * repair would accumulate a -1 on the repair assigned + * sequence number. The first time it is reused the sequence + * is -1, the second time -2, etc. This fixes that issue + * without appearing to create any others. + */ + if (likely(!tp->repair)) { + tp->write_seq = tcptw->tw_snd_nxt + 65535 + 2; + if (tp->write_seq == 0) + tp->write_seq = 1; + tp->rx_opt.ts_recent = tcptw->tw_ts_recent; + tp->rx_opt.ts_recent_stamp = tcptw->tw_ts_recent_stamp; + } sock_hold(sktw); return 1; } diff --combined net/ipv4/tcp_output.c index f8f6129160dd,00e5a300ddb9..6cbab56e7407 --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c @@@ -973,6 -973,17 +973,6 @@@ enum hrtimer_restart tcp_pace_kick(stru return HRTIMER_NORESTART; }
-/* BBR congestion control needs pacing. - * Same remark for SO_MAX_PACING_RATE. - * sch_fq packet scheduler is efficiently handling pacing, - * but is not always installed/used. - * Return true if TCP stack should pace packets itself. - */ -static bool tcp_needs_internal_pacing(const struct sock *sk) -{ - return smp_load_acquire(&sk->sk_pacing_status) == SK_PACING_NEEDED; -} - static void tcp_internal_pacing(struct sock *sk, const struct sk_buff *skb) { u64 len_ns; @@@ -984,6 -995,9 +984,6 @@@ if (!rate || rate == ~0U) return;
- /* Should account for header sizes as sch_fq does, - * but lets make things simple. - */ len_ns = (u64)skb->len * NSEC_PER_SEC; do_div(len_ns, rate); hrtimer_start(&tcp_sk(sk)->pacing_timer, @@@ -3509,8 -3523,6 +3509,6 @@@ void tcp_send_delayed_ack(struct sock * int ato = icsk->icsk_ack.ato; unsigned long timeout;
- tcp_ca_event(sk, CA_EVENT_DELAYED_ACK); - if (ato > TCP_DELACK_MIN) { const struct tcp_sock *tp = tcp_sk(sk); int max_ato = HZ / 2; @@@ -3567,8 -3579,6 +3565,6 @@@ void tcp_send_ack(struct sock *sk if (sk->sk_state == TCP_CLOSE) return;
- tcp_ca_event(sk, CA_EVENT_NON_DELAYED_ACK); - /* We are not putting this on the write queue, so * tcp_transmit_skb() will set the ownership to this * sock. diff --combined net/ipv6/ip6_gre.c index 367177786e34,cd2cfb04e5d8..fc7dd3a04360 --- a/net/ipv6/ip6_gre.c +++ b/net/ipv6/ip6_gre.c @@@ -927,7 -927,6 +927,6 @@@ tx_err static netdev_tx_t ip6erspan_tunnel_xmit(struct sk_buff *skb, struct net_device *dev) { - struct ipv6hdr *ipv6h = ipv6_hdr(skb); struct ip6_tnl *t = netdev_priv(dev); struct dst_entry *dst = skb_dst(skb); struct net_device_stats *stats; @@@ -990,8 -989,6 +989,8 @@@ fl6.flowi6_uid = sock_net_uid(dev_net(dev), NULL);
dsfield = key->tos; + if (!(tun_info->key.tun_flags & TUNNEL_ERSPAN_OPT)) + goto tx_err; md = ip_tunnel_info_opts(tun_info); if (!md) goto tx_err; @@@ -1012,6 -1009,8 +1011,8 @@@ goto tx_err; } } else { + struct ipv6hdr *ipv6h = ipv6_hdr(skb); + switch (skb->protocol) { case htons(ETH_P_IP): memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt)); diff --combined net/ipv6/ipv6_sockglue.c index fabe3ba1bddc,568ca4187cd1..c0cac9cc3a28 --- a/net/ipv6/ipv6_sockglue.c +++ b/net/ipv6/ipv6_sockglue.c @@@ -398,6 -398,12 +398,12 @@@ static int do_ipv6_setsockopt(struct so case IPV6_DSTOPTS: { struct ipv6_txoptions *opt; + struct ipv6_opt_hdr *new = NULL; + + /* hop-by-hop / destination options are privileged option */ + retv = -EPERM; + if (optname != IPV6_RTHDR && !ns_capable(net->user_ns, CAP_NET_RAW)) + break;
/* remove any sticky options header with a zero option * length, per RFC3542. @@@ -409,17 -415,22 +415,22 @@@ else if (optlen < sizeof(struct ipv6_opt_hdr) || optlen & 0x7 || optlen > 8 * 255) goto e_inval; - - /* hop-by-hop / destination options are privileged option */ - retv = -EPERM; - if (optname != IPV6_RTHDR && !ns_capable(net->user_ns, CAP_NET_RAW)) - break; + else { + new = memdup_user(optval, optlen); + if (IS_ERR(new)) { + retv = PTR_ERR(new); + break; + } + if (unlikely(ipv6_optlen(new) > optlen)) { + kfree(new); + goto e_inval; + } + }
opt = rcu_dereference_protected(np->opt, lockdep_sock_is_held(sk)); - opt = ipv6_renew_options(sk, opt, optname, - (struct ipv6_opt_hdr __user *)optval, - optlen); + opt = ipv6_renew_options(sk, opt, optname, new); + kfree(new); if (IS_ERR(opt)) { retv = PTR_ERR(opt); break; @@@ -489,6 -500,7 +500,6 @@@ sticky_done struct ipv6_txoptions *opt = NULL; struct msghdr msg; struct flowi6 fl6; - struct sockcm_cookie sockc_junk; struct ipcm6_cookie ipc6;
memset(&fl6, 0, sizeof(fl6)); @@@ -521,7 -533,7 +532,7 @@@ msg.msg_control = (void *)(opt+1); ipc6.opt = opt;
- retv = ip6_datagram_send_ctl(net, sk, &msg, &fl6, &ipc6, &sockc_junk); + retv = ip6_datagram_send_ctl(net, sk, &msg, &fl6, &ipc6); if (retv) goto done; update: @@@ -717,8 -729,9 +728,9 @@@ done struct sockaddr_in6 *psin6;
psin6 = (struct sockaddr_in6 *)&greqs.gsr_group; - retv = ipv6_sock_mc_join(sk, greqs.gsr_interface, - &psin6->sin6_addr); + retv = ipv6_sock_mc_join_ssm(sk, greqs.gsr_interface, + &psin6->sin6_addr, + MCAST_INCLUDE); /* prior join w/ different source is ok */ if (retv && retv != -EADDRINUSE) break; diff --combined net/netfilter/nf_conntrack_core.c index 85ab2fd6a665,3d5280425027..805500197c22 --- a/net/netfilter/nf_conntrack_core.c +++ b/net/netfilter/nf_conntrack_core.c @@@ -1683,41 -1683,6 +1683,41 @@@ static int nf_conntrack_update(struct n return 0; }
+static bool nf_conntrack_get_tuple_skb(struct nf_conntrack_tuple *dst_tuple, + const struct sk_buff *skb) +{ + const struct nf_conntrack_tuple *src_tuple; + const struct nf_conntrack_tuple_hash *hash; + struct nf_conntrack_tuple srctuple; + enum ip_conntrack_info ctinfo; + struct nf_conn *ct; + + ct = nf_ct_get(skb, &ctinfo); + if (ct) { + src_tuple = nf_ct_tuple(ct, CTINFO2DIR(ctinfo)); + memcpy(dst_tuple, src_tuple, sizeof(*dst_tuple)); + return true; + } + + if (!nf_ct_get_tuplepr(skb, skb_network_offset(skb), + NFPROTO_IPV4, dev_net(skb->dev), + &srctuple)) + return false; + + hash = nf_conntrack_find_get(dev_net(skb->dev), + &nf_ct_zone_dflt, + &srctuple); + if (!hash) + return false; + + ct = nf_ct_tuplehash_to_ctrack(hash); + src_tuple = nf_ct_tuple(ct, !hash->tuple.dst.dir); + memcpy(dst_tuple, src_tuple, sizeof(*dst_tuple)); + nf_ct_put(ct); + + return true; +} + /* Bring out ya dead! */ static struct nf_conn * get_next_corpse(int (*iter)(struct nf_conn *i, void *data), @@@ -2078,7 -2043,7 +2078,7 @@@ int nf_conntrack_set_hashsize(const cha return -EOPNOTSUPP;
/* On boot, we can set this without any fancy locking. */ - if (!nf_conntrack_htable_size) + if (!nf_conntrack_hash) return param_set_uint(val, kp);
rc = kstrtouint(val, 0, &hashsize); @@@ -2239,7 -2204,6 +2239,7 @@@ err_cachep static struct nf_ct_hook nf_conntrack_hook = { .update = nf_conntrack_update, .destroy = destroy_conntrack, + .get_tuple_skb = nf_conntrack_get_tuple_skb, };
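Wiring nf_conntrack_get_tuple_skb() into the nf_ct_hook structure above is the usual pattern for optional kernel subsystems: core code calls through a pointer table that the module installs at init time, and callers must NULL-check before calling through it. A compact userspace model of such an ops table; the names are invented and not the netfilter API:

    #include <stdbool.h>
    #include <stdio.h>

    struct tuple { int src, dst; };

    struct ct_hook {
        bool (*get_tuple)(struct tuple *out, int pkt_id);
    };

    static const struct ct_hook *ct_hook;   /* NULL until a provider loads */

    static bool provider_get_tuple(struct tuple *out, int pkt_id)
    {
        out->src = pkt_id;
        out->dst = pkt_id + 1;
        return true;
    }

    static const struct ct_hook provider_hook = {
        .get_tuple = provider_get_tuple,
    };

    int main(void)
    {
        struct tuple t;

        ct_hook = &provider_hook;           /* "module init" registers */
        if (ct_hook && ct_hook->get_tuple(&t, 41))
            printf("%d -> %d\n", t.src, t.dst);
        return 0;
    }
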
void nf_conntrack_init_end(void) diff --combined net/packet/af_packet.c index 00189a3b07f2,9b27d0cd766d..e3e00d3a972e --- a/net/packet/af_packet.c +++ b/net/packet/af_packet.c @@@ -275,10 -275,9 +275,10 @@@ static bool packet_use_direct_xmit(cons return po->xmit == packet_direct_xmit; }
-static u16 __packet_pick_tx_queue(struct net_device *dev, struct sk_buff *skb) +static u16 __packet_pick_tx_queue(struct net_device *dev, struct sk_buff *skb, + struct net_device *sb_dev) { - return (u16) raw_smp_processor_id() % dev->real_num_tx_queues; + return dev_pick_tx_cpu_id(dev, skb, sb_dev, NULL); }
static u16 packet_pick_tx_queue(struct sk_buff *skb) @@@ -292,7 -291,7 +292,7 @@@ __packet_pick_tx_queue); queue_index = netdev_cap_txqueue(dev, queue_index); } else { - queue_index = __packet_pick_tx_queue(dev, skb); + queue_index = __packet_pick_tx_queue(dev, skb, NULL); }
return queue_index; @@@ -1952,7 -1951,7 +1952,7 @@@ retry goto out_unlock; }
- sockc.tsflags = sk->sk_tsflags; + sockcm_init(&sockc, sk); if (msg->msg_controllen) { err = sock_cmsg_send(sk, msg, &sockc); if (unlikely(err)) @@@ -1963,7 -1962,6 +1963,7 @@@ skb->dev = dev; skb->priority = sk->sk_priority; skb->mark = sk->sk_mark; + skb->tstamp = sockc.transmit_time;
sock_tx_timestamp(sk, sockc.tsflags, &skb_shinfo(skb)->tx_flags);
@@@ -2459,7 -2457,6 +2459,7 @@@ static int tpacket_fill_skb(struct pack skb->dev = dev; skb->priority = po->sk.sk_priority; skb->mark = po->sk.sk_mark; + skb->tstamp = sockc->transmit_time; sock_tx_timestamp(&po->sk, sockc->tsflags, &skb_shinfo(skb)->tx_flags); skb_shinfo(skb)->destructor_arg = ph.raw;
@@@ -2636,7 -2633,7 +2636,7 @@@ static int tpacket_snd(struct packet_so if (unlikely(!(dev->flags & IFF_UP))) goto out_put;
- sockc.tsflags = po->sk.sk_tsflags; + sockcm_init(&sockc, &po->sk); if (msg->msg_controllen) { err = sock_cmsg_send(&po->sk, msg, &sockc); if (unlikely(err)) @@@ -2832,7 -2829,7 +2832,7 @@@ static int packet_snd(struct socket *so if (unlikely(!(dev->flags & IFF_UP))) goto out_unlock;
- sockc.tsflags = sk->sk_tsflags; + sockcm_init(&sockc, sk); sockc.mark = sk->sk_mark; if (msg->msg_controllen) { err = sock_cmsg_send(sk, msg, &sockc); @@@ -2881,6 -2878,8 +2881,8 @@@ goto out_free; } else if (reserve) { skb_reserve(skb, -reserve); + if (len < reserve) + skb_reset_network_header(skb); }
/* Returns -EFAULT on error */ @@@ -2906,7 -2905,6 +2908,7 @@@ skb->dev = dev; skb->priority = sk->sk_priority; skb->mark = sockc.mark; + skb->tstamp = sockc.transmit_time;
if (has_vnet_hdr) { err = virtio_net_hdr_to_skb(skb, &vnet_hdr, vio_le()); diff --combined net/sched/act_csum.c index bd232d3bd022,6e7124e57918..4e8c383f379e --- a/net/sched/act_csum.c +++ b/net/sched/act_csum.c @@@ -46,8 -46,7 +46,8 @@@ static struct tc_action_ops act_csum_op
static int tcf_csum_init(struct net *net, struct nlattr *nla, struct nlattr *est, struct tc_action **a, int ovr, - int bind, struct netlink_ext_ack *extack) + int bind, bool rtnl_held, + struct netlink_ext_ack *extack) { struct tc_action_net *tn = net_generic(net, csum_net_id); struct tcf_csum_params *params_old, *params_new; @@@ -67,24 -66,18 +67,24 @@@ return -EINVAL; parm = nla_data(tb[TCA_CSUM_PARMS]);
- if (!tcf_idr_check(tn, parm->index, a, bind)) { + err = tcf_idr_check_alloc(tn, &parm->index, a, bind); + if (!err) { ret = tcf_idr_create(tn, parm->index, est, a, &act_csum_ops, bind, true); - if (ret) + if (ret) { + tcf_idr_cleanup(tn, parm->index); return ret; + } ret = ACT_P_CREATED; - } else { + } else if (err > 0) { if (bind)/* dont override defaults */ return 0; - tcf_idr_release(*a, bind); - if (!ovr) + if (!ovr) { + tcf_idr_release(*a, bind); return -EEXIST; + } + } else { + return err; }
p = to_tcf_csum(*a); @@@ -92,12 -85,13 +92,12 @@@
params_new = kzalloc(sizeof(*params_new), GFP_KERNEL); if (unlikely(!params_new)) { - if (ret == ACT_P_CREATED) - tcf_idr_release(*a, bind); + tcf_idr_release(*a, bind); return -ENOMEM; } params_old = rtnl_dereference(p->params);
- params_new->action = parm->action; + p->tcf_action = parm->action; params_new->update_flags = parm->update_flags; rcu_assign_pointer(p->params, params_new); if (params_old) @@@ -567,7 -561,7 +567,7 @@@ static int tcf_csum(struct sk_buff *skb tcf_lastuse_update(&p->tcf_tm); bstats_cpu_update(this_cpu_ptr(p->common.cpu_bstats), skb);
- action = params->action; + action = READ_ONCE(p->tcf_action); if (unlikely(action == TC_ACT_SHOT)) goto drop_stats;
@@@ -603,13 -597,13 +603,13 @@@ static int tcf_csum_dump(struct sk_buf struct tcf_csum_params *params; struct tc_csum opt = { .index = p->tcf_index, - .refcnt = p->tcf_refcnt - ref, - .bindcnt = p->tcf_bindcnt - bind, + .refcnt = refcount_read(&p->tcf_refcnt) - ref, + .bindcnt = atomic_read(&p->tcf_bindcnt) - bind, + .action = p->tcf_action, }; struct tcf_t t;
params = rtnl_dereference(p->params); - opt.action = params->action; opt.update_flags = params->update_flags;
if (nla_put(skb, TCA_CSUM_PARMS, sizeof(opt), &opt)) @@@ -659,13 -653,6 +659,13 @@@ static size_t tcf_csum_get_fill_size(co return nla_total_size(sizeof(struct tc_csum)); }
+static int tcf_csum_delete(struct net *net, u32 index) +{ + struct tc_action_net *tn = net_generic(net, csum_net_id); + + return tcf_idr_delete_index(tn, index); +} + static struct tc_action_ops act_csum_ops = { .kind = "csum", .type = TCA_ACT_CSUM, @@@ -677,7 -664,6 +677,7 @@@ .walk = tcf_csum_walker, .lookup = tcf_csum_search, .get_fill_size = tcf_csum_get_fill_size, + .delete = tcf_csum_delete, .size = sizeof(struct tcf_csum), };
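Several act_csum hunks above share one theme: the action code now lives in the action itself and the datapath fetches it with READ_ONCE(), while the dump path reads the new refcount_t/atomic_t counters with refcount_read()/atomic_read(), so a control-plane update racing with packets in flight is a single well-defined word access rather than a potentially torn read. A userspace model of that single-word publish/consume, using C11 atomics in place of READ_ONCE/WRITE_ONCE (the ACT_* values mimic the TC action codes for illustration):

    #include <stdatomic.h>
    #include <stdio.h>

    enum { ACT_OK = 0, ACT_SHOT = 2 };

    static _Atomic int tcf_action = ACT_OK;

    static int datapath(int pkt)
    {
        /* one atomic word load per packet, no lock */
        int action = atomic_load_explicit(&tcf_action, memory_order_relaxed);

        return action == ACT_SHOT ? -1 : pkt;   /* drop or pass */
    }

    static void control_plane_update(int new_action)
    {
        atomic_store_explicit(&tcf_action, new_action, memory_order_relaxed);
    }

    int main(void)
    {
        printf("%d\n", datapath(1));    /*  1: passed */
        control_plane_update(ACT_SHOT);
        printf("%d\n", datapath(1));    /* -1: dropped */
        return 0;
    }
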
diff --combined net/sched/act_tunnel_key.c index 22f26e9ea8f1,9bc6c2ae98a5..f811850fd1d0 --- a/net/sched/act_tunnel_key.c +++ b/net/sched/act_tunnel_key.c @@@ -13,7 -13,6 +13,7 @@@ #include <linux/kernel.h> #include <linux/skbuff.h> #include <linux/rtnetlink.h> +#include <net/geneve.h> #include <net/netlink.h> #include <net/pkt_sched.h> #include <net/dst.h> @@@ -37,7 -36,7 +37,7 @@@ static int tunnel_key_act(struct sk_buf
tcf_lastuse_update(&t->tcf_tm); bstats_cpu_update(this_cpu_ptr(t->common.cpu_bstats), skb); - action = params->action; + action = READ_ONCE(t->tcf_action);
switch (params->tcft_action) { case TCA_TUNNEL_KEY_ACT_RELEASE: @@@ -58,135 -57,6 +58,135 @@@ return action; }
+static const struct nla_policy +enc_opts_policy[TCA_TUNNEL_KEY_ENC_OPTS_MAX + 1] = { + [TCA_TUNNEL_KEY_ENC_OPTS_GENEVE] = { .type = NLA_NESTED }, +}; + +static const struct nla_policy +geneve_opt_policy[TCA_TUNNEL_KEY_ENC_OPT_GENEVE_MAX + 1] = { + [TCA_TUNNEL_KEY_ENC_OPT_GENEVE_CLASS] = { .type = NLA_U16 }, + [TCA_TUNNEL_KEY_ENC_OPT_GENEVE_TYPE] = { .type = NLA_U8 }, + [TCA_TUNNEL_KEY_ENC_OPT_GENEVE_DATA] = { .type = NLA_BINARY, + .len = 128 }, +}; + +static int +tunnel_key_copy_geneve_opt(const struct nlattr *nla, void *dst, int dst_len, + struct netlink_ext_ack *extack) +{ + struct nlattr *tb[TCA_TUNNEL_KEY_ENC_OPT_GENEVE_MAX + 1]; + int err, data_len, opt_len; + u8 *data; + + err = nla_parse_nested(tb, TCA_TUNNEL_KEY_ENC_OPT_GENEVE_MAX, + nla, geneve_opt_policy, extack); + if (err < 0) + return err; + + if (!tb[TCA_TUNNEL_KEY_ENC_OPT_GENEVE_CLASS] || + !tb[TCA_TUNNEL_KEY_ENC_OPT_GENEVE_TYPE] || + !tb[TCA_TUNNEL_KEY_ENC_OPT_GENEVE_DATA]) { + NL_SET_ERR_MSG(extack, "Missing tunnel key geneve option class, type or data"); + return -EINVAL; + } + + data = nla_data(tb[TCA_TUNNEL_KEY_ENC_OPT_GENEVE_DATA]); + data_len = nla_len(tb[TCA_TUNNEL_KEY_ENC_OPT_GENEVE_DATA]); + if (data_len < 4) { + NL_SET_ERR_MSG(extack, "Tunnel key geneve option data is less than 4 bytes long"); + return -ERANGE; + } + if (data_len % 4) { + NL_SET_ERR_MSG(extack, "Tunnel key geneve option data is not a multiple of 4 bytes long"); + return -ERANGE; + } + + opt_len = sizeof(struct geneve_opt) + data_len; + if (dst) { + struct geneve_opt *opt = dst; + + WARN_ON(dst_len < opt_len); + + opt->opt_class = + nla_get_be16(tb[TCA_TUNNEL_KEY_ENC_OPT_GENEVE_CLASS]); + opt->type = nla_get_u8(tb[TCA_TUNNEL_KEY_ENC_OPT_GENEVE_TYPE]); + opt->length = data_len / 4; /* length is in units of 4 bytes */ + opt->r1 = 0; + opt->r2 = 0; + opt->r3 = 0; + + memcpy(opt + 1, data, data_len); + } + + return opt_len; +} + +static int tunnel_key_copy_opts(const struct nlattr *nla, u8 *dst, + int dst_len, struct netlink_ext_ack *extack) +{ + int err, rem, opt_len, len = nla_len(nla), opts_len = 0; + const struct nlattr *attr, *head = nla_data(nla); + + err = nla_validate(head, len, TCA_TUNNEL_KEY_ENC_OPTS_MAX, + enc_opts_policy, extack); + if (err) + return err; + + nla_for_each_attr(attr, head, len, rem) { + switch (nla_type(attr)) { + case TCA_TUNNEL_KEY_ENC_OPTS_GENEVE: + opt_len = tunnel_key_copy_geneve_opt(attr, dst, + dst_len, extack); + if (opt_len < 0) + return opt_len; + opts_len += opt_len; + if (dst) { + dst_len -= opt_len; + dst += opt_len; + } + break; + } + } + + if (!opts_len) { + NL_SET_ERR_MSG(extack, "Empty list of tunnel options"); + return -EINVAL; + } + + if (rem > 0) { + NL_SET_ERR_MSG(extack, "Trailing data after parsing tunnel key options attributes"); + return -EINVAL; + } + + return opts_len; +} + +static int tunnel_key_get_opts_len(struct nlattr *nla, + struct netlink_ext_ack *extack) +{ + return tunnel_key_copy_opts(nla, NULL, 0, extack); +} + +static int tunnel_key_opts_set(struct nlattr *nla, struct ip_tunnel_info *info, + int opts_len, struct netlink_ext_ack *extack) +{ + info->options_len = opts_len; + switch (nla_type(nla_data(nla))) { + case TCA_TUNNEL_KEY_ENC_OPTS_GENEVE: +#if IS_ENABLED(CONFIG_INET) + info->key.tun_flags |= TUNNEL_GENEVE_OPT; + return tunnel_key_copy_opts(nla, ip_tunnel_info_opts(info), + opts_len, extack); +#else + return -EAFNOSUPPORT; +#endif + default: + NL_SET_ERR_MSG(extack, "Cannot set tunnel options for unknown tunnel type"); + return -EINVAL; + } +} + static const struct nla_policy 
tunnel_key_policy[TCA_TUNNEL_KEY_MAX + 1] = { [TCA_TUNNEL_KEY_PARMS] = { .len = sizeof(struct tc_tunnel_key) }, [TCA_TUNNEL_KEY_ENC_IPV4_SRC] = { .type = NLA_U32 }, @@@ -196,15 -66,11 +196,15 @@@ [TCA_TUNNEL_KEY_ENC_KEY_ID] = { .type = NLA_U32 }, [TCA_TUNNEL_KEY_ENC_DST_PORT] = {.type = NLA_U16}, [TCA_TUNNEL_KEY_NO_CSUM] = { .type = NLA_U8 }, + [TCA_TUNNEL_KEY_ENC_OPTS] = { .type = NLA_NESTED }, + [TCA_TUNNEL_KEY_ENC_TOS] = { .type = NLA_U8 }, + [TCA_TUNNEL_KEY_ENC_TTL] = { .type = NLA_U8 }, };
static int tunnel_key_init(struct net *net, struct nlattr *nla, struct nlattr *est, struct tc_action **a, - int ovr, int bind, struct netlink_ext_ack *extack) + int ovr, int bind, bool rtnl_held, + struct netlink_ext_ack *extack) { struct tc_action_net *tn = net_generic(net, tunnel_key_net_id); struct nlattr *tb[TCA_TUNNEL_KEY_MAX + 1]; @@@ -215,35 -81,24 +215,35 @@@ struct tcf_tunnel_key *t; bool exists = false; __be16 dst_port = 0; + int opts_len = 0; __be64 key_id; __be16 flags; + u8 tos, ttl; int ret = 0; int err;
- if (!nla) + if (!nla) { + NL_SET_ERR_MSG(extack, "Tunnel requires attributes to be passed"); return -EINVAL; + }
err = nla_parse_nested(tb, TCA_TUNNEL_KEY_MAX, nla, tunnel_key_policy, - NULL); - if (err < 0) + extack); + if (err < 0) { + NL_SET_ERR_MSG(extack, "Failed to parse nested tunnel key attributes"); return err; + }
- if (!tb[TCA_TUNNEL_KEY_PARMS]) + if (!tb[TCA_TUNNEL_KEY_PARMS]) { + NL_SET_ERR_MSG(extack, "Missing tunnel key parameters"); return -EINVAL; + }
parm = nla_data(tb[TCA_TUNNEL_KEY_PARMS]); - exists = tcf_idr_check(tn, parm->index, a, bind); + err = tcf_idr_check_alloc(tn, &parm->index, a, bind); + if (err < 0) + return err; + exists = err; if (exists && bind) return 0;
@@@ -252,7 -107,6 +252,7 @@@
         break;
     case TCA_TUNNEL_KEY_ACT_SET:
         if (!tb[TCA_TUNNEL_KEY_ENC_KEY_ID]) {
+            NL_SET_ERR_MSG(extack, "Missing tunnel key id");
             ret = -EINVAL;
             goto err_out;
         }
@@@ -267,22 -121,6 +267,22 @@@
         if (tb[TCA_TUNNEL_KEY_ENC_DST_PORT])
             dst_port = nla_get_be16(tb[TCA_TUNNEL_KEY_ENC_DST_PORT]);

+        if (tb[TCA_TUNNEL_KEY_ENC_OPTS]) {
+            opts_len = tunnel_key_get_opts_len(tb[TCA_TUNNEL_KEY_ENC_OPTS],
+                                               extack);
+            if (opts_len < 0) {
+                ret = opts_len;
+                goto err_out;
+            }
+        }
+
+        tos = 0;
+        if (tb[TCA_TUNNEL_KEY_ENC_TOS])
+            tos = nla_get_u8(tb[TCA_TUNNEL_KEY_ENC_TOS]);
+        ttl = 0;
+        if (tb[TCA_TUNNEL_KEY_ENC_TTL])
+            ttl = nla_get_u8(tb[TCA_TUNNEL_KEY_ENC_TTL]);
+
         if (tb[TCA_TUNNEL_KEY_ENC_IPV4_SRC] &&
             tb[TCA_TUNNEL_KEY_ENC_IPV4_DST]) {
             __be32 saddr;
@@@ -291,9 -129,9 +291,9 @@@
             saddr = nla_get_in_addr(tb[TCA_TUNNEL_KEY_ENC_IPV4_SRC]);
             daddr = nla_get_in_addr(tb[TCA_TUNNEL_KEY_ENC_IPV4_DST]);

-            metadata = __ip_tun_set_dst(saddr, daddr, 0, 0,
+            metadata = __ip_tun_set_dst(saddr, daddr, tos, ttl,
                                         dst_port, flags,
-                                        key_id, 0);
+                                        key_id, opts_len);
         } else if (tb[TCA_TUNNEL_KEY_ENC_IPV6_SRC] &&
                    tb[TCA_TUNNEL_KEY_ENC_IPV6_DST]) {
             struct in6_addr saddr;
@@@ -302,33 -140,19 +302,33 @@@
             saddr = nla_get_in6_addr(tb[TCA_TUNNEL_KEY_ENC_IPV6_SRC]);
             daddr = nla_get_in6_addr(tb[TCA_TUNNEL_KEY_ENC_IPV6_DST]);

-            metadata = __ipv6_tun_set_dst(&saddr, &daddr, 0, 0, dst_port,
+            metadata = __ipv6_tun_set_dst(&saddr, &daddr, tos, ttl, dst_port,
                                           0, flags,
                                           key_id, 0);
+        } else {
+            NL_SET_ERR_MSG(extack, "Missing either ipv4 or ipv6 src and dst");
+            ret = -EINVAL;
+            goto err_out;
         }

         if (!metadata) {
-            ret = -EINVAL;
+            NL_SET_ERR_MSG(extack, "Cannot allocate tunnel metadata dst");
+            ret = -ENOMEM;
             goto err_out;
         }

+        if (opts_len) {
+            ret = tunnel_key_opts_set(tb[TCA_TUNNEL_KEY_ENC_OPTS],
+                                      &metadata->u.tun_info,
+                                      opts_len, extack);
+            if (ret < 0)
+                goto err_out;
+        }
+
         metadata->u.tun_info.mode |= IP_TUNNEL_INFO_TX;
         break;
     default:
+        NL_SET_ERR_MSG(extack, "Unknown tunnel key action");
         ret = -EINVAL;
         goto err_out;
     }
@@@ -336,16 -160,14 +336,16 @@@
     if (!exists) {
         ret = tcf_idr_create(tn, parm->index, est, a,
                              &act_tunnel_key_ops, bind, true);
-        if (ret)
-            return ret;
+        if (ret) {
+            NL_SET_ERR_MSG(extack, "Cannot create TC IDR");
+            goto err_out;
+        }

         ret = ACT_P_CREATED;
-    } else {
+    } else if (!ovr) {
         tcf_idr_release(*a, bind);
-        if (!ovr)
-            return -EEXIST;
+        NL_SET_ERR_MSG(extack, "TC IDR already exists");
+        return -EEXIST;
     }

     t = to_tunnel_key(*a);
@@@ -353,14 -175,14 +353,14 @@@
     ASSERT_RTNL();
     params_new = kzalloc(sizeof(*params_new), GFP_KERNEL);
     if (unlikely(!params_new)) {
-        if (ret == ACT_P_CREATED)
-            tcf_idr_release(*a, bind);
+        tcf_idr_release(*a, bind);
+        NL_SET_ERR_MSG(extack, "Cannot allocate tunnel key parameters");
         return -ENOMEM;
     }
params_old = rtnl_dereference(t->params);
-    params_new->action = parm->action;
+    t->tcf_action = parm->action;
     params_new->tcft_action = parm->t_action;
     params_new->tcft_enc_metadata = metadata;

@@@ -377,8 -199,6 +377,8 @@@
 err_out:
     if (exists)
         tcf_idr_release(*a, bind);
+    else
+        tcf_idr_cleanup(tn, parm->index);
     return ret;
 }

@@@ -396,61 -216,6 +396,61 @@@ static void tunnel_key_release(struct t
     }
 }

+static int tunnel_key_geneve_opts_dump(struct sk_buff *skb,
+                                       const struct ip_tunnel_info *info)
+{
+    int len = info->options_len;
+    u8 *src = (u8 *)(info + 1);
+    struct nlattr *start;
+
+    start = nla_nest_start(skb, TCA_TUNNEL_KEY_ENC_OPTS_GENEVE);
+    if (!start)
+        return -EMSGSIZE;
+
+    while (len > 0) {
+        struct geneve_opt *opt = (struct geneve_opt *)src;
+
+        if (nla_put_be16(skb, TCA_TUNNEL_KEY_ENC_OPT_GENEVE_CLASS,
+                         opt->opt_class) ||
+            nla_put_u8(skb, TCA_TUNNEL_KEY_ENC_OPT_GENEVE_TYPE,
+                       opt->type) ||
+            nla_put(skb, TCA_TUNNEL_KEY_ENC_OPT_GENEVE_DATA,
+                    opt->length * 4, opt + 1))
+            return -EMSGSIZE;
+
+        len -= sizeof(struct geneve_opt) + opt->length * 4;
+        src += sizeof(struct geneve_opt) + opt->length * 4;
+    }
+
+    nla_nest_end(skb, start);
+    return 0;
+}
+
+static int tunnel_key_opts_dump(struct sk_buff *skb,
+                                const struct ip_tunnel_info *info)
+{
+    struct nlattr *start;
+    int err;
+
+    if (!info->options_len)
+        return 0;
+
+    start = nla_nest_start(skb, TCA_TUNNEL_KEY_ENC_OPTS);
+    if (!start)
+        return -EMSGSIZE;
+
+    if (info->key.tun_flags & TUNNEL_GENEVE_OPT) {
+        err = tunnel_key_geneve_opts_dump(skb, info);
+        if (err)
+            return err;
+    } else {
+        return -EINVAL;
+    }
+
+    nla_nest_end(skb, start);
+    return 0;
+}
+
 static int tunnel_key_dump_addresses(struct sk_buff *skb,
                                      const struct ip_tunnel_info *info)
 {
@@@ -487,23 -252,22 +487,23 @@@ static int tunnel_key_dump(struct sk_bu
     struct tcf_tunnel_key_params *params;
     struct tc_tunnel_key opt = {
         .index = t->tcf_index,
-        .refcnt = t->tcf_refcnt - ref,
-        .bindcnt = t->tcf_bindcnt - bind,
+        .refcnt = refcount_read(&t->tcf_refcnt) - ref,
+        .bindcnt = atomic_read(&t->tcf_bindcnt) - bind,
+        .action = t->tcf_action,
     };
     struct tcf_t tm;
params = rtnl_dereference(t->params);
     opt.t_action = params->tcft_action;
-    opt.action = params->action;

     if (nla_put(skb, TCA_TUNNEL_KEY_PARMS, sizeof(opt), &opt))
         goto nla_put_failure;
     if (params->tcft_action == TCA_TUNNEL_KEY_ACT_SET) {
-        struct ip_tunnel_key *key =
-            &params->tcft_enc_metadata->u.tun_info.key;
+        struct ip_tunnel_info *info =
+            &params->tcft_enc_metadata->u.tun_info;
+        struct ip_tunnel_key *key = &info->key;
         __be32 key_id = tunnel_id_to_key32(key->tun_id);

         if (nla_put_be32(skb, TCA_TUNNEL_KEY_ENC_KEY_ID, key_id) ||
@@@ -511,14 -275,7 +511,14 @@@
                                &params->tcft_enc_metadata->u.tun_info) ||
             nla_put_be16(skb, TCA_TUNNEL_KEY_ENC_DST_PORT, key->tp_dst) ||
             nla_put_u8(skb, TCA_TUNNEL_KEY_NO_CSUM,
-                       !(key->tun_flags & TUNNEL_CSUM)))
+                       !(key->tun_flags & TUNNEL_CSUM)) ||
+            tunnel_key_opts_dump(skb, info))
+            goto nla_put_failure;
+
+        if (key->tos && nla_put_u8(skb, TCA_TUNNEL_KEY_ENC_TOS, key->tos))
+            goto nla_put_failure;
+
+        if (key->ttl && nla_put_u8(skb, TCA_TUNNEL_KEY_ENC_TTL, key->ttl))
             goto nla_put_failure;
     }

@@@ -552,13 -309,6 +552,13 @@@ static int tunnel_key_search(struct ne
     return tcf_idr_search(tn, a, index);
 }

+static int tunnel_key_delete(struct net *net, u32 index)
+{
+    struct tc_action_net *tn = net_generic(net, tunnel_key_net_id);
+
+    return tcf_idr_delete_index(tn, index);
+}
+
 static struct tc_action_ops act_tunnel_key_ops = {
     .kind = "tunnel_key",
     .type = TCA_ACT_TUNNEL_KEY,
@@@ -569,7 -319,6 +569,7 @@@
     .cleanup = tunnel_key_release,
     .walk = tunnel_key_walker,
     .lookup = tunnel_key_search,
+    .delete = tunnel_key_delete,
     .size = sizeof(struct tcf_tunnel_key),
 };
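The dump path above emits the stored options back as the same nested attribute layout the parser accepts: an ENC_OPTS nest containing one GENEVE nest with class, type and data. A hedged sketch of how a userspace tool might build that nest with libmnl (attribute names come from the uapi header added alongside this change; the surrounding tc message setup is assumed to exist):

#include <libmnl/libmnl.h>
#include <linux/tc_act/tc_tunnel_key.h>
#include <arpa/inet.h>
#include <stdint.h>

/* Build the nested TCA_TUNNEL_KEY_ENC_OPTS attribute this parser consumes.
 * Assumption: the caller has already prepared nlh for a tc action message. */
static void put_geneve_opt(struct nlmsghdr *nlh, uint16_t opt_class,
                           uint8_t type, const void *data, uint16_t len)
{
    struct nlattr *opts, *opt;

    opts = mnl_attr_nest_start(nlh, TCA_TUNNEL_KEY_ENC_OPTS);
    opt = mnl_attr_nest_start(nlh, TCA_TUNNEL_KEY_ENC_OPTS_GENEVE);
    /* class is big-endian on the wire; the kernel reads it with nla_get_be16() */
    mnl_attr_put_u16(nlh, TCA_TUNNEL_KEY_ENC_OPT_GENEVE_CLASS, htons(opt_class));
    mnl_attr_put_u8(nlh, TCA_TUNNEL_KEY_ENC_OPT_GENEVE_TYPE, type);
    /* len must be a non-zero multiple of 4, per the checks above */
    mnl_attr_put(nlh, TCA_TUNNEL_KEY_ENC_OPT_GENEVE_DATA, len, data);
    mnl_attr_nest_end(nlh, opt);
    mnl_attr_nest_end(nlh, opts);
}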
diff --combined net/sched/cls_api.c
index c51b1b12450d,f74513a7c7a8..623fe2cfe529
--- a/net/sched/cls_api.c
+++ b/net/sched/cls_api.c
@@@ -277,21 -277,18 +277,21 @@@ static bool tcf_block_offload_in_use(st
 static int tcf_block_offload_cmd(struct tcf_block *block,
                                  struct net_device *dev,
                                  struct tcf_block_ext_info *ei,
-                                 enum tc_block_command command)
+                                 enum tc_block_command command,
+                                 struct netlink_ext_ack *extack)
 {
     struct tc_block_offload bo = {};

     bo.command = command;
     bo.binder_type = ei->binder_type;
     bo.block = block;
+    bo.extack = extack;
     return dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_BLOCK, &bo);
 }

 static int tcf_block_offload_bind(struct tcf_block *block, struct Qdisc *q,
-                                  struct tcf_block_ext_info *ei)
+                                  struct tcf_block_ext_info *ei,
+                                  struct netlink_ext_ack *extack)
 {
     struct net_device *dev = q->dev_queue->dev;
     int err;
@@@ -302,12 -299,10 +302,12 @@@
     /* If tc offload feature is disabled and the block we try to bind
      * to already has some offloaded filters, forbid to bind.
      */
-    if (!tc_can_offload(dev) && tcf_block_offload_in_use(block))
+    if (!tc_can_offload(dev) && tcf_block_offload_in_use(block)) {
+        NL_SET_ERR_MSG(extack, "Bind to offloaded block failed as dev has offload disabled");
         return -EOPNOTSUPP;
+    }

-    err = tcf_block_offload_cmd(block, dev, ei, TC_BLOCK_BIND);
+    err = tcf_block_offload_cmd(block, dev, ei, TC_BLOCK_BIND, extack);
     if (err == -EOPNOTSUPP)
         goto no_offload_dev_inc;
     return err;
@@@ -327,7 -322,7 +327,7 @@@ static void tcf_block_offload_unbind(st
     if (!dev->netdev_ops->ndo_setup_tc)
         goto no_offload_dev_dec;
-    err = tcf_block_offload_cmd(block, dev, ei, TC_BLOCK_UNBIND);
+    err = tcf_block_offload_cmd(block, dev, ei, TC_BLOCK_UNBIND, NULL);
     if (err == -EOPNOTSUPP)
         goto no_offload_dev_dec;
     return;
@@@ -617,7 -612,7 +617,7 @@@ int tcf_block_get_ext(struct tcf_block
     if (err)
         goto err_chain_head_change_cb_add;

-    err = tcf_block_offload_bind(block, q, ei);
+    err = tcf_block_offload_bind(block, q, ei, extack);
     if (err)
         goto err_block_offload_bind;
@@@ -751,53 -746,18 +751,53 @@@ unsigned int tcf_block_cb_decref(struc
 }
 EXPORT_SYMBOL(tcf_block_cb_decref);

+static int
+tcf_block_playback_offloads(struct tcf_block *block, tc_setup_cb_t *cb,
+                            void *cb_priv, bool add, bool offload_in_use,
+                            struct netlink_ext_ack *extack)
+{
+    struct tcf_chain *chain;
+    struct tcf_proto *tp;
+    int err;
+
+    list_for_each_entry(chain, &block->chain_list, list) {
+        for (tp = rtnl_dereference(chain->filter_chain); tp;
+             tp = rtnl_dereference(tp->next)) {
+            if (tp->ops->reoffload) {
+                err = tp->ops->reoffload(tp, add, cb, cb_priv,
+                                         extack);
+                if (err && add)
+                    goto err_playback_remove;
+            } else if (add && offload_in_use) {
+                err = -EOPNOTSUPP;
+                NL_SET_ERR_MSG(extack, "Filter HW offload failed - classifier without re-offloading support");
+                goto err_playback_remove;
+            }
+        }
+    }
+
+    return 0;
+
+err_playback_remove:
+    tcf_block_playback_offloads(block, cb, cb_priv, false, offload_in_use,
+                                extack);
+    return err;
+}
+
 struct tcf_block_cb *__tcf_block_cb_register(struct tcf_block *block,
                                              tc_setup_cb_t *cb, void *cb_ident,
-                                             void *cb_priv)
+                                             void *cb_priv,
+                                             struct netlink_ext_ack *extack)
 {
     struct tcf_block_cb *block_cb;
+    int err;

-    /* At this point, playback of previous block cb calls is not supported,
-     * so forbid to register to block which already has some offloaded
-     * filters present.
-     */
-    if (tcf_block_offload_in_use(block))
-        return ERR_PTR(-EOPNOTSUPP);
+    /* Replay any already present rules */
+    err = tcf_block_playback_offloads(block, cb, cb_priv, true,
+                                      tcf_block_offload_in_use(block),
+                                      extack);
+    if (err)
+        return ERR_PTR(err);

     block_cb = kzalloc(sizeof(*block_cb), GFP_KERNEL);
     if (!block_cb)
@@@ -812,22 -772,17 +812,22 @@@
 EXPORT_SYMBOL(__tcf_block_cb_register);

 int tcf_block_cb_register(struct tcf_block *block,
                           tc_setup_cb_t *cb, void *cb_ident,
-                          void *cb_priv)
+                          void *cb_priv, struct netlink_ext_ack *extack)
 {
     struct tcf_block_cb *block_cb;

-    block_cb = __tcf_block_cb_register(block, cb, cb_ident, cb_priv);
+    block_cb = __tcf_block_cb_register(block, cb, cb_ident, cb_priv,
+                                       extack);
     return IS_ERR(block_cb) ? PTR_ERR(block_cb) : 0;
 }
 EXPORT_SYMBOL(tcf_block_cb_register);

-void __tcf_block_cb_unregister(struct tcf_block_cb *block_cb)
+void __tcf_block_cb_unregister(struct tcf_block *block,
+                               struct tcf_block_cb *block_cb)
 {
+    tcf_block_playback_offloads(block, block_cb->cb, block_cb->cb_priv,
+                                false, tcf_block_offload_in_use(block),
+                                NULL);
     list_del(&block_cb->list);
     kfree(block_cb);
 }
@@@ -841,7 -796,7 +841,7 @@@ void tcf_block_cb_unregister(struct tcf
     block_cb = tcf_block_cb_lookup(block, cb, cb_ident);
     if (!block_cb)
         return;
-    __tcf_block_cb_unregister(block_cb);
+    __tcf_block_cb_unregister(block, block_cb);
 }
 EXPORT_SYMBOL(tcf_block_cb_unregister);
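The registration path now replays the rules that already exist on the block into the newly registered callback, and unwinds if any rule fails, instead of refusing to register while offloads are in use. Reduced to a standalone sketch, the pattern looks like this (names illustrative, not the kernel API):

#include <stdbool.h>
#include <stdio.h>

typedef int (*setup_cb_t)(int rule, bool add, void *priv);

/* Replay rules into cb; on a failed add, remove what was already installed. */
static int playback(const int *rules, int n, setup_cb_t cb, void *priv,
                    bool add)
{
    for (int i = 0; i < n; i++) {
        int err = cb(rules[i], add, priv);

        if (err && add) {
            playback(rules, i, cb, priv, false); /* rollback */
            return err;
        }
    }
    return 0;
}

static int reject_above_two(int rule, bool add, void *priv)
{
    (void)priv;
    return (add && rule > 2) ? -1 : 0;
}

int main(void)
{
    int rules[] = { 1, 2, 3 };

    /* rule 3 fails; rules 1 and 2 are removed again, -1 is returned */
    printf("playback -> %d\n", playback(rules, 3, reject_above_two, NULL, true));
    return 0;
}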
@@@ -1098,7 -1053,7 +1098,7 @@@ static void tfilter_notify_chain(struc
     for (tp = rtnl_dereference(chain->filter_chain);
          tp; tp = rtnl_dereference(tp->next))
         tfilter_notify(net, oskb, n, tp, block,
-                       q, parent, 0, event, false);
+                       q, parent, NULL, event, false);
 }

 static int tc_new_tfilter(struct sk_buff *skb, struct nlmsghdr *n,
@@@ -1489,7 -1444,7 +1489,7 @@@ static bool tcf_chain_dump(struct tcf_c
     memset(&cb->args[1], 0,
            sizeof(cb->args) - sizeof(cb->args[0]));
     if (cb->args[1] == 0) {
-        if (tcf_fill_node(net, skb, tp, block, q, parent, 0,
+        if (tcf_fill_node(net, skb, tp, block, q, parent, NULL,
                           NETLINK_CB(cb->skb).portid,
                           cb->nlh->nlmsg_seq, NLM_F_MULTI,
                           RTM_NEWTFILTER) <= 0)
@@@ -1508,9 -1463,7 +1508,9 @@@
         arg.w.stop = 0;
         arg.w.skip = cb->args[1] - 1;
         arg.w.count = 0;
+        arg.w.cookie = cb->args[2];
         tp->ops->walk(tp, &arg.w);
+        cb->args[2] = arg.w.cookie;
         cb->args[1] = arg.w.count + 1;
         if (arg.w.stop)
             return false;
@@@ -1611,7 -1564,11 +1611,7 @@@ out
 void tcf_exts_destroy(struct tcf_exts *exts)
 {
 #ifdef CONFIG_NET_CLS_ACT
-    LIST_HEAD(actions);
-
-    ASSERT_RTNL();
-    tcf_exts_to_list(exts, &actions);
-    tcf_action_destroy(&actions, TCA_ACT_UNBIND);
+    tcf_action_destroy(exts->actions, TCA_ACT_UNBIND);
     kfree(exts->actions);
     exts->nr_actions = 0;
 #endif
@@@ -1630,7 -1587,7 +1630,7 @@@ int tcf_exts_validate(struct net *net,
         if (exts->police && tb[exts->police]) {
             act = tcf_action_init_1(net, tp, tb[exts->police],
                                     rate_tlv, "police", ovr,
-                                    TCA_ACT_BIND, extack);
+                                    TCA_ACT_BIND, true, extack);
             if (IS_ERR(act))
                 return PTR_ERR(act);

@@@ -1638,15 -1595,17 +1638,15 @@@
             exts->actions[0] = act;
             exts->nr_actions = 1;
         } else if (exts->action && tb[exts->action]) {
-            LIST_HEAD(actions);
-            int err, i = 0;
+            int err;

             err = tcf_action_init(net, tp, tb[exts->action],
                                   rate_tlv, NULL, ovr, TCA_ACT_BIND,
-                                  &actions, &attr_size, extack);
-            if (err)
+                                  exts->actions, &attr_size, true,
+                                  extack);
+            if (err < 0)
                 return err;
-            list_for_each_entry(act, &actions, list)
-                exts->actions[i++] = act;
-            exts->nr_actions = i;
+            exts->nr_actions = err;
         }
         exts->net = net;
     }
@@@ -1695,11 -1654,14 +1695,11 @@@ int tcf_exts_dump(struct sk_buff *skb,
          * tc data even if iproute2 was newer - jhs
          */
         if (exts->type != TCA_OLD_COMPAT) {
-            LIST_HEAD(actions);
-
             nest = nla_nest_start(skb, exts->action);
             if (nest == NULL)
                 goto nla_put_failure;

-            tcf_exts_to_list(exts, &actions);
-            if (tcf_action_dump(skb, &actions, 0, 0) < 0)
+            if (tcf_action_dump(skb, exts->actions, 0, 0) < 0)
                 goto nla_put_failure;
             nla_nest_end(skb, nest);
         } else if (exts->police) {
diff --combined net/smc/af_smc.c
index f3fdf3714f8b,05e4ffe5aabd..143b2220c0c8
--- a/net/smc/af_smc.c
+++ b/net/smc/af_smc.c
@@@ -23,7 -23,6 +23,7 @@@
 #include <linux/workqueue.h>
 #include <linux/in.h>
 #include <linux/sched/signal.h>
+#include <linux/if_vlan.h>
 #include <net/sock.h>
 #include <net/tcp.h>
@@@ -36,7 -35,6 +36,7 @@@
 #include "smc_cdc.h"
 #include "smc_core.h"
 #include "smc_ib.h"
+#include "smc_ism.h"
 #include "smc_pnet.h"
 #include "smc_tx.h"
 #include "smc_rx.h"
@@@ -149,7 -147,8 +149,8 @@@ static int smc_release(struct socket *s
         smc->clcsock = NULL;
     }
     if (smc->use_fallback) {
-        sock_put(sk); /* passive closing */
+        if (sk->sk_state != SMC_LISTEN && sk->sk_state != SMC_INIT)
+            sock_put(sk); /* passive closing */
         sk->sk_state = SMC_CLOSED;
         sk->sk_state_change(sk);
     }
@@@ -382,8 -381,8 +383,8 @@@ static int smc_clnt_conf_first_link(str
     return 0;
 }

-static void smc_conn_save_peer_info(struct smc_sock *smc,
-                                    struct smc_clc_msg_accept_confirm *clc)
+static void smcr_conn_save_peer_info(struct smc_sock *smc,
+                                     struct smc_clc_msg_accept_confirm *clc)
 {
     int bufsize = smc_uncompress_bufsize(clc->rmbe_size);

@@@ -394,28 -393,6 +395,28 @@@
     smc->conn.tx_off = bufsize * (smc->conn.peer_rmbe_idx - 1);
 }

+static void smcd_conn_save_peer_info(struct smc_sock *smc,
+                                     struct smc_clc_msg_accept_confirm *clc)
+{
+    int bufsize = smc_uncompress_bufsize(clc->dmbe_size);
+
+    smc->conn.peer_rmbe_idx = clc->dmbe_idx;
+    smc->conn.peer_token = clc->token;
+    /* msg header takes up space in the buffer */
+    smc->conn.peer_rmbe_size = bufsize - sizeof(struct smcd_cdc_msg);
+    atomic_set(&smc->conn.peer_rmbe_space, smc->conn.peer_rmbe_size);
+    smc->conn.tx_off = bufsize * smc->conn.peer_rmbe_idx;
+}
+
+static void smc_conn_save_peer_info(struct smc_sock *smc,
+                                    struct smc_clc_msg_accept_confirm *clc)
+{
+    if (smc->conn.lgr->is_smcd)
+        smcd_conn_save_peer_info(smc, clc);
+    else
+        smcr_conn_save_peer_info(smc, clc);
+}
+
 static void smc_link_save_peer_info(struct smc_link *link,
                                     struct smc_clc_msg_accept_confirm *clc)
 {
@@@ -441,12 -418,18 +442,18 @@@ static int smc_connect_decline_fallback
     int rc;

-    if (reason_code < 0) /* error, fallback is not possible */
+    if (reason_code < 0) { /* error, fallback is not possible */
+        if (smc->sk.sk_state == SMC_INIT)
+            sock_put(&smc->sk); /* passive closing */
         return reason_code;
+    }
     if (reason_code != SMC_CLC_DECL_REPLY) {
         rc = smc_clc_send_decline(smc, reason_code);
-        if (rc < 0)
+        if (rc < 0) {
+            if (smc->sk.sk_state == SMC_INIT)
+                sock_put(&smc->sk); /* passive closing */
             return rc;
+        }
     }
     return smc_connect_fallback(smc);
 }
@@@ -459,8 -442,6 +466,6 @@@ static int smc_connect_abort(struct smc
     smc_lgr_forget(smc->conn.lgr);
     mutex_unlock(&smc_create_lgr_pending);
     smc_conn_free(&smc->conn);
-    if (reason_code < 0 && smc->sk.sk_state == SMC_INIT)
-        sock_put(&smc->sk); /* passive closing */
     return reason_code;
 }

@@@ -482,51 -463,15 +487,51 @@@ static int smc_check_rdma(struct smc_so
     return reason_code;
 }
+/* check if there is an ISM device available for this connection. */
+/* called for connect and listen */
+static int smc_check_ism(struct smc_sock *smc, struct smcd_dev **ismdev)
+{
+    /* Find ISM device with same PNETID as connecting interface */
+    smc_pnet_find_ism_resource(smc->clcsock->sk, ismdev);
+    if (!(*ismdev))
+        return SMC_CLC_DECL_CNFERR; /* configuration error */
+    return 0;
+}
+
+/* Check for VLAN ID and register it on ISM device just for CLC handshake */
+static int smc_connect_ism_vlan_setup(struct smc_sock *smc,
+                                      struct smcd_dev *ismdev,
+                                      unsigned short vlan_id)
+{
+    if (vlan_id && smc_ism_get_vlan(ismdev, vlan_id))
+        return SMC_CLC_DECL_CNFERR;
+    return 0;
+}
+
+/* cleanup temporary VLAN ID registration used for CLC handshake. If ISM is
+ * used, the VLAN ID will be registered again during the connection setup.
+ */
+static int smc_connect_ism_vlan_cleanup(struct smc_sock *smc, bool is_smcd,
+                                        struct smcd_dev *ismdev,
+                                        unsigned short vlan_id)
+{
+    if (!is_smcd)
+        return 0;
+    if (vlan_id && smc_ism_put_vlan(ismdev, vlan_id))
+        return SMC_CLC_DECL_CNFERR;
+    return 0;
+}
+
 /* CLC handshake during connect */
-static int smc_connect_clc(struct smc_sock *smc,
+static int smc_connect_clc(struct smc_sock *smc, int smc_type,
                            struct smc_clc_msg_accept_confirm *aclc,
-                           struct smc_ib_device *ibdev, u8 ibport)
+                           struct smc_ib_device *ibdev, u8 ibport,
+                           struct smcd_dev *ismdev)
 {
     int rc = 0;

     /* do inband token exchange */
-    rc = smc_clc_send_proposal(smc, ibdev, ibport);
+    rc = smc_clc_send_proposal(smc, smc_type, ibdev, ibport, ismdev);
     if (rc)
         return rc;
     /* receive SMC Accept CLC message */
@@@ -543,8 -488,8 +548,8 @@@ static int smc_connect_rdma(struct smc_
     int reason_code = 0;

     mutex_lock(&smc_create_lgr_pending);
-    local_contact = smc_conn_create(smc, ibdev, ibport, &aclc->lcl,
-                                    aclc->hdr.flag);
+    local_contact = smc_conn_create(smc, false, aclc->hdr.flag, ibdev,
+                                    ibport, &aclc->lcl, NULL, 0);
     if (local_contact < 0) {
         if (local_contact == -ENOMEM)
             reason_code = SMC_CLC_DECL_MEM;/* insufficient memory*/
@@@ -559,7 -504,7 +564,7 @@@
     smc_conn_save_peer_info(smc, aclc);

     /* create send buffer and rmb */
-    if (smc_buf_create(smc))
+    if (smc_buf_create(smc, false))
         return smc_connect_abort(smc, SMC_CLC_DECL_MEM, local_contact);

     if (local_contact == SMC_FIRST_CONTACT)
@@@ -606,50 -551,11 +611,50 @@@
     return 0;
 }

+/* setup for ISM connection of client */
+static int smc_connect_ism(struct smc_sock *smc,
+                           struct smc_clc_msg_accept_confirm *aclc,
+                           struct smcd_dev *ismdev)
+{
+    int local_contact = SMC_FIRST_CONTACT;
+    int rc = 0;
+
+    mutex_lock(&smc_create_lgr_pending);
+    local_contact = smc_conn_create(smc, true, aclc->hdr.flag, NULL, 0,
+                                    NULL, ismdev, aclc->gid);
+    if (local_contact < 0)
+        return smc_connect_abort(smc, SMC_CLC_DECL_MEM, 0);
+
+    /* Create send and receive buffers */
+    if (smc_buf_create(smc, true))
+        return smc_connect_abort(smc, SMC_CLC_DECL_MEM, local_contact);
+
+    smc_conn_save_peer_info(smc, aclc);
+    smc_close_init(smc);
+    smc_rx_init(smc);
+    smc_tx_init(smc);
+
+    rc = smc_clc_send_confirm(smc);
+    if (rc)
+        return smc_connect_abort(smc, rc, local_contact);
+    mutex_unlock(&smc_create_lgr_pending);
+
+    smc_copy_sock_settings_to_clc(smc);
+    if (smc->sk.sk_state == SMC_INIT)
+        smc->sk.sk_state = SMC_ACTIVE;
+
+    return 0;
+}
+
 /* perform steps before actually connecting */
 static int __smc_connect(struct smc_sock *smc)
 {
+    bool ism_supported = false, rdma_supported = false;
     struct smc_clc_msg_accept_confirm aclc;
     struct smc_ib_device *ibdev;
+    struct smcd_dev *ismdev;
+    unsigned short vlan;
+    int smc_type;
     int rc = 0;
     u8 ibport;
@@@ -666,52 -572,20 +671,52 @@@
     if (using_ipsec(smc))
         return smc_connect_decline_fallback(smc, SMC_CLC_DECL_IPSEC);

-    /* check if a RDMA device is available; if not, fall back */
-    if (smc_check_rdma(smc, &ibdev, &ibport))
+    /* check for VLAN ID */
+    if (smc_vlan_by_tcpsk(smc->clcsock, &vlan))
+        return smc_connect_decline_fallback(smc, SMC_CLC_DECL_CNFERR);
+
+    /* check if there is an ism device available */
+    if (!smc_check_ism(smc, &ismdev) &&
+        !smc_connect_ism_vlan_setup(smc, ismdev, vlan)) {
+        /* ISM is supported for this connection */
+        ism_supported = true;
+        smc_type = SMC_TYPE_D;
+    }
+
+    /* check if there is a rdma device available */
+    if (!smc_check_rdma(smc, &ibdev, &ibport)) {
+        /* RDMA is supported for this connection */
+        rdma_supported = true;
+        if (ism_supported)
+            smc_type = SMC_TYPE_B; /* both */
+        else
+            smc_type = SMC_TYPE_R; /* only RDMA */
+    }
+
+    /* if neither ISM nor RDMA are supported, fallback */
+    if (!rdma_supported && !ism_supported)
         return smc_connect_decline_fallback(smc, SMC_CLC_DECL_CNFERR);

     /* perform CLC handshake */
-    rc = smc_connect_clc(smc, &aclc, ibdev, ibport);
-    if (rc)
+    rc = smc_connect_clc(smc, smc_type, &aclc, ibdev, ibport, ismdev);
+    if (rc) {
+        smc_connect_ism_vlan_cleanup(smc, ism_supported, ismdev, vlan);
         return smc_connect_decline_fallback(smc, rc);
+    }

-    /* connect using rdma */
-    rc = smc_connect_rdma(smc, &aclc, ibdev, ibport);
-    if (rc)
+    /* depending on previous steps, connect using rdma or ism */
+    if (rdma_supported && aclc.hdr.path == SMC_TYPE_R)
+        rc = smc_connect_rdma(smc, &aclc, ibdev, ibport);
+    else if (ism_supported && aclc.hdr.path == SMC_TYPE_D)
+        rc = smc_connect_ism(smc, &aclc, ismdev);
+    else
+        rc = SMC_CLC_DECL_CNFERR;
+    if (rc) {
+        smc_connect_ism_vlan_cleanup(smc, ism_supported, ismdev, vlan);
         return smc_connect_decline_fallback(smc, rc);
+    }

+    smc_connect_ism_vlan_cleanup(smc, ism_supported, ismdev, vlan);
     return 0;
 }
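__smc_connect() now probes both transports before the CLC handshake and proposes SMC-D, SMC-R, or both; the server's accept message then picks one path. The selection reduces to a small decision table; an illustrative standalone helper (the SMC_TYPE_* values are assumed to match the kernel's smc_clc.h definitions):

#include <stdio.h>

/* Assumed values, as defined in the kernel's smc_clc.h */
enum { SMC_TYPE_R = 0, SMC_TYPE_D = 1, SMC_TYPE_B = 3 };

static int smc_proposed_type(int ism_ok, int rdma_ok)
{
    if (ism_ok && rdma_ok)
        return SMC_TYPE_B;  /* propose both, peer picks in its accept */
    if (ism_ok)
        return SMC_TYPE_D;  /* ISM only */
    if (rdma_ok)
        return SMC_TYPE_R;  /* RDMA only */
    return -1;              /* neither: caller falls back to plain TCP */
}

int main(void)
{
    printf("ism+rdma -> %d\n", smc_proposed_type(1, 1)); /* 3 */
    printf("ism only -> %d\n", smc_proposed_type(1, 0)); /* 1 */
    return 0;
}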
@@@ -1079,8 -953,7 +1084,8 @@@ static int smc_listen_rdma_init(struct
                                 int *local_contact)
 {
     /* allocate connection / link group */
-    *local_contact = smc_conn_create(new_smc, ibdev, ibport, &pclc->lcl, 0);
+    *local_contact = smc_conn_create(new_smc, false, 0, ibdev, ibport,
+                                     &pclc->lcl, NULL, 0);
     if (*local_contact < 0) {
         if (*local_contact == -ENOMEM)
             return SMC_CLC_DECL_MEM;/* insufficient memory*/
@@@ -1088,50 -961,12 +1093,50 @@@
 }

     /* create send buffer and rmb */
-    if (smc_buf_create(new_smc))
+    if (smc_buf_create(new_smc, false))
         return SMC_CLC_DECL_MEM;

     return 0;
 }

+/* listen worker: initialize connection and buffers for SMC-D */
+static int smc_listen_ism_init(struct smc_sock *new_smc,
+                               struct smc_clc_msg_proposal *pclc,
+                               struct smcd_dev *ismdev,
+                               int *local_contact)
+{
+    struct smc_clc_msg_smcd *pclc_smcd;
+
+    pclc_smcd = smc_get_clc_msg_smcd(pclc);
+    *local_contact = smc_conn_create(new_smc, true, 0, NULL, 0, NULL,
+                                     ismdev, pclc_smcd->gid);
+    if (*local_contact < 0) {
+        if (*local_contact == -ENOMEM)
+            return SMC_CLC_DECL_MEM;/* insufficient memory*/
+        return SMC_CLC_DECL_INTERR; /* other error */
+    }
+
+    /* Check if peer can be reached via ISM device */
+    if (smc_ism_cantalk(new_smc->conn.lgr->peer_gid,
+                        new_smc->conn.lgr->vlan_id,
+                        new_smc->conn.lgr->smcd)) {
+        if (*local_contact == SMC_FIRST_CONTACT)
+            smc_lgr_forget(new_smc->conn.lgr);
+        smc_conn_free(&new_smc->conn);
+        return SMC_CLC_DECL_CNFERR;
+    }
+
+    /* Create send and receive buffers */
+    if (smc_buf_create(new_smc, true)) {
+        if (*local_contact == SMC_FIRST_CONTACT)
+            smc_lgr_forget(new_smc->conn.lgr);
+        smc_conn_free(&new_smc->conn);
+        return SMC_CLC_DECL_MEM;
+    }
+
+    return 0;
+}
+
 /* listen worker: register buffers */
 static int smc_listen_rdma_reg(struct smc_sock *new_smc, int local_contact)
 {
@@@ -1190,8 -1025,6 +1195,8 @@@ static void smc_listen_work(struct work
     struct smc_clc_msg_accept_confirm cclc;
     struct smc_clc_msg_proposal *pclc;
     struct smc_ib_device *ibdev;
+    bool ism_supported = false;
+    struct smcd_dev *ismdev;
     u8 buf[SMC_CLC_MAX_LEN];
     int local_contact = 0;
     int reason_code = 0;
@@@ -1232,21 -1065,12 +1237,21 @@@
     smc_rx_init(new_smc);
     smc_tx_init(new_smc);

+    /* check if ISM is available */
+    if ((pclc->hdr.path == SMC_TYPE_D || pclc->hdr.path == SMC_TYPE_B) &&
+        !smc_check_ism(new_smc, &ismdev) &&
+        !smc_listen_ism_init(new_smc, pclc, ismdev, &local_contact)) {
+        ism_supported = true;
+    }
+
     /* check if RDMA is available */
-    if (smc_check_rdma(new_smc, &ibdev, &ibport) ||
-        smc_listen_rdma_check(new_smc, pclc) ||
-        smc_listen_rdma_init(new_smc, pclc, ibdev, ibport,
-                             &local_contact) ||
-        smc_listen_rdma_reg(new_smc, local_contact)) {
+    if (!ism_supported &&
+        ((pclc->hdr.path != SMC_TYPE_R && pclc->hdr.path != SMC_TYPE_B) ||
+         smc_check_rdma(new_smc, &ibdev, &ibport) ||
+         smc_listen_rdma_check(new_smc, pclc) ||
+         smc_listen_rdma_init(new_smc, pclc, ibdev, ibport,
+                              &local_contact) ||
+         smc_listen_rdma_reg(new_smc, local_contact))) {
         /* SMC not supported, decline */
         mutex_unlock(&smc_create_lgr_pending);
         smc_listen_decline(new_smc, SMC_CLC_DECL_CNFERR, local_contact);
@@@ -1271,8 -1095,7 +1276,8 @@@
     }

     /* finish worker */
-    smc_listen_rdma_finish(new_smc, &cclc, local_contact);
+    if (!ism_supported)
+        smc_listen_rdma_finish(new_smc, &cclc, local_contact);
     smc_conn_save_peer_info(new_smc, &cclc);
     mutex_unlock(&smc_create_lgr_pending);
     smc_listen_out_connected(new_smc);
@@@ -1527,6 -1350,8 +1532,8 @@@ static __poll_t smc_poll(struct file *f
         if (sk->sk_err)
             mask |= EPOLLERR;
     } else {
+        if (sk->sk_state != SMC_CLOSED)
+            sock_poll_wait(file, sk_sleep(sk), wait);
         if (sk->sk_err)
             mask |= EPOLLERR;
         if ((sk->sk_shutdown == SHUTDOWN_MASK) ||
@@@ -1552,7 -1377,6 +1559,6 @@@
         }
         if (smc->conn.urg_state == SMC_URG_VALID)
             mask |= EPOLLPRI;
-    }
     return mask;
@@@ -1633,7 -1457,8 +1639,8 @@@ static int smc_setsockopt(struct socke
     if (optlen < sizeof(int))
         return -EINVAL;
-    get_user(val, (int __user *)optval);
+    if (get_user(val, (int __user *)optval))
+        return -EFAULT;

     lock_sock(sk);
     switch (optname) {
@@@ -1701,10 -1526,13 +1708,13 @@@ static int smc_ioctl(struct socket *soc
             return -EBADF;
         return smc->clcsock->ops->ioctl(smc->clcsock, cmd, arg);
     }
+    lock_sock(&smc->sk);
     switch (cmd) {
     case SIOCINQ: /* same as FIONREAD */
-        if (smc->sk.sk_state == SMC_LISTEN)
+        if (smc->sk.sk_state == SMC_LISTEN) {
+            release_sock(&smc->sk);
             return -EINVAL;
+        }
         if (smc->sk.sk_state == SMC_INIT ||
             smc->sk.sk_state == SMC_CLOSED)
             answ = 0;
@@@ -1713,8 -1541,10 +1723,10 @@@
         break;
     case SIOCOUTQ:
         /* output queue size (not send + not acked) */
-        if (smc->sk.sk_state == SMC_LISTEN)
+        if (smc->sk.sk_state == SMC_LISTEN) {
+            release_sock(&smc->sk);
             return -EINVAL;
+        }
         if (smc->sk.sk_state == SMC_INIT ||
             smc->sk.sk_state == SMC_CLOSED)
             answ = 0;
@@@ -1724,8 -1554,10 +1736,10 @@@
         break;
     case SIOCOUTQNSD:
         /* output queue size (not send only) */
-        if (smc->sk.sk_state == SMC_LISTEN)
+        if (smc->sk.sk_state == SMC_LISTEN) {
+            release_sock(&smc->sk);
             return -EINVAL;
+        }
         if (smc->sk.sk_state == SMC_INIT ||
             smc->sk.sk_state == SMC_CLOSED)
             answ = 0;
@@@ -1733,8 -1565,10 +1747,10 @@@
             answ = smc_tx_prepared_sends(&smc->conn);
         break;
     case SIOCATMARK:
-        if (smc->sk.sk_state == SMC_LISTEN)
+        if (smc->sk.sk_state == SMC_LISTEN) {
+            release_sock(&smc->sk);
             return -EINVAL;
+        }
         if (smc->sk.sk_state == SMC_INIT ||
             smc->sk.sk_state == SMC_CLOSED) {
             answ = 0;
@@@ -1750,8 -1584,10 +1766,10 @@@
         }
         break;
     default:
+        release_sock(&smc->sk);
         return -ENOIOCTLCMD;
     }
+    release_sock(&smc->sk);

     return put_user(answ, (int __user *)arg);
 }
diff --combined net/smc/smc_clc.c
index 038d70ef7892,ae5d168653ce..ad39efdb4f1c
--- a/net/smc/smc_clc.c
+++ b/net/smc/smc_clc.c
@@@ -23,15 -23,9 +23,15 @@@
 #include "smc_core.h"
 #include "smc_clc.h"
 #include "smc_ib.h"
+#include "smc_ism.h"
+
+#define SMCR_CLC_ACCEPT_CONFIRM_LEN 68
+#define SMCD_CLC_ACCEPT_CONFIRM_LEN 48
 /* eye catcher "SMCR" EBCDIC for CLC messages */
 static const char SMC_EYECATCHER[4] = {'\xe2', '\xd4', '\xc3', '\xd9'};
+/* eye catcher "SMCD" EBCDIC for CLC messages */
+static const char SMCD_EYECATCHER[4] = {'\xe2', '\xd4', '\xc3', '\xc4'};
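The two eyecatchers are the strings "SMCR" and "SMCD" transcoded to EBCDIC (S=0xe2, M=0xd4, C=0xc3, R=0xd9, D=0xc4), so they differ only in the last byte. A standalone check of the dual-eyecatcher acceptance used by the validation code below:

#include <stdio.h>
#include <string.h>

static const char SMC_EYECATCHER[4]  = { '\xe2', '\xd4', '\xc3', '\xd9' };
static const char SMCD_EYECATCHER[4] = { '\xe2', '\xd4', '\xc3', '\xc4' };

/* Accept a header stamped with either eyecatcher, as the kernel now does. */
static int eyecatcher_ok(const char ec[4])
{
    return !memcmp(ec, SMC_EYECATCHER, 4) ||
           !memcmp(ec, SMCD_EYECATCHER, 4);
}

int main(void)
{
    printf("SMCR ok: %d\n", eyecatcher_ok(SMC_EYECATCHER));      /* 1 */
    printf("SMCD ok: %d\n", eyecatcher_ok(SMCD_EYECATCHER));     /* 1 */
    printf("garbage ok: %d\n", eyecatcher_ok("\x00\x01\x02\x03")); /* 0 */
    return 0;
}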
 /* check if received message has a correct header length and contains valid
  * heading and trailing eyecatchers
@@@ -44,14 -38,10 +44,14 @@@ static bool smc_clc_msg_hdr_valid(struc
     struct smc_clc_msg_decline *dclc;
     struct smc_clc_msg_trail *trl;

-    if (memcmp(clcm->eyecatcher, SMC_EYECATCHER, sizeof(SMC_EYECATCHER)))
+    if (memcmp(clcm->eyecatcher, SMC_EYECATCHER, sizeof(SMC_EYECATCHER)) &&
+        memcmp(clcm->eyecatcher, SMCD_EYECATCHER, sizeof(SMCD_EYECATCHER)))
         return false;
     switch (clcm->type) {
     case SMC_CLC_PROPOSAL:
+        if (clcm->path != SMC_TYPE_R && clcm->path != SMC_TYPE_D &&
+            clcm->path != SMC_TYPE_B)
+            return false;
         pclc = (struct smc_clc_msg_proposal *)clcm;
         pclc_prfx = smc_clc_proposal_get_prefix(pclc);
         if (ntohs(pclc->hdr.length) !=
@@@ -66,16 -56,10 +66,16 @@@
         break;
     case SMC_CLC_ACCEPT:
     case SMC_CLC_CONFIRM:
+        if (clcm->path != SMC_TYPE_R && clcm->path != SMC_TYPE_D)
+            return false;
         clc = (struct smc_clc_msg_accept_confirm *)clcm;
-        if (ntohs(clc->hdr.length) != sizeof(*clc))
+        if ((clcm->path == SMC_TYPE_R &&
+             ntohs(clc->hdr.length) != SMCR_CLC_ACCEPT_CONFIRM_LEN) ||
+            (clcm->path == SMC_TYPE_D &&
+             ntohs(clc->hdr.length) != SMCD_CLC_ACCEPT_CONFIRM_LEN))
             return false;
-        trl = &clc->trl;
+        trl = (struct smc_clc_msg_trail *)
+            ((u8 *)clc + ntohs(clc->hdr.length) - sizeof(*trl));
         break;
     case SMC_CLC_DECLINE:
         dclc = (struct smc_clc_msg_decline *)clcm;
@@@ -86,8 -70,7 +86,8 @@@
     default:
         return false;
     }
-    if (memcmp(trl->eyecatcher, SMC_EYECATCHER, sizeof(SMC_EYECATCHER)))
+    if (memcmp(trl->eyecatcher, SMC_EYECATCHER, sizeof(SMC_EYECATCHER)) &&
+        memcmp(trl->eyecatcher, SMCD_EYECATCHER, sizeof(SMCD_EYECATCHER)))
         return false;
     return true;
 }
@@@ -267,6 -250,7 +267,7 @@@ out
 int smc_clc_wait_msg(struct smc_sock *smc, void *buf, int buflen,
                      u8 expected_type)
 {
+    long rcvtimeo = smc->clcsock->sk->sk_rcvtimeo;
     struct sock *clc_sk = smc->clcsock->sk;
     struct smc_clc_msg_hdr *clcm = buf;
     struct msghdr msg = {NULL, 0};
@@@ -312,9 -296,6 +313,9 @@@
     datlen = ntohs(clcm->length);
     if ((len < sizeof(struct smc_clc_msg_hdr)) ||
         (datlen > buflen) ||
+        (clcm->version != SMC_CLC_V1) ||
+        (clcm->path != SMC_TYPE_R && clcm->path != SMC_TYPE_D &&
+         clcm->path != SMC_TYPE_B) ||
         ((clcm->type != SMC_CLC_DECLINE) &&
          (clcm->type != expected_type))) {
         smc->sk.sk_err = EPROTO;
@@@ -326,7 -307,6 +327,6 @@@
     memset(&msg, 0, sizeof(struct msghdr));
     iov_iter_kvec(&msg.msg_iter, READ | ITER_KVEC, &vec, 1, datlen);
     krflags = MSG_WAITALL;
-    smc->clcsock->sk->sk_rcvtimeo = CLC_WAIT_TIME;
     len = sock_recvmsg(smc->clcsock, &msg, krflags);
     if (len < datlen || !smc_clc_msg_hdr_valid(clcm)) {
         smc->sk.sk_err = EPROTO;
@@@ -342,6 -322,7 +342,7 @@@
     }

 out:
+    smc->clcsock->sk->sk_rcvtimeo = rcvtimeo;
     return reason_code;
 }
@@@ -376,18 -357,17 +377,18 @@@ int smc_clc_send_decline(struct smc_soc
 }

 /* send CLC PROPOSAL message across internal TCP socket */
-int smc_clc_send_proposal(struct smc_sock *smc,
-                          struct smc_ib_device *smcibdev,
-                          u8 ibport)
+int smc_clc_send_proposal(struct smc_sock *smc, int smc_type,
+                          struct smc_ib_device *ibdev, u8 ibport,
+                          struct smcd_dev *ismdev)
 {
     struct smc_clc_ipv6_prefix ipv6_prfx[SMC_CLC_MAX_V6_PREFIX];
     struct smc_clc_msg_proposal_prefix pclc_prfx;
+    struct smc_clc_msg_smcd pclc_smcd;
     struct smc_clc_msg_proposal pclc;
     struct smc_clc_msg_trail trl;
     int len, i, plen, rc;
     int reason_code = 0;
-    struct kvec vec[4];
+    struct kvec vec[5];
     struct msghdr msg;

     /* retrieve ip prefixes for CLC proposal msg */
@@@ -402,34 -382,18 +403,34 @@@
     memset(&pclc, 0, sizeof(pclc));
     memcpy(pclc.hdr.eyecatcher, SMC_EYECATCHER, sizeof(SMC_EYECATCHER));
     pclc.hdr.type = SMC_CLC_PROPOSAL;
-    pclc.hdr.length = htons(plen);
     pclc.hdr.version = SMC_CLC_V1; /* SMC version */
-    memcpy(pclc.lcl.id_for_peer, local_systemid, sizeof(local_systemid));
-    memcpy(&pclc.lcl.gid, &smcibdev->gid[ibport - 1], SMC_GID_SIZE);
-    memcpy(&pclc.lcl.mac, &smcibdev->mac[ibport - 1], ETH_ALEN);
-    pclc.iparea_offset = htons(0);
+    pclc.hdr.path = smc_type;
+    if (smc_type == SMC_TYPE_R || smc_type == SMC_TYPE_B) {
+        /* add SMC-R specifics */
+        memcpy(pclc.lcl.id_for_peer, local_systemid,
+               sizeof(local_systemid));
+        memcpy(&pclc.lcl.gid, &ibdev->gid[ibport - 1], SMC_GID_SIZE);
+        memcpy(&pclc.lcl.mac, &ibdev->mac[ibport - 1], ETH_ALEN);
+        pclc.iparea_offset = htons(0);
+    }
+    if (smc_type == SMC_TYPE_D || smc_type == SMC_TYPE_B) {
+        /* add SMC-D specifics */
+        memset(&pclc_smcd, 0, sizeof(pclc_smcd));
+        plen += sizeof(pclc_smcd);
+        pclc.iparea_offset = htons(SMC_CLC_PROPOSAL_MAX_OFFSET);
+        pclc_smcd.gid = ismdev->local_gid;
+    }
+    pclc.hdr.length = htons(plen);

     memcpy(trl.eyecatcher, SMC_EYECATCHER, sizeof(SMC_EYECATCHER));
     memset(&msg, 0, sizeof(msg));
     i = 0;
     vec[i].iov_base = &pclc;
     vec[i++].iov_len = sizeof(pclc);
+    if (smc_type == SMC_TYPE_D || smc_type == SMC_TYPE_B) {
+        vec[i].iov_base = &pclc_smcd;
+        vec[i++].iov_len = sizeof(pclc_smcd);
+    }
     vec[i].iov_base = &pclc_prfx;
     vec[i++].iov_len = sizeof(pclc_prfx);
     if (pclc_prfx.ipv6_prefixes_cnt > 0) {
@@@ -465,56 -429,35 +466,56 @@@ int smc_clc_send_confirm(struct smc_soc
     struct kvec vec;
     int len;
-    link = &conn->lgr->lnk[SMC_SINGLE_LINK];
     /* send SMC Confirm CLC msg */
     memset(&cclc, 0, sizeof(cclc));
-    memcpy(cclc.hdr.eyecatcher, SMC_EYECATCHER, sizeof(SMC_EYECATCHER));
     cclc.hdr.type = SMC_CLC_CONFIRM;
-    cclc.hdr.length = htons(sizeof(cclc));
     cclc.hdr.version = SMC_CLC_V1; /* SMC version */
-    memcpy(cclc.lcl.id_for_peer, local_systemid, sizeof(local_systemid));
-    memcpy(&cclc.lcl.gid, &link->smcibdev->gid[link->ibport - 1],
-           SMC_GID_SIZE);
-    memcpy(&cclc.lcl.mac, &link->smcibdev->mac[link->ibport - 1], ETH_ALEN);
-    hton24(cclc.qpn, link->roce_qp->qp_num);
-    cclc.rmb_rkey =
-        htonl(conn->rmb_desc->mr_rx[SMC_SINGLE_LINK]->rkey);
-    cclc.rmbe_idx = 1; /* for now: 1 RMB = 1 RMBE */
-    cclc.rmbe_alert_token = htonl(conn->alert_token_local);
-    cclc.qp_mtu = min(link->path_mtu, link->peer_mtu);
-    cclc.rmbe_size = conn->rmbe_size_short;
-    cclc.rmb_dma_addr = cpu_to_be64(
-        (u64)sg_dma_address(conn->rmb_desc->sgt[SMC_SINGLE_LINK].sgl));
-    hton24(cclc.psn, link->psn_initial);
-
-    memcpy(cclc.trl.eyecatcher, SMC_EYECATCHER, sizeof(SMC_EYECATCHER));
+    if (smc->conn.lgr->is_smcd) {
+        /* SMC-D specific settings */
+        memcpy(cclc.hdr.eyecatcher, SMCD_EYECATCHER,
+               sizeof(SMCD_EYECATCHER));
+        cclc.hdr.path = SMC_TYPE_D;
+        cclc.hdr.length = htons(SMCD_CLC_ACCEPT_CONFIRM_LEN);
+        cclc.gid = conn->lgr->smcd->local_gid;
+        cclc.token = conn->rmb_desc->token;
+        cclc.dmbe_size = conn->rmbe_size_short;
+        cclc.dmbe_idx = 0;
+        memcpy(&cclc.linkid, conn->lgr->id, SMC_LGR_ID_SIZE);
+        memcpy(cclc.smcd_trl.eyecatcher, SMCD_EYECATCHER,
+               sizeof(SMCD_EYECATCHER));
+    } else {
+        /* SMC-R specific settings */
+        link = &conn->lgr->lnk[SMC_SINGLE_LINK];
+        memcpy(cclc.hdr.eyecatcher, SMC_EYECATCHER,
+               sizeof(SMC_EYECATCHER));
+        cclc.hdr.path = SMC_TYPE_R;
+        cclc.hdr.length = htons(SMCR_CLC_ACCEPT_CONFIRM_LEN);
+        memcpy(cclc.lcl.id_for_peer, local_systemid,
+               sizeof(local_systemid));
+        memcpy(&cclc.lcl.gid, &link->smcibdev->gid[link->ibport - 1],
+               SMC_GID_SIZE);
+        memcpy(&cclc.lcl.mac, &link->smcibdev->mac[link->ibport - 1],
+               ETH_ALEN);
+        hton24(cclc.qpn, link->roce_qp->qp_num);
+        cclc.rmb_rkey =
+            htonl(conn->rmb_desc->mr_rx[SMC_SINGLE_LINK]->rkey);
+        cclc.rmbe_idx = 1; /* for now: 1 RMB = 1 RMBE */
+        cclc.rmbe_alert_token = htonl(conn->alert_token_local);
+        cclc.qp_mtu = min(link->path_mtu, link->peer_mtu);
+        cclc.rmbe_size = conn->rmbe_size_short;
+        cclc.rmb_dma_addr = cpu_to_be64((u64)sg_dma_address
+                (conn->rmb_desc->sgt[SMC_SINGLE_LINK].sgl));
+        hton24(cclc.psn, link->psn_initial);
+        memcpy(cclc.smcr_trl.eyecatcher, SMC_EYECATCHER,
+               sizeof(SMC_EYECATCHER));
+    }

     memset(&msg, 0, sizeof(msg));
     vec.iov_base = &cclc;
-    vec.iov_len = sizeof(cclc);
-    len = kernel_sendmsg(smc->clcsock, &msg, &vec, 1, sizeof(cclc));
-    if (len < sizeof(cclc)) {
+    vec.iov_len = ntohs(cclc.hdr.length);
+    len = kernel_sendmsg(smc->clcsock, &msg, &vec, 1,
+                         ntohs(cclc.hdr.length));
+    if (len < ntohs(cclc.hdr.length)) {
         if (len >= 0) {
             reason_code = -ENETUNREACH;
             smc->sk.sk_err = -reason_code;
@@@ -537,58 -480,35 +538,58 @@@ int smc_clc_send_accept(struct smc_soc
     int rc = 0;
     int len;

-    link = &conn->lgr->lnk[SMC_SINGLE_LINK];
     memset(&aclc, 0, sizeof(aclc));
-    memcpy(aclc.hdr.eyecatcher, SMC_EYECATCHER, sizeof(SMC_EYECATCHER));
     aclc.hdr.type = SMC_CLC_ACCEPT;
-    aclc.hdr.length = htons(sizeof(aclc));
     aclc.hdr.version = SMC_CLC_V1; /* SMC version */
     if (srv_first_contact)
         aclc.hdr.flag = 1;
-    memcpy(aclc.lcl.id_for_peer, local_systemid, sizeof(local_systemid));
-    memcpy(&aclc.lcl.gid, &link->smcibdev->gid[link->ibport - 1],
-           SMC_GID_SIZE);
-    memcpy(&aclc.lcl.mac, link->smcibdev->mac[link->ibport - 1], ETH_ALEN);
-    hton24(aclc.qpn, link->roce_qp->qp_num);
-    aclc.rmb_rkey =
-        htonl(conn->rmb_desc->mr_rx[SMC_SINGLE_LINK]->rkey);
-    aclc.rmbe_idx = 1; /* as long as 1 RMB = 1 RMBE */
-    aclc.rmbe_alert_token = htonl(conn->alert_token_local);
-    aclc.qp_mtu = link->path_mtu;
-    aclc.rmbe_size = conn->rmbe_size_short,
-    aclc.rmb_dma_addr = cpu_to_be64(
-        (u64)sg_dma_address(conn->rmb_desc->sgt[SMC_SINGLE_LINK].sgl));
-    hton24(aclc.psn, link->psn_initial);
-    memcpy(aclc.trl.eyecatcher, SMC_EYECATCHER, sizeof(SMC_EYECATCHER));
+
+    if (new_smc->conn.lgr->is_smcd) {
+        /* SMC-D specific settings */
+        aclc.hdr.length = htons(SMCD_CLC_ACCEPT_CONFIRM_LEN);
+        memcpy(aclc.hdr.eyecatcher, SMCD_EYECATCHER,
+               sizeof(SMCD_EYECATCHER));
+        aclc.hdr.path = SMC_TYPE_D;
+        aclc.gid = conn->lgr->smcd->local_gid;
+        aclc.token = conn->rmb_desc->token;
+        aclc.dmbe_size = conn->rmbe_size_short;
+        aclc.dmbe_idx = 0;
+        memcpy(&aclc.linkid, conn->lgr->id, SMC_LGR_ID_SIZE);
+        memcpy(aclc.smcd_trl.eyecatcher, SMCD_EYECATCHER,
+               sizeof(SMCD_EYECATCHER));
+    } else {
+        /* SMC-R specific settings */
+        aclc.hdr.length = htons(SMCR_CLC_ACCEPT_CONFIRM_LEN);
+        memcpy(aclc.hdr.eyecatcher, SMC_EYECATCHER,
+               sizeof(SMC_EYECATCHER));
+        aclc.hdr.path = SMC_TYPE_R;
+        link = &conn->lgr->lnk[SMC_SINGLE_LINK];
+        memcpy(aclc.lcl.id_for_peer, local_systemid,
+               sizeof(local_systemid));
+        memcpy(&aclc.lcl.gid, &link->smcibdev->gid[link->ibport - 1],
+               SMC_GID_SIZE);
+        memcpy(&aclc.lcl.mac, link->smcibdev->mac[link->ibport - 1],
+               ETH_ALEN);
+        hton24(aclc.qpn, link->roce_qp->qp_num);
+        aclc.rmb_rkey =
+            htonl(conn->rmb_desc->mr_rx[SMC_SINGLE_LINK]->rkey);
+        aclc.rmbe_idx = 1; /* as long as 1 RMB = 1 RMBE */
+        aclc.rmbe_alert_token = htonl(conn->alert_token_local);
+        aclc.qp_mtu = link->path_mtu;
+        aclc.rmbe_size = conn->rmbe_size_short,
+        aclc.rmb_dma_addr = cpu_to_be64((u64)sg_dma_address
+                (conn->rmb_desc->sgt[SMC_SINGLE_LINK].sgl));
+        hton24(aclc.psn, link->psn_initial);
+        memcpy(aclc.smcr_trl.eyecatcher, SMC_EYECATCHER,
+               sizeof(SMC_EYECATCHER));
+    }

     memset(&msg, 0, sizeof(msg));
     vec.iov_base = &aclc;
-    vec.iov_len = sizeof(aclc);
-    len = kernel_sendmsg(new_smc->clcsock, &msg, &vec, 1, sizeof(aclc));
-    if (len < sizeof(aclc)) {
+    vec.iov_len = ntohs(aclc.hdr.length);
+    len = kernel_sendmsg(new_smc->clcsock, &msg, &vec, 1,
+                         ntohs(aclc.hdr.length));
+    if (len < ntohs(aclc.hdr.length)) {
         if (len >= 0)
             new_smc->sk.sk_err = EPROTO;
         else
diff --combined net/tipc/node.c
index 3819ab14e073,0453bd451ce8..68014f1b6976
--- a/net/tipc/node.c
+++ b/net/tipc/node.c
@@@ -45,7 -45,6 +45,7 @@@
 #include "netlink.h"
 #define INVALID_NODE_SIG 0x10000
+#define NODE_CLEANUP_AFTER 300000

 /* Flags used to take different actions according to flag type
  * TIPC_NOTIFY_NODE_DOWN: notify node is down
@@@ -97,7 -96,6 +97,7 @@@ struct tipc_bclink_entry
  * @link_id: local and remote bearer ids of changing link, if any
  * @publ_list: list of publications
  * @rcu: rcu struct for tipc_node
+ * @delete_at: indicates the time for deleting a down node
  */
 struct tipc_node {
     u32 addr;
@@@ -123,7 -121,6 +123,7 @@@
     unsigned long keepalive_intv;
     struct timer_list timer;
     struct rcu_head rcu;
+    unsigned long delete_at;
 };

 /* Node FSM states and events:
@@@ -163,7 -160,6 +163,7 @@@ static struct tipc_node *tipc_node_find
 static struct tipc_node *tipc_node_find_by_id(struct net *net, u8 *id);
 static void tipc_node_put(struct tipc_node *node);
 static bool node_is_up(struct tipc_node *n);
+static void tipc_node_delete_from_list(struct tipc_node *node);

 struct tipc_sock_conn {
     u32 port;
@@@ -363,24 -359,13 +363,24 @@@ static struct tipc_node *tipc_node_crea
 {
     struct tipc_net *tn = net_generic(net, tipc_net_id);
     struct tipc_node *n, *temp_node;
+    struct tipc_link *l;
+    int bearer_id;
     int i;

     spin_lock_bh(&tn->node_list_lock);
     n = tipc_node_find(net, addr);
     if (n) {
+        if (n->capabilities == capabilities)
+            goto exit;
         /* Same node may come back with new capabilities */
+        write_lock_bh(&n->lock);
         n->capabilities = capabilities;
+        for (bearer_id = 0; bearer_id < MAX_BEARERS; bearer_id++) {
+            l = n->links[bearer_id].link;
+            if (l)
+                tipc_link_update_caps(l, capabilities);
+        }
+        write_unlock_bh(&n->lock);
         goto exit;
     }
     n = kzalloc(sizeof(*n), GFP_ATOMIC);
@@@ -405,7 -390,6 +405,7 @@@
     for (i = 0; i < MAX_BEARERS; i++)
         spin_lock_init(&n->links[i].lock);
     n->state = SELF_DOWN_PEER_LEAVING;
+    n->delete_at = jiffies + msecs_to_jiffies(NODE_CLEANUP_AFTER);
     n->signature = INVALID_NODE_SIG;
     n->active_links[0] = INVALID_BEARER_ID;
     n->active_links[1] = INVALID_BEARER_ID;
@@@ -449,16 -433,11 +449,16 @@@ static void tipc_node_calculate_timer(s
     tipc_link_set_abort_limit(l, tol / n->keepalive_intv);
 }

-static void tipc_node_delete(struct tipc_node *node)
+static void tipc_node_delete_from_list(struct tipc_node *node)
 {
     list_del_rcu(&node->list);
     hlist_del_rcu(&node->hash);
     tipc_node_put(node);
+}
+
+static void tipc_node_delete(struct tipc_node *node)
+{
+    tipc_node_delete_from_list(node);

     del_timer_sync(&node->timer);
     tipc_node_put(node);
@@@ -565,42 -544,6 +565,42 @@@ void tipc_node_remove_conn(struct net *
     tipc_node_put(node);
 }
+static void tipc_node_clear_links(struct tipc_node *node)
+{
+    int i;
+
+    for (i = 0; i < MAX_BEARERS; i++) {
+        struct tipc_link_entry *le = &node->links[i];
+
+        if (le->link) {
+            kfree(le->link);
+            le->link = NULL;
+            node->link_cnt--;
+        }
+    }
+}
+
+/* tipc_node_cleanup - delete nodes that do not
+ * have active links for NODE_CLEANUP_AFTER time
+ */
+static int tipc_node_cleanup(struct tipc_node *peer)
+{
+    struct tipc_net *tn = tipc_net(peer->net);
+    bool deleted = false;
+
+    spin_lock_bh(&tn->node_list_lock);
+    tipc_node_write_lock(peer);
+
+    if (!node_is_up(peer) && time_after(jiffies, peer->delete_at)) {
+        tipc_node_clear_links(peer);
+        tipc_node_delete_from_list(peer);
+        deleted = true;
+    }
+    tipc_node_write_unlock(peer);
+    spin_unlock_bh(&tn->node_list_lock);
+    return deleted;
+}
+
 /* tipc_node_timeout - handle expiration of node timer
  */
 static void tipc_node_timeout(struct timer_list *t)
@@@ -608,29 -551,21 +608,29 @@@
     struct tipc_node *n = from_timer(n, t, timer);
     struct tipc_link_entry *le;
     struct sk_buff_head xmitq;
+    int remains = n->link_cnt;
     int bearer_id;
     int rc = 0;

+    if (!node_is_up(n) && tipc_node_cleanup(n)) {
+        /* Remove the reference held by the timer */
+        tipc_node_put(n);
+        return;
+    }
+
     __skb_queue_head_init(&xmitq);

-    for (bearer_id = 0; bearer_id < MAX_BEARERS; bearer_id++) {
+    for (bearer_id = 0; remains && (bearer_id < MAX_BEARERS); bearer_id++) {
         tipc_node_read_lock(n);
         le = &n->links[bearer_id];
-        spin_lock_bh(&le->lock);
         if (le->link) {
+            spin_lock_bh(&le->lock);
             /* Link tolerance may change asynchronously: */
             tipc_node_calculate_timer(n, le->link);
             rc = tipc_link_timeout(le->link, &xmitq);
+            spin_unlock_bh(&le->lock);
+            remains--;
         }
-        spin_unlock_bh(&le->lock);
         tipc_node_read_unlock(n);
         tipc_bearer_xmit(n->net, bearer_id, &xmitq, &le->maddr);
         if (rc & TIPC_LINK_DOWN_EVT)
@@@ -862,6 -797,7 +862,7 @@@ static u32 tipc_node_suggest_addr(struc
 }

 /* tipc_node_try_addr(): Check if addr can be used by peer, suggest other if not
+ * Returns suggested address if any, otherwise 0
  */
 u32 tipc_node_try_addr(struct net *net, u8 *id, u32 addr)
 {
@@@ -884,12 -820,14 +885,14 @@@
     if (n) {
         addr = n->addr;
         tipc_node_put(n);
+        return addr;
     }
-    /* Even this node may be in trial phase */
+
+    /* Even this node may be in conflict */
     if (tn->trial_addr == addr)
         return tipc_node_suggest_addr(net, addr);

-    return addr;
+    return 0;
 }
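tipc_node_cleanup() compares jiffies against the delete_at stamp armed 300 seconds earlier via msecs_to_jiffies(NODE_CLEANUP_AFTER). time_after() stays correct across counter wraparound because it is a signed subtraction. A userspace illustration of that comparison (the kernel macro additionally type-checks its arguments and works on long):

#include <stdio.h>
#include <stdint.h>

/* wrap-safe "has a passed b?" check, 32-bit variant for illustration */
#define time_after(a, b) ((int32_t)((b) - (a)) < 0)

int main(void)
{
    uint32_t now = 0xfffffff0u;        /* counter is close to wrapping */
    uint32_t delete_at = now + 300;    /* deadline wraps past zero */

    printf("expired now? %d\n", time_after(now, delete_at));         /* 0 */
    printf("expired later? %d\n", time_after(now + 600, delete_at)); /* 1 */
    return 0;
}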
 void tipc_node_check_dest(struct net *net, u32 addr,
@@@ -1236,7 -1174,6 +1239,7 @@@ static void node_lost_contact(struct ti
     uint i;

     pr_debug("Lost contact with %x\n", n->addr);
+    n->delete_at = jiffies + msecs_to_jiffies(NODE_CLEANUP_AFTER);

     /* Clean up broadcast state */
     tipc_bcast_remove_peer(n->net, n->bc_entry.link);
@@@ -1544,7 -1481,7 +1547,7 @@@ static void tipc_node_bc_rcv(struct ne
  * tipc_node_check_state - check and if necessary update node state
  * @skb: TIPC packet
  * @bearer_id: identity of bearer delivering the packet
- * Returns true if state is ok, otherwise consumes buffer and returns false
+ * Returns true if state and msg are ok, otherwise false
  */
 static bool tipc_node_check_state(struct tipc_node *n, struct sk_buff *skb,
                                   int bearer_id, struct sk_buff_head *xmitq)
@@@ -1578,9 -1515,6 +1581,9 @@@
         }
     }

+    if (!tipc_link_validate_msg(l, hdr))
+        return false;
+
     /* Check and update node accessibility if applicable */
     if (state == SELF_UP_PEER_COMING) {
         if (!tipc_link_is_up(l))
@@@ -1809,6 -1743,7 +1812,6 @@@ int tipc_nl_peer_rm(struct sk_buff *skb
     struct tipc_node *peer;
     u32 addr;
     int err;
-    int i;

     /* We identify the peer by its net */
     if (!info->attrs[TIPC_NLA_NET])
@@@ -1843,7 -1778,15 +1846,7 @@@
         goto err_out;
     }

-    for (i = 0; i < MAX_BEARERS; i++) {
-        struct tipc_link_entry *le = &peer->links[i];
-
-        if (le->link) {
-            kfree(le->link);
-            le->link = NULL;
-            peer->link_cnt--;
-        }
-    }
+    tipc_node_clear_links(peer);
     tipc_node_write_unlock(peer);
     tipc_node_delete(peer);
diff --combined net/tls/tls_sw.c
index 7d194c0cd6cf,4618f1c31137..0c2d029c9d4c
--- a/net/tls/tls_sw.c
+++ b/net/tls/tls_sw.c
@@@ -53,14 -53,18 +53,14 @@@ static int tls_do_decryption(struct soc
 {
     struct tls_context *tls_ctx = tls_get_ctx(sk);
     struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
-    struct strp_msg *rxm = strp_msg(skb);
     struct aead_request *aead_req;

     int ret;
-    unsigned int req_size = sizeof(struct aead_request) +
-        crypto_aead_reqsize(ctx->aead_recv);

-    aead_req = kzalloc(req_size, flags);
+    aead_req = aead_request_alloc(ctx->aead_recv, flags);
     if (!aead_req)
         return -ENOMEM;

-    aead_request_set_tfm(aead_req, ctx->aead_recv);
     aead_request_set_ad(aead_req, TLS_AAD_SPACE_SIZE);
     aead_request_set_crypt(aead_req, sgin, sgout,
                            data_len + tls_ctx->rx.tag_size,
@@@ -70,7 -74,19 +70,7 @@@
ret = crypto_wait_req(crypto_aead_decrypt(aead_req), &ctx->async_wait);
-    if (ret < 0)
-        goto out;
-
-    rxm->offset += tls_ctx->rx.prepend_size;
-    rxm->full_len -= tls_ctx->rx.overhead_size;
-    tls_advance_record_sn(sk, &tls_ctx->rx);
-
-    ctx->decrypted = true;
-
-    ctx->saved_data_ready(sk);
-
-out:
-    kfree(aead_req);
+    aead_request_free(aead_req);
     return ret;
 }
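Both allocation sites in this file switch from an open-coded kzalloc of sizeof(struct aead_request) plus crypto_aead_reqsize() to aead_request_alloc(), which performs the same single allocation and also binds the transform, so the explicit aead_request_set_tfm() call can go; aead_request_free() then pairs with the alloc in place of kfree(). What the helper wraps is the common header-plus-variable-context allocation pattern; a standalone illustration (names hypothetical, not the crypto API):

#include <stdio.h>
#include <stdlib.h>

struct request {
    void *tfm;             /* bound transform */
    unsigned char ctx[];   /* flexible array; size depends on the tfm */
};

/* One allocation sized for header + per-transform context, tfm bound. */
static struct request *request_alloc(void *tfm, size_t reqsize)
{
    struct request *req = calloc(1, sizeof(*req) + reqsize);

    if (req)
        req->tfm = tfm;
    return req;
}

int main(void)
{
    int dummy_tfm;
    struct request *req = request_alloc(&dummy_tfm, 64);

    if (!req)
        return 1;
    printf("allocated request with 64-byte trailing context\n");
    free(req); /* the kernel helper's _free() counterpart */
    return 0;
}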
@@@ -208,7 -224,8 +208,7 @@@ static int tls_push_record(struct sock
     struct aead_request *req;
     int rc;

-    req = kzalloc(sizeof(struct aead_request) +
-                  crypto_aead_reqsize(ctx->aead_send), sk->sk_allocation);
+    req = aead_request_alloc(ctx->aead_send, sk->sk_allocation);
     if (!req)
         return -ENOMEM;

@@@ -250,7 -267,7 +250,7 @@@

     tls_advance_record_sn(sk, &tls_ctx->tx);
 out_req:
-    kfree(req);
+    aead_request_free(req);
     return rc;
 }

@@@ -263,7 -280,7 +263,7 @@@ static int zerocopy_from_iter(struct so
                               int length, int *pages_used,
                               unsigned int *size_used,
                               struct scatterlist *to, int to_max_pages,
-                              bool charge)
+                              bool charge, bool revert)
 {
     struct page *pages[MAX_SKB_FRAGS];
@@@ -314,8 -331,6 +314,8 @@@
 out:
     *size_used = size;
     *pages_used = num_elem;
+    if (revert)
+        iov_iter_revert(from, size);

     return rc;
 }
@@@ -417,7 -432,7 +417,7 @@@ alloc_encrypted
                                       &ctx->sg_plaintext_size,
                                       ctx->sg_plaintext_data,
                                       ARRAY_SIZE(ctx->sg_plaintext_data),
-                                      true);
+                                      true, false);
         if (ret)
             goto fallback_to_reg_send;

@@@ -425,7 -440,7 +425,7 @@@
         ret = tls_push_record(sk, msg->msg_flags, record_type);
         if (!ret)
             continue;
-        if (ret == -EAGAIN)
+        if (ret < 0)
             goto send_end;

         copied -= try_to_copy;
@@@ -655,38 -670,8 +655,38 @@@ static struct sk_buff *tls_wait_data(st
     return skb;
 }

-static int decrypt_skb(struct sock *sk, struct sk_buff *skb,
-                       struct scatterlist *sgout)
+static int decrypt_skb_update(struct sock *sk, struct sk_buff *skb,
+                              struct scatterlist *sgout, bool *zc)
+{
+    struct tls_context *tls_ctx = tls_get_ctx(sk);
+    struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
+    struct strp_msg *rxm = strp_msg(skb);
+    int err = 0;
+
+#ifdef CONFIG_TLS_DEVICE
+    err = tls_device_decrypted(sk, skb);
+    if (err < 0)
+        return err;
+#endif
+    if (!ctx->decrypted) {
+        err = decrypt_skb(sk, skb, sgout);
+        if (err < 0)
+            return err;
+    } else {
+        *zc = false;
+    }
+
+    rxm->offset += tls_ctx->rx.prepend_size;
+    rxm->full_len -= tls_ctx->rx.overhead_size;
+    tls_advance_record_sn(sk, &tls_ctx->rx);
+    ctx->decrypted = true;
+    ctx->saved_data_ready(sk);
+
+    return err;
+}
+
+int decrypt_skb(struct sock *sk, struct sk_buff *skb,
+                struct scatterlist *sgout)
 {
     struct tls_context *tls_ctx = tls_get_ctx(sk);
     struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
@@@ -716,6 -701,10 +716,10 @@@
     nsg = skb_to_sgvec(skb, &sgin[1],
                        rxm->offset + tls_ctx->rx.prepend_size,
                        rxm->full_len - tls_ctx->rx.prepend_size);
+    if (nsg < 0) {
+        ret = nsg;
+        goto out;
+    }

     tls_make_aad(ctx->rx_aad_ciphertext,
                  rxm->full_len - tls_ctx->rx.overhead_size,
@@@ -727,6 -716,7 +731,7 @@@
                 rxm->full_len - tls_ctx->rx.overhead_size, skb,
                 sk->sk_allocation);

+out:
     if (sgin != &sgin_arr[0])
         kfree(sgin);

@@@ -827,11 -817,11 +832,11 @@@ int tls_sw_recvmsg(struct sock *sk
                 err = zerocopy_from_iter(sk, &msg->msg_iter,
                                          to_copy, &pages,
                                          &chunk, &sgin[1],
-                                         MAX_SKB_FRAGS, false);
+                                         MAX_SKB_FRAGS, false, true);
                 if (err < 0)
                     goto fallback_to_reg_recv;

-                err = decrypt_skb(sk, skb, sgin);
+                err = decrypt_skb_update(sk, skb, sgin, &zc);
                 for (; pages > 0; pages--)
                     put_page(sg_page(&sgin[pages]));
                 if (err < 0) {
@@@ -840,7 -830,7 +845,7 @@@
                 }
             } else {
 fallback_to_reg_recv:
-                err = decrypt_skb(sk, skb, NULL);
+                err = decrypt_skb_update(sk, skb, NULL, &zc);
                 if (err < 0) {
                     tls_err_abort(sk, EBADMSG);
                     goto recv_end;
@@@ -895,7 -885,6 +900,7 @@@ ssize_t tls_sw_splice_read(struct socke
     int err = 0;
     long timeo;
     int chunk;
+    bool zc;
lock_sock(sk);
@@@ -912,7 -901,7 +917,7 @@@
     }

     if (!ctx->decrypted) {
-        err = decrypt_skb(sk, skb, NULL);
+        err = decrypt_skb_update(sk, skb, NULL, &zc);

         if (err < 0) {
             tls_err_abort(sk, EBADMSG);
@@@ -958,7 -947,7 +963,7 @@@ static int tls_read_size(struct strpars
 {
     struct tls_context *tls_ctx = tls_get_ctx(strp->sk);
     struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
-    char header[tls_ctx->rx.prepend_size];
+    char header[TLS_HEADER_SIZE + MAX_IV_SIZE];
     struct strp_msg *rxm = strp_msg(skb);
     size_t cipher_overhead;
     size_t data_len = 0;

     if (rxm->offset + tls_ctx->rx.prepend_size > skb->len)
         return 0;

+    /* Sanity-check size of on-stack buffer. */
+    if (WARN_ON(tls_ctx->rx.prepend_size > sizeof(header))) {
+        ret = -EINVAL;
+        goto read_failure;
+    }
+
     /* Linearize header to local buffer */
     ret = skb_copy_bits(skb, rxm->offset, header, tls_ctx->rx.prepend_size);
@@@ -1001,10 -984,6 +1006,10 @@@
         goto read_failure;
     }

+#ifdef CONFIG_TLS_DEVICE
+    handle_device_resync(strp->sk, TCP_SKB_CB(skb)->seq + rxm->offset,
+                         *(u64*)tls_ctx->rx.rec_seq);
+#endif
     return data_len + TLS_HEADER_SIZE;

 read_failure:
@@@ -1017,6 -996,9 +1022,6 @@@ static void tls_queue(struct strparser
 {
     struct tls_context *tls_ctx = tls_get_ctx(strp->sk);
     struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
-    struct strp_msg *rxm;
-
-    rxm = strp_msg(skb);
ctx->decrypted = false;
@@@ -1046,7 -1028,7 +1051,7 @@@ void tls_sw_free_resources_tx(struct so
     kfree(ctx);
 }

-void tls_sw_free_resources_rx(struct sock *sk)
+void tls_sw_release_resources_rx(struct sock *sk)
 {
     struct tls_context *tls_ctx = tls_get_ctx(sk);
     struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
@@@ -1065,14 -1047,6 +1070,14 @@@
         strp_done(&ctx->strp);
         lock_sock(sk);
     }
+}
+
+void tls_sw_free_resources_rx(struct sock *sk)
+{
+    struct tls_context *tls_ctx = tls_get_ctx(sk);
+    struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
+
+    tls_sw_release_resources_rx(sk);

     kfree(ctx);
 }
@@@ -1097,38 -1071,28 +1102,38 @@@ int tls_set_sw_offload(struct sock *sk
     }
     if (tx) {
-        sw_ctx_tx = kzalloc(sizeof(*sw_ctx_tx), GFP_KERNEL);
-        if (!sw_ctx_tx) {
-            rc = -ENOMEM;
-            goto out;
+        if (!ctx->priv_ctx_tx) {
+            sw_ctx_tx = kzalloc(sizeof(*sw_ctx_tx), GFP_KERNEL);
+            if (!sw_ctx_tx) {
+                rc = -ENOMEM;
+                goto out;
+            }
+            ctx->priv_ctx_tx = sw_ctx_tx;
+        } else {
+            sw_ctx_tx =
+                (struct tls_sw_context_tx *)ctx->priv_ctx_tx;
         }
-        crypto_init_wait(&sw_ctx_tx->async_wait);
-        ctx->priv_ctx_tx = sw_ctx_tx;
     } else {
-        sw_ctx_rx = kzalloc(sizeof(*sw_ctx_rx), GFP_KERNEL);
-        if (!sw_ctx_rx) {
-            rc = -ENOMEM;
-            goto out;
+        if (!ctx->priv_ctx_rx) {
+            sw_ctx_rx = kzalloc(sizeof(*sw_ctx_rx), GFP_KERNEL);
+            if (!sw_ctx_rx) {
+                rc = -ENOMEM;
+                goto out;
+            }
+            ctx->priv_ctx_rx = sw_ctx_rx;
+        } else {
+            sw_ctx_rx =
+                (struct tls_sw_context_rx *)ctx->priv_ctx_rx;
         }
-        crypto_init_wait(&sw_ctx_rx->async_wait);
-        ctx->priv_ctx_rx = sw_ctx_rx;
     }

     if (tx) {
+        crypto_init_wait(&sw_ctx_tx->async_wait);
         crypto_info = &ctx->crypto_send;
         cctx = &ctx->tx;
         aead = &sw_ctx_tx->aead_send;
     } else {
+        crypto_init_wait(&sw_ctx_rx->async_wait);
         crypto_info = &ctx->crypto_recv;
         cctx = &ctx->rx;
         aead = &sw_ctx_rx->aead_recv;
@@@ -1153,7 -1117,7 +1158,7 @@@
     }

     /* Sanity-check the IV size for stack allocations. */
-    if (iv_size > MAX_IV_SIZE) {
+    if (iv_size > MAX_IV_SIZE || nonce_size > MAX_IV_SIZE) {
         rc = -EINVAL;
         goto free_priv;
     }
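Two hunks in this file work together to cap what can land in fixed on-stack buffers: tls_read_size() replaces a variable-length array with a fixed header[TLS_HEADER_SIZE + MAX_IV_SIZE] buffer guarded by a WARN_ON, and tls_set_sw_offload() rejects oversized IVs and nonces before they reach the stack. A standalone sketch of the pattern (sizes are illustrative, assumed to match the kernel's TLS constants):

#include <stdio.h>
#include <string.h>

#define TLS_HEADER_SIZE 5   /* assumption: per the kernel's TLS definitions */
#define MAX_IV_SIZE 16      /* assumption: per the kernel's TLS definitions */

/* Copy a record prefix into a fixed stack buffer, bounds-checked first. */
static int read_header(const unsigned char *src, size_t prepend_size)
{
    unsigned char header[TLS_HEADER_SIZE + MAX_IV_SIZE];

    if (prepend_size > sizeof(header)) /* mirrors the WARN_ON check */
        return -1;
    memcpy(header, src, prepend_size);
    return (int)prepend_size;
}

int main(void)
{
    unsigned char wire[64] = {0};

    printf("ok: %d\n", read_header(wire, 13));       /* within the bound */
    printf("rejected: %d\n", read_header(wire, 40)); /* exceeds the bound */
    return 0;
}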
linux-merge@lists.open-mesh.org