The following commit has been merged in the master branch:
commit c63d352f053a788281eb90df0a71cd3a6b2c4040
Merge: 862b3d2090ae3d8b10bb4ee9275fd932bc4d0d44 bc3913a5378cd0ddefd1dfec6917cc12eb23a946
Author: David S. Miller <davem@davemloft.net>
Date:   Tue Dec 6 21:33:19 2016 -0500
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
diff --combined drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c index ab990da,4febe60..688617a --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c @@@ -10138,7 -10138,7 +10138,7 @@@ static void __bnx2x_add_udp_port(struc { struct bnx2x_udp_tunnel *udp_port = &bp->udp_tunnel_ports[type];
- if (!netif_running(bp->dev) || !IS_PF(bp)) + if (!netif_running(bp->dev) || !IS_PF(bp) || CHIP_IS_E1x(bp)) return;
if (udp_port->count && udp_port->dst_port == port) { @@@ -10163,7 -10163,7 +10163,7 @@@ static void __bnx2x_del_udp_port(struc { struct bnx2x_udp_tunnel *udp_port = &bp->udp_tunnel_ports[type];
- if (!IS_PF(bp)) + if (!IS_PF(bp) || CHIP_IS_E1x(bp)) return;
if (!udp_port->count || udp_port->dst_port != port) { @@@ -12080,7 -12080,8 +12080,7 @@@ static int bnx2x_get_hwinfo(struct bnx2 mtu_size, mtu);
/* if valid: update device mtu */ - if (((mtu_size + ETH_HLEN) >= - ETH_MIN_PACKET_SIZE) && + if ((mtu_size >= ETH_MIN_PACKET_SIZE) && (mtu_size <= ETH_MAX_JUMBO_PACKET_SIZE)) bp->dev->mtu = mtu_size; @@@ -13314,10 -13315,6 +13314,10 @@@ static int bnx2x_init_dev(struct bnx2x dev->dcbnl_ops = &bnx2x_dcbnl_ops; #endif
+ /* MTU range, 46 - 9600 */ + dev->min_mtu = ETH_MIN_PACKET_SIZE; + dev->max_mtu = ETH_MAX_JUMBO_PACKET_SIZE; + /* get_port_hwinfo() will set prtad and mmds properly */ bp->mdio.prtad = MDIO_PRTAD_NONE; bp->mdio.mmds = 0; @@@ -13508,6 -13505,7 +13508,7 @@@ static int bnx2x_init_firmware(struct b
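[Note: the bnx2x hunks above drop the driver-local MTU bounds test and instead publish the valid range through dev->min_mtu/dev->max_mtu, so the net core rejects out-of-range values before ndo_change_mtu is ever called. Below is a minimal standalone sketch of that range check, assuming the 46/9600 bounds quoted in the hunk's comment; it models the core behaviour rather than reproducing the kernel's dev_set_mtu().]

#include <stdio.h>

/* Bounds taken from the comment in the hunk above; standalone model only. */
#define DEMO_MIN_MTU 46
#define DEMO_MAX_MTU 9600

struct fake_netdev {
	unsigned int min_mtu;
	unsigned int max_mtu;
	unsigned int mtu;
};

/* Models the check the net core performs once a driver fills in
 * min_mtu/max_mtu, so ndo_change_mtu no longer needs its own bounds test.
 */
static int fake_dev_set_mtu(struct fake_netdev *dev, unsigned int new_mtu)
{
	if (new_mtu < dev->min_mtu || new_mtu > dev->max_mtu)
		return -1;	/* the core would return -EINVAL here */
	dev->mtu = new_mtu;
	return 0;
}

int main(void)
{
	struct fake_netdev dev = { .min_mtu = DEMO_MIN_MTU,
				   .max_mtu = DEMO_MAX_MTU,
				   .mtu = 1500 };

	printf("set 9000: %d\n", fake_dev_set_mtu(&dev, 9000));	/* accepted */
	printf("set 9700: %d\n", fake_dev_set_mtu(&dev, 9700));	/* rejected */
	return 0;
}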
/* Initialize the pointers to the init arrays */ /* Blob */ + rc = -ENOMEM; BNX2X_ALLOC_AND_SET(init_data, request_firmware_exit, be32_to_cpu_n);
/* Opcodes */ diff --combined drivers/net/ethernet/broadcom/bnxt/bnxt.c index e8ab5fd,f08a20b..e84613a --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@@ -54,7 -54,6 +54,7 @@@ #include "bnxt.h" #include "bnxt_sriov.h" #include "bnxt_ethtool.h" +#include "bnxt_dcb.h"
#define BNXT_TX_TIMEOUT (5 * HZ)
@@@ -187,11 -186,11 +187,11 @@@ static const u16 bnxt_vf_req_snif[] = };
static const u16 bnxt_async_events_arr[] = { - HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE, - HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD, - HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED, - HWRM_ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE, - HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE, + ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE, + ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD, + ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED, + ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE, + ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE, };
static bool bnxt_vf_pciid(enum board_idx idx) @@@ -1477,8 -1476,8 +1477,8 @@@ next_rx_no_prod }
#define BNXT_GET_EVENT_PORT(data) \ - ((data) & \ - HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_PORT_ID_MASK) + ((data) & \ + ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_PORT_ID_MASK)
static int bnxt_async_event_process(struct bnxt *bp, struct hwrm_async_event_cmpl *cmpl) @@@ -1487,7 -1486,7 +1487,7 @@@
/* TODO CHIMP_FW: Define event id's for link change, error etc */ switch (event_id) { - case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE: { + case ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE: { u32 data1 = le32_to_cpu(cmpl->event_data1); struct bnxt_link_info *link_info = &bp->link_info;
@@@ -1500,16 -1499,15 +1500,16 @@@ netdev_warn(bp->dev, "Link speed %d no longer supported\n", speed); } + set_bit(BNXT_LINK_SPEED_CHNG_SP_EVENT, &bp->sp_event); /* fall thru */ } - case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE: + case ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE: set_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event); break; - case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD: + case ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD: set_bit(BNXT_HWRM_PF_UNLOAD_SP_EVENT, &bp->sp_event); break; - case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED: { + case ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED: { u32 data1 = le32_to_cpu(cmpl->event_data1); u16 port_id = BNXT_GET_EVENT_PORT(data1);
@@@ -1522,7 -1520,7 +1522,7 @@@ set_bit(BNXT_HWRM_PORT_MODULE_SP_EVENT, &bp->sp_event); break; } - case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE: + case ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE: if (BNXT_PF(bp)) goto async_event_process_exit; set_bit(BNXT_RESET_TASK_SILENT_SP_EVENT, &bp->sp_event); @@@ -3435,7 -3433,13 +3435,7 @@@ static int bnxt_hwrm_vnic_set_rss(struc
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_CFG, -1, -1); if (set_rss) { - vnic->hash_type = VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4 | - VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4 | - VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6 | - VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6; - - req.hash_type = cpu_to_le32(vnic->hash_type); - + req.hash_type = cpu_to_le32(bp->rss_hash_cfg); if (vnic->flags & BNXT_VNIC_RSS_FLAG) { if (BNXT_CHIP_TYPE_NITRO_A0(bp)) max_rings = bp->rx_nr_rings - 1; @@@ -4116,7 -4120,7 +4116,7 @@@ static int bnxt_hwrm_stat_ctx_alloc(str bp->grp_info[i].fw_stats_ctx = cpr->hw_stats_ctx_id; } mutex_unlock(&bp->hwrm_cmd_lock); - return 0; + return rc; }
static int bnxt_hwrm_func_qcfg(struct bnxt *bp) @@@ -4262,16 -4266,12 +4262,16 @@@ static int bnxt_hwrm_queue_qportcfg(str goto qportcfg_exit; } bp->max_tc = resp->max_configurable_queues; + bp->max_lltc = resp->max_configurable_lossless_queues; if (bp->max_tc > BNXT_MAX_QUEUE) bp->max_tc = BNXT_MAX_QUEUE;
if (resp->queue_cfg_info & QUEUE_QPORTCFG_RESP_QUEUE_CFG_INFO_ASYM_CFG) bp->max_tc = 1;
+ if (bp->max_lltc > bp->max_tc) + bp->max_lltc = bp->max_tc; + qptr = &resp->queue_id0; for (i = 0; i < bp->max_tc; i++) { bp->q_info[i].queue_id = *qptr++; @@@ -4967,6 -4967,7 +4967,6 @@@ static void bnxt_init_napi(struct bnxt bnapi = bp->bnapi[cp_nr_rings]; netif_napi_add(bp->dev, &bnapi->napi, bnxt_poll_nitroa0, 64); - napi_hash_add(&bnapi->napi); } } else { bnapi = bp->bnapi[0]; @@@ -4998,7 -4999,7 +4998,7 @@@ static void bnxt_enable_napi(struct bnx } }
-static void bnxt_tx_disable(struct bnxt *bp) +void bnxt_tx_disable(struct bnxt *bp) { int i; struct bnxt_tx_ring_info *txr; @@@ -5016,7 -5017,7 +5016,7 @@@ netif_carrier_off(bp->dev); }
-static void bnxt_tx_enable(struct bnxt *bp) +void bnxt_tx_enable(struct bnxt *bp) { int i; struct bnxt_tx_ring_info *txr; @@@ -5108,7 -5109,6 +5108,7 @@@ static int bnxt_update_link(struct bnx struct hwrm_port_phy_qcfg_input req = {0}; struct hwrm_port_phy_qcfg_output *resp = bp->hwrm_cmd_resp_addr; u8 link_up = link_info->link_up; + u16 diff;
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_QCFG, -1, -1);
@@@ -5196,23 -5196,6 +5196,23 @@@ link_info->link_up = 0; } mutex_unlock(&bp->hwrm_cmd_lock); + + diff = link_info->support_auto_speeds ^ link_info->advertising; + if ((link_info->support_auto_speeds | diff) != + link_info->support_auto_speeds) { + /* An advertised speed is no longer supported, so we need to + * update the advertisement settings. See bnxt_reset() for + * comments about the rtnl_lock() sequence below. + */ + clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state); + rtnl_lock(); + link_info->advertising = link_info->support_auto_speeds; + if (test_bit(BNXT_STATE_OPEN, &bp->state) && + (link_info->autoneg & BNXT_AUTONEG_SPEED)) + bnxt_hwrm_set_link_setting(bp, true, false); + set_bit(BNXT_STATE_IN_SP_TASK, &bp->state); + rtnl_unlock(); + } return 0; }
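[Note: the new block in bnxt_update_link() above uses an XOR/OR test to spot advertised speed bits that the firmware no longer reports as supported, and only then rewrites the advertisement mask. A small standalone model of that bit test; the bit values used here are arbitrary, purely for illustration.]

#include <stdio.h>
#include <stdint.h>

/* Returns nonzero when 'advertising' contains a bit that is not present in
 * 'supported' - the condition that triggers the re-advertisement above.
 */
static int advertising_stale(uint16_t supported, uint16_t advertising)
{
	uint16_t diff = supported ^ advertising;

	return (supported | diff) != supported;
}

int main(void)
{
	printf("%d\n", advertising_stale(0x00ff, 0x00f0));	/* 0: subset, fine */
	printf("%d\n", advertising_stale(0x00ff, 0x0100));	/* 1: stale bit */
	return 0;
}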
@@@ -5377,7 -5360,7 +5377,7 @@@ static int bnxt_hwrm_shutdown_link(stru return 0;
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_CFG, -1, -1); - req.flags = cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE_LINK_DOWN); + req.flags = cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE_LINK_DWN); return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); }
@@@ -5440,12 -5423,6 +5440,12 @@@ static int bnxt_update_phy_setting(stru update_link = true; }
+ /* The last close may have shutdown the link, so need to call + * PHY_CFG to bring it back up. + */ + if (!netif_carrier_ok(bp->dev)) + update_link = true; + if (!bnxt_eee_config_ok(bp)) update_eee = true;
@@@ -6139,10 -6116,6 +6139,10 @@@ static void bnxt_sp_task(struct work_st if (test_and_clear_bit(BNXT_RX_NTP_FLTR_SP_EVENT, &bp->sp_event)) bnxt_cfg_ntp_filters(bp); if (test_and_clear_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event)) { + if (test_and_clear_bit(BNXT_LINK_SPEED_CHNG_SP_EVENT, + &bp->sp_event)) + bnxt_hwrm_phy_qcaps(bp); + rc = bnxt_update_link(bp, true); if (rc) netdev_err(bp->dev, "SP task can't update link (rc: %x)\n", @@@ -6330,6 -6303,9 +6330,6 @@@ static int bnxt_change_mtu(struct net_d { struct bnxt *bp = netdev_priv(dev);
- if (new_mtu < 60 || new_mtu > 9500) - return -EINVAL; - if (netif_running(dev)) bnxt_close_nic(bp, false, false);
@@@ -6342,10 -6318,17 +6342,10 @@@ return 0; }
-static int bnxt_setup_tc(struct net_device *dev, u32 handle, __be16 proto, - struct tc_to_netdev *ntc) +int bnxt_setup_mq_tc(struct net_device *dev, u8 tc) { struct bnxt *bp = netdev_priv(dev); bool sh = false; - u8 tc; - - if (ntc->type != TC_SETUP_MQPRIO) - return -EINVAL; - - tc = ntc->tc;
if (tc > bp->max_tc) { netdev_err(dev, "too many traffic classes requested: %d Max supported is %d\n", @@@ -6388,15 -6371,6 +6388,15 @@@ return 0; }
+static int bnxt_setup_tc(struct net_device *dev, u32 handle, __be16 proto, + struct tc_to_netdev *ntc) +{ + if (ntc->type != TC_SETUP_MQPRIO) + return -EINVAL; + + return bnxt_setup_mq_tc(dev, ntc->tc); +} + #ifdef CONFIG_RFS_ACCEL static bool bnxt_fltr_match(struct bnxt_ntuple_filter *f1, struct bnxt_ntuple_filter *f2) @@@ -6687,7 -6661,6 +6687,7 @@@ static void bnxt_remove_one(struct pci_
bnxt_hwrm_func_drv_unrgtr(bp); bnxt_free_hwrm_resources(bp); + bnxt_dcb_free(bp); pci_iounmap(pdev, bp->bar2); pci_iounmap(pdev, bp->bar1); pci_iounmap(pdev, bp->bar0); @@@ -6911,12 -6884,6 +6911,12 @@@ static int bnxt_init_one(struct pci_de dev->features |= dev->hw_features | NETIF_F_HIGHDMA; dev->priv_flags |= IFF_UNICAST_FLT;
+ /* MTU range: 60 - 9500 */ + dev->min_mtu = ETH_ZLEN; + dev->max_mtu = 9500; + + bnxt_dcb_init(bp); + #ifdef CONFIG_BNXT_SRIOV init_waitqueue_head(&bp->sriov_cfg_wait); #endif @@@ -6957,19 -6924,6 +6957,19 @@@ #endif bnxt_set_dflt_rings(bp);
+ /* Default RSS hash cfg. */ + bp->rss_hash_cfg = VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4 | + VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4 | + VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6 | + VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6; + if (!BNXT_CHIP_NUM_57X0X(bp->chip_num) && + !BNXT_CHIP_TYPE_NITRO_A0(bp) && + bp->hwrm_spec_code >= 0x10501) { + bp->flags |= BNXT_FLAG_UDP_RSS_CAP; + bp->rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4 | + VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6; + } + if (BNXT_PF(bp) && !BNXT_CHIP_TYPE_NITRO_A0(bp)) { dev->hw_features |= NETIF_F_NTUPLE; if (bnxt_rfs_capable(bp)) { diff --combined drivers/net/ethernet/cirrus/ep93xx_eth.c index 9119af0,9a161e9..a1de0d1 --- a/drivers/net/ethernet/cirrus/ep93xx_eth.c +++ b/drivers/net/ethernet/cirrus/ep93xx_eth.c @@@ -468,6 -468,9 +468,9 @@@ static void ep93xx_free_buffers(struct struct device *dev = ep->dev->dev.parent; int i;
+ if (!ep->descs) + return; + for (i = 0; i < RX_QUEUE_ENTRIES; i++) { dma_addr_t d;
@@@ -490,6 -493,7 +493,7 @@@
dma_free_coherent(dev, sizeof(struct ep93xx_descs), ep->descs, ep->descs_dma_addr); + ep->descs = NULL; }
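[Note: the ep93xx hunk above makes the buffer teardown idempotent: return early when nothing was allocated and clear the pointer after freeing, so a second call is harmless. A standalone sketch of the same check-free-clear pattern; struct and function names are illustrative, not the driver's.]

#include <stdio.h>
#include <stdlib.h>

struct demo_priv {
	void *descs;
};

/* Safe to call whether or not buffers exist, and safe to call twice. */
static void demo_free_buffers(struct demo_priv *ep)
{
	if (!ep->descs)
		return;

	free(ep->descs);
	ep->descs = NULL;
}

int main(void)
{
	struct demo_priv ep = { .descs = malloc(128) };

	demo_free_buffers(&ep);	/* frees and clears */
	demo_free_buffers(&ep);	/* second call is a no-op */
	printf("descs=%p\n", ep.descs);
	return 0;
}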
static int ep93xx_alloc_buffers(struct ep93xx_priv *ep) @@@ -749,6 -753,7 +753,6 @@@ static const struct net_device_ops ep93 .ndo_start_xmit = ep93xx_xmit, .ndo_do_ioctl = ep93xx_ioctl, .ndo_validate_addr = eth_validate_addr, - .ndo_change_mtu = eth_change_mtu, .ndo_set_mac_address = eth_mac_addr, };
diff --combined drivers/net/ethernet/freescale/fec_main.c index ecaa7a9,12aef1b..38160c2 --- a/drivers/net/ethernet/freescale/fec_main.c +++ b/drivers/net/ethernet/freescale/fec_main.c @@@ -1841,11 -1841,11 +1841,11 @@@ static int fec_enet_clk_enable(struct n ret = clk_prepare_enable(fep->clk_ahb); if (ret) return ret; - if (fep->clk_enet_out) { - ret = clk_prepare_enable(fep->clk_enet_out); - if (ret) - goto failed_clk_enet_out; - } + + ret = clk_prepare_enable(fep->clk_enet_out); + if (ret) + goto failed_clk_enet_out; + if (fep->clk_ptp) { mutex_lock(&fep->ptp_clk_mutex); ret = clk_prepare_enable(fep->clk_ptp); @@@ -1857,20 -1857,23 +1857,20 @@@ } mutex_unlock(&fep->ptp_clk_mutex); } - if (fep->clk_ref) { - ret = clk_prepare_enable(fep->clk_ref); - if (ret) - goto failed_clk_ref; - } + + ret = clk_prepare_enable(fep->clk_ref); + if (ret) + goto failed_clk_ref; } else { clk_disable_unprepare(fep->clk_ahb); - if (fep->clk_enet_out) - clk_disable_unprepare(fep->clk_enet_out); + clk_disable_unprepare(fep->clk_enet_out); if (fep->clk_ptp) { mutex_lock(&fep->ptp_clk_mutex); clk_disable_unprepare(fep->clk_ptp); fep->ptp_clk_on = false; mutex_unlock(&fep->ptp_clk_mutex); } - if (fep->clk_ref) - clk_disable_unprepare(fep->clk_ref); + clk_disable_unprepare(fep->clk_ref); }
return 0; @@@ -2310,6 -2313,8 +2310,8 @@@ static const struct fec_stat { "IEEE_rx_octets_ok", IEEE_R_OCTETS_OK }, };
+ #define FEC_STATS_SIZE (ARRAY_SIZE(fec_stats) * sizeof(u64)) + static void fec_enet_update_ethtool_stats(struct net_device *dev) { struct fec_enet_private *fep = netdev_priv(dev); @@@ -2327,7 -2332,7 +2329,7 @@@ static void fec_enet_get_ethtool_stats( if (netif_running(dev)) fec_enet_update_ethtool_stats(dev);
- memcpy(data, fep->ethtool_stats, ARRAY_SIZE(fec_stats) * sizeof(u64)); + memcpy(data, fep->ethtool_stats, FEC_STATS_SIZE); }
static void fec_enet_get_strings(struct net_device *netdev, @@@ -2352,8 -2357,24 +2354,14 @@@ static int fec_enet_get_sset_count(stru return -EOPNOTSUPP; } } + + #else /* !defined(CONFIG_M5272) */ + #define FEC_STATS_SIZE 0 + static inline void fec_enet_update_ethtool_stats(struct net_device *dev) + { + } #endif /* !defined(CONFIG_M5272) */
-static int fec_enet_nway_reset(struct net_device *dev) -{ - struct phy_device *phydev = dev->phydev; - - if (!phydev) - return -ENODEV; - - return genphy_restart_aneg(phydev); -} - /* ITR clock source is enet system clock (clk_ahb). * TCTT unit is cycle_ns * 64 cycle * So, the ICTT value = X us / (cycle_ns * 64) @@@ -2553,7 -2574,7 +2561,7 @@@ static const struct ethtool_ops fec_ene .get_drvinfo = fec_enet_get_drvinfo, .get_regs_len = fec_enet_get_regs_len, .get_regs = fec_enet_get_regs, - .nway_reset = fec_enet_nway_reset, + .nway_reset = phy_ethtool_nway_reset, .get_link = ethtool_op_get_link, .get_coalesce = fec_enet_get_coalesce, .set_coalesce = fec_enet_set_coalesce, @@@ -3054,6 -3075,7 +3062,6 @@@ static const struct net_device_ops fec_ .ndo_stop = fec_enet_close, .ndo_start_xmit = fec_enet_start_xmit, .ndo_set_rx_mode = set_multicast_list, - .ndo_change_mtu = eth_change_mtu, .ndo_validate_addr = eth_validate_addr, .ndo_tx_timeout = fec_timeout, .ndo_set_mac_address = fec_set_mac_address, @@@ -3279,8 -3301,7 +3287,7 @@@ fec_probe(struct platform_device *pdev
/* Init network device */ ndev = alloc_etherdev_mqs(sizeof(struct fec_enet_private) + - ARRAY_SIZE(fec_stats) * sizeof(u64), - num_tx_qs, num_rx_qs); + FEC_STATS_SIZE, num_tx_qs, num_rx_qs); if (!ndev) return -ENOMEM;
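[Note: the FEC_STATS_SIZE change above derives the ethtool stats buffer size from the stats table itself, and defines it to 0 when the table is compiled out, so the allocation in fec_probe() and the memcpy() in get_ethtool_stats can never disagree with the table length. A standalone sketch of that sizing idiom, using an illustrative stats table rather than the driver's.]

#include <stdio.h>
#include <stdint.h>
#include <string.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

struct demo_stat {
	const char *name;
	unsigned int reg;
};

static const struct demo_stat demo_stats[] = {
	{ "rx_packets", 0x200 },
	{ "tx_packets", 0x204 },
};

/* One u64 slot per table entry, computed from the table itself. */
#define DEMO_STATS_SIZE (ARRAY_SIZE(demo_stats) * sizeof(uint64_t))

int main(void)
{
	uint64_t snapshot[ARRAY_SIZE(demo_stats)] = { 0 };
	uint64_t out[ARRAY_SIZE(demo_stats)];

	memcpy(out, snapshot, DEMO_STATS_SIZE);
	printf("stats buffer is %zu bytes\n", DEMO_STATS_SIZE);
	return 0;
}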
diff --combined drivers/net/ethernet/mellanox/mlx5/core/cmd.c index b0448b5,bfe410e..3797cc7 --- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c @@@ -54,6 -54,14 +54,6 @@@ enum };
enum { - NUM_LONG_LISTS = 2, - NUM_MED_LISTS = 64, - LONG_LIST_SIZE = (2ULL * 1024 * 1024 * 1024 / PAGE_SIZE) * 8 + 16 + - MLX5_CMD_DATA_BLOCK_SIZE, - MED_LIST_SIZE = 16 + MLX5_CMD_DATA_BLOCK_SIZE, -}; - -enum { MLX5_CMD_DELIVERY_STAT_OK = 0x0, MLX5_CMD_DELIVERY_STAT_SIGNAT_ERR = 0x1, MLX5_CMD_DELIVERY_STAT_TOK_ERR = 0x2, @@@ -260,11 -268,6 +260,6 @@@ static void dump_buf(void *buf, int siz pr_debug("\n"); }
- enum { - MLX5_DRIVER_STATUS_ABORTED = 0xfe, - MLX5_DRIVER_SYND = 0xbadd00de, - }; - static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op, u32 *synd, u8 *status) { @@@ -310,8 -313,6 +305,8 @@@ case MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY: case MLX5_CMD_OP_SET_FLOW_TABLE_ROOT: case MLX5_CMD_OP_DEALLOC_ENCAP_HEADER: + case MLX5_CMD_OP_DESTROY_SCHEDULING_ELEMENT: + case MLX5_CMD_OP_DESTROY_QOS_PARA_VPORT: return MLX5_CMD_STAT_OK;
case MLX5_CMD_OP_QUERY_HCA_CAP: @@@ -413,14 -414,11 +408,14 @@@ case MLX5_CMD_OP_QUERY_FLOW_TABLE: case MLX5_CMD_OP_CREATE_FLOW_GROUP: case MLX5_CMD_OP_QUERY_FLOW_GROUP: - case MLX5_CMD_OP_QUERY_FLOW_TABLE_ENTRY: case MLX5_CMD_OP_ALLOC_FLOW_COUNTER: case MLX5_CMD_OP_QUERY_FLOW_COUNTER: case MLX5_CMD_OP_ALLOC_ENCAP_HEADER: + case MLX5_CMD_OP_CREATE_SCHEDULING_ELEMENT: + case MLX5_CMD_OP_QUERY_SCHEDULING_ELEMENT: + case MLX5_CMD_OP_MODIFY_SCHEDULING_ELEMENT: + case MLX5_CMD_OP_CREATE_QOS_PARA_VPORT: *status = MLX5_DRIVER_STATUS_ABORTED; *synd = MLX5_DRIVER_SYND; return -EIO; @@@ -577,12 -575,6 +572,12 @@@ const char *mlx5_command_str(int comman MLX5_COMMAND_STR_CASE(MODIFY_FLOW_TABLE); MLX5_COMMAND_STR_CASE(ALLOC_ENCAP_HEADER); MLX5_COMMAND_STR_CASE(DEALLOC_ENCAP_HEADER); + MLX5_COMMAND_STR_CASE(CREATE_SCHEDULING_ELEMENT); + MLX5_COMMAND_STR_CASE(DESTROY_SCHEDULING_ELEMENT); + MLX5_COMMAND_STR_CASE(QUERY_SCHEDULING_ELEMENT); + MLX5_COMMAND_STR_CASE(MODIFY_SCHEDULING_ELEMENT); + MLX5_COMMAND_STR_CASE(CREATE_QOS_PARA_VPORT); + MLX5_COMMAND_STR_CASE(DESTROY_QOS_PARA_VPORT); default: return "unknown command opcode"; } } @@@ -1066,13 -1058,14 +1061,13 @@@ static struct mlx5_cmd_mailbox *alloc_c if (!mailbox) return ERR_PTR(-ENOMEM);
- mailbox->buf = pci_pool_alloc(dev->cmd.pool, flags, - &mailbox->dma); + mailbox->buf = pci_pool_zalloc(dev->cmd.pool, flags, + &mailbox->dma); if (!mailbox->buf) { mlx5_core_dbg(dev, "failed allocation\n"); kfree(mailbox); return ERR_PTR(-ENOMEM); } - memset(mailbox->buf, 0, sizeof(struct mlx5_cmd_prot_block)); mailbox->next = NULL;
return mailbox; @@@ -1363,10 -1356,10 +1358,10 @@@ static void free_msg(struct mlx5_core_d { unsigned long flags;
- if (msg->cache) { - spin_lock_irqsave(&msg->cache->lock, flags); - list_add_tail(&msg->list, &msg->cache->head); - spin_unlock_irqrestore(&msg->cache->lock, flags); + if (msg->parent) { + spin_lock_irqsave(&msg->parent->lock, flags); + list_add_tail(&msg->list, &msg->parent->head); + spin_unlock_irqrestore(&msg->parent->lock, flags); } else { mlx5_free_cmd_msg(dev, msg); } @@@ -1463,37 -1456,30 +1458,37 @@@ static struct mlx5_cmd_msg *alloc_msg(s gfp_t gfp) { struct mlx5_cmd_msg *msg = ERR_PTR(-ENOMEM); + struct cmd_msg_cache *ch = NULL; struct mlx5_cmd *cmd = &dev->cmd; - struct cache_ent *ent = NULL; - - if (in_size > MED_LIST_SIZE && in_size <= LONG_LIST_SIZE) - ent = &cmd->cache.large; - else if (in_size > 16 && in_size <= MED_LIST_SIZE) - ent = &cmd->cache.med; - - if (ent) { - spin_lock_irq(&ent->lock); - if (!list_empty(&ent->head)) { - msg = list_entry(ent->head.next, typeof(*msg), list); - /* For cached lists, we must explicitly state what is - * the real size - */ - msg->len = in_size; - list_del(&msg->list); + int i; + + if (in_size <= 16) + goto cache_miss; + + for (i = 0; i < MLX5_NUM_COMMAND_CACHES; i++) { + ch = &cmd->cache[i]; + if (in_size > ch->max_inbox_size) + continue; + spin_lock_irq(&ch->lock); + if (list_empty(&ch->head)) { + spin_unlock_irq(&ch->lock); + continue; } - spin_unlock_irq(&ent->lock); + msg = list_entry(ch->head.next, typeof(*msg), list); + /* For cached lists, we must explicitly state what is + * the real size + */ + msg->len = in_size; + list_del(&msg->list); + spin_unlock_irq(&ch->lock); + break; }
- if (IS_ERR(msg)) - msg = mlx5_alloc_cmd_msg(dev, gfp, in_size, 0); + if (!IS_ERR(msg)) + return msg;
+cache_miss: + msg = mlx5_alloc_cmd_msg(dev, gfp, in_size, 0); return msg; }
@@@ -1591,56 -1577,58 +1586,56 @@@ EXPORT_SYMBOL(mlx5_cmd_exec_cb)
static void destroy_msg_cache(struct mlx5_core_dev *dev) { - struct mlx5_cmd *cmd = &dev->cmd; + struct cmd_msg_cache *ch; struct mlx5_cmd_msg *msg; struct mlx5_cmd_msg *n; + int i;
- list_for_each_entry_safe(msg, n, &cmd->cache.large.head, list) { - list_del(&msg->list); - mlx5_free_cmd_msg(dev, msg); - } - - list_for_each_entry_safe(msg, n, &cmd->cache.med.head, list) { - list_del(&msg->list); - mlx5_free_cmd_msg(dev, msg); + for (i = 0; i < MLX5_NUM_COMMAND_CACHES; i++) { + ch = &dev->cmd.cache[i]; + list_for_each_entry_safe(msg, n, &ch->head, list) { + list_del(&msg->list); + mlx5_free_cmd_msg(dev, msg); + } } }
-static int create_msg_cache(struct mlx5_core_dev *dev) +static unsigned cmd_cache_num_ent[MLX5_NUM_COMMAND_CACHES] = { + 512, 32, 16, 8, 2 +}; + +static unsigned cmd_cache_ent_size[MLX5_NUM_COMMAND_CACHES] = { + 16 + MLX5_CMD_DATA_BLOCK_SIZE, + 16 + MLX5_CMD_DATA_BLOCK_SIZE * 2, + 16 + MLX5_CMD_DATA_BLOCK_SIZE * 16, + 16 + MLX5_CMD_DATA_BLOCK_SIZE * 256, + 16 + MLX5_CMD_DATA_BLOCK_SIZE * 512, +}; + +static void create_msg_cache(struct mlx5_core_dev *dev) { struct mlx5_cmd *cmd = &dev->cmd; + struct cmd_msg_cache *ch; struct mlx5_cmd_msg *msg; - int err; int i; - - spin_lock_init(&cmd->cache.large.lock); - INIT_LIST_HEAD(&cmd->cache.large.head); - spin_lock_init(&cmd->cache.med.lock); - INIT_LIST_HEAD(&cmd->cache.med.head); - - for (i = 0; i < NUM_LONG_LISTS; i++) { - msg = mlx5_alloc_cmd_msg(dev, GFP_KERNEL, LONG_LIST_SIZE, 0); - if (IS_ERR(msg)) { - err = PTR_ERR(msg); - goto ex_err; - } - msg->cache = &cmd->cache.large; - list_add_tail(&msg->list, &cmd->cache.large.head); - } - - for (i = 0; i < NUM_MED_LISTS; i++) { - msg = mlx5_alloc_cmd_msg(dev, GFP_KERNEL, MED_LIST_SIZE, 0); - if (IS_ERR(msg)) { - err = PTR_ERR(msg); - goto ex_err; + int k; + + /* Initialize and fill the caches with initial entries */ + for (k = 0; k < MLX5_NUM_COMMAND_CACHES; k++) { + ch = &cmd->cache[k]; + spin_lock_init(&ch->lock); + INIT_LIST_HEAD(&ch->head); + ch->num_ent = cmd_cache_num_ent[k]; + ch->max_inbox_size = cmd_cache_ent_size[k]; + for (i = 0; i < ch->num_ent; i++) { + msg = mlx5_alloc_cmd_msg(dev, GFP_KERNEL | __GFP_NOWARN, + ch->max_inbox_size, 0); + if (IS_ERR(msg)) + break; + msg->parent = ch; + list_add_tail(&msg->list, &ch->head); } - msg->cache = &cmd->cache.med; - list_add_tail(&msg->list, &cmd->cache.med.head); } - - return 0; - -ex_err: - destroy_msg_cache(dev); - return err; }
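[Note: the command-cache rework above replaces the two fixed "large"/"med" lists with an array of caches keyed by maximum inbox size; alloc_msg() walks them smallest-first and takes the first bucket that can hold the request, falling back to a direct allocation on a miss. Below is a standalone model of that bucket selection, ignoring the free-list check for brevity and using 512 as a stand-in for MLX5_CMD_DATA_BLOCK_SIZE.]

#include <stdio.h>

#define NUM_CACHES 5

/* Illustrative bucket limits, mirroring the cmd_cache_ent_size[] table. */
static const unsigned int max_inbox_size[NUM_CACHES] = {
	16 + 512,
	16 + 512 * 2,
	16 + 512 * 16,
	16 + 512 * 256,
	16 + 512 * 512,
};

/* Returns the index of the smallest bucket that fits, or -1 for the cases
 * that bypass the cache (tiny messages, or larger than any bucket).
 */
static int pick_cache(unsigned int in_size)
{
	int i;

	if (in_size <= 16)
		return -1;

	for (i = 0; i < NUM_CACHES; i++)
		if (in_size <= max_inbox_size[i])
			return i;

	return -1;
}

int main(void)
{
	printf("600 bytes -> bucket %d\n", pick_cache(600));
	printf("64K bytes -> bucket %d\n", pick_cache(64 * 1024));
	printf("1M bytes  -> bucket %d\n", pick_cache(1024 * 1024));
	return 0;
}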
static int alloc_cmd_page(struct mlx5_core_dev *dev, struct mlx5_cmd *cmd) @@@ -1763,7 -1751,11 +1758,7 @@@ int mlx5_cmd_init(struct mlx5_core_dev
cmd->mode = CMD_MODE_POLLING;
- err = create_msg_cache(dev); - if (err) { - dev_err(&dev->pdev->dev, "failed to create command cache\n"); - goto err_free_page; - } + create_msg_cache(dev);
set_wqname(dev); cmd->wq = create_singlethread_workqueue(cmd->wq_name); diff --combined drivers/net/ethernet/mellanox/mlx5/core/en.h index 63dd639,71382df..951dbd5 --- a/drivers/net/ethernet/mellanox/mlx5/core/en.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h @@@ -77,9 -77,9 +77,9 @@@ MLX5_MPWRQ_WQE_PAGE_ORDER)
#define MLX5_MTT_OCTW(npages) (ALIGN(npages, 8) / 2) -#define MLX5E_REQUIRED_MTTS(rqs, wqes)\ - (rqs * wqes * ALIGN(MLX5_MPWRQ_PAGES_PER_WQE, 8)) -#define MLX5E_VALID_NUM_MTTS(num_mtts) (MLX5_MTT_OCTW(num_mtts) <= U16_MAX) +#define MLX5E_REQUIRED_MTTS(wqes) \ + (wqes * ALIGN(MLX5_MPWRQ_PAGES_PER_WQE, 8)) +#define MLX5E_VALID_NUM_MTTS(num_mtts) (MLX5_MTT_OCTW(num_mtts) - 1 <= U16_MAX)
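[Note: the reworked macros above size the UMR MTT array per work queue (pages per WQE rounded up to a multiple of 8) and validate that the resulting octword count still fits a 16-bit field. A standalone sketch of that arithmetic follows; PAGES_PER_WQE and the RQ size used below are illustrative values, not the driver's.]

#include <stdio.h>
#include <stdint.h>

#define ALIGN_UP(x, a)      (((x) + (a) - 1) / (a) * (a))
#define PAGES_PER_WQE       16			/* illustrative */
#define MTT_OCTW(npages)    (ALIGN_UP(npages, 8) / 2)
#define REQUIRED_MTTS(wqes) ((wqes) * ALIGN_UP(PAGES_PER_WQE, 8))
#define VALID_NUM_MTTS(n)   (MTT_OCTW(n) - 1 <= UINT16_MAX)

int main(void)
{
	uint64_t wqes = 1 << 13;		/* example RQ size */
	uint64_t mtts = REQUIRED_MTTS(wqes);

	printf("wqes=%llu mtts=%llu valid=%d\n",
	       (unsigned long long)wqes, (unsigned long long)mtts,
	       VALID_NUM_MTTS(mtts) ? 1 : 0);
	return 0;
}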
#define MLX5_UMR_ALIGN (2048) #define MLX5_MPWRQ_SMALL_PACKET_THRESHOLD (128) @@@ -150,6 -150,12 +150,6 @@@ static inline int mlx5_max_log_rq_size( } }
-enum { - MLX5E_INLINE_MODE_L2, - MLX5E_INLINE_MODE_VPORT_CONTEXT, - MLX5_INLINE_MODE_NOT_REQUIRED, -}; - struct mlx5e_tx_wqe { struct mlx5_wqe_ctrl_seg ctrl; struct mlx5_wqe_eth_seg eth; @@@ -167,28 -173,22 +167,28 @@@ struct mlx5e_umr_wqe struct mlx5_wqe_data_seg data; };
+extern const char mlx5e_self_tests[][ETH_GSTRING_LEN]; + static const char mlx5e_priv_flags[][ETH_GSTRING_LEN] = { "rx_cqe_moder", + "rx_cqe_compress", };
enum mlx5e_priv_flag { MLX5E_PFLAG_RX_CQE_BASED_MODER = (1 << 0), + MLX5E_PFLAG_RX_CQE_COMPRESS = (1 << 1), };
-#define MLX5E_SET_PRIV_FLAG(priv, pflag, enable) \ - do { \ - if (enable) \ - priv->pflags |= pflag; \ - else \ - priv->pflags &= ~pflag; \ +#define MLX5E_SET_PFLAG(priv, pflag, enable) \ + do { \ + if (enable) \ + (priv)->params.pflags |= (pflag); \ + else \ + (priv)->params.pflags &= ~(pflag); \ } while (0)
+#define MLX5E_GET_PFLAG(priv, pflag) (!!((priv)->params.pflags & (pflag))) + #ifdef CONFIG_MLX5_CORE_EN_DCB #define MLX5E_MAX_BW_ALLOC 100 /* Max percentage of BW allocation */ #endif @@@ -207,7 -207,8 +207,7 @@@ struct mlx5e_params u16 num_channels; u8 num_tc; u8 rx_cq_period_mode; - bool rx_cqe_compress_admin; - bool rx_cqe_compress; + bool rx_cqe_compress_def; struct mlx5e_cq_moder rx_cq_moderation; struct mlx5e_cq_moder tx_cq_moderation; u16 min_rx_wqes; @@@ -219,34 -220,12 +219,34 @@@ u8 toeplitz_hash_key[40]; u32 indirection_rqt[MLX5E_INDIR_RQT_SIZE]; bool vlan_strip_disable; -#ifdef CONFIG_MLX5_CORE_EN_DCB - struct ieee_ets ets; -#endif bool rx_am_enabled; u32 lro_timeout; + u32 pflags; +}; + +#ifdef CONFIG_MLX5_CORE_EN_DCB +struct mlx5e_cee_config { + /* bw pct for priority group */ + u8 pg_bw_pct[CEE_DCBX_MAX_PGS]; + u8 prio_to_pg_map[CEE_DCBX_MAX_PRIO]; + bool pfc_setting[CEE_DCBX_MAX_PRIO]; + bool pfc_enable; +}; + +enum { + MLX5_DCB_CHG_RESET, + MLX5_DCB_NO_CHG, + MLX5_DCB_CHG_NO_RESET, +}; + +struct mlx5e_dcbx { + enum mlx5_dcbx_oper_mode mode; + struct mlx5e_cee_config cee_cfg; /* pending configuration */ + + /* The only setting that cannot be read from FW */ + u8 tc_tsa[IEEE_8021QAZ_MAX_TCS]; }; +#endif
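[Note: MLX5E_SET_PFLAG/MLX5E_GET_PFLAG above keep all private flags in a single bitmask inside params, replacing the per-field booleans removed elsewhere in this diff. A standalone sketch of how such set/get macros behave; the struct and flag names are illustrative stand-ins.]

#include <stdio.h>

struct demo_params {
	unsigned int pflags;
};

struct demo_priv {
	struct demo_params params;
};

#define DEMO_PFLAG_CQE_MODER    (1u << 0)
#define DEMO_PFLAG_CQE_COMPRESS (1u << 1)

#define DEMO_SET_PFLAG(priv, pflag, enable)			\
	do {							\
		if (enable)					\
			(priv)->params.pflags |= (pflag);	\
		else						\
			(priv)->params.pflags &= ~(pflag);	\
	} while (0)

#define DEMO_GET_PFLAG(priv, pflag) (!!((priv)->params.pflags & (pflag)))

int main(void)
{
	struct demo_priv priv = { .params = { .pflags = 0 } };

	DEMO_SET_PFLAG(&priv, DEMO_PFLAG_CQE_COMPRESS, 1);
	printf("compress: %d moder: %d\n",
	       DEMO_GET_PFLAG(&priv, DEMO_PFLAG_CQE_COMPRESS),
	       DEMO_GET_PFLAG(&priv, DEMO_PFLAG_CQE_MODER));
	return 0;
}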
struct mlx5e_tstamp { rwlock_t lock; @@@ -262,7 -241,7 +262,7 @@@ };
enum { - MLX5E_RQ_STATE_FLUSH, + MLX5E_RQ_STATE_ENABLED, MLX5E_RQ_STATE_UMR_WQE_IN_PROGRESS, MLX5E_RQ_STATE_AM, }; @@@ -286,7 -265,7 +286,7 @@@ struct mlx5e_cq u16 decmprs_wqe_counter;
/* control */ - struct mlx5_wq_ctrl wq_ctrl; + struct mlx5_frag_wq_ctrl wq_ctrl; } ____cacheline_aligned_in_smp;
struct mlx5e_rq; @@@ -347,6 -326,7 +347,6 @@@ struct mlx5e_rq struct { struct mlx5e_mpw_info *info; void *mtt_no_align; - u32 mtt_offset; } mpwqe; }; struct { @@@ -381,7 -361,6 +381,7 @@@ u32 rqn; struct mlx5e_channel *channel; struct mlx5e_priv *priv; + struct mlx5_core_mkey umr_mkey; } ____cacheline_aligned_in_smp;
struct mlx5e_umr_dma_info { @@@ -415,7 -394,7 +415,7 @@@ struct mlx5e_sq_dma };
enum { - MLX5E_SQ_STATE_FLUSH, + MLX5E_SQ_STATE_ENABLED, MLX5E_SQ_STATE_BF_ENABLE, };
@@@ -545,7 -524,7 +545,7 @@@ struct mlx5e_vxlan_db
struct mlx5e_l2_rule { u8 addr[ETH_ALEN + 2]; - struct mlx5_flow_rule *rule; + struct mlx5_flow_handle *rule; };
struct mlx5e_flow_table { @@@ -566,10 -545,10 +566,10 @@@ struct mlx5e_tc_table struct mlx5e_vlan_table { struct mlx5e_flow_table ft; unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)]; - struct mlx5_flow_rule *active_vlans_rule[VLAN_N_VID]; - struct mlx5_flow_rule *untagged_rule; - struct mlx5_flow_rule *any_vlan_rule; - bool filter_disabled; + struct mlx5_flow_handle *active_vlans_rule[VLAN_N_VID]; + struct mlx5_flow_handle *untagged_rule; + struct mlx5_flow_handle *any_vlan_rule; + bool filter_disabled; };
struct mlx5e_l2_table { @@@ -587,14 -566,14 +587,14 @@@ /* L3/L4 traffic type classifier */ struct mlx5e_ttc_table { struct mlx5e_flow_table ft; - struct mlx5_flow_rule *rules[MLX5E_NUM_TT]; + struct mlx5_flow_handle *rules[MLX5E_NUM_TT]; };
#define ARFS_HASH_SHIFT BITS_PER_BYTE #define ARFS_HASH_SIZE BIT(BITS_PER_BYTE) struct arfs_table { struct mlx5e_flow_table ft; - struct mlx5_flow_rule *default_rule; + struct mlx5_flow_handle *default_rule; struct hlist_head rules_hash[ARFS_HASH_SIZE]; };
@@@ -689,6 -668,7 +689,6 @@@ struct mlx5e_priv
unsigned long state; struct mutex state_lock; /* Protects Interface state */ - struct mlx5_core_mkey umr_mkey; struct mlx5e_rq drop_rq;
struct mlx5e_channel **channel; @@@ -708,15 -688,12 +708,15 @@@ struct work_struct tx_timeout_work; struct delayed_work update_stats_work;
- u32 pflags; struct mlx5_core_dev *mdev; struct net_device *netdev; struct mlx5e_stats stats; struct mlx5e_tstamp tstamp; u16 q_counter; +#ifdef CONFIG_MLX5_CORE_EN_DCB + struct mlx5e_dcbx dcbx; +#endif + const struct mlx5e_profile *profile; void *ppriv; }; @@@ -758,9 -735,6 +758,9 @@@ int mlx5e_create_flow_steering(struct m void mlx5e_destroy_flow_steering(struct mlx5e_priv *priv); void mlx5e_init_l2_addr(struct mlx5e_priv *priv); void mlx5e_destroy_flow_table(struct mlx5e_flow_table *ft); +int mlx5e_self_test_num(struct mlx5e_priv *priv); +void mlx5e_self_test(struct net_device *ndev, struct ethtool_test *etest, + u64 *buf); int mlx5e_ethtool_get_flow(struct mlx5e_priv *priv, struct ethtool_rxnfc *info, int location); int mlx5e_ethtool_get_all_flows(struct mlx5e_priv *priv, @@@ -837,7 -811,8 +837,7 @@@ static inline void mlx5e_cq_arm(struct
static inline u32 mlx5e_get_wqe_mtt_offset(struct mlx5e_rq *rq, u16 wqe_ix) { - return rq->mpwqe.mtt_offset + - wqe_ix * ALIGN(MLX5_MPWRQ_PAGES_PER_WQE, 8); + return wqe_ix * ALIGN(MLX5_MPWRQ_PAGES_PER_WQE, 8); }
static inline int mlx5e_get_max_num_channels(struct mlx5_core_dev *mdev) @@@ -850,7 -825,6 +850,7 @@@ extern const struct ethtool_ops mlx5e_e #ifdef CONFIG_MLX5_CORE_EN_DCB extern const struct dcbnl_rtnl_ops mlx5e_dcbnl_ops; int mlx5e_dcbnl_ieee_setets_core(struct mlx5e_priv *priv, struct ieee_ets *ets); +void mlx5e_dcbnl_initialize(struct mlx5e_priv *priv); #endif
#ifndef CONFIG_RFS_ACCEL @@@ -886,8 -860,7 +886,8 @@@ void mlx5e_destroy_tir(struct mlx5_core struct mlx5e_tir *tir); int mlx5e_create_mdev_resources(struct mlx5_core_dev *mdev); void mlx5e_destroy_mdev_resources(struct mlx5_core_dev *mdev); -int mlx5e_refresh_tirs_self_loopback_enable(struct mlx5_core_dev *mdev); +int mlx5e_refresh_tirs_self_loopback(struct mlx5_core_dev *mdev, + bool enable_uc_lb);
struct mlx5_eswitch_rep; int mlx5e_vport_rep_load(struct mlx5_eswitch *esw, @@@ -901,7 -874,6 +901,7 @@@ int mlx5e_add_sqs_fwd_rules(struct mlx5 void mlx5e_remove_sqs_fwd_rules(struct mlx5e_priv *priv); int mlx5e_attr_get(struct net_device *dev, struct switchdev_attr *attr); void mlx5e_handle_rx_cqe_rep(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe); +void mlx5e_update_hw_rep_counters(struct mlx5e_priv *priv);
int mlx5e_create_direct_rqts(struct mlx5e_priv *priv); void mlx5e_destroy_rqt(struct mlx5e_priv *priv, struct mlx5e_rqt *rqt); @@@ -918,16 -890,8 +918,16 @@@ struct net_device *mlx5e_create_netdev( void mlx5e_destroy_netdev(struct mlx5_core_dev *mdev, struct mlx5e_priv *priv); int mlx5e_attach_netdev(struct mlx5_core_dev *mdev, struct net_device *netdev); void mlx5e_detach_netdev(struct mlx5_core_dev *mdev, struct net_device *netdev); -struct rtnl_link_stats64 * -mlx5e_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats); u32 mlx5e_choose_lro_timeout(struct mlx5_core_dev *mdev, u32 wanted_timeout); +void mlx5e_add_vxlan_port(struct net_device *netdev, + struct udp_tunnel_info *ti); +void mlx5e_del_vxlan_port(struct net_device *netdev, + struct udp_tunnel_info *ti); + +int mlx5e_get_offload_stats(int attr_id, const struct net_device *dev, + void *sp); +bool mlx5e_has_offload_stats(const struct net_device *dev, int attr_id);
+bool mlx5e_is_uplink_rep(struct mlx5e_priv *priv); +bool mlx5e_is_vf_vport_rep(struct mlx5e_priv *priv); #endif /* __MLX5_EN_H__ */ diff --combined drivers/net/ethernet/mellanox/mlx5/core/en_main.c index 9def5cc,246d98e..0702027 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@@ -84,8 -84,7 +84,8 @@@ static void mlx5e_set_rq_type_params(st switch (priv->params.rq_wq_type) { case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ: priv->params.log_rq_size = MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE_MPW; - priv->params.mpwqe_log_stride_sz = priv->params.rx_cqe_compress ? + priv->params.mpwqe_log_stride_sz = + MLX5E_GET_PFLAG(priv, MLX5E_PFLAG_RX_CQE_COMPRESS) ? MLX5_MPWRQ_LOG_STRIDE_SIZE_CQE_COMPRESS : MLX5_MPWRQ_LOG_STRIDE_SIZE; priv->params.mpwqe_log_num_strides = MLX5_MPWRQ_LOG_WQE_SZ - @@@ -102,7 -101,7 +102,7 @@@ priv->params.rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ, BIT(priv->params.log_rq_size), BIT(priv->params.mpwqe_log_stride_sz), - priv->params.rx_cqe_compress_admin); + MLX5E_GET_PFLAG(priv, MLX5E_PFLAG_RX_CQE_COMPRESS)); }
static void mlx5e_set_rq_priv_params(struct mlx5e_priv *priv) @@@ -291,36 -290,12 +291,36 @@@ static void mlx5e_update_q_counter(stru &qcnt->rx_out_of_buffer); }
+static void mlx5e_update_pcie_counters(struct mlx5e_priv *priv) +{ + struct mlx5e_pcie_stats *pcie_stats = &priv->stats.pcie; + struct mlx5_core_dev *mdev = priv->mdev; + int sz = MLX5_ST_SZ_BYTES(mpcnt_reg); + void *out; + u32 *in; + + in = mlx5_vzalloc(sz); + if (!in) + return; + + out = pcie_stats->pcie_perf_counters; + MLX5_SET(mpcnt_reg, in, grp, MLX5_PCIE_PERFORMANCE_COUNTERS_GROUP); + mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_MPCNT, 0, 0); + + out = pcie_stats->pcie_tas_counters; + MLX5_SET(mpcnt_reg, in, grp, MLX5_PCIE_TIMERS_AND_STATES_COUNTERS_GROUP); + mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_MPCNT, 0, 0); + + kvfree(in); +} + void mlx5e_update_stats(struct mlx5e_priv *priv) { mlx5e_update_q_counter(priv); mlx5e_update_vport_counters(priv); mlx5e_update_pport_counters(priv); mlx5e_update_sw_counters(priv); + mlx5e_update_pcie_counters(priv); }
void mlx5e_update_stats_work(struct work_struct *work) @@@ -471,50 -446,14 +471,50 @@@ static void mlx5e_rq_free_mpwqe_info(st kfree(rq->mpwqe.info); }
-static bool mlx5e_is_vf_vport_rep(struct mlx5e_priv *priv) +static int mlx5e_create_umr_mkey(struct mlx5e_priv *priv, + u64 npages, u8 page_shift, + struct mlx5_core_mkey *umr_mkey) { - struct mlx5_eswitch_rep *rep = (struct mlx5_eswitch_rep *)priv->ppriv; + struct mlx5_core_dev *mdev = priv->mdev; + int inlen = MLX5_ST_SZ_BYTES(create_mkey_in); + void *mkc; + u32 *in; + int err; + + if (!MLX5E_VALID_NUM_MTTS(npages)) + return -EINVAL; + + in = mlx5_vzalloc(inlen); + if (!in) + return -ENOMEM; + + mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry); + + MLX5_SET(mkc, mkc, free, 1); + MLX5_SET(mkc, mkc, umr_en, 1); + MLX5_SET(mkc, mkc, lw, 1); + MLX5_SET(mkc, mkc, lr, 1); + MLX5_SET(mkc, mkc, access_mode, MLX5_MKC_ACCESS_MODE_MTT); + + MLX5_SET(mkc, mkc, qpn, 0xffffff); + MLX5_SET(mkc, mkc, pd, mdev->mlx5e_res.pdn); + MLX5_SET64(mkc, mkc, len, npages << page_shift); + MLX5_SET(mkc, mkc, translations_octword_size, + MLX5_MTT_OCTW(npages)); + MLX5_SET(mkc, mkc, log_page_size, page_shift);
- if (rep && rep->vport != FDB_UPLINK_VPORT) - return true; + err = mlx5_core_create_mkey(mdev, umr_mkey, in, inlen);
- return false; + kvfree(in); + return err; +} + +static int mlx5e_create_rq_umr_mkey(struct mlx5e_rq *rq) +{ + struct mlx5e_priv *priv = rq->priv; + u64 num_mtts = MLX5E_REQUIRED_MTTS(BIT(priv->params.log_rq_size)); + + return mlx5e_create_umr_mkey(priv, num_mtts, PAGE_SHIFT, &rq->umr_mkey); }
static int mlx5e_create_rq(struct mlx5e_channel *c, @@@ -550,13 -489,7 +550,13 @@@ rq->channel = c; rq->ix = c->ix; rq->priv = c->priv; - rq->xdp_prog = priv->xdp_prog; + + rq->xdp_prog = priv->xdp_prog ? bpf_prog_inc(priv->xdp_prog) : NULL; + if (IS_ERR(rq->xdp_prog)) { + err = PTR_ERR(rq->xdp_prog); + rq->xdp_prog = NULL; + goto err_rq_wq_destroy; + }
rq->buff.map_dir = DMA_FROM_DEVICE; if (rq->xdp_prog) @@@ -573,20 -506,18 +573,20 @@@ rq->alloc_wqe = mlx5e_alloc_rx_mpwqe; rq->dealloc_wqe = mlx5e_dealloc_rx_mpwqe;
- rq->mpwqe.mtt_offset = c->ix * - MLX5E_REQUIRED_MTTS(1, BIT(priv->params.log_rq_size)); - rq->mpwqe_stride_sz = BIT(priv->params.mpwqe_log_stride_sz); rq->mpwqe_num_strides = BIT(priv->params.mpwqe_log_num_strides);
rq->buff.wqe_sz = rq->mpwqe_stride_sz * rq->mpwqe_num_strides; byte_count = rq->buff.wqe_sz; - rq->mkey_be = cpu_to_be32(c->priv->umr_mkey.key); - err = mlx5e_rq_alloc_mpwqe_info(rq, c); + + err = mlx5e_create_rq_umr_mkey(rq); if (err) goto err_rq_wq_destroy; + rq->mkey_be = cpu_to_be32(rq->umr_mkey.key); + + err = mlx5e_rq_alloc_mpwqe_info(rq, c); + if (err) + goto err_destroy_umr_mkey; break; default: /* MLX5_WQ_TYPE_LINKED_LIST */ rq->dma_info = kzalloc_node(wq_sz * sizeof(*rq->dma_info), @@@ -635,14 -566,12 +635,14 @@@ rq->page_cache.head = 0; rq->page_cache.tail = 0;
- if (rq->xdp_prog) - bpf_prog_add(rq->xdp_prog, 1); - return 0;
+err_destroy_umr_mkey: + mlx5_core_destroy_mkey(mdev, &rq->umr_mkey); + err_rq_wq_destroy: + if (rq->xdp_prog) + bpf_prog_put(rq->xdp_prog); mlx5_wq_destroy(&rq->wq_ctrl);
return err; @@@ -658,7 -587,6 +658,7 @@@ static void mlx5e_destroy_rq(struct mlx switch (rq->wq_type) { case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ: mlx5e_rq_free_mpwqe_info(rq); + mlx5_core_destroy_mkey(rq->priv->mdev, &rq->umr_mkey); break; default: /* MLX5_WQ_TYPE_LINKED_LIST */ kfree(rq->dma_info); @@@ -831,6 -759,7 +831,7 @@@ static int mlx5e_open_rq(struct mlx5e_c if (err) goto err_destroy_rq;
+ set_bit(MLX5E_RQ_STATE_ENABLED, &rq->state); err = mlx5e_modify_rq_state(rq, MLX5_RQC_STATE_RST, MLX5_RQC_STATE_RDY); if (err) goto err_disable_rq; @@@ -845,6 -774,7 +846,7 @@@ return 0;
err_disable_rq: + clear_bit(MLX5E_RQ_STATE_ENABLED, &rq->state); mlx5e_disable_rq(rq); err_destroy_rq: mlx5e_destroy_rq(rq); @@@ -854,7 -784,7 +856,7 @@@
static void mlx5e_close_rq(struct mlx5e_rq *rq) { - set_bit(MLX5E_RQ_STATE_FLUSH, &rq->state); + clear_bit(MLX5E_RQ_STATE_ENABLED, &rq->state); napi_synchronize(&rq->channel->napi); /* prevent mlx5e_post_rx_wqes */ cancel_work_sync(&rq->am.work);
@@@ -1010,7 -940,7 +1012,7 @@@ static int mlx5e_create_sq(struct mlx5e sq->bf_buf_size = (1 << MLX5_CAP_GEN(mdev, log_bf_reg_size)) / 2; sq->max_inline = param->max_inline; sq->min_inline_mode = - MLX5_CAP_ETH(mdev, wqe_inline_mode) == MLX5E_INLINE_MODE_VPORT_CONTEXT ? + MLX5_CAP_ETH(mdev, wqe_inline_mode) == MLX5_CAP_INLINE_MODE_VPORT_CONTEXT ? param->min_inline_mode : 0;
err = mlx5e_alloc_sq_db(sq, cpu_to_node(c->cpu)); @@@ -1078,7 -1008,6 +1080,6 @@@ static int mlx5e_enable_sq(struct mlx5e MLX5_SET(sqc, sqc, min_wqe_inline_mode, sq->min_inline_mode); MLX5_SET(sqc, sqc, state, MLX5_SQC_STATE_RST); MLX5_SET(sqc, sqc, tis_lst_sz, param->type == MLX5E_SQ_ICO ? 0 : 1); - MLX5_SET(sqc, sqc, flush_in_error_en, 1);
MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_CYCLIC); MLX5_SET(wq, wq, uar_page, sq->uar.index); @@@ -1155,6 -1084,7 +1156,7 @@@ static int mlx5e_open_sq(struct mlx5e_c if (err) goto err_destroy_sq;
+ set_bit(MLX5E_SQ_STATE_ENABLED, &sq->state); err = mlx5e_modify_sq(sq, MLX5_SQC_STATE_RST, MLX5_SQC_STATE_RDY, false, 0); if (err) @@@ -1168,6 -1098,7 +1170,7 @@@ return 0;
err_disable_sq: + clear_bit(MLX5E_SQ_STATE_ENABLED, &sq->state); mlx5e_disable_sq(sq); err_destroy_sq: mlx5e_destroy_sq(sq); @@@ -1184,7 -1115,7 +1187,7 @@@ static inline void netif_tx_disable_que
static void mlx5e_close_sq(struct mlx5e_sq *sq) { - set_bit(MLX5E_SQ_STATE_FLUSH, &sq->state); + clear_bit(MLX5E_SQ_STATE_ENABLED, &sq->state); /* prevent netif_tx_wake_queue */ napi_synchronize(&sq->channel->napi);
@@@ -1253,7 -1184,7 +1256,7 @@@ static int mlx5e_create_cq(struct mlx5e
static void mlx5e_destroy_cq(struct mlx5e_cq *cq) { - mlx5_wq_destroy(&cq->wq_ctrl); + mlx5_cqwq_destroy(&cq->wq_ctrl); }
static int mlx5e_enable_cq(struct mlx5e_cq *cq, struct mlx5e_cq_param *param) @@@ -1270,7 -1201,7 +1273,7 @@@ int err;
inlen = MLX5_ST_SZ_BYTES(create_cq_in) + - sizeof(u64) * cq->wq_ctrl.buf.npages; + sizeof(u64) * cq->wq_ctrl.frag_buf.npages; in = mlx5_vzalloc(inlen); if (!in) return -ENOMEM; @@@ -1279,15 -1210,15 +1282,15 @@@
memcpy(cqc, param->cqc, sizeof(param->cqc));
- mlx5_fill_page_array(&cq->wq_ctrl.buf, - (__be64 *)MLX5_ADDR_OF(create_cq_in, in, pas)); + mlx5_fill_page_frag_array(&cq->wq_ctrl.frag_buf, + (__be64 *)MLX5_ADDR_OF(create_cq_in, in, pas));
mlx5_vector2eqn(mdev, param->eq_ix, &eqn, &irqn_not_used);
MLX5_SET(cqc, cqc, cq_period_mode, param->cq_period_mode); MLX5_SET(cqc, cqc, c_eqn, eqn); MLX5_SET(cqc, cqc, uar_page, mcq->uar->index); - MLX5_SET(cqc, cqc, log_page_size, cq->wq_ctrl.buf.page_shift - + MLX5_SET(cqc, cqc, log_page_size, cq->wq_ctrl.frag_buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT); MLX5_SET64(cqc, cqc, dbr_addr, cq->wq_ctrl.db.dma);
@@@ -1605,6 -1536,7 +1608,6 @@@ err_close_icosq_cq
err_napi_del: netif_napi_del(&c->napi); - napi_hash_del(&c->napi); kfree(c);
return err; @@@ -1625,6 -1557,9 +1628,6 @@@ static void mlx5e_close_channel(struct mlx5e_close_cq(&c->icosq.cq); netif_napi_del(&c->napi);
- napi_hash_del(&c->napi); - synchronize_rcu(); - kfree(c); }
@@@ -1717,7 -1652,7 +1720,7 @@@ static void mlx5e_build_rx_cq_param(str }
MLX5_SET(cqc, cqc, log_cq_size, log_cq_size); - if (priv->params.rx_cqe_compress) { + if (MLX5E_GET_PFLAG(priv, MLX5E_PFLAG_RX_CQE_COMPRESS)) { MLX5_SET(cqc, cqc, mini_cqe_res_format, MLX5_CQE_FORMAT_CSUM); MLX5_SET(cqc, cqc, cqe_comp_en, 1); } @@@ -2189,7 -2124,7 +2192,7 @@@ int mlx5e_open_locked(struct net_devic goto err_clear_state_opened_flag; }
- err = mlx5e_refresh_tirs_self_loopback_enable(priv->mdev); + err = mlx5e_refresh_tirs_self_loopback(priv->mdev, false); if (err) { netdev_err(netdev, "%s: mlx5e_refresh_tirs_self_loopback_enable failed, %d\n", __func__, err); @@@ -2707,7 -2642,7 +2710,7 @@@ mqprio return mlx5e_setup_tc(dev, tc->tc); }
-struct rtnl_link_stats64 * +static struct rtnl_link_stats64 * mlx5e_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats) { struct mlx5e_priv *priv = netdev_priv(dev); @@@ -2715,20 -2650,13 +2718,20 @@@ struct mlx5e_vport_stats *vstats = &priv->stats.vport; struct mlx5e_pport_stats *pstats = &priv->stats.pport;
- stats->rx_packets = sstats->rx_packets; - stats->rx_bytes = sstats->rx_bytes; - stats->tx_packets = sstats->tx_packets; - stats->tx_bytes = sstats->tx_bytes; + if (mlx5e_is_uplink_rep(priv)) { + stats->rx_packets = PPORT_802_3_GET(pstats, a_frames_received_ok); + stats->rx_bytes = PPORT_802_3_GET(pstats, a_octets_received_ok); + stats->tx_packets = PPORT_802_3_GET(pstats, a_frames_transmitted_ok); + stats->tx_bytes = PPORT_802_3_GET(pstats, a_octets_transmitted_ok); + } else { + stats->rx_packets = sstats->rx_packets; + stats->rx_bytes = sstats->rx_bytes; + stats->tx_packets = sstats->tx_packets; + stats->tx_bytes = sstats->tx_bytes; + stats->tx_dropped = sstats->tx_queue_dropped; + }
stats->rx_dropped = priv->stats.qcnt.rx_out_of_buffer; - stats->tx_dropped = sstats->tx_queue_dropped;
stats->rx_length_errors = PPORT_802_3_GET(pstats, a_in_range_length_errors) + @@@ -2925,13 -2853,31 +2928,13 @@@ static int mlx5e_set_features(struct ne return err ? -EINVAL : 0; }
-#define MXL5_HW_MIN_MTU 64 -#define MXL5E_MIN_MTU (MXL5_HW_MIN_MTU + ETH_FCS_LEN) - static int mlx5e_change_mtu(struct net_device *netdev, int new_mtu) { struct mlx5e_priv *priv = netdev_priv(netdev); - struct mlx5_core_dev *mdev = priv->mdev; bool was_opened; - u16 max_mtu; - u16 min_mtu; int err = 0; bool reset;
- mlx5_query_port_max_mtu(mdev, &max_mtu, 1); - - max_mtu = MLX5E_HW2SW_MTU(max_mtu); - min_mtu = MLX5E_HW2SW_MTU(MXL5E_MIN_MTU); - - if (new_mtu > max_mtu || new_mtu < min_mtu) { - netdev_err(netdev, - "%s: Bad MTU (%d), valid range is: [%d..%d]\n", - __func__, new_mtu, min_mtu, max_mtu); - return -EINVAL; - } - mutex_lock(&priv->state_lock);
reset = !priv->params.lro_en && @@@ -3001,20 -2947,6 +3004,20 @@@ static int mlx5e_set_vf_trust(struct ne
return mlx5_eswitch_set_vport_trust(mdev->priv.eswitch, vf + 1, setting); } + +static int mlx5e_set_vf_rate(struct net_device *dev, int vf, int min_tx_rate, + int max_tx_rate) +{ + struct mlx5e_priv *priv = netdev_priv(dev); + struct mlx5_core_dev *mdev = priv->mdev; + + if (min_tx_rate) + return -EOPNOTSUPP; + + return mlx5_eswitch_set_vport_rate(mdev->priv.eswitch, vf + 1, + max_tx_rate); +} + static int mlx5_vport_link2ifla(u8 esw_link) { switch (esw_link) { @@@ -3071,8 -3003,8 +3074,8 @@@ static int mlx5e_get_vf_stats(struct ne vf_stats); }
-static void mlx5e_add_vxlan_port(struct net_device *netdev, - struct udp_tunnel_info *ti) +void mlx5e_add_vxlan_port(struct net_device *netdev, + struct udp_tunnel_info *ti) { struct mlx5e_priv *priv = netdev_priv(netdev);
@@@ -3085,8 -3017,8 +3088,8 @@@ mlx5e_vxlan_queue_work(priv, ti->sa_family, be16_to_cpu(ti->port), 1); }
-static void mlx5e_del_vxlan_port(struct net_device *netdev, - struct udp_tunnel_info *ti) +void mlx5e_del_vxlan_port(struct net_device *netdev, + struct udp_tunnel_info *ti) { struct mlx5e_priv *priv = netdev_priv(netdev);
@@@ -3163,7 -3095,7 +3166,7 @@@ static void mlx5e_tx_timeout(struct net if (!netif_xmit_stopped(netdev_get_tx_queue(dev, i))) continue; sched_work = true; - set_bit(MLX5E_SQ_STATE_FLUSH, &sq->state); + clear_bit(MLX5E_SQ_STATE_ENABLED, &sq->state); netdev_err(dev, "TX timeout on queue: %d, SQ: 0x%x, CQ: 0x%x, SQ Cons: 0x%x SQ Prod: 0x%x\n", i, sq->sqn, sq->cq.mcq.cqn, sq->cc, sq->pc); } @@@ -3194,21 -3126,11 +3197,21 @@@ static int mlx5e_xdp_set(struct net_dev
if (was_opened && reset) mlx5e_close_locked(netdev); + if (was_opened && !reset) { + /* num_channels is invariant here, so we can take the + * batched reference right upfront. + */ + prog = bpf_prog_add(prog, priv->params.num_channels); + if (IS_ERR(prog)) { + err = PTR_ERR(prog); + goto unlock; + } + }
- /* exchange programs */ + /* exchange programs, extra prog reference we got from caller + * as long as we don't fail from this point onwards. + */ old_prog = xchg(&priv->xdp_prog, prog); - if (prog) - bpf_prog_add(prog, 1); if (old_prog) bpf_prog_put(old_prog);
@@@ -3224,16 -3146,17 +3227,16 @@@ /* exchanging programs w/o reset, we update ref counts on behalf * of the channels RQs here. */ - bpf_prog_add(prog, priv->params.num_channels); for (i = 0; i < priv->params.num_channels; i++) { struct mlx5e_channel *c = priv->channel[i];
- set_bit(MLX5E_RQ_STATE_FLUSH, &c->rq.state); + clear_bit(MLX5E_RQ_STATE_ENABLED, &c->rq.state); napi_synchronize(&c->napi); /* prevent mlx5e_poll_rx_cq from accessing rq->xdp_prog */
old_prog = xchg(&c->rq.xdp_prog, prog);
- clear_bit(MLX5E_RQ_STATE_FLUSH, &c->rq.state); + set_bit(MLX5E_RQ_STATE_ENABLED, &c->rq.state); /* napi_schedule in case we have missed anything */ set_bit(MLX5E_CHANNEL_NAPI_SCHED, &c->flags); napi_schedule(&c->napi); @@@ -3331,7 -3254,6 +3334,7 @@@ static const struct net_device_ops mlx5 .ndo_set_vf_vlan = mlx5e_set_vf_vlan, .ndo_set_vf_spoofchk = mlx5e_set_vf_spoofchk, .ndo_set_vf_trust = mlx5e_set_vf_trust, + .ndo_set_vf_rate = mlx5e_set_vf_rate, .ndo_get_vf_config = mlx5e_get_vf_config, .ndo_set_vf_link_state = mlx5e_set_vf_link_state, .ndo_get_vf_stats = mlx5e_get_vf_stats, @@@ -3340,8 -3262,6 +3343,8 @@@ #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_poll_controller = mlx5e_netpoll, #endif + .ndo_has_offload_stats = mlx5e_has_offload_stats, + .ndo_get_offload_stats = mlx5e_get_offload_stats, };
static int mlx5e_check_required_hca_cap(struct mlx5_core_dev *mdev) @@@ -3378,6 -3298,24 +3381,6 @@@ u16 mlx5e_get_max_inline_cap(struct mlx 2 /*sizeof(mlx5e_tx_wqe.inline_hdr_start)*/; }
-#ifdef CONFIG_MLX5_CORE_EN_DCB -static void mlx5e_ets_init(struct mlx5e_priv *priv) -{ - int i; - - priv->params.ets.ets_cap = mlx5_max_tc(priv->mdev) + 1; - for (i = 0; i < priv->params.ets.ets_cap; i++) { - priv->params.ets.tc_tx_bw[i] = MLX5E_MAX_BW_ALLOC; - priv->params.ets.tc_tsa[i] = IEEE_8021QAZ_TSA_VENDOR; - priv->params.ets.prio_tc[i] = i; - } - - /* tclass[prio=0]=1, tclass[prio=1]=0, tclass[prio=i]=i (for i>1) */ - priv->params.ets.prio_tc[0] = 1; - priv->params.ets.prio_tc[1] = 0; -} -#endif - void mlx5e_build_default_indir_rqt(struct mlx5_core_dev *mdev, u32 *indirection_rqt, int len, int num_channels) @@@ -3452,13 -3390,14 +3455,13 @@@ static void mlx5e_query_min_inline(stru u8 *min_inline_mode) { switch (MLX5_CAP_ETH(mdev, wqe_inline_mode)) { - case MLX5E_INLINE_MODE_L2: + case MLX5_CAP_INLINE_MODE_L2: *min_inline_mode = MLX5_INLINE_MODE_L2; break; - case MLX5E_INLINE_MODE_VPORT_CONTEXT: - mlx5_query_nic_vport_min_inline(mdev, - min_inline_mode); + case MLX5_CAP_INLINE_MODE_VPORT_CONTEXT: + mlx5_query_nic_vport_min_inline(mdev, 0, min_inline_mode); break; - case MLX5_INLINE_MODE_NOT_REQUIRED: + case MLX5_CAP_INLINE_MODE_NOT_REQUIRED: *min_inline_mode = MLX5_INLINE_MODE_NONE; break; } @@@ -3500,16 -3439,17 +3503,16 @@@ static void mlx5e_build_nic_netdev_priv priv->params.log_sq_size = MLX5E_PARAMS_DEFAULT_LOG_SQ_SIZE;
/* set CQE compression */ - priv->params.rx_cqe_compress_admin = false; + priv->params.rx_cqe_compress_def = false; if (MLX5_CAP_GEN(mdev, cqe_compression) && MLX5_CAP_GEN(mdev, vport_group_manager)) { mlx5e_get_max_linkspeed(mdev, &link_speed); mlx5e_get_pci_bw(mdev, &pci_bw); mlx5_core_dbg(mdev, "Max link speed = %d, PCI BW = %d\n", link_speed, pci_bw); - priv->params.rx_cqe_compress_admin = + priv->params.rx_cqe_compress_def = cqe_compress_heuristic(link_speed, pci_bw); } - priv->params.rx_cqe_compress = priv->params.rx_cqe_compress_admin;
mlx5e_set_rq_priv_params(priv); if (priv->params.rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ) @@@ -3540,9 -3480,12 +3543,9 @@@ SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
/* Initialize pflags */ - MLX5E_SET_PRIV_FLAG(priv, MLX5E_PFLAG_RX_CQE_BASED_MODER, - priv->params.rx_cq_period_mode == MLX5_CQ_PERIOD_MODE_START_FROM_CQE); - -#ifdef CONFIG_MLX5_CORE_EN_DCB - mlx5e_ets_init(priv); -#endif + MLX5E_SET_PFLAG(priv, MLX5E_PFLAG_RX_CQE_BASED_MODER, + priv->params.rx_cq_period_mode == MLX5_CQ_PERIOD_MODE_START_FROM_CQE); + MLX5E_SET_PFLAG(priv, MLX5E_PFLAG_RX_CQE_COMPRESS, priv->params.rx_cqe_compress_def);
mutex_init(&priv->state_lock);
@@@ -3580,8 -3523,7 +3583,8 @@@ static void mlx5e_build_nic_netdev(stru if (MLX5_CAP_GEN(mdev, vport_group_manager)) { netdev->netdev_ops = &mlx5e_netdev_ops_sriov; #ifdef CONFIG_MLX5_CORE_EN_DCB - netdev->dcbnl_ops = &mlx5e_dcbnl_ops; + if (MLX5_CAP_GEN(mdev, qos)) + netdev->dcbnl_ops = &mlx5e_dcbnl_ops; #endif } else { netdev->netdev_ops = &mlx5e_netdev_ops_basic; @@@ -3677,6 -3619,43 +3680,6 @@@ static void mlx5e_destroy_q_counter(str mlx5_core_dealloc_q_counter(priv->mdev, priv->q_counter); }
-static int mlx5e_create_umr_mkey(struct mlx5e_priv *priv) -{ - struct mlx5_core_dev *mdev = priv->mdev; - u64 npages = MLX5E_REQUIRED_MTTS(priv->profile->max_nch(mdev), - BIT(MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE_MPW)); - int inlen = MLX5_ST_SZ_BYTES(create_mkey_in); - void *mkc; - u32 *in; - int err; - - in = mlx5_vzalloc(inlen); - if (!in) - return -ENOMEM; - - mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry); - - npages = min_t(u32, ALIGN(U16_MAX, 4) * 2, npages); - - MLX5_SET(mkc, mkc, free, 1); - MLX5_SET(mkc, mkc, umr_en, 1); - MLX5_SET(mkc, mkc, lw, 1); - MLX5_SET(mkc, mkc, lr, 1); - MLX5_SET(mkc, mkc, access_mode, MLX5_MKC_ACCESS_MODE_MTT); - - MLX5_SET(mkc, mkc, qpn, 0xffffff); - MLX5_SET(mkc, mkc, pd, mdev->mlx5e_res.pdn); - MLX5_SET64(mkc, mkc, len, npages << PAGE_SHIFT); - MLX5_SET(mkc, mkc, translations_octword_size, - MLX5_MTT_OCTW(npages)); - MLX5_SET(mkc, mkc, log_page_size, PAGE_SHIFT); - - err = mlx5_core_create_mkey(mdev, &priv->umr_mkey, in, inlen); - - kvfree(in); - return err; -} - static void mlx5e_nic_init(struct mlx5_core_dev *mdev, struct net_device *netdev, const struct mlx5e_profile *profile, @@@ -3698,9 -3677,6 +3701,9 @@@ static void mlx5e_nic_cleanup(struct ml
if (MLX5_CAP_GEN(mdev, vport_group_manager)) mlx5_eswitch_unregister_vport_rep(esw, 0); + + if (priv->xdp_prog) + bpf_prog_put(priv->xdp_prog); }
static int mlx5e_init_nic_rx(struct mlx5e_priv *priv) @@@ -3783,7 -3759,7 +3786,7 @@@ static int mlx5e_init_nic_tx(struct mlx }
#ifdef CONFIG_MLX5_CORE_EN_DCB - mlx5e_dcbnl_ieee_setets_core(priv, &priv->params.ets); + mlx5e_dcbnl_initialize(priv); #endif return 0; } @@@ -3811,7 -3787,7 +3814,7 @@@ static void mlx5e_nic_enable(struct mlx rep.load = mlx5e_nic_rep_load; rep.unload = mlx5e_nic_rep_unload; rep.vport = FDB_UPLINK_VPORT; - rep.priv_data = priv; + rep.netdev = netdev; mlx5_eswitch_register_vport_rep(esw, 0, &rep); } } @@@ -3876,16 -3852,21 +3879,16 @@@ int mlx5e_attach_netdev(struct mlx5_cor { const struct mlx5e_profile *profile; struct mlx5e_priv *priv; + u16 max_mtu; int err;
priv = netdev_priv(netdev); profile = priv->profile; clear_bit(MLX5E_STATE_DESTROYING, &priv->state);
- err = mlx5e_create_umr_mkey(priv); - if (err) { - mlx5_core_err(mdev, "create umr mkey failed, %d\n", err); - goto out; - } - err = profile->init_tx(priv); if (err) - goto err_destroy_umr_mkey; + goto out;
err = mlx5e_open_drop_rq(priv); if (err) { @@@ -3901,11 -3882,6 +3904,11 @@@
mlx5e_init_l2_addr(priv);
+ /* MTU range: 68 - hw-specific max */ + netdev->min_mtu = ETH_MIN_MTU; + mlx5_query_port_max_mtu(priv->mdev, &max_mtu, 1); + netdev->max_mtu = MLX5E_HW2SW_MTU(max_mtu); + mlx5e_set_dev_port_mtu(netdev);
if (profile->enable) @@@ -3925,6 -3901,9 +3928,6 @@@ err_close_drop_rq err_cleanup_tx: profile->cleanup_tx(priv);
-err_destroy_umr_mkey: - mlx5_core_destroy_mkey(mdev, &priv->umr_mkey); - out: return err; } @@@ -3973,6 -3952,7 +3976,6 @@@ void mlx5e_detach_netdev(struct mlx5_co profile->cleanup_rx(priv); mlx5e_close_drop_rq(priv); profile->cleanup_tx(priv); - mlx5_core_destroy_mkey(priv->mdev, &priv->umr_mkey); cancel_delayed_work_sync(&priv->update_stats_work); }
diff --combined drivers/net/ethernet/mellanox/mlx5/core/en_rx.c index 42cd687,33495d8..0e2fb3e --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c @@@ -164,14 -164,14 +164,14 @@@ void mlx5e_modify_rx_cqe_compression(st
mutex_lock(&priv->state_lock);
- if (priv->params.rx_cqe_compress == val) + if (MLX5E_GET_PFLAG(priv, MLX5E_PFLAG_RX_CQE_COMPRESS) == val) goto unlock;
was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state); if (was_opened) mlx5e_close_locked(priv->netdev);
- priv->params.rx_cqe_compress = val; + MLX5E_SET_PFLAG(priv, MLX5E_PFLAG_RX_CQE_COMPRESS, val);
if (was_opened) mlx5e_open_locked(priv->netdev); @@@ -340,7 -340,7 +340,7 @@@ static inline void mlx5e_post_umr_wqe(s while ((pi = (sq->pc & wq->sz_m1)) > sq->edge) { sq->db.ico_wqe[pi].opcode = MLX5_OPCODE_NOP; sq->db.ico_wqe[pi].num_wqebbs = 1; - mlx5e_send_nop(sq, true); + mlx5e_send_nop(sq, false); }
wqe = mlx5_wq_cyc_get_wqe(wq, pi); @@@ -412,7 -412,7 +412,7 @@@ void mlx5e_post_rx_mpwqe(struct mlx5e_r
clear_bit(MLX5E_RQ_STATE_UMR_WQE_IN_PROGRESS, &rq->state);
- if (unlikely(test_bit(MLX5E_RQ_STATE_FLUSH, &rq->state))) { + if (unlikely(!test_bit(MLX5E_RQ_STATE_ENABLED, &rq->state))) { mlx5e_free_rx_mpwqe(rq, &rq->mpwqe.info[wq->head]); return; } @@@ -445,7 -445,7 +445,7 @@@ void mlx5e_dealloc_rx_mpwqe(struct mlx5 }
#define RQ_CANNOT_POST(rq) \ - (test_bit(MLX5E_RQ_STATE_FLUSH, &rq->state) || \ + (!test_bit(MLX5E_RQ_STATE_ENABLED, &rq->state) || \ test_bit(MLX5E_RQ_STATE_UMR_WQE_IN_PROGRESS, &rq->state))
bool mlx5e_post_rx_wqes(struct mlx5e_rq *rq) @@@ -737,10 -737,10 +737,10 @@@ static inlin struct sk_buff *skb_from_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe, u16 wqe_counter, u32 cqe_bcnt) { - struct bpf_prog *xdp_prog = READ_ONCE(rq->xdp_prog); struct mlx5e_dma_info *di; struct sk_buff *skb; void *va, *data; + bool consumed;
di = &rq->dma_info[wqe_counter]; va = page_address(di->page); @@@ -759,11 -759,7 +759,11 @@@ return NULL; }
- if (mlx5e_xdp_handle(rq, xdp_prog, di, data, cqe_bcnt)) + rcu_read_lock(); + consumed = mlx5e_xdp_handle(rq, READ_ONCE(rq->xdp_prog), di, data, + cqe_bcnt); + rcu_read_unlock(); + if (consumed) return NULL; /* page/packet was consumed by XDP */
skb = build_skb(va, RQ_PAGE_SIZE(rq)); @@@ -928,7 -924,7 +928,7 @@@ int mlx5e_poll_rx_cq(struct mlx5e_cq *c struct mlx5e_sq *xdp_sq = &rq->channel->xdp_sq; int work_done = 0;
- if (unlikely(test_bit(MLX5E_RQ_STATE_FLUSH, &rq->state))) + if (unlikely(!test_bit(MLX5E_RQ_STATE_ENABLED, &rq->state))) return 0;
if (cq->decmprs_left) diff --combined drivers/net/ethernet/mellanox/mlx5/core/main.c index 2dc2869,ada24e1..7b4c339 --- a/drivers/net/ethernet/mellanox/mlx5/core/main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c @@@ -62,13 -62,13 +62,13 @@@ MODULE_DESCRIPTION("Mellanox Connect-IB MODULE_LICENSE("Dual BSD/GPL"); MODULE_VERSION(DRIVER_VERSION);
- int mlx5_core_debug_mask; - module_param_named(debug_mask, mlx5_core_debug_mask, int, 0644); + unsigned int mlx5_core_debug_mask; + module_param_named(debug_mask, mlx5_core_debug_mask, uint, 0644); MODULE_PARM_DESC(debug_mask, "debug mask: 1 = dump cmd data, 2 = dump cmd exec time, 3 = both. Default=0");
#define MLX5_DEFAULT_PROF 2 - static int prof_sel = MLX5_DEFAULT_PROF; - module_param_named(prof_sel, prof_sel, int, 0444); + static unsigned int prof_sel = MLX5_DEFAULT_PROF; + module_param_named(prof_sel, prof_sel, uint, 0444); MODULE_PARM_DESC(prof_sel, "profile selector. Valid range 0 - 2");
enum { @@@ -174,41 -174,6 +174,41 @@@ static int wait_fw_init(struct mlx5_cor return err; }
+static void mlx5_set_driver_version(struct mlx5_core_dev *dev) +{ + int driver_ver_sz = MLX5_FLD_SZ_BYTES(set_driver_version_in, + driver_version); + u8 in[MLX5_ST_SZ_BYTES(set_driver_version_in)] = {0}; + u8 out[MLX5_ST_SZ_BYTES(set_driver_version_out)] = {0}; + int remaining_size = driver_ver_sz; + char *string; + + if (!MLX5_CAP_GEN(dev, driver_version)) + return; + + string = MLX5_ADDR_OF(set_driver_version_in, in, driver_version); + + strncpy(string, "Linux", remaining_size); + + remaining_size = max_t(int, 0, driver_ver_sz - strlen(string)); + strncat(string, ",", remaining_size); + + remaining_size = max_t(int, 0, driver_ver_sz - strlen(string)); + strncat(string, DRIVER_NAME, remaining_size); + + remaining_size = max_t(int, 0, driver_ver_sz - strlen(string)); + strncat(string, ",", remaining_size); + + remaining_size = max_t(int, 0, driver_ver_sz - strlen(string)); + strncat(string, DRIVER_VERSION, remaining_size); + + /*Send the command*/ + MLX5_SET(set_driver_version_in, in, opcode, + MLX5_CMD_OP_SET_DRIVER_VERSION); + + mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); +} + static int set_dma_caps(struct pci_dev *pdev) { int err; @@@ -767,13 -732,15 +767,15 @@@ static int mlx5_core_set_issi(struct ml u8 status;
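[Note: mlx5_set_driver_version() above builds a "Linux,<driver>,<version>" string into a fixed-size firmware field, re-computing the remaining space before each strncat(). A standalone sketch of that bounded-concatenation pattern follows; the field size and names are illustrative, and the helper is a model, not the kernel function.]

#include <stdio.h>
#include <string.h>

/* Concatenate the parts without ever writing past the buffer. */
static void build_driver_version(char *string, size_t driver_ver_sz,
				 const char *name, const char *version)
{
	const char *parts[] = { "Linux", ",", name, ",", version };
	size_t i, remaining_size;

	string[0] = '\0';
	for (i = 0; i < sizeof(parts) / sizeof(parts[0]); i++) {
		remaining_size = driver_ver_sz - strlen(string) - 1;
		strncat(string, parts[i], remaining_size);
	}
}

int main(void)
{
	char buf[64];

	build_driver_version(buf, sizeof(buf), "mlx5_core", "3.0-1");
	printf("%s\n", buf);	/* Linux,mlx5_core,3.0-1 */
	return 0;
}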
mlx5_cmd_mbox_status(query_out, &status, &syndrome); - if (status == MLX5_CMD_STAT_BAD_OP_ERR) { - pr_debug("Only ISSI 0 is supported\n"); - return 0; + if (!status || syndrome == MLX5_DRIVER_SYND) { + mlx5_core_err(dev, "Failed to query ISSI err(%d) status(%d) synd(%d)\n", + err, status, syndrome); + return err; }
- pr_err("failed to query ISSI err(%d)\n", err); - return err; + mlx5_core_warn(dev, "Query ISSI is not supported by FW, ISSI is 0\n"); + dev->issi = 0; + return 0; }
sup_issi = MLX5_GET(query_issi_out, query_out, supported_issi_dw0); @@@ -787,7 -754,8 +789,8 @@@ err = mlx5_cmd_exec(dev, set_in, sizeof(set_in), set_out, sizeof(set_out)); if (err) { - pr_err("failed to set ISSI=1 err(%d)\n", err); + mlx5_core_err(dev, "Failed to set ISSI to 1 err(%d)\n", + err); return err; }
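mlx5_set_driver_version(), added a few hunks above, assembles a "Linux,<driver>,<version>" string into a fixed-size firmware field, recomputing the space left before each strncat() so the concatenation stays within the field. A stand-alone sketch of that bounded concatenation, with an artificially small 16-byte field to make the truncation visible (the version string below is a placeholder, not the module's real DRIVER_VERSION):

#include <stdio.h>
#include <string.h>

#define DRIVER_NAME     "mlx5_core"
#define DRIVER_VERSION  "3.0"           /* placeholder value */
#define VER_FIELD_SZ    16              /* artificially small to show truncation */

/* append src but never let the total exceed the destination field */
static void bounded_cat(char *dst, size_t dst_sz, const char *src)
{
        size_t used = strlen(dst);
        size_t room = dst_sz > used + 1 ? dst_sz - used - 1 : 0;

        strncat(dst, src, room);
}

int main(void)
{
        char ver[VER_FIELD_SZ] = "";

        bounded_cat(ver, sizeof(ver), "Linux");
        bounded_cat(ver, sizeof(ver), ",");
        bounded_cat(ver, sizeof(ver), DRIVER_NAME);
        bounded_cat(ver, sizeof(ver), ",");
        bounded_cat(ver, sizeof(ver), DRIVER_VERSION);
        /* the version part is silently dropped once the field is full */
        printf("driver_version field: \"%s\" (%zu/%d bytes)\n",
               ver, strlen(ver), VER_FIELD_SZ);
        return 0;
}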
@@@ -1049,8 -1017,6 +1052,8 @@@ static int mlx5_load_one(struct mlx5_co goto err_pagealloc_stop; }
+ mlx5_set_driver_version(dev); + mlx5_start_health_poll(dev);
err = mlx5_query_hca_caps(dev); @@@ -1238,8 -1204,6 +1241,8 @@@ static const struct devlink_ops mlx5_de #ifdef CONFIG_MLX5_CORE_EN .eswitch_mode_set = mlx5_devlink_eswitch_mode_set, .eswitch_mode_get = mlx5_devlink_eswitch_mode_get, + .eswitch_inline_mode_set = mlx5_devlink_eswitch_inline_mode_set, + .eswitch_inline_mode_get = mlx5_devlink_eswitch_inline_mode_get, #endif };
@@@ -1266,13 -1230,6 +1269,6 @@@ static int init_one(struct pci_dev *pde
dev->pdev = pdev; dev->event = mlx5_core_event; - - if (prof_sel < 0 || prof_sel >= ARRAY_SIZE(profile)) { - mlx5_core_warn(dev, - "selected profile out of range, selecting default (%d)\n", - MLX5_DEFAULT_PROF); - prof_sel = MLX5_DEFAULT_PROF; - } dev->profile = &profile[prof_sel];
INIT_LIST_HEAD(&priv->ctx_list); @@@ -1462,7 -1419,6 +1458,7 @@@ static const struct pci_device_id mlx5_ { PCI_VDEVICE(MELLANOX, 0x1017) }, /* ConnectX-5, PCIe 3.0 */ { PCI_VDEVICE(MELLANOX, 0x1018), MLX5_PCI_DEV_IS_VF}, /* ConnectX-5 VF */ { PCI_VDEVICE(MELLANOX, 0x1019) }, /* ConnectX-5, PCIe 4.0 */ + { PCI_VDEVICE(MELLANOX, 0x101a), MLX5_PCI_DEV_IS_VF}, /* ConnectX-5, PCIe 4.0 VF */ { 0, } };
@@@ -1490,10 -1446,22 +1486,22 @@@ static struct pci_driver mlx5_core_driv .sriov_configure = mlx5_core_sriov_configure, };
+ static void mlx5_core_verify_params(void) + { + if (prof_sel >= ARRAY_SIZE(profile)) { + pr_warn("mlx5_core: WARNING: Invalid module parameter prof_sel %d, valid range 0-%zu, changing back to default(%d)\n", + prof_sel, + ARRAY_SIZE(profile) - 1, + MLX5_DEFAULT_PROF); + prof_sel = MLX5_DEFAULT_PROF; + } + } + static int __init init(void) { int err;
+ mlx5_core_verify_params(); mlx5_register_debugfs();
err = pci_register_driver(&mlx5_core_driver); diff --combined drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h index 7e635eb,63b9a0d..e0a8fbd --- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h @@@ -44,11 -44,11 +44,11 @@@
#define MLX5_TOTAL_VPORTS(mdev) (1 + pci_sriov_get_totalvfs(mdev->pdev))
- extern int mlx5_core_debug_mask; + extern uint mlx5_core_debug_mask;
#define mlx5_core_dbg(__dev, format, ...) \ - dev_dbg(&(__dev)->pdev->dev, "%s:%s:%d:(pid %d): " format, \ - (__dev)->priv.name, __func__, __LINE__, current->pid, \ + dev_dbg(&(__dev)->pdev->dev, "%s:%d:(pid %d): " format, \ + __func__, __LINE__, current->pid, \ ##__VA_ARGS__)
#define mlx5_core_dbg_mask(__dev, mask, format, ...) \ @@@ -63,8 -63,8 +63,8 @@@ do { ##__VA_ARGS__)
#define mlx5_core_warn(__dev, format, ...) \ - dev_warn(&(__dev)->pdev->dev, "%s:%s:%d:(pid %d): " format, \ - (__dev)->priv.name, __func__, __LINE__, current->pid, \ + dev_warn(&(__dev)->pdev->dev, "%s:%d:(pid %d): " format, \ + __func__, __LINE__, current->pid, \ ##__VA_ARGS__)
#define mlx5_core_info(__dev, format, ...) \ @@@ -75,13 -75,17 +75,18 @@@ enum MLX5_CMD_TIME, /* print command execution time */ };
+ enum { + MLX5_DRIVER_STATUS_ABORTED = 0xfe, + MLX5_DRIVER_SYND = 0xbadd00de, + }; + int mlx5_query_hca_caps(struct mlx5_core_dev *dev); int mlx5_query_board_id(struct mlx5_core_dev *dev); int mlx5_cmd_init_hca(struct mlx5_core_dev *dev); int mlx5_cmd_teardown_hca(struct mlx5_core_dev *dev); void mlx5_core_event(struct mlx5_core_dev *dev, enum mlx5_dev_event event, unsigned long param); +void mlx5_port_module_event(struct mlx5_core_dev *dev, struct mlx5_eqe *eqe); void mlx5_enter_error_state(struct mlx5_core_dev *dev); void mlx5_disable_device(struct mlx5_core_dev *dev); void mlx5_recover_device(struct mlx5_core_dev *dev); @@@ -93,13 -97,6 +98,13 @@@ int mlx5_core_sriov_configure(struct pc bool mlx5_sriov_is_enabled(struct mlx5_core_dev *dev); int mlx5_core_enable_hca(struct mlx5_core_dev *dev, u16 func_id); int mlx5_core_disable_hca(struct mlx5_core_dev *dev, u16 func_id); +int mlx5_create_scheduling_element_cmd(struct mlx5_core_dev *dev, u8 hierarchy, + void *context, u32 *element_id); +int mlx5_modify_scheduling_element_cmd(struct mlx5_core_dev *dev, u8 hierarchy, + void *context, u32 element_id, + u32 modify_bitmask); +int mlx5_destroy_scheduling_element_cmd(struct mlx5_core_dev *dev, u8 hierarchy, + u32 element_id); int mlx5_wait_for_vf_pages(struct mlx5_core_dev *dev); cycle_t mlx5_read_internal_timer(struct mlx5_core_dev *dev); u32 mlx5_get_msix_vec(struct mlx5_core_dev *dev, int vecidx); @@@ -122,12 -119,6 +127,12 @@@ struct mlx5_core_dev *mlx5_get_next_phy void mlx5_dev_list_lock(void); void mlx5_dev_list_unlock(void); int mlx5_dev_list_trylock(void); +int mlx5_encap_alloc(struct mlx5_core_dev *dev, + int header_type, + size_t size, + void *encap_header, + u32 *encap_id); +void mlx5_encap_dealloc(struct mlx5_core_dev *dev, u32 encap_id);
bool mlx5_lag_intf_add(struct mlx5_interface *intf, struct mlx5_priv *priv);
diff --combined drivers/net/ethernet/qlogic/qed/qed_ll2.c index de4e2a2,62ae55b..8e5cb76 --- a/drivers/net/ethernet/qlogic/qed/qed_ll2.c +++ b/drivers/net/ethernet/qlogic/qed/qed_ll2.c @@@ -36,7 -36,6 +36,7 @@@ #include "qed_int.h" #include "qed_ll2.h" #include "qed_mcp.h" +#include "qed_ooo.h" #include "qed_reg_addr.h" #include "qed_sp.h" #include "qed_roce.h" @@@ -297,34 -296,25 +297,34 @@@ static void qed_ll2_txq_flush(struct qe list_del(&p_pkt->list_entry); b_last_packet = list_empty(&p_tx->active_descq); list_add_tail(&p_pkt->list_entry, &p_tx->free_descq); - p_tx->cur_completing_packet = *p_pkt; - p_tx->cur_completing_bd_idx = 1; - b_last_frag = p_tx->cur_completing_bd_idx == p_pkt->bd_used; - tx_frag = p_pkt->bds_set[0].tx_frag; - if (p_ll2_conn->gsi_enable) - qed_ll2b_release_tx_gsi_packet(p_hwfn, - p_ll2_conn->my_id, - p_pkt->cookie, - tx_frag, - b_last_frag, - b_last_packet); - else - qed_ll2b_complete_tx_packet(p_hwfn, - p_ll2_conn->my_id, - p_pkt->cookie, - tx_frag, - b_last_frag, - b_last_packet); + if (p_ll2_conn->conn_type == QED_LL2_TYPE_ISCSI_OOO) { + struct qed_ooo_buffer *p_buffer;
+ p_buffer = (struct qed_ooo_buffer *)p_pkt->cookie; + qed_ooo_put_free_buffer(p_hwfn, p_hwfn->p_ooo_info, + p_buffer); + } else { + p_tx->cur_completing_packet = *p_pkt; + p_tx->cur_completing_bd_idx = 1; + b_last_frag = + p_tx->cur_completing_bd_idx == p_pkt->bd_used; + tx_frag = p_pkt->bds_set[0].tx_frag; + if (p_ll2_conn->gsi_enable) + qed_ll2b_release_tx_gsi_packet(p_hwfn, + p_ll2_conn-> + my_id, + p_pkt->cookie, + tx_frag, + b_last_frag, + b_last_packet); + else + qed_ll2b_complete_tx_packet(p_hwfn, + p_ll2_conn->my_id, + p_pkt->cookie, + tx_frag, + b_last_frag, + b_last_packet); + } } }
@@@ -550,457 -540,12 +550,457 @@@ static void qed_ll2_rxq_flush(struct qe
list_move_tail(&p_pkt->list_entry, &p_rx->free_descq);
- rx_buf_addr = p_pkt->rx_buf_addr; - cookie = p_pkt->cookie; + if (p_ll2_conn->conn_type == QED_LL2_TYPE_ISCSI_OOO) { + struct qed_ooo_buffer *p_buffer; + + p_buffer = (struct qed_ooo_buffer *)p_pkt->cookie; + qed_ooo_put_free_buffer(p_hwfn, p_hwfn->p_ooo_info, + p_buffer); + } else { + rx_buf_addr = p_pkt->rx_buf_addr; + cookie = p_pkt->cookie; + + b_last = list_empty(&p_rx->active_descq); + } + } +} + +#if IS_ENABLED(CONFIG_QED_ISCSI) +static u8 qed_ll2_convert_rx_parse_to_tx_flags(u16 parse_flags) +{ + u8 bd_flags = 0; + + if (GET_FIELD(parse_flags, PARSING_AND_ERR_FLAGS_TAG8021QEXIST)) + SET_FIELD(bd_flags, CORE_TX_BD_FLAGS_VLAN_INSERTION, 1); + + return bd_flags; +} + +static int qed_ll2_lb_rxq_handler(struct qed_hwfn *p_hwfn, + struct qed_ll2_info *p_ll2_conn) +{ + struct qed_ll2_rx_queue *p_rx = &p_ll2_conn->rx_queue; + u16 packet_length = 0, parse_flags = 0, vlan = 0; + struct qed_ll2_rx_packet *p_pkt = NULL; + u32 num_ooo_add_to_peninsula = 0, cid; + union core_rx_cqe_union *cqe = NULL; + u16 cq_new_idx = 0, cq_old_idx = 0; + struct qed_ooo_buffer *p_buffer; + struct ooo_opaque *iscsi_ooo; + u8 placement_offset = 0; + u8 cqe_type; + + cq_new_idx = le16_to_cpu(*p_rx->p_fw_cons); + cq_old_idx = qed_chain_get_cons_idx(&p_rx->rcq_chain); + if (cq_new_idx == cq_old_idx) + return 0; + + while (cq_new_idx != cq_old_idx) { + struct core_rx_fast_path_cqe *p_cqe_fp; + + cqe = qed_chain_consume(&p_rx->rcq_chain); + cq_old_idx = qed_chain_get_cons_idx(&p_rx->rcq_chain); + cqe_type = cqe->rx_cqe_sp.type; + + if (cqe_type != CORE_RX_CQE_TYPE_REGULAR) { + DP_NOTICE(p_hwfn, + "Got a non-regular LB LL2 completion [type 0x%02x]\n", + cqe_type); + return -EINVAL; + } + p_cqe_fp = &cqe->rx_cqe_fp; + + placement_offset = p_cqe_fp->placement_offset; + parse_flags = le16_to_cpu(p_cqe_fp->parse_flags.flags); + packet_length = le16_to_cpu(p_cqe_fp->packet_length); + vlan = le16_to_cpu(p_cqe_fp->vlan); + iscsi_ooo = (struct ooo_opaque *)&p_cqe_fp->opaque_data; + qed_ooo_save_history_entry(p_hwfn, p_hwfn->p_ooo_info, + iscsi_ooo); + cid = le32_to_cpu(iscsi_ooo->cid); + + /* Process delete isle first */ + if (iscsi_ooo->drop_size) + qed_ooo_delete_isles(p_hwfn, p_hwfn->p_ooo_info, cid, + iscsi_ooo->drop_isle, + iscsi_ooo->drop_size); + + if (iscsi_ooo->ooo_opcode == TCP_EVENT_NOP) + continue; + + /* Now process create/add/join isles */ + if (list_empty(&p_rx->active_descq)) { + DP_NOTICE(p_hwfn, + "LL2 OOO RX chain has no submitted buffers\n" + ); + return -EIO; + } + + p_pkt = list_first_entry(&p_rx->active_descq, + struct qed_ll2_rx_packet, list_entry); + + if ((iscsi_ooo->ooo_opcode == TCP_EVENT_ADD_NEW_ISLE) || + (iscsi_ooo->ooo_opcode == TCP_EVENT_ADD_ISLE_RIGHT) || + (iscsi_ooo->ooo_opcode == TCP_EVENT_ADD_ISLE_LEFT) || + (iscsi_ooo->ooo_opcode == TCP_EVENT_ADD_PEN) || + (iscsi_ooo->ooo_opcode == TCP_EVENT_JOIN)) { + if (!p_pkt) { + DP_NOTICE(p_hwfn, + "LL2 OOO RX packet is not valid\n"); + return -EIO; + } + list_del(&p_pkt->list_entry); + p_buffer = (struct qed_ooo_buffer *)p_pkt->cookie; + p_buffer->packet_length = packet_length; + p_buffer->parse_flags = parse_flags; + p_buffer->vlan = vlan; + p_buffer->placement_offset = placement_offset; + qed_chain_consume(&p_rx->rxq_chain); + list_add_tail(&p_pkt->list_entry, &p_rx->free_descq); + + switch (iscsi_ooo->ooo_opcode) { + case TCP_EVENT_ADD_NEW_ISLE: + qed_ooo_add_new_isle(p_hwfn, + p_hwfn->p_ooo_info, + cid, + iscsi_ooo->ooo_isle, + p_buffer); + break; + case TCP_EVENT_ADD_ISLE_RIGHT: + qed_ooo_add_new_buffer(p_hwfn, + p_hwfn->p_ooo_info, + cid, + 
iscsi_ooo->ooo_isle, + p_buffer, + QED_OOO_RIGHT_BUF); + break; + case TCP_EVENT_ADD_ISLE_LEFT: + qed_ooo_add_new_buffer(p_hwfn, + p_hwfn->p_ooo_info, + cid, + iscsi_ooo->ooo_isle, + p_buffer, + QED_OOO_LEFT_BUF); + break; + case TCP_EVENT_JOIN: + qed_ooo_add_new_buffer(p_hwfn, + p_hwfn->p_ooo_info, + cid, + iscsi_ooo->ooo_isle + + 1, + p_buffer, + QED_OOO_LEFT_BUF); + qed_ooo_join_isles(p_hwfn, + p_hwfn->p_ooo_info, + cid, iscsi_ooo->ooo_isle); + break; + case TCP_EVENT_ADD_PEN: + num_ooo_add_to_peninsula++; + qed_ooo_put_ready_buffer(p_hwfn, + p_hwfn->p_ooo_info, + p_buffer, true); + break; + } + } else { + DP_NOTICE(p_hwfn, + "Unexpected event (%d) TX OOO completion\n", + iscsi_ooo->ooo_opcode); + } + } + + return 0; +} + +static void +qed_ooo_submit_tx_buffers(struct qed_hwfn *p_hwfn, + struct qed_ll2_info *p_ll2_conn) +{ + struct qed_ooo_buffer *p_buffer; + int rc; + u16 l4_hdr_offset_w; + dma_addr_t first_frag; + u16 parse_flags; + u8 bd_flags; + + /* Submit Tx buffers here */ + while ((p_buffer = qed_ooo_get_ready_buffer(p_hwfn, + p_hwfn->p_ooo_info))) { + l4_hdr_offset_w = 0; + bd_flags = 0; + + first_frag = p_buffer->rx_buffer_phys_addr + + p_buffer->placement_offset; + parse_flags = p_buffer->parse_flags; + bd_flags = qed_ll2_convert_rx_parse_to_tx_flags(parse_flags); + SET_FIELD(bd_flags, CORE_TX_BD_FLAGS_FORCE_VLAN_MODE, 1); + SET_FIELD(bd_flags, CORE_TX_BD_FLAGS_L4_PROTOCOL, 1); + + rc = qed_ll2_prepare_tx_packet(p_hwfn, p_ll2_conn->my_id, 1, + p_buffer->vlan, bd_flags, + l4_hdr_offset_w, + p_ll2_conn->tx_dest, 0, + first_frag, + p_buffer->packet_length, + p_buffer, true); + if (rc) { + qed_ooo_put_ready_buffer(p_hwfn, p_hwfn->p_ooo_info, + p_buffer, false); + break; + } + } +} + +static void +qed_ooo_submit_rx_buffers(struct qed_hwfn *p_hwfn, + struct qed_ll2_info *p_ll2_conn) +{ + struct qed_ooo_buffer *p_buffer; + int rc; + + while ((p_buffer = qed_ooo_get_free_buffer(p_hwfn, + p_hwfn->p_ooo_info))) { + rc = qed_ll2_post_rx_buffer(p_hwfn, + p_ll2_conn->my_id, + p_buffer->rx_buffer_phys_addr, + 0, p_buffer, true); + if (rc) { + qed_ooo_put_free_buffer(p_hwfn, + p_hwfn->p_ooo_info, p_buffer); + break; + } + } +} + +static int qed_ll2_lb_rxq_completion(struct qed_hwfn *p_hwfn, void *p_cookie) +{ + struct qed_ll2_info *p_ll2_conn = (struct qed_ll2_info *)p_cookie; + int rc; + + rc = qed_ll2_lb_rxq_handler(p_hwfn, p_ll2_conn); + if (rc) + return rc; + + qed_ooo_submit_rx_buffers(p_hwfn, p_ll2_conn); + qed_ooo_submit_tx_buffers(p_hwfn, p_ll2_conn); + + return 0; +} + +static int qed_ll2_lb_txq_completion(struct qed_hwfn *p_hwfn, void *p_cookie) +{ + struct qed_ll2_info *p_ll2_conn = (struct qed_ll2_info *)p_cookie; + struct qed_ll2_tx_queue *p_tx = &p_ll2_conn->tx_queue; + struct qed_ll2_tx_packet *p_pkt = NULL; + struct qed_ooo_buffer *p_buffer; + bool b_dont_submit_rx = false; + u16 new_idx = 0, num_bds = 0; + int rc; + + new_idx = le16_to_cpu(*p_tx->p_fw_cons); + num_bds = ((s16)new_idx - (s16)p_tx->bds_idx); + + if (!num_bds) + return 0; + + while (num_bds) { + if (list_empty(&p_tx->active_descq)) + return -EINVAL; + + p_pkt = list_first_entry(&p_tx->active_descq, + struct qed_ll2_tx_packet, list_entry); + if (!p_pkt) + return -EINVAL; + + if (p_pkt->bd_used != 1) { + DP_NOTICE(p_hwfn, + "Unexpectedly many BDs(%d) in TX OOO completion\n", + p_pkt->bd_used); + return -EINVAL; + } + + list_del(&p_pkt->list_entry); + + num_bds--; + p_tx->bds_idx++; + qed_chain_consume(&p_tx->txq_chain); + + p_buffer = (struct qed_ooo_buffer *)p_pkt->cookie; + list_add_tail(&p_pkt->list_entry, 
&p_tx->free_descq); + + if (b_dont_submit_rx) { + qed_ooo_put_free_buffer(p_hwfn, p_hwfn->p_ooo_info, + p_buffer); + continue; + } + + rc = qed_ll2_post_rx_buffer(p_hwfn, p_ll2_conn->my_id, + p_buffer->rx_buffer_phys_addr, 0, + p_buffer, true); + if (rc != 0) { + qed_ooo_put_free_buffer(p_hwfn, + p_hwfn->p_ooo_info, p_buffer); + b_dont_submit_rx = true; + } + } + + qed_ooo_submit_tx_buffers(p_hwfn, p_ll2_conn); + + return 0; +} + +static int +qed_ll2_acquire_connection_ooo(struct qed_hwfn *p_hwfn, + struct qed_ll2_info *p_ll2_info, + u16 rx_num_ooo_buffers, u16 mtu) +{ + struct qed_ooo_buffer *p_buf = NULL; + void *p_virt; + u16 buf_idx; + int rc = 0; + + if (p_ll2_info->conn_type != QED_LL2_TYPE_ISCSI_OOO) + return rc; + + if (!rx_num_ooo_buffers) + return -EINVAL; + + for (buf_idx = 0; buf_idx < rx_num_ooo_buffers; buf_idx++) { + p_buf = kzalloc(sizeof(*p_buf), GFP_KERNEL); + if (!p_buf) { + rc = -ENOMEM; + goto out; + } + + p_buf->rx_buffer_size = mtu + 26 + ETH_CACHE_LINE_SIZE; + p_buf->rx_buffer_size = (p_buf->rx_buffer_size + + ETH_CACHE_LINE_SIZE - 1) & + ~(ETH_CACHE_LINE_SIZE - 1); + p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev, + p_buf->rx_buffer_size, + &p_buf->rx_buffer_phys_addr, + GFP_KERNEL); + if (!p_virt) { + kfree(p_buf); + rc = -ENOMEM; + goto out; + } + + p_buf->rx_buffer_virt_addr = p_virt; + qed_ooo_put_free_buffer(p_hwfn, p_hwfn->p_ooo_info, p_buf); + } + + DP_VERBOSE(p_hwfn, QED_MSG_LL2, + "Allocated [%04x] LL2 OOO buffers [each of size 0x%08x]\n", + rx_num_ooo_buffers, p_buf->rx_buffer_size); + +out: + return rc; +} + +static void +qed_ll2_establish_connection_ooo(struct qed_hwfn *p_hwfn, + struct qed_ll2_info *p_ll2_conn) +{ + if (p_ll2_conn->conn_type != QED_LL2_TYPE_ISCSI_OOO) + return; + + qed_ooo_release_all_isles(p_hwfn, p_hwfn->p_ooo_info); + qed_ooo_submit_rx_buffers(p_hwfn, p_ll2_conn); +} + +static void qed_ll2_release_connection_ooo(struct qed_hwfn *p_hwfn, + struct qed_ll2_info *p_ll2_conn) +{ + struct qed_ooo_buffer *p_buffer; + + if (p_ll2_conn->conn_type != QED_LL2_TYPE_ISCSI_OOO) + return; + + qed_ooo_release_all_isles(p_hwfn, p_hwfn->p_ooo_info); + while ((p_buffer = qed_ooo_get_free_buffer(p_hwfn, + p_hwfn->p_ooo_info))) { + dma_free_coherent(&p_hwfn->cdev->pdev->dev, + p_buffer->rx_buffer_size, + p_buffer->rx_buffer_virt_addr, + p_buffer->rx_buffer_phys_addr); + kfree(p_buffer); + } +} + +static void qed_ll2_stop_ooo(struct qed_dev *cdev) +{ + struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); + u8 *handle = &hwfn->pf_params.iscsi_pf_params.ll2_ooo_queue_id; + + DP_VERBOSE(cdev, QED_MSG_STORAGE, "Stopping LL2 OOO queue [%02x]\n", + *handle); + + qed_ll2_terminate_connection(hwfn, *handle); + qed_ll2_release_connection(hwfn, *handle); + *handle = QED_LL2_UNUSED_HANDLE; +} + +static int qed_ll2_start_ooo(struct qed_dev *cdev, + struct qed_ll2_params *params) +{ + struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); + u8 *handle = &hwfn->pf_params.iscsi_pf_params.ll2_ooo_queue_id; + struct qed_ll2_info *ll2_info; + int rc; + + ll2_info = kzalloc(sizeof(*ll2_info), GFP_KERNEL); + if (!ll2_info) + return -ENOMEM; + ll2_info->conn_type = QED_LL2_TYPE_ISCSI_OOO; + ll2_info->mtu = params->mtu; + ll2_info->rx_drop_ttl0_flg = params->drop_ttl0_packets; + ll2_info->rx_vlan_removal_en = params->rx_vlan_stripping; + ll2_info->tx_tc = OOO_LB_TC; + ll2_info->tx_dest = CORE_TX_DEST_LB; + + rc = qed_ll2_acquire_connection(hwfn, ll2_info, + QED_LL2_RX_SIZE, QED_LL2_TX_SIZE, + handle); + kfree(ll2_info); + if (rc) { + DP_INFO(cdev, "Failed to acquire LL2 OOO 
connection\n"); + goto out; + }
- b_last = list_empty(&p_rx->active_descq); + rc = qed_ll2_establish_connection(hwfn, *handle); + if (rc) { + DP_INFO(cdev, "Failed to establist LL2 OOO connection\n"); + goto fail; } + + return 0; + +fail: + qed_ll2_release_connection(hwfn, *handle); +out: + *handle = QED_LL2_UNUSED_HANDLE; + return rc; } +#else /* IS_ENABLED(CONFIG_QED_ISCSI) */ +static int qed_ll2_lb_rxq_completion(struct qed_hwfn *p_hwfn, + void *p_cookie) { return -EINVAL; } +static int qed_ll2_lb_txq_completion(struct qed_hwfn *p_hwfn, + void *p_cookie) { return -EINVAL; } +static inline int +qed_ll2_acquire_connection_ooo(struct qed_hwfn *p_hwfn, + struct qed_ll2_info *p_ll2_info, + u16 rx_num_ooo_buffers, u16 mtu) { return 0; } +static inline void +qed_ll2_establish_connection_ooo(struct qed_hwfn *p_hwfn, + struct qed_ll2_info *p_ll2_conn) { return; } +static inline void +qed_ll2_release_connection_ooo(struct qed_hwfn *p_hwfn, + struct qed_ll2_info *p_ll2_conn) { return; } +static inline void qed_ll2_stop_ooo(struct qed_dev *cdev) { return; } +static inline int qed_ll2_start_ooo(struct qed_dev *cdev, + struct qed_ll2_params *params) + { return -EINVAL; } +#endif /* IS_ENABLED(CONFIG_QED_ISCSI) */
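In qed_ll2_acquire_connection_ooo() above, each out-of-order buffer is sized as MTU plus a fixed headroom and then rounded up to a multiple of ETH_CACHE_LINE_SIZE before the coherent DMA allocation; the #else branch supplies inline stubs so the rest of the file still builds with CONFIG_QED_ISCSI disabled. The round-up itself is the usual power-of-two alignment trick; a stand-alone sketch follows (the 26-byte headroom mirrors the constant in the hunk, the 64-byte cache line is an assumed value):

#include <stdio.h>

#define CACHE_LINE_SIZE 64u     /* assumed; the driver uses ETH_CACHE_LINE_SIZE */
#define HEADROOM        26u     /* per-buffer overhead, as in the hunk above */

/* round size up to the next multiple of a power-of-two alignment */
static unsigned int align_up(unsigned int size, unsigned int align)
{
        return (size + align - 1) & ~(align - 1);
}

int main(void)
{
        unsigned int mtu;

        for (mtu = 1500; mtu <= 9000; mtu += 2500)
                printf("mtu %4u -> raw %4u -> aligned %4u\n",
                       mtu, mtu + HEADROOM,
                       align_up(mtu + HEADROOM, CACHE_LINE_SIZE));
        return 0;
}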
static int qed_sp_ll2_rx_queue_start(struct qed_hwfn *p_hwfn, struct qed_ll2_info *p_ll2_conn, @@@ -1043,8 -588,7 +1043,8 @@@ p_ramrod->drop_ttl0_flg = p_ll2_conn->rx_drop_ttl0_flg; p_ramrod->inner_vlan_removal_en = p_ll2_conn->rx_vlan_removal_en; p_ramrod->queue_id = p_ll2_conn->queue_id; - p_ramrod->main_func_queue = 1; + p_ramrod->main_func_queue = (conn_type == QED_LL2_TYPE_ISCSI_OOO) ? 0 + : 1;
if ((IS_MF_DEFAULT(p_hwfn) || IS_MF_SI(p_hwfn)) && p_ramrod->main_func_queue && (conn_type != QED_LL2_TYPE_ROCE)) { @@@ -1075,11 -619,6 +1075,11 @@@ static int qed_sp_ll2_tx_queue_start(st if (!QED_LL2_TX_REGISTERED(p_ll2_conn)) return 0;
+ if (p_ll2_conn->conn_type == QED_LL2_TYPE_ISCSI_OOO) + p_ll2_conn->tx_stats_en = 0; + else + p_ll2_conn->tx_stats_en = 1; + /* Get SPQ entry */ memset(&init_data, 0, sizeof(init_data)); init_data.cid = p_ll2_conn->cid; @@@ -1097,6 -636,7 +1097,6 @@@ p_ramrod->sb_id = cpu_to_le16(qed_int_get_sp_sb_id(p_hwfn)); p_ramrod->sb_index = p_tx->tx_sb_index; p_ramrod->mtu = cpu_to_le16(p_ll2_conn->mtu); - p_ll2_conn->tx_stats_en = 1; p_ramrod->stats_en = p_ll2_conn->tx_stats_en; p_ramrod->stats_id = p_ll2_conn->tx_stats_id;
@@@ -1320,19 -860,9 +1320,19 @@@ int qed_ll2_acquire_connection(struct q if (rc) goto q_allocate_fail;
+ rc = qed_ll2_acquire_connection_ooo(p_hwfn, p_ll2_info, + rx_num_desc * 2, p_params->mtu); + if (rc) + goto q_allocate_fail; + /* Register callbacks for the Rx/Tx queues */ - comp_rx_cb = qed_ll2_rxq_completion; - comp_tx_cb = qed_ll2_txq_completion; + if (p_params->conn_type == QED_LL2_TYPE_ISCSI_OOO) { + comp_rx_cb = qed_ll2_lb_rxq_completion; + comp_tx_cb = qed_ll2_lb_txq_completion; + } else { + comp_rx_cb = qed_ll2_rxq_completion; + comp_tx_cb = qed_ll2_txq_completion; + }
if (rx_num_desc) { qed_int_register_cb(p_hwfn, comp_rx_cb, @@@ -1445,8 -975,6 +1445,8 @@@ int qed_ll2_establish_connection(struc if (p_hwfn->hw_info.personality != QED_PCI_ETH_ROCE) qed_wr(p_hwfn, p_hwfn->p_main_ptt, PRS_REG_USE_LIGHT_L2, 1);
+ qed_ll2_establish_connection_ooo(p_hwfn, p_ll2_conn); + return rc; }
@@@ -1685,7 -1213,6 +1685,7 @@@ int qed_ll2_prepare_tx_packet(struct qe u16 vlan, u8 bd_flags, u16 l4_hdr_offset_w, + enum qed_ll2_tx_dest e_tx_dest, enum qed_ll2_roce_flavor_type qed_roce_flavor, dma_addr_t first_frag, u16 first_frag_len, void *cookie, u8 notify_fw) @@@ -1695,7 -1222,6 +1695,7 @@@ enum core_roce_flavor_type roce_flavor; struct qed_ll2_tx_queue *p_tx; struct qed_chain *p_tx_chain; + enum core_tx_dest tx_dest; unsigned long flags; int rc = 0;
@@@ -1726,8 -1252,6 +1726,8 @@@ goto out; }
+ tx_dest = e_tx_dest == QED_LL2_TX_DEST_NW ? CORE_TX_DEST_NW : + CORE_TX_DEST_LB; if (qed_roce_flavor == QED_LL2_ROCE) { roce_flavor = CORE_ROCE; } else if (qed_roce_flavor == QED_LL2_RROCE) { @@@ -1742,7 -1266,7 +1742,7 @@@ num_of_bds, first_frag, first_frag_len, cookie, notify_fw); qed_ll2_prepare_tx_packet_set_bd(p_hwfn, p_ll2_conn, p_curp, - num_of_bds, CORE_TX_DEST_NW, + num_of_bds, tx_dest, vlan, bd_flags, l4_hdr_offset_w, roce_flavor, first_frag, first_frag_len); @@@ -1817,9 -1341,6 +1817,9 @@@ int qed_ll2_terminate_connection(struc qed_ll2_rxq_flush(p_hwfn, connection_handle); }
+ if (p_ll2_conn->conn_type == QED_LL2_TYPE_ISCSI_OOO) + qed_ooo_release_all_isles(p_hwfn, p_hwfn->p_ooo_info); + return rc; }
@@@ -1850,8 -1371,6 +1850,8 @@@ void qed_ll2_release_connection(struct
qed_cxt_release_cid(p_hwfn, p_ll2_conn->cid);
+ qed_ll2_release_connection_ooo(p_hwfn, p_ll2_conn); + mutex_lock(&p_ll2_conn->mutex); p_ll2_conn->b_active = false; mutex_unlock(&p_ll2_conn->mutex); @@@ -1998,7 -1517,6 +1998,7 @@@ static int qed_ll2_start(struct qed_de enum qed_ll2_conn_type conn_type; struct qed_ptt *p_ptt; int rc, i; + u8 gsi_enable = 1;
/* Initialize LL2 locks & lists */ INIT_LIST_HEAD(&cdev->ll2->list); @@@ -2030,7 -1548,6 +2030,7 @@@ switch (QED_LEADING_HWFN(cdev)->hw_info.personality) { case QED_PCI_ISCSI: conn_type = QED_LL2_TYPE_ISCSI; + gsi_enable = 0; break; case QED_PCI_ETH_ROCE: conn_type = QED_LL2_TYPE_ROCE; @@@ -2047,7 -1564,7 +2047,7 @@@ ll2_info.rx_vlan_removal_en = params->rx_vlan_stripping; ll2_info.tx_tc = 0; ll2_info.tx_dest = CORE_TX_DEST_NW; - ll2_info.gsi_enable = 1; + ll2_info.gsi_enable = gsi_enable;
rc = qed_ll2_acquire_connection(QED_LEADING_HWFN(cdev), &ll2_info, QED_LL2_RX_SIZE, QED_LL2_TX_SIZE, @@@ -2094,17 -1611,6 +2094,17 @@@ goto release_terminate; }
+ if (cdev->hwfns[0].hw_info.personality == QED_PCI_ISCSI && + cdev->hwfns[0].pf_params.iscsi_pf_params.ooo_enable) { + DP_VERBOSE(cdev, QED_MSG_STORAGE, "Starting OOO LL2 queue\n"); + rc = qed_ll2_start_ooo(cdev, params); + if (rc) { + DP_INFO(cdev, + "Failed to initialize the OOO LL2 queue\n"); + goto release_terminate; + } + } + p_ptt = qed_ptt_acquire(QED_LEADING_HWFN(cdev)); if (!p_ptt) { DP_INFO(cdev, "Failed to acquire PTT\n"); @@@ -2154,10 -1660,6 +2154,10 @@@ static int qed_ll2_stop(struct qed_dev qed_ptt_release(QED_LEADING_HWFN(cdev), p_ptt); eth_zero_addr(cdev->ll2_mac_address);
+ if (cdev->hwfns[0].hw_info.personality == QED_PCI_ISCSI && + cdev->hwfns[0].pf_params.iscsi_pf_params.ooo_enable) + qed_ll2_stop_ooo(cdev); + rc = qed_ll2_terminate_connection(QED_LEADING_HWFN(cdev), cdev->ll2->handle); if (rc) @@@ -2212,8 -1714,7 +2212,8 @@@ static int qed_ll2_start_xmit(struct qe rc = qed_ll2_prepare_tx_packet(QED_LEADING_HWFN(cdev), cdev->ll2->handle, 1 + skb_shinfo(skb)->nr_frags, - vlan, flags, 0, 0 /* RoCE FLAVOR */, + vlan, flags, 0, QED_LL2_TX_DEST_NW, + 0 /* RoCE FLAVOR */, mapping, skb->len, skb, 1); if (rc) goto err; @@@ -2229,6 -1730,7 +2229,7 @@@ mapping))) { DP_NOTICE(cdev, "Unable to map frag - dropping packet\n"); + rc = -ENOMEM; goto err; } } else { diff --combined drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c index 577316d,32bc2fc..e81b6e5 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c @@@ -30,9 -30,11 +30,11 @@@ static void dwmac4_dma_axi(void __iome if (axi->axi_xit_frm) value |= DMA_AXI_LPI_XIT_FRM;
+ value &= ~DMA_AXI_WR_OSR_LMT; value |= (axi->axi_wr_osr_lmt & DMA_AXI_OSR_MAX) << DMA_AXI_WR_OSR_LMT_SHIFT;
+ value &= ~DMA_AXI_RD_OSR_LMT; value |= (axi->axi_rd_osr_lmt & DMA_AXI_OSR_MAX) << DMA_AXI_RD_OSR_LMT_SHIFT;
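Both AXI outstanding-request fields are now cleared before the new limits are OR-ed in, so dwmac4_dma_axi() performs a proper read-modify-write instead of accumulating stale bits on top of the register's previous contents. A minimal sketch of that clear-then-set field update, with a made-up mask and shift:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define WR_OSR_LMT_SHIFT        24
#define WR_OSR_LMT_MASK         (UINT32_C(0xf) << WR_OSR_LMT_SHIFT)   /* made-up layout */

/* replace a register field: clear the old bits, then OR in the new value */
static uint32_t set_field(uint32_t reg, uint32_t mask, unsigned int shift,
                          uint32_t val)
{
        return (reg & ~mask) | ((val << shift) & mask);
}

int main(void)
{
        uint32_t reg = 0x0f000003;      /* pretend previous value, field = 0xf */

        reg = set_field(reg, WR_OSR_LMT_MASK, WR_OSR_LMT_SHIFT, 1);
        /* field is now 1, the unrelated low bits are untouched */
        printf("register after update: 0x%08" PRIx32 "\n", reg);
        return 0;
}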
@@@ -213,17 -215,7 +215,17 @@@ static void dwmac4_dma_chan_op_mode(voi else mtl_tx_op |= MTL_OP_MODE_TTC_512; } - + /* For an IP with DWC_EQOS_NUM_TXQ == 1, the fields TXQEN and TQS are RO + * with reset values: TXQEN on, TQS == DWC_EQOS_TXFIFO_SIZE. + * For an IP with DWC_EQOS_NUM_TXQ > 1, the fields TXQEN and TQS are R/W + * with reset values: TXQEN off, TQS 256 bytes. + * + * Write the bits in both cases, since it will have no effect when RO. + * For DWC_EQOS_NUM_TXQ > 1, the top bits in MTL_OP_MODE_TQS_MASK might + * be RO, however, writing the whole TQS field will result in a value + * equal to DWC_EQOS_TXFIFO_SIZE, just like for DWC_EQOS_NUM_TXQ == 1. + */ + mtl_tx_op |= MTL_OP_MODE_TXQEN | MTL_OP_MODE_TQS_MASK; writel(mtl_tx_op, ioaddr + MTL_CHAN_TX_OP_MODE(channel));
mtl_rx_op = readl(ioaddr + MTL_CHAN_RX_OP_MODE(channel)); diff --combined drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c index 98bf86d,ac3d39c..e528e71 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c @@@ -126,8 -126,10 +126,10 @@@ static struct stmmac_axi *stmmac_axi_se axi->axi_mb = of_property_read_bool(np, "snps,axi_mb"); axi->axi_rb = of_property_read_bool(np, "snps,axi_rb");
- of_property_read_u32(np, "snps,wr_osr_lmt", &axi->axi_wr_osr_lmt); - of_property_read_u32(np, "snps,rd_osr_lmt", &axi->axi_rd_osr_lmt); + if (of_property_read_u32(np, "snps,wr_osr_lmt", &axi->axi_wr_osr_lmt)) + axi->axi_wr_osr_lmt = 1; + if (of_property_read_u32(np, "snps,rd_osr_lmt", &axi->axi_rd_osr_lmt)) + axi->axi_rd_osr_lmt = 1; of_property_read_u32_array(np, "snps,blen", axi->axi_blen, AXI_BLEN); of_node_put(np);
@@@ -442,7 -444,9 +444,7 @@@ static int stmmac_pltfr_suspend(struct struct platform_device *pdev = to_platform_device(dev);
ret = stmmac_suspend(dev); - if (priv->plat->suspend) - priv->plat->suspend(pdev, priv->plat->bsp_priv); - else if (priv->plat->exit) + if (priv->plat->exit) priv->plat->exit(pdev, priv->plat->bsp_priv);
return ret; @@@ -461,7 -465,9 +463,7 @@@ static int stmmac_pltfr_resume(struct d struct stmmac_priv *priv = netdev_priv(ndev); struct platform_device *pdev = to_platform_device(dev);
- if (priv->plat->resume) - priv->plat->resume(pdev, priv->plat->bsp_priv); - else if (priv->plat->init) + if (priv->plat->init) priv->plat->init(pdev, priv->plat->bsp_priv);
return stmmac_resume(dev); diff --combined drivers/net/usb/lan78xx.c index 0c459e9,f33460c..019f758 --- a/drivers/net/usb/lan78xx.c +++ b/drivers/net/usb/lan78xx.c @@@ -30,17 -30,13 +30,17 @@@ #include <linux/ipv6.h> #include <linux/mdio.h> #include <net/ip6_checksum.h> +#include <linux/interrupt.h> +#include <linux/irqdomain.h> +#include <linux/irq.h> +#include <linux/irqchip/chained_irq.h> #include <linux/microchipphy.h> #include "lan78xx.h"
#define DRIVER_AUTHOR "WOOJUNG HUH woojung.huh@microchip.com" #define DRIVER_DESC "LAN78XX USB 3.0 Gigabit Ethernet Devices" #define DRIVER_NAME "lan78xx" -#define DRIVER_VERSION "1.0.4" +#define DRIVER_VERSION "1.0.5"
#define TX_TIMEOUT_JIFFIES (5 * HZ) #define THROTTLE_JIFFIES (HZ / 8) @@@ -93,38 -89,6 +93,38 @@@ /* statistic update interval (mSec) */ #define STAT_UPDATE_TIMER (1 * 1000)
+/* defines interrupts from interrupt EP */ +#define MAX_INT_EP (32) +#define INT_EP_INTEP (31) +#define INT_EP_OTP_WR_DONE (28) +#define INT_EP_EEE_TX_LPI_START (26) +#define INT_EP_EEE_TX_LPI_STOP (25) +#define INT_EP_EEE_RX_LPI (24) +#define INT_EP_MAC_RESET_TIMEOUT (23) +#define INT_EP_RDFO (22) +#define INT_EP_TXE (21) +#define INT_EP_USB_STATUS (20) +#define INT_EP_TX_DIS (19) +#define INT_EP_RX_DIS (18) +#define INT_EP_PHY (17) +#define INT_EP_DP (16) +#define INT_EP_MAC_ERR (15) +#define INT_EP_TDFU (14) +#define INT_EP_TDFO (13) +#define INT_EP_UTX (12) +#define INT_EP_GPIO_11 (11) +#define INT_EP_GPIO_10 (10) +#define INT_EP_GPIO_9 (9) +#define INT_EP_GPIO_8 (8) +#define INT_EP_GPIO_7 (7) +#define INT_EP_GPIO_6 (6) +#define INT_EP_GPIO_5 (5) +#define INT_EP_GPIO_4 (4) +#define INT_EP_GPIO_3 (3) +#define INT_EP_GPIO_2 (2) +#define INT_EP_GPIO_1 (1) +#define INT_EP_GPIO_0 (0) + static const char lan78xx_gstrings[][ETH_GSTRING_LEN] = { "RX FCS Errors", "RX Alignment Errors", @@@ -332,15 -296,6 +332,15 @@@ struct statstage struct lan78xx_statstage64 curr_stat; };
+struct irq_domain_data { + struct irq_domain *irqdomain; + unsigned int phyirq; + struct irq_chip *irqchip; + irq_flow_handler_t irq_handler; + u32 irqenable; + struct mutex irq_lock; /* for irq bus access */ +}; + struct lan78xx_net { struct net_device *net; struct usb_device *udev; @@@ -396,8 -351,6 +396,8 @@@
int delta; struct statstage stats; + + struct irq_domain_data domain_data; };
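The new irq_domain_data block is embedded directly in struct lan78xx_net; the irq_bus_sync_unlock() callback later in this diff only receives a pointer to that member and recovers the owning device with container_of(). A stand-alone sketch of that embedded-member-to-container step, using an offsetof-based container_of and invented structure names:

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct domain_data {
        unsigned int irqenable;
};

struct device_ctx {
        const char *name;
        struct domain_data domain_data; /* embedded, like lan78xx_net */
};

/* callback that is only handed the embedded member */
static void sync_unlock(struct domain_data *data)
{
        struct device_ctx *dev = container_of(data, struct device_ctx, domain_data);

        printf("device %s: writing irqenable 0x%x\n", dev->name, data->irqenable);
}

int main(void)
{
        struct device_ctx dev = {
                .name = "lan78xx-ish",
                .domain_data = { .irqenable = 0x20000 },
        };

        sync_unlock(&dev.domain_data);
        return 0;
}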
/* use ethtool to change the level for any given device */ @@@ -1139,10 -1092,15 +1139,10 @@@ static int lan78xx_update_flowcontrol(s static int lan78xx_link_reset(struct lan78xx_net *dev) { struct phy_device *phydev = dev->net->phydev; - struct ethtool_cmd ecmd = { .cmd = ETHTOOL_GSET }; + struct ethtool_link_ksettings ecmd; int ladv, radv, ret; u32 buf;
- /* clear PHY interrupt status */ - ret = phy_read(phydev, LAN88XX_INT_STS); - if (unlikely(ret < 0)) - return -EIO; - /* clear LAN78xx interrupt status */ ret = lan78xx_write_reg(dev, INT_STS, INT_STS_PHY_INT_); if (unlikely(ret < 0)) @@@ -1162,14 -1120,18 +1162,14 @@@ if (unlikely(ret < 0)) return -EIO;
- phy_mac_interrupt(phydev, 0); - del_timer(&dev->stat_monitor); } else if (phydev->link && !dev->link_on) { dev->link_on = true;
- phy_ethtool_gset(phydev, &ecmd); - - ret = phy_read(phydev, LAN88XX_INT_STS); + phy_ethtool_ksettings_get(phydev, &ecmd);
if (dev->udev->speed == USB_SPEED_SUPER) { - if (ethtool_cmd_speed(&ecmd) == 1000) { + if (ecmd.base.speed == 1000) { /* disable U2 */ ret = lan78xx_read_reg(dev, USB_CFG1, &buf); buf &= ~USB_CFG1_DEV_U2_INIT_EN_; @@@ -1197,10 -1159,10 +1197,10 @@@
netif_dbg(dev, link, dev->net, "speed: %u duplex: %d anadv: 0x%04x anlpa: 0x%04x", - ethtool_cmd_speed(&ecmd), ecmd.duplex, ladv, radv); + ecmd.base.speed, ecmd.base.duplex, ladv, radv);
- ret = lan78xx_update_flowcontrol(dev, ecmd.duplex, ladv, radv); - phy_mac_interrupt(phydev, 1); + ret = lan78xx_update_flowcontrol(dev, ecmd.base.duplex, ladv, + radv);
if (!timer_pending(&dev->stat_monitor)) { dev->delta = 1; @@@ -1239,10 -1201,7 +1239,10 @@@ static void lan78xx_status(struct lan78
if (intdata & INT_ENP_PHY_INT) { netif_dbg(dev, link, dev->net, "PHY INTR: 0x%08x\n", intdata); - lan78xx_defer_kevent(dev, EVENT_LINK_RESET); + lan78xx_defer_kevent(dev, EVENT_LINK_RESET); + + if (dev->domain_data.phyirq > 0) + generic_handle_irq(dev->domain_data.phyirq); } else netdev_warn(dev->net, "unexpected interrupt: 0x%08x\n", intdata); @@@ -1447,6 -1406,11 +1447,6 @@@ static u32 lan78xx_get_link(struct net_ return net->phydev->link; }
-static int lan78xx_nway_reset(struct net_device *net) -{ - return phy_start_aneg(net->phydev); -} - static void lan78xx_get_drvinfo(struct net_device *net, struct ethtool_drvinfo *info) { @@@ -1471,26 -1435,88 +1471,26 @@@ static void lan78xx_set_msglevel(struc dev->msg_enable = level; }
-static int lan78xx_get_mdix_status(struct net_device *net) -{ - struct phy_device *phydev = net->phydev; - int buf; - - phy_write(phydev, LAN88XX_EXT_PAGE_ACCESS, LAN88XX_EXT_PAGE_SPACE_1); - buf = phy_read(phydev, LAN88XX_EXT_MODE_CTRL); - phy_write(phydev, LAN88XX_EXT_PAGE_ACCESS, LAN88XX_EXT_PAGE_SPACE_0); - - return buf; -} - -static void lan78xx_set_mdix_status(struct net_device *net, __u8 mdix_ctrl) -{ - struct lan78xx_net *dev = netdev_priv(net); - struct phy_device *phydev = net->phydev; - int buf; - - if (mdix_ctrl == ETH_TP_MDI) { - phy_write(phydev, LAN88XX_EXT_PAGE_ACCESS, - LAN88XX_EXT_PAGE_SPACE_1); - buf = phy_read(phydev, LAN88XX_EXT_MODE_CTRL); - buf &= ~LAN88XX_EXT_MODE_CTRL_MDIX_MASK_; - phy_write(phydev, LAN88XX_EXT_MODE_CTRL, - buf | LAN88XX_EXT_MODE_CTRL_MDI_); - phy_write(phydev, LAN88XX_EXT_PAGE_ACCESS, - LAN88XX_EXT_PAGE_SPACE_0); - } else if (mdix_ctrl == ETH_TP_MDI_X) { - phy_write(phydev, LAN88XX_EXT_PAGE_ACCESS, - LAN88XX_EXT_PAGE_SPACE_1); - buf = phy_read(phydev, LAN88XX_EXT_MODE_CTRL); - buf &= ~LAN88XX_EXT_MODE_CTRL_MDIX_MASK_; - phy_write(phydev, LAN88XX_EXT_MODE_CTRL, - buf | LAN88XX_EXT_MODE_CTRL_MDI_X_); - phy_write(phydev, LAN88XX_EXT_PAGE_ACCESS, - LAN88XX_EXT_PAGE_SPACE_0); - } else if (mdix_ctrl == ETH_TP_MDI_AUTO) { - phy_write(phydev, LAN88XX_EXT_PAGE_ACCESS, - LAN88XX_EXT_PAGE_SPACE_1); - buf = phy_read(phydev, LAN88XX_EXT_MODE_CTRL); - buf &= ~LAN88XX_EXT_MODE_CTRL_MDIX_MASK_; - phy_write(phydev, LAN88XX_EXT_MODE_CTRL, - buf | LAN88XX_EXT_MODE_CTRL_AUTO_MDIX_); - phy_write(phydev, LAN88XX_EXT_PAGE_ACCESS, - LAN88XX_EXT_PAGE_SPACE_0); - } - dev->mdix_ctrl = mdix_ctrl; -} - -static int lan78xx_get_settings(struct net_device *net, struct ethtool_cmd *cmd) +static int lan78xx_get_link_ksettings(struct net_device *net, + struct ethtool_link_ksettings *cmd) { struct lan78xx_net *dev = netdev_priv(net); struct phy_device *phydev = net->phydev; int ret; - int buf;
ret = usb_autopm_get_interface(dev->intf); if (ret < 0) return ret;
- ret = phy_ethtool_gset(phydev, cmd); - - buf = lan78xx_get_mdix_status(net); - - buf &= LAN88XX_EXT_MODE_CTRL_MDIX_MASK_; - if (buf == LAN88XX_EXT_MODE_CTRL_AUTO_MDIX_) { - cmd->eth_tp_mdix = ETH_TP_MDI_AUTO; - cmd->eth_tp_mdix_ctrl = ETH_TP_MDI_AUTO; - } else if (buf == LAN88XX_EXT_MODE_CTRL_MDI_) { - cmd->eth_tp_mdix = ETH_TP_MDI; - cmd->eth_tp_mdix_ctrl = ETH_TP_MDI; - } else if (buf == LAN88XX_EXT_MODE_CTRL_MDI_X_) { - cmd->eth_tp_mdix = ETH_TP_MDI_X; - cmd->eth_tp_mdix_ctrl = ETH_TP_MDI_X; - } + ret = phy_ethtool_ksettings_get(phydev, cmd);
usb_autopm_put_interface(dev->intf);
return ret; }
-static int lan78xx_set_settings(struct net_device *net, struct ethtool_cmd *cmd) +static int lan78xx_set_link_ksettings(struct net_device *net, + const struct ethtool_link_ksettings *cmd) { struct lan78xx_net *dev = netdev_priv(net); struct phy_device *phydev = net->phydev; @@@ -1501,10 -1527,14 +1501,10 @@@ if (ret < 0) return ret;
- if (dev->mdix_ctrl != cmd->eth_tp_mdix_ctrl) { - lan78xx_set_mdix_status(net, cmd->eth_tp_mdix_ctrl); - } - /* change speed & duplex */ - ret = phy_ethtool_sset(phydev, cmd); + ret = phy_ethtool_ksettings_set(phydev, cmd);
- if (!cmd->autoneg) { + if (!cmd->base.autoneg) { /* force link down */ temp = phy_read(phydev, MII_BMCR); phy_write(phydev, MII_BMCR, temp | BMCR_LOOPBACK); @@@ -1522,9 -1552,9 +1522,9 @@@ static void lan78xx_get_pause(struct ne { struct lan78xx_net *dev = netdev_priv(net); struct phy_device *phydev = net->phydev; - struct ethtool_cmd ecmd = { .cmd = ETHTOOL_GSET }; + struct ethtool_link_ksettings ecmd;
- phy_ethtool_gset(phydev, &ecmd); + phy_ethtool_ksettings_get(phydev, &ecmd);
pause->autoneg = dev->fc_autoneg;
@@@ -1540,12 -1570,12 +1540,12 @@@ static int lan78xx_set_pause(struct net { struct lan78xx_net *dev = netdev_priv(net); struct phy_device *phydev = net->phydev; - struct ethtool_cmd ecmd = { .cmd = ETHTOOL_GSET }; + struct ethtool_link_ksettings ecmd; int ret;
- phy_ethtool_gset(phydev, &ecmd); + phy_ethtool_ksettings_get(phydev, &ecmd);
- if (pause->autoneg && !ecmd.autoneg) { + if (pause->autoneg && !ecmd.base.autoneg) { ret = -EINVAL; goto exit; } @@@ -1557,21 -1587,13 +1557,21 @@@ if (pause->tx_pause) dev->fc_request_control |= FLOW_CTRL_TX;
- if (ecmd.autoneg) { + if (ecmd.base.autoneg) { u32 mii_adv; + u32 advertising;
- ecmd.advertising &= ~(ADVERTISED_Pause | ADVERTISED_Asym_Pause); + ethtool_convert_link_mode_to_legacy_u32( + &advertising, ecmd.link_modes.advertising); + + advertising &= ~(ADVERTISED_Pause | ADVERTISED_Asym_Pause); mii_adv = (u32)mii_advertise_flowctrl(dev->fc_request_control); - ecmd.advertising |= mii_adv_to_ethtool_adv_t(mii_adv); - phy_ethtool_sset(phydev, &ecmd); + advertising |= mii_adv_to_ethtool_adv_t(mii_adv); + + ethtool_convert_legacy_u32_to_link_mode( + ecmd.link_modes.advertising, advertising); + + phy_ethtool_ksettings_set(phydev, &ecmd); }
dev->fc_autoneg = pause->autoneg; @@@ -1583,10 -1605,12 +1583,10 @@@ exit
static const struct ethtool_ops lan78xx_ethtool_ops = { .get_link = lan78xx_get_link, - .nway_reset = lan78xx_nway_reset, + .nway_reset = phy_ethtool_nway_reset, .get_drvinfo = lan78xx_get_drvinfo, .get_msglevel = lan78xx_get_msglevel, .set_msglevel = lan78xx_set_msglevel, - .get_settings = lan78xx_get_settings, - .set_settings = lan78xx_set_settings, .get_eeprom_len = lan78xx_ethtool_get_eeprom_len, .get_eeprom = lan78xx_ethtool_get_eeprom, .set_eeprom = lan78xx_ethtool_set_eeprom, @@@ -1599,8 -1623,6 +1599,8 @@@ .set_eee = lan78xx_set_eee, .get_pauseparam = lan78xx_get_pause, .set_pauseparam = lan78xx_set_pause, + .get_link_ksettings = lan78xx_get_link_ksettings, + .set_link_ksettings = lan78xx_set_link_ksettings, };
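lan78xx_set_pause() above now works on struct ethtool_link_ksettings, converting the advertising bitmap to a legacy u32 only to patch in the Pause/Asym_Pause bits derived from the requested rx/tx flags via mii_advertise_flowctrl(). A stand-alone sketch of that pause-capability-to-advertisement mapping, using the standard MII bit values:

#include <stdio.h>

#define FLOW_CTRL_TX            0x1
#define FLOW_CTRL_RX            0x2
#define ADVERTISE_PAUSE_CAP     0x0400  /* standard MII advertisement bits */
#define ADVERTISE_PAUSE_ASYM    0x0800

/* same truth table as mii_advertise_flowctrl() */
static unsigned int advertise_flowctrl(unsigned int cap)
{
        unsigned int adv = 0;

        if (cap & FLOW_CTRL_RX)
                adv = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
        if (cap & FLOW_CTRL_TX)
                adv ^= ADVERTISE_PAUSE_ASYM;
        return adv;
}

int main(void)
{
        unsigned int cap;

        for (cap = 0; cap <= (FLOW_CTRL_TX | FLOW_CTRL_RX); cap++)
                printf("rx=%u tx=%u -> adv 0x%04x\n",
                       !!(cap & FLOW_CTRL_RX), !!(cap & FLOW_CTRL_TX),
                       advertise_flowctrl(cap));
        return 0;
}

Asymmetric pause is advertised when exactly one direction is requested, which is why the TX case toggles the ASYM bit rather than setting it.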
static int lan78xx_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd) @@@ -1812,127 -1834,6 +1812,127 @@@ static void lan78xx_link_status_change( } }
+static int irq_map(struct irq_domain *d, unsigned int irq, + irq_hw_number_t hwirq) +{ + struct irq_domain_data *data = d->host_data; + + irq_set_chip_data(irq, data); + irq_set_chip_and_handler(irq, data->irqchip, data->irq_handler); + irq_set_noprobe(irq); + + return 0; +} + +static void irq_unmap(struct irq_domain *d, unsigned int irq) +{ + irq_set_chip_and_handler(irq, NULL, NULL); + irq_set_chip_data(irq, NULL); +} + +static const struct irq_domain_ops chip_domain_ops = { + .map = irq_map, + .unmap = irq_unmap, +}; + +static void lan78xx_irq_mask(struct irq_data *irqd) +{ + struct irq_domain_data *data = irq_data_get_irq_chip_data(irqd); + + data->irqenable &= ~BIT(irqd_to_hwirq(irqd)); +} + +static void lan78xx_irq_unmask(struct irq_data *irqd) +{ + struct irq_domain_data *data = irq_data_get_irq_chip_data(irqd); + + data->irqenable |= BIT(irqd_to_hwirq(irqd)); +} + +static void lan78xx_irq_bus_lock(struct irq_data *irqd) +{ + struct irq_domain_data *data = irq_data_get_irq_chip_data(irqd); + + mutex_lock(&data->irq_lock); +} + +static void lan78xx_irq_bus_sync_unlock(struct irq_data *irqd) +{ + struct irq_domain_data *data = irq_data_get_irq_chip_data(irqd); + struct lan78xx_net *dev = + container_of(data, struct lan78xx_net, domain_data); + u32 buf; + int ret; + + /* call register access here because irq_bus_lock & irq_bus_sync_unlock + * are only two callbacks executed in non-atomic contex. + */ + ret = lan78xx_read_reg(dev, INT_EP_CTL, &buf); + if (buf != data->irqenable) + ret = lan78xx_write_reg(dev, INT_EP_CTL, data->irqenable); + + mutex_unlock(&data->irq_lock); +} + +static struct irq_chip lan78xx_irqchip = { + .name = "lan78xx-irqs", + .irq_mask = lan78xx_irq_mask, + .irq_unmask = lan78xx_irq_unmask, + .irq_bus_lock = lan78xx_irq_bus_lock, + .irq_bus_sync_unlock = lan78xx_irq_bus_sync_unlock, +}; + +static int lan78xx_setup_irq_domain(struct lan78xx_net *dev) +{ + struct device_node *of_node; + struct irq_domain *irqdomain; + unsigned int irqmap = 0; + u32 buf; + int ret = 0; + + of_node = dev->udev->dev.parent->of_node; + + mutex_init(&dev->domain_data.irq_lock); + + lan78xx_read_reg(dev, INT_EP_CTL, &buf); + dev->domain_data.irqenable = buf; + + dev->domain_data.irqchip = &lan78xx_irqchip; + dev->domain_data.irq_handler = handle_simple_irq; + + irqdomain = irq_domain_add_simple(of_node, MAX_INT_EP, 0, + &chip_domain_ops, &dev->domain_data); + if (irqdomain) { + /* create mapping for PHY interrupt */ + irqmap = irq_create_mapping(irqdomain, INT_EP_PHY); + if (!irqmap) { + irq_domain_remove(irqdomain); + + irqdomain = NULL; + ret = -EINVAL; + } + } else { + ret = -EINVAL; + } + + dev->domain_data.irqdomain = irqdomain; + dev->domain_data.phyirq = irqmap; + + return ret; +} + +static void lan78xx_remove_irq_domain(struct lan78xx_net *dev) +{ + if (dev->domain_data.phyirq > 0) { + irq_dispose_mapping(dev->domain_data.phyirq); + + if (dev->domain_data.irqdomain) + irq_domain_remove(dev->domain_data.irqdomain); + } + dev->domain_data.phyirq = 0; + dev->domain_data.irqdomain = NULL; +} + static int lan78xx_phy_init(struct lan78xx_net *dev) { int ret; @@@ -1945,15 -1846,15 +1945,15 @@@ return -EIO; }
- /* Enable PHY interrupts. - * We handle our own interrupt - */ - ret = phy_read(phydev, LAN88XX_INT_STS); - ret = phy_write(phydev, LAN88XX_INT_MASK, - LAN88XX_INT_MASK_MDINTPIN_EN_ | - LAN88XX_INT_MASK_LINK_CHANGE_); + /* if phyirq is not set, use polling mode in phylib */ + if (dev->domain_data.phyirq > 0) + phydev->irq = dev->domain_data.phyirq; + else + phydev->irq = 0; + netdev_dbg(dev->net, "phydev->irq = %d\n", phydev->irq);
- phydev->irq = PHY_IGNORE_INTERRUPT; + /* set to AUTOMDIX */ + phydev->mdix = ETH_TP_MDI_AUTO;
ret = phy_connect_direct(dev->net, phydev, lan78xx_link_status_change, @@@ -1964,6 -1865,9 +1964,6 @@@ return -EIO; }
- /* set to AUTOMDIX */ - lan78xx_set_mdix_status(dev->net, ETH_TP_MDI_AUTO); - /* MAC doesn't support 1000T Half */ phydev->supported &= ~SUPPORTED_1000baseT_Half;
@@@ -2066,6 -1970,11 +2066,6 @@@ static int lan78xx_change_mtu(struct ne int old_rx_urb_size = dev->rx_urb_size; int ret;
- if (new_mtu > MAX_SINGLE_PACKET_SIZE) - return -EINVAL; - - if (new_mtu <= 0) - return -EINVAL; /* no second zero-length packet read wanted after mtu-sized packets */ if ((ll_mtu % dev->maxpacket) == 0) return -EDOM; @@@ -2341,6 -2250,11 +2341,6 @@@ static int lan78xx_reset(struct lan78xx buf |= MAC_CR_AUTO_DUPLEX_ | MAC_CR_AUTO_SPEED_; ret = lan78xx_write_reg(dev, MAC_CR, buf);
- /* enable PHY interrupts */ - ret = lan78xx_read_reg(dev, INT_EP_CTL, &buf); - buf |= INT_ENP_PHY_INT; - ret = lan78xx_write_reg(dev, INT_EP_CTL, buf); - ret = lan78xx_read_reg(dev, MAC_TX, &buf); buf |= MAC_TX_TXEN_; ret = lan78xx_write_reg(dev, MAC_TX, buf); @@@ -2749,14 -2663,6 +2749,14 @@@ static int lan78xx_bind(struct lan78xx_
dev->net->hw_features = dev->net->features;
+ ret = lan78xx_setup_irq_domain(dev); + if (ret < 0) { + netdev_warn(dev->net, + "lan78xx_setup_irq_domain() failed : %d", ret); + kfree(pdata); + return ret; + } + /* Init all registers */ ret = lan78xx_reset(dev);
@@@ -2773,8 -2679,6 +2773,8 @@@ static void lan78xx_unbind(struct lan78 { struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
+ lan78xx_remove_irq_domain(dev); + lan78xx_remove_mdio(dev);
if (pdata) { @@@ -3474,9 -3378,6 +3474,9 @@@ static int lan78xx_probe(struct usb_int if (netdev->mtu > (dev->hard_mtu - netdev->hard_header_len)) netdev->mtu = dev->hard_mtu - netdev->hard_header_len;
+ /* MTU range: 68 - 9000 */ + netdev->max_mtu = MAX_SINGLE_PACKET_SIZE; + dev->ep_blkin = (intf->cur_altsetting)->endpoint + 0; dev->ep_blkout = (intf->cur_altsetting)->endpoint + 1; dev->ep_intr = (intf->cur_altsetting)->endpoint + 2; @@@ -3494,6 -3395,7 +3494,7 @@@ if (buf) { dev->urb_intr = usb_alloc_urb(0, GFP_KERNEL); if (!dev->urb_intr) { + ret = -ENOMEM; kfree(buf); goto out3; } else { diff --combined drivers/net/virtio_net.c index a21d93a,cbf1c61..b425fa1 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c @@@ -969,12 -969,17 +969,17 @@@ static int virtnet_set_mac_address(stru struct virtnet_info *vi = netdev_priv(dev); struct virtio_device *vdev = vi->vdev; int ret; - struct sockaddr *addr = p; + struct sockaddr *addr; struct scatterlist sg;
- ret = eth_prepare_mac_addr_change(dev, p); + addr = kmalloc(sizeof(*addr), GFP_KERNEL); + if (!addr) + return -ENOMEM; + memcpy(addr, p, sizeof(*addr)); + + ret = eth_prepare_mac_addr_change(dev, addr); if (ret) - return ret; + goto out;
if (virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_MAC_ADDR)) { sg_init_one(&sg, addr->sa_data, dev->addr_len); @@@ -982,7 -987,8 +987,8 @@@ VIRTIO_NET_CTRL_MAC_ADDR_SET, &sg)) { dev_warn(&vdev->dev, "Failed to set mac address by vq command.\n"); - return -EINVAL; + ret = -EINVAL; + goto out; } } else if (virtio_has_feature(vdev, VIRTIO_NET_F_MAC) && !virtio_has_feature(vdev, VIRTIO_F_VERSION_1)) { @@@ -996,8 -1002,11 +1002,11 @@@ }
eth_commit_mac_addr_change(dev, p); + ret = 0;
- return 0; + out: + kfree(addr); + return ret; }
static struct rtnl_link_stats64 *virtnet_stats(struct net_device *dev, @@@ -1419,6 -1428,17 +1428,6 @@@ static const struct ethtool_ops virtnet .set_settings = virtnet_set_settings, };
-#define MIN_MTU 68 -#define MAX_MTU 65535 - -static int virtnet_change_mtu(struct net_device *dev, int new_mtu) -{ - if (new_mtu < MIN_MTU || new_mtu > MAX_MTU) - return -EINVAL; - dev->mtu = new_mtu; - return 0; -} - static const struct net_device_ops virtnet_netdev = { .ndo_open = virtnet_open, .ndo_stop = virtnet_close, @@@ -1426,6 -1446,7 +1435,6 @@@ .ndo_validate_addr = eth_validate_addr, .ndo_set_mac_address = virtnet_set_mac_address, .ndo_set_rx_mode = virtnet_set_rx_mode, - .ndo_change_mtu = virtnet_change_mtu, .ndo_get_stats64 = virtnet_stats, .ndo_vlan_rx_add_vid = virtnet_vlan_rx_add_vid, .ndo_vlan_rx_kill_vid = virtnet_vlan_rx_kill_vid, @@@ -1741,9 -1762,6 +1750,9 @@@ static bool virtnet_validate_features(s return true; }
+#define MIN_MTU ETH_MIN_MTU +#define MAX_MTU ETH_MAX_MTU + static int virtnet_probe(struct virtio_device *vdev) { int i, err; @@@ -1817,10 -1835,6 +1826,10 @@@
dev->vlan_features = dev->features;
+ /* MTU range: 68 - 65535 */ + dev->min_mtu = MIN_MTU; + dev->max_mtu = MAX_MTU; + /* Configuration may specify what MAC to use. Otherwise random. */ if (virtio_has_feature(vdev, VIRTIO_NET_F_MAC)) virtio_cread_bytes(vdev, @@@ -1875,22 -1889,15 +1884,22 @@@ mtu = virtio_cread16(vdev, offsetof(struct virtio_net_config, mtu)); - if (virtnet_change_mtu(dev, mtu)) + if (mtu < dev->min_mtu) { __virtio_clear_bit(vdev, VIRTIO_NET_F_MTU); + } else { + dev->mtu = mtu; + dev->max_mtu = mtu; + } }
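With the driver's own virtnet_change_mtu() gone, MTU validation moves to the core via dev->min_mtu/dev->max_mtu; the probe hunk above additionally narrows the ceiling when the device advertises its own MTU through VIRTIO_NET_F_MTU. A tiny sketch of that accept-or-reject decision, using the 68/65535 bounds from the comment in the hunk:

#include <stdbool.h>
#include <stdio.h>

struct netdev { unsigned int mtu, min_mtu, max_mtu; };

/* apply a device-advertised MTU the way the probe hunk does */
static bool apply_device_mtu(struct netdev *dev, unsigned int mtu)
{
        if (mtu < dev->min_mtu)
                return false;   /* too small: keep the feature disabled */

        dev->mtu = mtu;
        dev->max_mtu = mtu;     /* the device's value also becomes the ceiling */
        return true;
}

int main(void)
{
        struct netdev dev = { .mtu = 1500, .min_mtu = 68, .max_mtu = 65535 };

        printf("mtu 9000 accepted: %d (max_mtu now %u)\n",
               apply_device_mtu(&dev, 9000), dev.max_mtu);
        printf("mtu 40 accepted: %d\n", apply_device_mtu(&dev, 40));
        return 0;
}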
if (vi->any_header_sg) dev->needed_headroom = vi->hdr_len;
- /* Use single tx/rx queue pair as default */ - vi->curr_queue_pairs = 1; + /* Enable multiqueue by default */ + if (num_online_cpus() >= max_queue_pairs) + vi->curr_queue_pairs = max_queue_pairs; + else + vi->curr_queue_pairs = num_online_cpus(); vi->max_queue_pairs = max_queue_pairs;
/* Allocate/initialize the rx/tx queues, and invoke find_vqs */ @@@ -1921,8 -1928,6 +1930,8 @@@ goto free_unregister_netdev; }
+ virtnet_set_affinity(vi); + /* Assume link up if device can't report link status, otherwise get link status from config. */ if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_STATUS)) { diff --combined net/batman-adv/translation-table.c index 447f949,0dc85eb..30ecbfb --- a/net/batman-adv/translation-table.c +++ b/net/batman-adv/translation-table.c @@@ -56,6 -56,7 +56,6 @@@ #include "hard-interface.h" #include "hash.h" #include "log.h" -#include "multicast.h" #include "netlink.h" #include "originator.h" #include "packet.h" @@@ -646,7 -647,6 +646,7 @@@ bool batadv_tt_local_add(struct net_dev struct net *net = dev_net(soft_iface); struct batadv_softif_vlan *vlan; struct net_device *in_dev = NULL; + struct batadv_hard_iface *in_hardif = NULL; struct hlist_head *head; struct batadv_tt_orig_list_entry *orig_entry; int hash_added, table_size, packet_size_max; @@@ -658,9 -658,6 +658,9 @@@ if (ifindex != BATADV_NULL_IFINDEX) in_dev = dev_get_by_index(net, ifindex);
+ if (in_dev) + in_hardif = batadv_hardif_get_by_netdev(in_dev); + tt_local = batadv_tt_local_hash_find(bat_priv, addr, vid);
if (!is_multicast_ether_addr(addr)) @@@ -734,7 -731,7 +734,7 @@@ */ tt_local->common.flags = BATADV_TT_CLIENT_NEW; tt_local->common.vid = vid; - if (batadv_is_wifi_netdev(in_dev)) + if (batadv_is_wifi_hardif(in_hardif)) tt_local->common.flags |= BATADV_TT_CLIENT_WIFI; kref_init(&tt_local->common.refcount); tt_local->last_seen = jiffies; @@@ -794,7 -791,7 +794,7 @@@ check_roaming */ remote_flags = tt_local->common.flags & BATADV_TT_REMOTE_MASK;
- if (batadv_is_wifi_netdev(in_dev)) + if (batadv_is_wifi_hardif(in_hardif)) tt_local->common.flags |= BATADV_TT_CLIENT_WIFI; else tt_local->common.flags &= ~BATADV_TT_CLIENT_WIFI; @@@ -818,8 -815,6 +818,8 @@@
ret = true; out: + if (in_hardif) + batadv_hardif_put(in_hardif); if (in_dev) dev_put(in_dev); if (tt_local) @@@ -3287,7 -3282,7 +3287,7 @@@ static bool batadv_send_my_tt_response( &tvlv_tt_data, &tt_change, &tt_len); - if (!tt_len) + if (!tt_len || !tvlv_len) goto unlock;
/* Copy the last orig_node's OGM buffer */ @@@ -3305,7 -3300,7 +3305,7 @@@ &tvlv_tt_data, &tt_change, &tt_len); - if (!tt_len) + if (!tt_len || !tvlv_len) goto out;
/* fill the rest of the tvlv with the real TT entries */ @@@ -3800,6 -3795,9 +3800,6 @@@ static void batadv_tt_local_commit_chan { lockdep_assert_held(&bat_priv->tt.commit_lock);
- /* Update multicast addresses in local translation table */ - batadv_mcast_mla_update(bat_priv); - if (atomic_read(&bat_priv->tt.local_changes) < 1) { if (!batadv_atomic_dec_not_zero(&bat_priv->tt.ogm_append_cnt)) batadv_tt_tvlv_container_update(bat_priv); @@@ -3837,8 -3835,8 +3837,8 @@@ void batadv_tt_local_commit_changes(str bool batadv_is_ap_isolated(struct batadv_priv *bat_priv, u8 *src, u8 *dst, unsigned short vid) { - struct batadv_tt_local_entry *tt_local_entry = NULL; - struct batadv_tt_global_entry *tt_global_entry = NULL; + struct batadv_tt_local_entry *tt_local_entry; + struct batadv_tt_global_entry *tt_global_entry; struct batadv_softif_vlan *vlan; bool ret = false;
@@@ -3847,24 -3845,27 +3847,24 @@@ return false;
if (!atomic_read(&vlan->ap_isolation)) - goto out; + goto vlan_put;
tt_local_entry = batadv_tt_local_hash_find(bat_priv, dst, vid); if (!tt_local_entry) - goto out; + goto vlan_put;
tt_global_entry = batadv_tt_global_hash_find(bat_priv, src, vid); if (!tt_global_entry) - goto out; + goto local_entry_put;
- if (!_batadv_is_ap_isolated(tt_local_entry, tt_global_entry)) - goto out; - - ret = true; + if (_batadv_is_ap_isolated(tt_local_entry, tt_global_entry)) + ret = true;
-out: + batadv_tt_global_entry_put(tt_global_entry); +local_entry_put: + batadv_tt_local_entry_put(tt_local_entry); +vlan_put: batadv_softif_vlan_put(vlan); - if (tt_global_entry) - batadv_tt_global_entry_put(tt_global_entry); - if (tt_local_entry) - batadv_tt_local_entry_put(tt_local_entry); return ret; }
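batadv_is_ap_isolated() above is restructured so each acquired reference is dropped under its own label, in reverse acquisition order, rather than NULL-initialising every pointer and re-testing it before each put. A stand-alone sketch of that staged-unwind idiom, with placeholder get/put helpers instead of the translation-table lookups:

#include <stdbool.h>
#include <stdio.h>

/* placeholder "get" helpers standing in for the reference-counted lookups */
static const char *get_ref(const char *what, bool available)
{
        return available ? what : NULL;
}

static void put_ref(const char *ref)
{
        printf("releasing %s\n", ref);
}

static bool check(bool have_local, bool have_global)
{
        const char *vlan, *local, *global;
        bool ret = false;

        vlan = get_ref("vlan", true);
        if (!vlan)
                return false;

        local = get_ref("local entry", have_local);
        if (!local)
                goto vlan_put;

        global = get_ref("global entry", have_global);
        if (!global)
                goto local_put;

        ret = true;                     /* both entries found */

        put_ref(global);
local_put:
        put_ref(local);
vlan_put:
        put_ref(vlan);
        return ret;
}

int main(void)
{
        printf("both found: %d\n\n", check(true, true));
        printf("global missing: %d\n", check(true, false));
        return 0;
}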
diff --combined net/bridge/br_sysfs_br.c index c9d2e0a,f88c4df..a181482 --- a/net/bridge/br_sysfs_br.c +++ b/net/bridge/br_sysfs_br.c @@@ -440,23 -440,6 +440,23 @@@ static ssize_t hash_max_store(struct de } static DEVICE_ATTR_RW(hash_max);
+static ssize_t multicast_igmp_version_show(struct device *d, + struct device_attribute *attr, + char *buf) +{ + struct net_bridge *br = to_bridge(d); + + return sprintf(buf, "%u\n", br->multicast_igmp_version); +} + +static ssize_t multicast_igmp_version_store(struct device *d, + struct device_attribute *attr, + const char *buf, size_t len) +{ + return store_bridge_parm(d, buf, len, br_multicast_set_igmp_version); +} +static DEVICE_ATTR_RW(multicast_igmp_version); + static ssize_t multicast_last_member_count_show(struct device *d, struct device_attribute *attr, char *buf) @@@ -659,25 -642,6 +659,25 @@@ static ssize_t multicast_stats_enabled_ return store_bridge_parm(d, buf, len, set_stats_enabled); } static DEVICE_ATTR_RW(multicast_stats_enabled); + +#if IS_ENABLED(CONFIG_IPV6) +static ssize_t multicast_mld_version_show(struct device *d, + struct device_attribute *attr, + char *buf) +{ + struct net_bridge *br = to_bridge(d); + + return sprintf(buf, "%u\n", br->multicast_mld_version); +} + +static ssize_t multicast_mld_version_store(struct device *d, + struct device_attribute *attr, + const char *buf, size_t len) +{ + return store_bridge_parm(d, buf, len, br_multicast_set_mld_version); +} +static DEVICE_ATTR_RW(multicast_mld_version); +#endif #endif #if IS_ENABLED(CONFIG_BRIDGE_NETFILTER) static ssize_t nf_call_iptables_show( @@@ -845,10 -809,6 +845,10 @@@ static struct attribute *bridge_attrs[ &dev_attr_multicast_query_response_interval.attr, &dev_attr_multicast_startup_query_interval.attr, &dev_attr_multicast_stats_enabled.attr, + &dev_attr_multicast_igmp_version.attr, +#if IS_ENABLED(CONFIG_IPV6) + &dev_attr_multicast_mld_version.attr, +#endif #endif #if IS_ENABLED(CONFIG_BRIDGE_NETFILTER) &dev_attr_nf_call_iptables.attr, @@@ -938,6 -898,7 +938,7 @@@ int br_sysfs_addbr(struct net_device *d if (!br->ifobj) { pr_info("%s: can't add kobject (directory) %s/%s\n", __func__, dev->name, SYSFS_BRIDGE_PORT_SUBDIR); + err = -ENOMEM; goto out3; } return 0; diff --combined net/ipv4/fib_trie.c index 73a6270,e3665bf..1b0e7d1 --- a/net/ipv4/fib_trie.c +++ b/net/ipv4/fib_trie.c @@@ -84,114 -84,25 +84,114 @@@ #include <trace/events/fib.h> #include "fib_lookup.h"
-static BLOCKING_NOTIFIER_HEAD(fib_chain); +static unsigned int fib_seq_sum(void) +{ + unsigned int fib_seq = 0; + struct net *net; + + rtnl_lock(); + for_each_net(net) + fib_seq += net->ipv4.fib_seq; + rtnl_unlock(); + + return fib_seq; +} + +static ATOMIC_NOTIFIER_HEAD(fib_chain); + +static int call_fib_notifier(struct notifier_block *nb, struct net *net, + enum fib_event_type event_type, + struct fib_notifier_info *info) +{ + info->net = net; + return nb->notifier_call(nb, event_type, info); +} + +static void fib_rules_notify(struct net *net, struct notifier_block *nb, + enum fib_event_type event_type) +{ +#ifdef CONFIG_IP_MULTIPLE_TABLES + struct fib_notifier_info info; + + if (net->ipv4.fib_has_custom_rules) + call_fib_notifier(nb, net, event_type, &info); +#endif +} + +static void fib_notify(struct net *net, struct notifier_block *nb, + enum fib_event_type event_type); + +static int call_fib_entry_notifier(struct notifier_block *nb, struct net *net, + enum fib_event_type event_type, u32 dst, + int dst_len, struct fib_info *fi, + u8 tos, u8 type, u32 tb_id, u32 nlflags) +{ + struct fib_entry_notifier_info info = { + .dst = dst, + .dst_len = dst_len, + .fi = fi, + .tos = tos, + .type = type, + .tb_id = tb_id, + .nlflags = nlflags, + }; + return call_fib_notifier(nb, net, event_type, &info.info); +} + +static bool fib_dump_is_consistent(struct notifier_block *nb, + void (*cb)(struct notifier_block *nb), + unsigned int fib_seq) +{ + atomic_notifier_chain_register(&fib_chain, nb); + if (fib_seq == fib_seq_sum()) + return true; + atomic_notifier_chain_unregister(&fib_chain, nb); + if (cb) + cb(nb); + return false; +}
-int register_fib_notifier(struct notifier_block *nb) +#define FIB_DUMP_MAX_RETRIES 5 +int register_fib_notifier(struct notifier_block *nb, + void (*cb)(struct notifier_block *nb)) { - return blocking_notifier_chain_register(&fib_chain, nb); + int retries = 0; + + do { + unsigned int fib_seq = fib_seq_sum(); + struct net *net; + + /* Mutex semantics guarantee that every change done to + * FIB tries before we read the change sequence counter + * is now visible to us. + */ + rcu_read_lock(); + for_each_net_rcu(net) { + fib_rules_notify(net, nb, FIB_EVENT_RULE_ADD); + fib_notify(net, nb, FIB_EVENT_ENTRY_ADD); + } + rcu_read_unlock(); + + if (fib_dump_is_consistent(nb, cb, fib_seq)) + return 0; + } while (++retries < FIB_DUMP_MAX_RETRIES); + + return -EBUSY; } EXPORT_SYMBOL(register_fib_notifier);
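register_fib_notifier() above snapshots a sequence counter summed over all network namespaces, replays the existing rules and entries to the new notifier, and keeps the registration only if the counter is unchanged afterwards, retrying up to FIB_DUMP_MAX_RETRIES times before giving up with -EBUSY. A stand-alone, single-threaded sketch of that dump-register-recheck loop (in the kernel the counter is bumped by concurrent FIB changes; here nothing races, so the retry never fires):

#include <stdbool.h>
#include <stdio.h>

#define DUMP_MAX_RETRIES 5

static unsigned int table_seq;          /* bumped on every table change */
static int table[4] = { 10, 20, 30, 40 };

static void dump_to_observer(void (*cb)(int entry))
{
        for (unsigned int i = 0; i < 4; i++)
                cb(table[i]);
}

/* keep an observer only if no change raced with the initial dump */
static bool register_observer(void (*cb)(int entry))
{
        for (int retries = 0; retries < DUMP_MAX_RETRIES; retries++) {
                unsigned int seq = table_seq;

                dump_to_observer(cb);
                if (seq == table_seq)
                        return true;    /* snapshot was consistent, keep it */
                /* a concurrent change slipped in: drop the dump and retry */
        }
        return false;
}

static void print_entry(int entry)
{
        printf("entry %d\n", entry);
}

int main(void)
{
        printf("registered: %d\n", register_observer(print_entry));
        return 0;
}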
int unregister_fib_notifier(struct notifier_block *nb) { - return blocking_notifier_chain_unregister(&fib_chain, nb); + return atomic_notifier_chain_unregister(&fib_chain, nb); } EXPORT_SYMBOL(unregister_fib_notifier);
int call_fib_notifiers(struct net *net, enum fib_event_type event_type, struct fib_notifier_info *info) { + net->ipv4.fib_seq++; info->net = net; - return blocking_notifier_call_chain(&fib_chain, event_type, info); + return atomic_notifier_call_chain(&fib_chain, event_type, info); }
static int call_fib_entry_notifiers(struct net *net, @@@ -808,6 -719,13 +808,13 @@@ static unsigned char update_suffix(stru { unsigned char slen = tn->pos; unsigned long stride, i; + unsigned char slen_max; + + /* only vector 0 can have a suffix length greater than or equal to + * tn->pos + tn->bits, the second highest node will have a suffix + * length at most of tn->pos + tn->bits - 1 + */ + slen_max = min_t(unsigned char, tn->pos + tn->bits - 1, tn->slen);
/* search though the list of children looking for nodes that might * have a suffix greater than the one we currently have. This is @@@ -825,12 -743,8 +832,8 @@@ slen = n->slen; i &= ~(stride - 1);
- /* if slen covers all but the last bit we can stop here - * there will be nothing longer than that since only node - * 0 and 1 << (bits - 1) could have that as their suffix - * length. - */ - if ((slen + 1) >= (tn->pos + tn->bits)) + /* stop searching if we have hit the maximum possible value */ + if (slen >= slen_max) break; }
@@@ -1002,39 -916,27 +1005,27 @@@ static struct key_vector *resize(struc return collapse(t, tn);
/* update parent in case halve failed */ - tp = node_parent(tn); - - /* Return if at least one deflate was run */ - if (max_work != MAX_WORK) - return tp; - - /* push the suffix length to the parent node */ - if (tn->slen > tn->pos) { - unsigned char slen = update_suffix(tn); - - if (slen > tp->slen) - tp->slen = slen; - } - - return tp; + return node_parent(tn); }
- static void leaf_pull_suffix(struct key_vector *tp, struct key_vector *l) + static void node_pull_suffix(struct key_vector *tn, unsigned char slen) { - while ((tp->slen > tp->pos) && (tp->slen > l->slen)) { - if (update_suffix(tp) > l->slen) + unsigned char node_slen = tn->slen; + + while ((node_slen > tn->pos) && (node_slen > slen)) { + slen = update_suffix(tn); + if (node_slen == slen) break; - tp = node_parent(tp); + + tn = node_parent(tn); + node_slen = tn->slen; } }
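The renamed helpers take an explicit suffix length instead of a leaf, so they can be reused on internal nodes. A small self-contained sketch of the two walks on a toy parent chain; struct toy_node and the recompute callback are stand-ins, not the kernel's key_vector and update_suffix().

#include <stdio.h>

struct toy_node {
        struct toy_node *parent;
        unsigned char pos;      /* bit index this node splits on */
        unsigned char slen;     /* longest suffix length stored below it */
};

/* mirrors node_push_suffix(): grow slen towards the root while the new
 * suffix is longer than what each ancestor already records
 */
static void toy_push_suffix(struct toy_node *tn, unsigned char slen)
{
        while (tn && tn->slen < slen) {
                tn->slen = slen;
                tn = tn->parent;
        }
}

/* mirrors node_pull_suffix(): after a removal, shrink slen towards the
 * root until a recomputation no longer changes anything.  recompute()
 * stands in for update_suffix(); it must store and return tn's new slen.
 */
static void toy_pull_suffix(struct toy_node *tn, unsigned char slen,
                            unsigned char (*recompute)(struct toy_node *))
{
        unsigned char node_slen = tn->slen;

        while (tn && node_slen > tn->pos && node_slen > slen) {
                slen = recompute(tn);
                if (node_slen == slen)
                        break;                  /* nothing shrank, stop */
                tn = tn->parent;
                node_slen = tn ? tn->slen : 0;  /* toy stand-in for the trie root */
        }
}

/* toy recomputation: pretend the longest remaining suffix is pos + 1 */
static unsigned char shrink_to_pos_plus_one(struct toy_node *tn)
{
        return tn->slen = tn->pos + 1;
}

int main(void)
{
        struct toy_node root  = { .parent = NULL,   .pos = 24, .slen = 26 };
        struct toy_node inner = { .parent = &root,  .pos = 16, .slen = 20 };
        struct toy_node low   = { .parent = &inner, .pos = 8,  .slen = 12 };

        toy_push_suffix(&low, 30);      /* a longer suffix was inserted below */
        printf("after push: %u %u %u\n", low.slen, inner.slen, root.slen);

        toy_pull_suffix(&low, 9, shrink_to_pos_plus_one);  /* ...and removed */
        printf("after pull: %u %u %u\n", low.slen, inner.slen, root.slen);
        return 0;
}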
- static void leaf_push_suffix(struct key_vector *tn, struct key_vector *l) + static void node_push_suffix(struct key_vector *tn, unsigned char slen) { - /* if this is a new leaf then tn will be NULL and we can sort - * out parent suffix lengths as a part of trie_rebalance - */ - while (tn->slen < l->slen) { - tn->slen = l->slen; + while (tn->slen < slen) { + tn->slen = slen; tn = node_parent(tn); } } @@@ -1155,6 -1057,7 +1146,7 @@@ static int fib_insert_node(struct trie }
/* Case 3: n is NULL, and will just insert a new leaf */ + node_push_suffix(tp, new->fa_slen); NODE_INIT_PARENT(l, tp); put_child_root(tp, key, l); trie_rebalance(t, tp); @@@ -1196,7 -1099,7 +1188,7 @@@ static int fib_insert_alias(struct tri /* if we added to the tail node then we need to update slen */ if (l->slen < new->fa_slen) { l->slen = new->fa_slen; - leaf_push_suffix(tp, l); + node_push_suffix(tp, new->fa_slen); }
return 0; @@@ -1588,6 -1491,8 +1580,8 @@@ static void fib_remove_alias(struct tri * out parent suffix lengths as a part of trie_rebalance */ if (hlist_empty(&l->leaf)) { + if (tp->slen == l->slen) + node_pull_suffix(tp, tp->pos); put_child_root(tp, l->key, NULL); node_free(l); trie_rebalance(t, tp); @@@ -1600,7 -1505,7 +1594,7 @@@
/* update the trie with the latest suffix length */ l->slen = fa->fa_slen; - leaf_pull_suffix(tp, l); + node_pull_suffix(tp, fa->fa_slen); }
/* Caller must hold RTNL. */ @@@ -1872,6 -1777,10 +1866,10 @@@ void fib_table_flush_external(struct fi if (IS_TRIE(pn)) break;
+ /* update the suffix to address pulled leaves */ + if (pn->slen > pn->pos) + update_suffix(pn); + /* resize completed node */ pn = resize(t, pn); cindex = get_index(pkey, pn); @@@ -1938,6 -1847,10 +1936,10 @@@ int fib_table_flush(struct net *net, st if (IS_TRIE(pn)) break;
+ /* update the suffix to address pulled leaves */ + if (pn->slen > pn->pos) + update_suffix(pn); + /* resize completed node */ pn = resize(t, pn); cindex = get_index(pkey, pn); @@@ -1990,62 -1903,6 +1992,62 @@@ return found; }
+static void fib_leaf_notify(struct net *net, struct key_vector *l, + struct fib_table *tb, struct notifier_block *nb, + enum fib_event_type event_type) +{ + struct fib_alias *fa; + + hlist_for_each_entry_rcu(fa, &l->leaf, fa_list) { + struct fib_info *fi = fa->fa_info; + + if (!fi) + continue; + + /* local and main table can share the same trie, + * so don't notify twice for the same entry. + */ + if (tb->tb_id != fa->tb_id) + continue; + + call_fib_entry_notifier(nb, net, event_type, l->key, + KEYLENGTH - fa->fa_slen, fi, fa->fa_tos, + fa->fa_type, fa->tb_id, 0); + } +} + +static void fib_table_notify(struct net *net, struct fib_table *tb, + struct notifier_block *nb, + enum fib_event_type event_type) +{ + struct trie *t = (struct trie *)tb->tb_data; + struct key_vector *l, *tp = t->kv; + t_key key = 0; + + while ((l = leaf_walk_rcu(&tp, key)) != NULL) { + fib_leaf_notify(net, l, tb, nb, event_type); + + key = l->key + 1; + /* stop in case of wrap around */ + if (key < l->key) + break; + } +} + +static void fib_notify(struct net *net, struct notifier_block *nb, + enum fib_event_type event_type) +{ + unsigned int h; + + for (h = 0; h < FIB_TABLE_HASHSZ; h++) { + struct hlist_head *head = &net->ipv4.fib_table_hash[h]; + struct fib_table *tb; + + hlist_for_each_entry_rcu(tb, head, tb_hlist) + fib_table_notify(net, tb, nb, event_type); + } +} + static void __trie_free_rcu(struct rcu_head *head) { struct fib_table *tb = container_of(head, struct fib_table, rcu); diff --combined net/ipv4/ping.c index d11129f,96b8e2b..5b2635e --- a/net/ipv4/ping.c +++ b/net/ipv4/ping.c @@@ -657,6 -657,10 +657,10 @@@ int ping_common_sendmsg(int family, str if (len > 0xFFFF) return -EMSGSIZE;
+ /* Must have at least a full ICMP header. */ + if (len < icmph_len) + return -EINVAL; + /* * Check the flags. */ @@@ -789,8 -793,7 +793,8 @@@ static int ping_v4_sendmsg(struct sock
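The new length check means a datagram ICMP "ping" socket can no longer submit less than a full 8-byte ICMP header. A userspace sketch of the visible behaviour, assuming net.ipv4.ping_group_range allows the caller's group (otherwise the socket() call itself fails):

#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <arpa/inet.h>
#include <netinet/in.h>
#include <netinet/ip_icmp.h>
#include <sys/socket.h>

int main(void)
{
        struct sockaddr_in dst = {
                .sin_family = AF_INET,
                .sin_addr.s_addr = htonl(INADDR_LOOPBACK),
        };
        unsigned char buf[sizeof(struct icmphdr)] = { ICMP_ECHO };
        int fd = socket(AF_INET, SOCK_DGRAM, IPPROTO_ICMP);

        if (fd < 0) {
                perror("ping socket");  /* blocked by ping_group_range? */
                return 1;
        }

        /* 4 bytes is less than the 8-byte ICMP header: rejected with EINVAL */
        if (sendto(fd, buf, 4, 0, (struct sockaddr *)&dst, sizeof(dst)) < 0)
                printf("short send: %s\n", strerror(errno));

        /* a full ICMP_ECHO header (type 8, code 0) is still accepted */
        if (sendto(fd, buf, sizeof(buf), 0, (struct sockaddr *)&dst, sizeof(dst)) < 0)
                printf("full send: %s\n", strerror(errno));

        return 0;
}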
flowi4_init_output(&fl4, ipc.oif, sk->sk_mark, tos, RT_SCOPE_UNIVERSE, sk->sk_protocol, - inet_sk_flowi_flags(sk), faddr, saddr, 0, 0); + inet_sk_flowi_flags(sk), faddr, saddr, 0, 0, + sk->sk_uid);
security_sk_classify_flow(sk, flowi4_to_flowi(&fl4)); rt = ip_route_output_flow(net, &fl4, sk); diff --combined net/ipv4/tcp_input.c index fe668c1,c71d49c..6c79075 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@@ -85,7 -85,6 +85,7 @@@ int sysctl_tcp_dsack __read_mostly = 1 int sysctl_tcp_app_win __read_mostly = 31; int sysctl_tcp_adv_win_scale __read_mostly = 1; EXPORT_SYMBOL(sysctl_tcp_adv_win_scale); +EXPORT_SYMBOL(sysctl_tcp_timestamps);
/* rfc5961 challenge ack rate limiting */ int sysctl_tcp_challenge_ack_limit = 1000; @@@ -129,6 -128,23 +129,23 @@@ int sysctl_tcp_invalid_ratelimit __read #define REXMIT_LOST 1 /* retransmit packets marked lost */ #define REXMIT_NEW 2 /* FRTO-style transmit of unsent/new packets */
+ static void tcp_gro_dev_warn(struct sock *sk, const struct sk_buff *skb) + { + static bool __once __read_mostly; + + if (!__once) { + struct net_device *dev; + + __once = true; + + rcu_read_lock(); + dev = dev_get_by_index_rcu(sock_net(sk), skb->skb_iif); + pr_warn("%s: Driver has suspect GRO implementation, TCP performance may be compromised.\n", + dev ? dev->name : "Unknown driver"); + rcu_read_unlock(); + } + } + /* Adapt the MSS value used to make delayed ack decision to the * real world. */ @@@ -145,7 -161,10 +162,10 @@@ static void tcp_measure_rcv_mss(struct */ len = skb_shinfo(skb)->gso_size ? : skb->len; if (len >= icsk->icsk_ack.rcv_mss) { - icsk->icsk_ack.rcv_mss = len; + icsk->icsk_ack.rcv_mss = min_t(unsigned int, len, + tcp_sk(sk)->advmss); + if (unlikely(icsk->icsk_ack.rcv_mss != len)) + tcp_gro_dev_warn(sk, skb); } else { /* Otherwise, we make more careful check taking into account, * that SACKs block is variable. @@@ -2395,7 -2414,10 +2415,7 @@@ static void tcp_undo_cwnd_reduction(str if (tp->prior_ssthresh) { const struct inet_connection_sock *icsk = inet_csk(sk);
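A standalone model of the clamp introduced above: the delayed-ACK MSS estimate is capped at the advertised MSS, and the first aggregate that would have pushed past the cap triggers a one-time warning (in the kernel, tcp_gro_dev_warn() additionally names the suspect device). This is a sketch of the logic only, not the kernel's types.

#include <stdbool.h>
#include <stdio.h>

static unsigned int rcv_mss;

static void measure_rcv_mss(unsigned int seg_len, unsigned int advmss)
{
        static bool warned;

        if (seg_len < rcv_mss)
                return;

        rcv_mss = seg_len < advmss ? seg_len : advmss;   /* the min_t() clamp */

        if (rcv_mss != seg_len && !warned) {
                warned = true;
                fprintf(stderr,
                        "suspect GRO: %u byte aggregate exceeds advmss %u\n",
                        seg_len, advmss);
        }
}

int main(void)
{
        rcv_mss = 536;
        measure_rcv_mss(1448, 1460);    /* normal segment, no warning */
        measure_rcv_mss(4096, 1460);    /* mis-aggregated GRO frame, warns once */
        measure_rcv_mss(8192, 1460);    /* already warned, stays silent */
        printf("final rcv_mss estimate: %u\n", rcv_mss);
        return 0;
}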
- if (icsk->icsk_ca_ops->undo_cwnd) - tp->snd_cwnd = icsk->icsk_ca_ops->undo_cwnd(sk); - else - tp->snd_cwnd = max(tp->snd_cwnd, tp->snd_ssthresh << 1); + tp->snd_cwnd = icsk->icsk_ca_ops->undo_cwnd(sk);
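With the generic fallback removed, every congestion-control module must now supply .undo_cwnd itself. A minimal kernel-side sketch, assuming a module with no private undo state; the example_* names are illustrative, and the callback body simply reproduces the fallback deleted above.

#include <linux/init.h>
#include <linux/module.h>
#include <net/tcp.h>

static u32 example_undo_cwnd(struct sock *sk)
{
        const struct tcp_sock *tp = tcp_sk(sk);

        /* same recovery the removed generic fallback performed */
        return max(tp->snd_cwnd, tp->snd_ssthresh << 1);
}

static struct tcp_congestion_ops example_cong_ops __read_mostly = {
        .name       = "example",
        .owner      = THIS_MODULE,
        .ssthresh   = tcp_reno_ssthresh,
        .cong_avoid = tcp_reno_cong_avoid,
        .undo_cwnd  = example_undo_cwnd,        /* now mandatory */
};

static int __init example_cong_init(void)
{
        return tcp_register_congestion_control(&example_cong_ops);
}
module_init(example_cong_init);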
if (tp->prior_ssthresh > tp->snd_ssthresh) { tp->snd_ssthresh = tp->prior_ssthresh; @@@ -3179,9 -3201,6 +3199,9 @@@ static int tcp_clean_rtx_queue(struct s tp->lost_skb_hint = NULL; }
+ if (!skb) + tcp_chrono_stop(sk, TCP_CHRONO_BUSY); + if (likely(between(tp->snd_up, prior_snd_una, tp->snd_una))) tp->snd_up = tp->snd_una;
@@@ -3352,7 -3371,9 +3372,7 @@@ static void tcp_snd_una_update(struct t u32 delta = ack - tp->snd_una;
sock_owned_by_me((struct sock *)tp); - u64_stats_update_begin_raw(&tp->syncp); tp->bytes_acked += delta; - u64_stats_update_end_raw(&tp->syncp); tp->snd_una = ack; }
@@@ -3362,7 -3383,9 +3382,7 @@@ static void tcp_rcv_nxt_update(struct t u32 delta = seq - tp->rcv_nxt;
sock_owned_by_me((struct sock *)tp); - u64_stats_update_begin_raw(&tp->syncp); tp->bytes_received += delta; - u64_stats_update_end_raw(&tp->syncp); tp->rcv_nxt = seq; }
@@@ -5060,11 -5083,8 +5080,11 @@@ static void tcp_check_space(struct soc /* pairs with tcp_poll() */ smp_mb__after_atomic(); if (sk->sk_socket && - test_bit(SOCK_NOSPACE, &sk->sk_socket->flags)) + test_bit(SOCK_NOSPACE, &sk->sk_socket->flags)) { tcp_new_space(sk); + if (!test_bit(SOCK_NOSPACE, &sk->sk_socket->flags)) + tcp_chrono_stop(sk, TCP_CHRONO_SNDBUF_LIMITED); + } } }
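The TCP_CHRONO_BUSY and TCP_CHRONO_SNDBUF_LIMITED start/stop points above feed per-socket sender-limit chronographs. A userspace sketch of reading them on a connected TCP socket, assuming the companion patch in this series that exports them through TCP_INFO; the tcpi_busy_time, tcpi_rwnd_limited and tcpi_sndbuf_limited field names are taken from that series and need matching uapi headers.

#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <linux/tcp.h>          /* struct tcp_info with the new fields */

static void print_sender_limits(int fd)
{
        struct tcp_info ti;
        socklen_t len = sizeof(ti);

        memset(&ti, 0, sizeof(ti));
        if (getsockopt(fd, IPPROTO_TCP, TCP_INFO, &ti, &len) < 0) {
                perror("TCP_INFO");
                return;
        }

        /* all three are cumulative microsecond counts */
        printf("busy %llu us, rwnd-limited %llu us, sndbuf-limited %llu us\n",
               (unsigned long long)ti.tcpi_busy_time,
               (unsigned long long)ti.tcpi_rwnd_limited,
               (unsigned long long)ti.tcpi_sndbuf_limited);
}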
@@@ -6298,7 -6318,13 +6318,7 @@@ int tcp_conn_request(struct request_soc goto drop; }
- - /* Accept backlog is full. If we have already queued enough - * of warm entries in syn queue, drop request. It is better than - * clogging syn queue with openreqs with exponentially increasing - * timeout. - */ - if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1) { + if (sk_acceptq_is_full(sk)) { NET_INC_STATS(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS); goto drop; } @@@ -6308,7 -6334,6 +6328,7 @@@ goto drop;
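SYNs dropped by the simplified full-accept-queue check above are counted in LINUX_MIB_LISTENOVERFLOWS, which userspace sees as the ListenOverflows column on the TcpExt: lines of /proc/net/netstat. A small reader:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static long read_listen_overflows(void)
{
        char names[4096], values[4096];
        long result = -1;
        FILE *f = fopen("/proc/net/netstat", "r");

        if (!f)
                return -1;

        /* the file pairs a "TcpExt: <names>" line with a "TcpExt: <values>" line */
        while (fgets(names, sizeof(names), f) &&
               fgets(values, sizeof(values), f)) {
                char *np, *vp;
                char *n = strtok_r(names, " \n", &np);
                char *v = strtok_r(values, " \n", &vp);

                if (!n || !v || strcmp(n, "TcpExt:"))
                        continue;

                while ((n = strtok_r(NULL, " \n", &np)) &&
                       (v = strtok_r(NULL, " \n", &vp))) {
                        if (!strcmp(n, "ListenOverflows")) {
                                result = atol(v);
                                goto out;
                        }
                }
        }
out:
        fclose(f);
        return result;
}

int main(void)
{
        printf("ListenOverflows: %ld\n", read_listen_overflows());
        return 0;
}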
tcp_rsk(req)->af_specific = af_ops; + tcp_rsk(req)->ts_off = 0;
tcp_clear_options(&tmp_opt); tmp_opt.mss_clamp = af_ops->mss_clamp; @@@ -6330,9 -6355,6 +6350,9 @@@ if (security_inet_conn_request(sk, skb, req)) goto drop_and_free;
+ if (isn && tmp_opt.tstamp_ok) + af_ops->init_seq(skb, &tcp_rsk(req)->ts_off); + if (!want_cookie && !isn) { /* VJ's idea. We save last timestamp seen * from the destination in peer table, when entering @@@ -6373,7 -6395,7 +6393,7 @@@ goto drop_and_release; }
- isn = af_ops->init_seq(skb); + isn = af_ops->init_seq(skb, &tcp_rsk(req)->ts_off); } if (!dst) { dst = af_ops->route_req(sk, &fl, req, NULL); @@@ -6385,7 -6407,6 +6405,7 @@@
if (want_cookie) { isn = cookie_init_sequence(af_ops, sk, skb, &req->mss); + tcp_rsk(req)->ts_off = 0; req->cookie_ts = tmp_opt.tstamp_ok; if (!tmp_opt.tstamp_ok) inet_rsk(req)->ecn_ok = 0;
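The ts_off plumbing above gives each connection its own timestamp offset, so raw TSval values can no longer be compared across connections to fingerprint a host. A conceptual userspace sketch: the names and the hash below are illustrative only (the kernel derives the offset from a keyed secure hash over the 4-tuple), but they show how the offset is applied on send and stripped from the echoed value again.

#include <stdint.h>
#include <stdio.h>

struct conn_id {
        uint32_t saddr, daddr;
        uint16_t sport, dport;
};

/* stand-in for the kernel's keyed hash over the 4-tuple plus a boot-time
 * secret; NOT the real derivation, just enough to make the point
 */
static uint32_t tsoff_for_conn(const struct conn_id *id, uint32_t secret)
{
        uint32_t ports = ((uint32_t)id->sport << 16) | id->dport;

        return (id->saddr ^ id->daddr ^ ports) * 2654435761u ^ secret;
}

/* every outgoing TSval carries the per-connection offset ... */
static uint32_t tsval_to_send(uint32_t local_clock, uint32_t ts_off)
{
        return local_clock + ts_off;
}

/* ... and it is removed from the echoed TSecr before RTT calculations */
static uint32_t tsecr_to_local(uint32_t echoed, uint32_t ts_off)
{
        return echoed - ts_off;
}

int main(void)
{
        struct conn_id a = { 0x0a000001, 0x0a000002, 40000, 443 };
        struct conn_id b = { 0x0a000001, 0x0a000003, 40001, 443 };
        uint32_t secret = 0xdeadbeef;   /* placeholder for the boot-time secret */
        uint32_t clock = 123456;
        uint32_t off_a = tsoff_for_conn(&a, secret);

        printf("conn a sends TSval %u; its echo maps back to local clock %u\n",
               tsval_to_send(clock, off_a),
               tsecr_to_local(tsval_to_send(clock, off_a), off_a));
        printf("conn b sends TSval %u at the same local clock %u\n",
               tsval_to_send(clock, tsoff_for_conn(&b, secret)), clock);
        return 0;
}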