The following commit has been merged in the master branch:

commit f9aa9dc7d2d00e6eb02168ffc64ef614b89d7998
Merge: 06b37b650cf826349677564cb0ff1560ed8e51fc 3b404a519815b9820f73f1ecf404e5546c9270ba
Author: David S. Miller <davem@davemloft.net>
Date:   Tue Nov 22 11:29:28 2016 -0500
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
All conflicts were simple overlapping changes except perhaps for the Thunder driver.
That driver has a change_mtu method that exists explicitly to send a message to the hardware. If that send fails, it returns an error.
Normally a driver doesn't need an ndo_change_mtu method at all, because usually the method only performs range checks, and those are now handled generically by the networking core. But since this extra hardware operation is needed in the Thunder driver, its method has to stay.
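As several hunks below show (bnxt, macb, nicvf and sky2 all gain min_mtu/max_mtu assignments), the generic handling amounts to the driver publishing its legal MTU window at probe time; dev_set_mtu() in the core then rejects out-of-range values before any driver callback runs. A minimal sketch of that pattern, borrowing the 60-9500 bounds the bnxt change adopts:

	/* Probe-time setup: the core range-checks requested MTUs against
	 * these bounds, so an ndo_change_mtu that only did range checking
	 * can simply be deleted.
	 */
	dev->min_mtu = ETH_ZLEN;	/* 60 */
	dev->max_mtu = 9500;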
However, if the message send fails, we have to restore the MTU to its value from before the change, because the entire call chain expects that if an error is returned by ndo_change_mtu then the MTU did not change. Therefore code is added to nicvf_change_mtu to remember the original MTU, and to restore it upon nicvf_update_hw_max_frs() failure.
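Pieced together from the nicvf hunk further down, the merged method reads roughly as follows (a sketch of the net result, not a verbatim quote of the file):

	static int nicvf_change_mtu(struct net_device *netdev, int new_mtu)
	{
		struct nicvf *nic = netdev_priv(netdev);
		int orig_mtu = netdev->mtu;

		netdev->mtu = new_mtu;

		/* Device is down: only software state changes; the hardware
		 * is programmed with netdev->mtu at the next nicvf_open().
		 */
		if (!netif_running(netdev))
			return 0;

		if (nicvf_update_hw_max_frs(nic, new_mtu)) {
			/* Roll back so callers see an unchanged MTU alongside
			 * the error, as the ndo_change_mtu contract requires.
			 */
			netdev->mtu = orig_mtu;
			return -EINVAL;
		}

		return 0;
	}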
Signed-off-by: David S. Miller <davem@davemloft.net>
diff --combined MAINTAINERS index 9870812,ad9b965..e589ae6 --- a/MAINTAINERS +++ b/MAINTAINERS @@@ -2530,8 -2530,6 +2530,8 @@@ L: netdev@vger.kernel.or L: linux-kernel@vger.kernel.org S: Supported F: kernel/bpf/ +F: tools/testing/selftests/bpf/ +F: lib/test_bpf.c
BROADCOM B44 10/100 ETHERNET DRIVER M: Michael Chan michael.chan@broadcom.com @@@ -7086,6 -7084,7 +7086,7 @@@ F: drivers/scsi/53c700 LED SUBSYSTEM M: Richard Purdie rpurdie@rpsys.net M: Jacek Anaszewski j.anaszewski@samsung.com + M: Pavel Machek pavel@ucw.cz L: linux-leds@vger.kernel.org T: git git://git.kernel.org/pub/scm/linux/kernel/git/j.anaszewski/linux-leds.git S: Maintained @@@ -8450,6 -8449,7 +8451,6 @@@ F: include/uapi/linux/net_namespace. F: tools/net/ F: tools/testing/selftests/net/ F: lib/random32.c -F: lib/test_bpf.c
NETWORKING [IPv4/IPv6] M: "David S. Miller" davem@davemloft.net diff --combined drivers/infiniband/core/cma.c index c68f4fe,2a6fc47..22fcf28 --- a/drivers/infiniband/core/cma.c +++ b/drivers/infiniband/core/cma.c @@@ -116,7 -116,7 +116,7 @@@ static LIST_HEAD(dev_list) static LIST_HEAD(listen_any_list); static DEFINE_MUTEX(lock); static struct workqueue_struct *cma_wq; -static int cma_pernet_id; +static unsigned int cma_pernet_id;
struct cma_pernet { struct idr tcp_ps; @@@ -2438,6 -2438,18 +2438,18 @@@ static int iboe_tos_to_sl(struct net_de return 0; }
+ static enum ib_gid_type cma_route_gid_type(enum rdma_network_type network_type, + unsigned long supported_gids, + enum ib_gid_type default_gid) + { + if ((network_type == RDMA_NETWORK_IPV4 || + network_type == RDMA_NETWORK_IPV6) && + test_bit(IB_GID_TYPE_ROCE_UDP_ENCAP, &supported_gids)) + return IB_GID_TYPE_ROCE_UDP_ENCAP; + + return default_gid; + } + static int cma_resolve_iboe_route(struct rdma_id_private *id_priv) { struct rdma_route *route = &id_priv->id.route; @@@ -2463,6 -2475,8 +2475,8 @@@ route->num_paths = 1;
if (addr->dev_addr.bound_dev_if) { + unsigned long supported_gids; + ndev = dev_get_by_index(&init_net, addr->dev_addr.bound_dev_if); if (!ndev) { ret = -ENODEV; @@@ -2486,7 -2500,12 +2500,12 @@@
route->path_rec->net = &init_net; route->path_rec->ifindex = ndev->ifindex; - route->path_rec->gid_type = id_priv->gid_type; + supported_gids = roce_gid_type_mask_support(id_priv->id.device, + id_priv->id.port_num); + route->path_rec->gid_type = + cma_route_gid_type(addr->dev_addr.network, + supported_gids, + id_priv->gid_type); } if (!ndev) { ret = -ENODEV; diff --combined drivers/infiniband/hw/mlx5/main.c index 76ed57f,32b09f0..2be65dd --- a/drivers/infiniband/hw/mlx5/main.c +++ b/drivers/infiniband/hw/mlx5/main.c @@@ -1771,13 -1771,13 +1771,13 @@@ static int mlx5_ib_destroy_flow(struct mutex_lock(&dev->flow_db.lock);
list_for_each_entry_safe(iter, tmp, &handler->list, list) { - mlx5_del_flow_rule(iter->rule); + mlx5_del_flow_rules(iter->rule); put_flow_table(dev, iter->prio, true); list_del(&iter->list); kfree(iter); }
- mlx5_del_flow_rule(handler->rule); + mlx5_del_flow_rules(handler->rule); put_flow_table(dev, handler->prio, true); mutex_unlock(&dev->flow_db.lock);
@@@ -1857,7 -1857,7 +1857,7 @@@ static struct mlx5_ib_flow_prio *get_fl ft = mlx5_create_auto_grouped_flow_table(ns, priority, num_entries, num_groups, - 0); + 0, 0);
if (!IS_ERR(ft)) { prio->refcount = 0; @@@ -1877,10 -1877,10 +1877,10 @@@ static struct mlx5_ib_flow_handler *cre { struct mlx5_flow_table *ft = ft_prio->flow_table; struct mlx5_ib_flow_handler *handler; + struct mlx5_flow_act flow_act = {0}; struct mlx5_flow_spec *spec; const void *ib_flow = (const void *)flow_attr + sizeof(*flow_attr); unsigned int spec_index; - u32 action; int err = 0;
if (!is_valid_attr(flow_attr)) @@@ -1905,12 -1905,12 +1905,12 @@@ }
spec->match_criteria_enable = get_match_criteria_enable(spec->match_criteria); - action = dst ? MLX5_FLOW_CONTEXT_ACTION_FWD_DEST : + flow_act.action = dst ? MLX5_FLOW_CONTEXT_ACTION_FWD_DEST : MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO; - handler->rule = mlx5_add_flow_rule(ft, spec, - action, - MLX5_FS_DEFAULT_FLOW_TAG, - dst); + flow_act.flow_tag = MLX5_FS_DEFAULT_FLOW_TAG; + handler->rule = mlx5_add_flow_rules(ft, spec, + &flow_act, + dst, 1);
if (IS_ERR(handler->rule)) { err = PTR_ERR(handler->rule); @@@ -1941,7 -1941,7 +1941,7 @@@ static struct mlx5_ib_flow_handler *cre handler_dst = create_flow_rule(dev, ft_prio, flow_attr, dst); if (IS_ERR(handler_dst)) { - mlx5_del_flow_rule(handler->rule); + mlx5_del_flow_rules(handler->rule); ft_prio->refcount--; kfree(handler); handler = handler_dst; @@@ -2004,7 -2004,7 +2004,7 @@@ static struct mlx5_ib_flow_handler *cre &leftovers_specs[LEFTOVERS_UC].flow_attr, dst); if (IS_ERR(handler_ucast)) { - mlx5_del_flow_rule(handler->rule); + mlx5_del_flow_rules(handler->rule); ft_prio->refcount--; kfree(handler); handler = handler_ucast; @@@ -2046,7 -2046,7 +2046,7 @@@ static struct mlx5_ib_flow_handler *cre return handler_rx;
err_tx: - mlx5_del_flow_rule(handler_rx->rule); + mlx5_del_flow_rules(handler_rx->rule); ft_rx->refcount--; kfree(handler_rx); err: @@@ -2311,14 -2311,14 +2311,14 @@@ static void mlx5_ib_event(struct mlx5_c { struct mlx5_ib_dev *ibdev = (struct mlx5_ib_dev *)context; struct ib_event ibev; - + bool fatal = false; u8 port = 0;
switch (event) { case MLX5_DEV_EVENT_SYS_ERROR: - ibdev->ib_active = false; ibev.event = IB_EVENT_DEVICE_FATAL; mlx5_ib_handle_internal_error(ibdev); + fatal = true; break;
case MLX5_DEV_EVENT_PORT_UP: @@@ -2358,8 -2358,6 +2358,8 @@@ ibev.event = IB_EVENT_CLIENT_REREGISTER; port = (u8)param; break; + default: + return; }
ibev.device = &ibdev->ib_dev; @@@ -2372,6 -2370,9 +2372,9 @@@
if (ibdev->ib_active) ib_dispatch_event(&ibev); + + if (fatal) + ibdev->ib_active = false; }
static void get_ext_port_caps(struct mlx5_ib_dev *dev) @@@ -3117,7 -3118,7 +3120,7 @@@ static void *mlx5_ib_add(struct mlx5_co } err = init_node_data(dev); if (err) - goto err_dealloc; + goto err_free_port;
mutex_init(&dev->flow_db.lock); mutex_init(&dev->cap_mask_mutex); @@@ -3127,7 -3128,7 +3130,7 @@@ if (ll == IB_LINK_LAYER_ETHERNET) { err = mlx5_enable_roce(dev); if (err) - goto err_dealloc; + goto err_free_port; }
err = create_dev_resources(&dev->devr); diff --combined drivers/infiniband/hw/mlx5/mlx5_ib.h index d5d0077,7d68990..854748b --- a/drivers/infiniband/hw/mlx5/mlx5_ib.h +++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h @@@ -153,7 -153,7 +153,7 @@@ struct mlx5_ib_flow_handler struct list_head list; struct ib_flow ibflow; struct mlx5_ib_flow_prio *prio; - struct mlx5_flow_rule *rule; + struct mlx5_flow_handle *rule; };
struct mlx5_ib_flow_db { @@@ -626,6 -626,8 +626,8 @@@ struct mlx5_ib_dev struct mlx5_ib_resources devr; struct mlx5_mr_cache cache; struct timer_list delay_timer; + /* Prevents soft lock on massive reg MRs */ + struct mutex slow_path_mutex; int fill_delay; #ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING struct ib_odp_caps odp_caps; diff --combined drivers/net/ethernet/arc/emac_main.c index 95d8b3e,be865b4..abc9f2a --- a/drivers/net/ethernet/arc/emac_main.c +++ b/drivers/net/ethernet/arc/emac_main.c @@@ -460,7 -460,7 +460,7 @@@ static void arc_emac_set_rx_mode(struc if (ndev->flags & IFF_ALLMULTI) { arc_reg_set(priv, R_LAFL, ~0); arc_reg_set(priv, R_LAFH, ~0); - } else { + } else if (ndev->flags & IFF_MULTICAST) { struct netdev_hw_addr *ha; unsigned int filter[2] = { 0, 0 }; int bit; @@@ -472,6 -472,9 +472,9 @@@
arc_reg_set(priv, R_LAFL, filter[0]); arc_reg_set(priv, R_LAFH, filter[1]); + } else { + arc_reg_set(priv, R_LAFL, 0); + arc_reg_set(priv, R_LAFH, 0); } } } @@@ -633,7 -636,7 +636,7 @@@ static int arc_emac_tx(struct sk_buff * if (unlikely(dma_mapping_error(&ndev->dev, addr))) { stats->tx_dropped++; stats->tx_errors++; - dev_kfree_skb(skb); + dev_kfree_skb_any(skb); return NETDEV_TX_OK; } dma_unmap_addr_set(&priv->tx_buff[*txbd_curr], addr, addr); @@@ -764,8 -767,6 +767,6 @@@ int arc_emac_probe(struct net_device *n ndev->netdev_ops = &arc_emac_netdev_ops; ndev->ethtool_ops = &arc_emac_ethtool_ops; ndev->watchdog_timeo = TX_TIMEOUT; - /* FIXME :: no multicast support yet */ - ndev->flags &= ~IFF_MULTICAST;
priv = netdev_priv(ndev); priv->dev = dev; diff --combined drivers/net/ethernet/broadcom/bnxt/bnxt.c index 7401c90,e18635b..8c7bdbe --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@@ -1499,7 -1499,6 +1499,7 @@@ static int bnxt_async_event_process(str netdev_warn(bp->dev, "Link speed %d no longer supported\n", speed); } + set_bit(BNXT_LINK_SPEED_CHNG_SP_EVENT, &bp->sp_event); /* fall thru */ } case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE: @@@ -3425,7 -3424,13 +3425,7 @@@ static int bnxt_hwrm_vnic_set_rss(struc
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_CFG, -1, -1); if (set_rss) { - vnic->hash_type = VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4 | - VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4 | - VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6 | - VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6; - - req.hash_type = cpu_to_le32(vnic->hash_type); - + req.hash_type = cpu_to_le32(bp->rss_hash_cfg); if (vnic->flags & BNXT_VNIC_RSS_FLAG) { if (BNXT_CHIP_TYPE_NITRO_A0(bp)) max_rings = bp->rx_nr_rings - 1; @@@ -4929,6 -4934,10 +4929,10 @@@ static void bnxt_del_napi(struct bnxt * napi_hash_del(&bnapi->napi); netif_napi_del(&bnapi->napi); } + /* We called napi_hash_del() before netif_napi_del(), we need + * to respect an RCU grace period before freeing napi structures. + */ + synchronize_net(); }
static void bnxt_init_napi(struct bnxt *bp) @@@ -4949,6 -4958,7 +4953,6 @@@ bnapi = bp->bnapi[cp_nr_rings]; netif_napi_add(bp->dev, &bnapi->napi, bnxt_poll_nitroa0, 64); - napi_hash_add(&bnapi->napi); } } else { bnapi = bp->bnapi[0]; @@@ -5090,7 -5100,6 +5094,7 @@@ static int bnxt_update_link(struct bnx struct hwrm_port_phy_qcfg_input req = {0}; struct hwrm_port_phy_qcfg_output *resp = bp->hwrm_cmd_resp_addr; u8 link_up = link_info->link_up; + u16 diff;
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_QCFG, -1, -1);
@@@ -5178,23 -5187,6 +5182,23 @@@ link_info->link_up = 0; } mutex_unlock(&bp->hwrm_cmd_lock); + + diff = link_info->support_auto_speeds ^ link_info->advertising; + if ((link_info->support_auto_speeds | diff) != + link_info->support_auto_speeds) { + /* An advertised speed is no longer supported, so we need to + * update the advertisement settings. See bnxt_reset() for + * comments about the rtnl_lock() sequence below. + */ + clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state); + rtnl_lock(); + link_info->advertising = link_info->support_auto_speeds; + if (test_bit(BNXT_STATE_OPEN, &bp->state) && + (link_info->autoneg & BNXT_AUTONEG_SPEED)) + bnxt_hwrm_set_link_setting(bp, true, false); + set_bit(BNXT_STATE_IN_SP_TASK, &bp->state); + rtnl_unlock(); + } return 0; }
@@@ -5359,7 -5351,7 +5363,7 @@@ static int bnxt_hwrm_shutdown_link(stru return 0;
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_CFG, -1, -1); - req.flags = cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE_LINK_DOWN); + req.flags = cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE_LINK_DWN); return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); }
@@@ -5422,12 -5414,6 +5426,12 @@@ static int bnxt_update_phy_setting(stru update_link = true; }
+ /* The last close may have shutdown the link, so need to call + * PHY_CFG to bring it back up. + */ + if (!netif_carrier_ok(bp->dev)) + update_link = true; + if (!bnxt_eee_config_ok(bp)) update_eee = true;
@@@ -6121,10 -6107,6 +6125,10 @@@ static void bnxt_sp_task(struct work_st if (test_and_clear_bit(BNXT_RX_NTP_FLTR_SP_EVENT, &bp->sp_event)) bnxt_cfg_ntp_filters(bp); if (test_and_clear_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event)) { + if (test_and_clear_bit(BNXT_LINK_SPEED_CHNG_SP_EVENT, + &bp->sp_event)) + bnxt_hwrm_phy_qcaps(bp); + rc = bnxt_update_link(bp, true); if (rc) netdev_err(bp->dev, "SP task can't update link (rc: %x)\n", @@@ -6312,6 -6294,9 +6316,6 @@@ static int bnxt_change_mtu(struct net_d { struct bnxt *bp = netdev_priv(dev);
- if (new_mtu < 60 || new_mtu > 9500) - return -EINVAL; - if (netif_running(dev)) bnxt_close_nic(bp, false, false);
@@@ -6890,10 -6875,6 +6894,10 @@@ static int bnxt_init_one(struct pci_de dev->features |= dev->hw_features | NETIF_F_HIGHDMA; dev->priv_flags |= IFF_UNICAST_FLT;
+ /* MTU range: 60 - 9500 */ + dev->min_mtu = ETH_ZLEN; + dev->max_mtu = 9500; + #ifdef CONFIG_BNXT_SRIOV init_waitqueue_head(&bp->sriov_cfg_wait); #endif @@@ -6934,19 -6915,6 +6938,19 @@@ #endif bnxt_set_dflt_rings(bp);
+ /* Default RSS hash cfg. */ + bp->rss_hash_cfg = VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4 | + VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4 | + VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6 | + VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6; + if (!BNXT_CHIP_NUM_57X0X(bp->chip_num) && + !BNXT_CHIP_TYPE_NITRO_A0(bp) && + bp->hwrm_spec_code >= 0x10501) { + bp->flags |= BNXT_FLAG_UDP_RSS_CAP; + bp->rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4 | + VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6; + } + if (BNXT_PF(bp) && !BNXT_CHIP_TYPE_NITRO_A0(bp)) { dev->hw_features |= NETIF_F_NTUPLE; if (bnxt_rfs_capable(bp)) { diff --combined drivers/net/ethernet/cadence/macb.c index 654b5bf,533653b..0e489bb --- a/drivers/net/ethernet/cadence/macb.c +++ b/drivers/net/ethernet/cadence/macb.c @@@ -32,28 -32,19 +32,28 @@@ #include <linux/of_gpio.h> #include <linux/of_mdio.h> #include <linux/of_net.h> - +#include <linux/ip.h> +#include <linux/udp.h> +#include <linux/tcp.h> #include "macb.h"
#define MACB_RX_BUFFER_SIZE 128 #define RX_BUFFER_MULTIPLE 64 /* bytes */ -#define RX_RING_SIZE 512 /* must be power of 2 */ -#define RX_RING_BYTES (sizeof(struct macb_dma_desc) * RX_RING_SIZE)
-#define TX_RING_SIZE 128 /* must be power of 2 */ -#define TX_RING_BYTES (sizeof(struct macb_dma_desc) * TX_RING_SIZE) +#define DEFAULT_RX_RING_SIZE 512 /* must be power of 2 */ +#define MIN_RX_RING_SIZE 64 +#define MAX_RX_RING_SIZE 8192 +#define RX_RING_BYTES(bp) (sizeof(struct macb_dma_desc) \ + * (bp)->rx_ring_size) + +#define DEFAULT_TX_RING_SIZE 512 /* must be power of 2 */ +#define MIN_TX_RING_SIZE 64 +#define MAX_TX_RING_SIZE 4096 +#define TX_RING_BYTES(bp) (sizeof(struct macb_dma_desc) \ + * (bp)->tx_ring_size)
/* level of occupied TX descriptors under which we wake up TX process */ -#define MACB_TX_WAKEUP_THRESH (3 * TX_RING_SIZE / 4) +#define MACB_TX_WAKEUP_THRESH(bp) (3 * (bp)->tx_ring_size / 4)
#define MACB_RX_INT_FLAGS (MACB_BIT(RCOMP) | MACB_BIT(RXUBR) \ | MACB_BIT(ISR_ROVR)) @@@ -62,13 -53,10 +62,13 @@@ | MACB_BIT(TXERR)) #define MACB_TX_INT_FLAGS (MACB_TX_ERR_FLAGS | MACB_BIT(TCOMP))
-#define MACB_MAX_TX_LEN ((unsigned int)((1 << MACB_TX_FRMLEN_SIZE) - 1)) -#define GEM_MAX_TX_LEN ((unsigned int)((1 << GEM_TX_FRMLEN_SIZE) - 1)) +/* Max length of transmit frame must be a multiple of 8 bytes */ +#define MACB_TX_LEN_ALIGN 8 +#define MACB_MAX_TX_LEN ((unsigned int)((1 << MACB_TX_FRMLEN_SIZE) - 1) & ~((unsigned int)(MACB_TX_LEN_ALIGN - 1))) +#define GEM_MAX_TX_LEN ((unsigned int)((1 << GEM_TX_FRMLEN_SIZE) - 1) & ~((unsigned int)(MACB_TX_LEN_ALIGN - 1)))
-#define GEM_MTU_MIN_SIZE 68 +#define GEM_MTU_MIN_SIZE ETH_MIN_MTU +#define MACB_NETIF_LSO (NETIF_F_TSO | NETIF_F_UFO)
#define MACB_WOL_HAS_MAGIC_PACKET (0x1 << 0) #define MACB_WOL_ENABLED (0x1 << 1) @@@ -79,47 -67,45 +79,47 @@@ #define MACB_HALT_TIMEOUT 1230
/* Ring buffer accessors */ -static unsigned int macb_tx_ring_wrap(unsigned int index) +static unsigned int macb_tx_ring_wrap(struct macb *bp, unsigned int index) { - return index & (TX_RING_SIZE - 1); + return index & (bp->tx_ring_size - 1); }
static struct macb_dma_desc *macb_tx_desc(struct macb_queue *queue, unsigned int index) { - return &queue->tx_ring[macb_tx_ring_wrap(index)]; + return &queue->tx_ring[macb_tx_ring_wrap(queue->bp, index)]; }
static struct macb_tx_skb *macb_tx_skb(struct macb_queue *queue, unsigned int index) { - return &queue->tx_skb[macb_tx_ring_wrap(index)]; + return &queue->tx_skb[macb_tx_ring_wrap(queue->bp, index)]; }
static dma_addr_t macb_tx_dma(struct macb_queue *queue, unsigned int index) { dma_addr_t offset;
- offset = macb_tx_ring_wrap(index) * sizeof(struct macb_dma_desc); + offset = macb_tx_ring_wrap(queue->bp, index) * + sizeof(struct macb_dma_desc);
return queue->tx_ring_dma + offset; }
-static unsigned int macb_rx_ring_wrap(unsigned int index) +static unsigned int macb_rx_ring_wrap(struct macb *bp, unsigned int index) { - return index & (RX_RING_SIZE - 1); + return index & (bp->rx_ring_size - 1); }
static struct macb_dma_desc *macb_rx_desc(struct macb *bp, unsigned int index) { - return &bp->rx_ring[macb_rx_ring_wrap(index)]; + return &bp->rx_ring[macb_rx_ring_wrap(bp, index)]; }
static void *macb_rx_buffer(struct macb *bp, unsigned int index) { - return bp->rx_buffers + bp->rx_buffer_size * macb_rx_ring_wrap(index); + return bp->rx_buffers + bp->rx_buffer_size * + macb_rx_ring_wrap(bp, index); }
/* I/O accessors */ @@@ -622,8 -608,7 +622,8 @@@ static void macb_tx_error_task(struct w */ if (!(ctrl & MACB_BIT(TX_BUF_EXHAUSTED))) { netdev_vdbg(bp->dev, "txerr skb %u (data %p) TX complete\n", - macb_tx_ring_wrap(tail), skb->data); + macb_tx_ring_wrap(bp, tail), + skb->data); bp->stats.tx_packets++; bp->stats.tx_bytes += skb->len; } @@@ -715,8 -700,7 +715,8 @@@ static void macb_tx_interrupt(struct ma /* First, update TX stats if needed */ if (skb) { netdev_vdbg(bp->dev, "skb %u (data %p) TX complete\n", - macb_tx_ring_wrap(tail), skb->data); + macb_tx_ring_wrap(bp, tail), + skb->data); bp->stats.tx_packets++; bp->stats.tx_bytes += skb->len; } @@@ -736,7 -720,7 +736,7 @@@ queue->tx_tail = tail; if (__netif_subqueue_stopped(bp->dev, queue_index) && CIRC_CNT(queue->tx_head, queue->tx_tail, - TX_RING_SIZE) <= MACB_TX_WAKEUP_THRESH) + bp->tx_ring_size) <= MACB_TX_WAKEUP_THRESH(bp)) netif_wake_subqueue(bp->dev, queue_index); }
@@@ -747,8 -731,8 +747,8 @@@ static void gem_rx_refill(struct macb * dma_addr_t paddr;
while (CIRC_SPACE(bp->rx_prepared_head, bp->rx_tail, - RX_RING_SIZE) > 0) { - entry = macb_rx_ring_wrap(bp->rx_prepared_head); + bp->rx_ring_size) > 0) { + entry = macb_rx_ring_wrap(bp, bp->rx_prepared_head);
/* Make hw descriptor updates visible to CPU */ rmb(); @@@ -775,7 -759,7 +775,7 @@@
bp->rx_skbuff[entry] = skb;
- if (entry == RX_RING_SIZE - 1) + if (entry == bp->rx_ring_size - 1) paddr |= MACB_BIT(RX_WRAP); macb_set_addr(&(bp->rx_ring[entry]), paddr); bp->rx_ring[entry].ctrl = 0; @@@ -829,7 -813,7 +829,7 @@@ static int gem_rx(struct macb *bp, int dma_addr_t addr; bool rxused;
- entry = macb_rx_ring_wrap(bp->rx_tail); + entry = macb_rx_ring_wrap(bp, bp->rx_tail); desc = &bp->rx_ring[entry];
/* Make hw descriptor updates visible to CPU */ @@@ -911,8 -895,8 +911,8 @@@ static int macb_rx_frame(struct macb *b len = desc->ctrl & bp->rx_frm_len_mask;
netdev_vdbg(bp->dev, "macb_rx_frame frags %u - %u (len %u)\n", - macb_rx_ring_wrap(first_frag), - macb_rx_ring_wrap(last_frag), len); + macb_rx_ring_wrap(bp, first_frag), + macb_rx_ring_wrap(bp, last_frag), len);
/* The ethernet header starts NET_IP_ALIGN bytes into the * first buffer. Since the header is 14 bytes, this makes the @@@ -985,12 -969,12 +985,12 @@@ static inline void macb_init_rx_ring(st int i;
addr = bp->rx_buffers_dma; - for (i = 0; i < RX_RING_SIZE; i++) { + for (i = 0; i < bp->rx_ring_size; i++) { bp->rx_ring[i].addr = addr; bp->rx_ring[i].ctrl = 0; addr += bp->rx_buffer_size; } - bp->rx_ring[RX_RING_SIZE - 1].addr |= MACB_BIT(RX_WRAP); + bp->rx_ring[bp->rx_ring_size - 1].addr |= MACB_BIT(RX_WRAP); }
static int macb_rx(struct macb *bp, int budget) @@@ -1228,8 -1212,7 +1228,8 @@@ static void macb_poll_controller(struc
static unsigned int macb_tx_map(struct macb *bp, struct macb_queue *queue, - struct sk_buff *skb) + struct sk_buff *skb, + unsigned int hdrlen) { dma_addr_t mapping; unsigned int len, entry, i, tx_head = queue->tx_head; @@@ -1237,28 -1220,15 +1237,28 @@@ struct macb_dma_desc *desc; unsigned int offset, size, count = 0; unsigned int f, nr_frags = skb_shinfo(skb)->nr_frags; - unsigned int eof = 1; - u32 ctrl; + unsigned int eof = 1, mss_mfs = 0; + u32 ctrl, lso_ctrl = 0, seq_ctrl = 0; + + /* LSO */ + if (skb_shinfo(skb)->gso_size != 0) { + if (ip_hdr(skb)->protocol == IPPROTO_UDP) + /* UDP - UFO */ + lso_ctrl = MACB_LSO_UFO_ENABLE; + else + /* TCP - TSO */ + lso_ctrl = MACB_LSO_TSO_ENABLE; + }
/* First, map non-paged data */ len = skb_headlen(skb); + + /* first buffer length */ + size = hdrlen; + offset = 0; while (len) { - size = min(len, bp->max_tx_length); - entry = macb_tx_ring_wrap(tx_head); + entry = macb_tx_ring_wrap(bp, tx_head); tx_skb = &queue->tx_skb[entry];
mapping = dma_map_single(&bp->pdev->dev, @@@ -1277,8 -1247,6 +1277,8 @@@ offset += size; count++; tx_head++; + + size = min(len, bp->max_tx_length); }
/* Then, map paged data from fragments */ @@@ -1289,7 -1257,7 +1289,7 @@@ offset = 0; while (len) { size = min(len, bp->max_tx_length); - entry = macb_tx_ring_wrap(tx_head); + entry = macb_tx_ring_wrap(bp, tx_head); tx_skb = &queue->tx_skb[entry];
mapping = skb_frag_dma_map(&bp->pdev->dev, frag, @@@ -1327,29 -1295,14 +1327,29 @@@ * to set the end of TX queue */ i = tx_head; - entry = macb_tx_ring_wrap(i); + entry = macb_tx_ring_wrap(bp, i); ctrl = MACB_BIT(TX_USED); desc = &queue->tx_ring[entry]; desc->ctrl = ctrl;
+ if (lso_ctrl) { + if (lso_ctrl == MACB_LSO_UFO_ENABLE) + /* include header and FCS in value given to h/w */ + mss_mfs = skb_shinfo(skb)->gso_size + + skb_transport_offset(skb) + + ETH_FCS_LEN; + else /* TSO */ { + mss_mfs = skb_shinfo(skb)->gso_size; + /* TCP Sequence Number Source Select + * can be set only for TSO + */ + seq_ctrl = 0; + } + } + do { i--; - entry = macb_tx_ring_wrap(i); + entry = macb_tx_ring_wrap(bp, i); tx_skb = &queue->tx_skb[entry]; desc = &queue->tx_ring[entry];
@@@ -1358,19 -1311,9 +1358,19 @@@ ctrl |= MACB_BIT(TX_LAST); eof = 0; } - if (unlikely(entry == (TX_RING_SIZE - 1))) + if (unlikely(entry == (bp->tx_ring_size - 1))) ctrl |= MACB_BIT(TX_WRAP);
+ /* First descriptor is header descriptor */ + if (i == queue->tx_head) { + ctrl |= MACB_BF(TX_LSO, lso_ctrl); + ctrl |= MACB_BF(TX_TCP_SEQ_SRC, seq_ctrl); + } else + /* Only set MSS/MFS on payload descriptors + * (second or later descriptor) + */ + ctrl |= MACB_BF(MSS_MFS, mss_mfs); + /* Set TX buffer descriptor */ macb_set_addr(desc, tx_skb->mapping); /* desc->addr must be visible to hardware before clearing @@@ -1396,43 -1339,6 +1396,43 @@@ dma_error return 0; }
+static netdev_features_t macb_features_check(struct sk_buff *skb, + struct net_device *dev, + netdev_features_t features) +{ + unsigned int nr_frags, f; + unsigned int hdrlen; + + /* Validate LSO compatibility */ + + /* there is only one buffer */ + if (!skb_is_nonlinear(skb)) + return features; + + /* length of header */ + hdrlen = skb_transport_offset(skb); + if (ip_hdr(skb)->protocol == IPPROTO_TCP) + hdrlen += tcp_hdrlen(skb); + + /* For LSO: + * When software supplies two or more payload buffers all payload buffers + * apart from the last must be a multiple of 8 bytes in size. + */ + if (!IS_ALIGNED(skb_headlen(skb) - hdrlen, MACB_TX_LEN_ALIGN)) + return features & ~MACB_NETIF_LSO; + + nr_frags = skb_shinfo(skb)->nr_frags; + /* No need to check last fragment */ + nr_frags--; + for (f = 0; f < nr_frags; f++) { + const skb_frag_t *frag = &skb_shinfo(skb)->frags[f]; + + if (!IS_ALIGNED(skb_frag_size(frag), MACB_TX_LEN_ALIGN)) + return features & ~MACB_NETIF_LSO; + } + return features; +} + static inline int macb_clear_csum(struct sk_buff *skb) { /* no change for packets without checksum offloading */ @@@ -1457,28 -1363,7 +1457,28 @@@ static int macb_start_xmit(struct sk_bu struct macb *bp = netdev_priv(dev); struct macb_queue *queue = &bp->queues[queue_index]; unsigned long flags; - unsigned int count, nr_frags, frag_size, f; + unsigned int desc_cnt, nr_frags, frag_size, f; + unsigned int hdrlen; + bool is_lso, is_udp = 0; + + is_lso = (skb_shinfo(skb)->gso_size != 0); + + if (is_lso) { + is_udp = !!(ip_hdr(skb)->protocol == IPPROTO_UDP); + + /* length of headers */ + if (is_udp) + /* only queue eth + ip headers separately for UDP */ + hdrlen = skb_transport_offset(skb); + else + hdrlen = skb_transport_offset(skb) + tcp_hdrlen(skb); + if (skb_headlen(skb) < hdrlen) { + netdev_err(bp->dev, "Error - LSO headers fragmented!!!\n"); + /* if this is required, would need to copy to single buffer */ + return NETDEV_TX_BUSY; + } + } else + hdrlen = min(skb_headlen(skb), bp->max_tx_length);
#if defined(DEBUG) && defined(VERBOSE_DEBUG) netdev_vdbg(bp->dev, @@@ -1493,22 -1378,17 +1493,22 @@@ * socket buffer: skb fragments of jumbo frames may need to be * split into many buffer descriptors. */ - count = DIV_ROUND_UP(skb_headlen(skb), bp->max_tx_length); + if (is_lso && (skb_headlen(skb) > hdrlen)) + /* extra header descriptor if also payload in first buffer */ + desc_cnt = DIV_ROUND_UP((skb_headlen(skb) - hdrlen), bp->max_tx_length) + 1; + else + desc_cnt = DIV_ROUND_UP(skb_headlen(skb), bp->max_tx_length); nr_frags = skb_shinfo(skb)->nr_frags; for (f = 0; f < nr_frags; f++) { frag_size = skb_frag_size(&skb_shinfo(skb)->frags[f]); - count += DIV_ROUND_UP(frag_size, bp->max_tx_length); + desc_cnt += DIV_ROUND_UP(frag_size, bp->max_tx_length); }
spin_lock_irqsave(&bp->lock, flags);
/* This is a hard error, log it. */ - if (CIRC_SPACE(queue->tx_head, queue->tx_tail, TX_RING_SIZE) < count) { + if (CIRC_SPACE(queue->tx_head, queue->tx_tail, + bp->tx_ring_size) < desc_cnt) { netif_stop_subqueue(dev, queue_index); spin_unlock_irqrestore(&bp->lock, flags); netdev_dbg(bp->dev, "tx_head = %u, tx_tail = %u\n", @@@ -1522,7 -1402,7 +1522,7 @@@ }
/* Map socket buffer for DMA transfer */ - if (!macb_tx_map(bp, queue, skb)) { + if (!macb_tx_map(bp, queue, skb, hdrlen)) { dev_kfree_skb_any(skb); goto unlock; } @@@ -1534,7 -1414,7 +1534,7 @@@
macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(TSTART));
- if (CIRC_SPACE(queue->tx_head, queue->tx_tail, TX_RING_SIZE) < 1) + if (CIRC_SPACE(queue->tx_head, queue->tx_tail, bp->tx_ring_size) < 1) netif_stop_subqueue(dev, queue_index);
unlock: @@@ -1573,7 -1453,7 +1573,7 @@@ static void gem_free_rx_buffers(struct if (!bp->rx_skbuff) return;
- for (i = 0; i < RX_RING_SIZE; i++) { + for (i = 0; i < bp->rx_ring_size; i++) { skb = bp->rx_skbuff[i];
if (!skb) @@@ -1598,7 -1478,7 +1598,7 @@@ static void macb_free_rx_buffers(struc { if (bp->rx_buffers) { dma_free_coherent(&bp->pdev->dev, - RX_RING_SIZE * bp->rx_buffer_size, + bp->rx_ring_size * bp->rx_buffer_size, bp->rx_buffers, bp->rx_buffers_dma); bp->rx_buffers = NULL; } @@@ -1611,7 -1491,7 +1611,7 @@@ static void macb_free_consistent(struc
bp->macbgem_ops.mog_free_rx_buffers(bp); if (bp->rx_ring) { - dma_free_coherent(&bp->pdev->dev, RX_RING_BYTES, + dma_free_coherent(&bp->pdev->dev, RX_RING_BYTES(bp), bp->rx_ring, bp->rx_ring_dma); bp->rx_ring = NULL; } @@@ -1620,7 -1500,7 +1620,7 @@@ kfree(queue->tx_skb); queue->tx_skb = NULL; if (queue->tx_ring) { - dma_free_coherent(&bp->pdev->dev, TX_RING_BYTES, + dma_free_coherent(&bp->pdev->dev, TX_RING_BYTES(bp), queue->tx_ring, queue->tx_ring_dma); queue->tx_ring = NULL; } @@@ -1631,14 -1511,14 +1631,14 @@@ static int gem_alloc_rx_buffers(struct { int size;
- size = RX_RING_SIZE * sizeof(struct sk_buff *); + size = bp->rx_ring_size * sizeof(struct sk_buff *); bp->rx_skbuff = kzalloc(size, GFP_KERNEL); if (!bp->rx_skbuff) return -ENOMEM; - - netdev_dbg(bp->dev, - "Allocated %d RX struct sk_buff entries at %p\n", - RX_RING_SIZE, bp->rx_skbuff); + else + netdev_dbg(bp->dev, + "Allocated %d RX struct sk_buff entries at %p\n", + bp->rx_ring_size, bp->rx_skbuff); return 0; }
@@@ -1646,7 -1526,7 +1646,7 @@@ static int macb_alloc_rx_buffers(struc { int size;
- size = RX_RING_SIZE * bp->rx_buffer_size; + size = bp->rx_ring_size * bp->rx_buffer_size; bp->rx_buffers = dma_alloc_coherent(&bp->pdev->dev, size, &bp->rx_buffers_dma, GFP_KERNEL); if (!bp->rx_buffers) @@@ -1665,7 -1545,7 +1665,7 @@@ static int macb_alloc_consistent(struc int size;
for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) { - size = TX_RING_BYTES; + size = TX_RING_BYTES(bp); queue->tx_ring = dma_alloc_coherent(&bp->pdev->dev, size, &queue->tx_ring_dma, GFP_KERNEL); @@@ -1676,13 -1556,13 +1676,13 @@@ q, size, (unsigned long)queue->tx_ring_dma, queue->tx_ring);
- size = TX_RING_SIZE * sizeof(struct macb_tx_skb); + size = bp->tx_ring_size * sizeof(struct macb_tx_skb); queue->tx_skb = kmalloc(size, GFP_KERNEL); if (!queue->tx_skb) goto out_err; }
- size = RX_RING_BYTES; + size = RX_RING_BYTES(bp); bp->rx_ring = dma_alloc_coherent(&bp->pdev->dev, size, &bp->rx_ring_dma, GFP_KERNEL); if (!bp->rx_ring) @@@ -1708,11 -1588,11 +1708,11 @@@ static void gem_init_rings(struct macb int i;
for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) { - for (i = 0; i < TX_RING_SIZE; i++) { - macb_set_addr(&(queue->tx_ring[i]), 0); + for (i = 0; i < bp->tx_ring_size; i++) { + queue->tx_ring[i].addr = 0; queue->tx_ring[i].ctrl = MACB_BIT(TX_USED); } - queue->tx_ring[TX_RING_SIZE - 1].ctrl |= MACB_BIT(TX_WRAP); + queue->tx_ring[bp->tx_ring_size - 1].ctrl |= MACB_BIT(TX_WRAP); queue->tx_head = 0; queue->tx_tail = 0; } @@@ -1729,13 -1609,13 +1729,13 @@@ static void macb_init_rings(struct mac
macb_init_rx_ring(bp);
- for (i = 0; i < TX_RING_SIZE; i++) { + for (i = 0; i < bp->tx_ring_size; i++) { bp->queues[0].tx_ring[i].addr = 0; bp->queues[0].tx_ring[i].ctrl = MACB_BIT(TX_USED); } bp->queues[0].tx_head = 0; bp->queues[0].tx_tail = 0; - bp->queues[0].tx_ring[TX_RING_SIZE - 1].ctrl |= MACB_BIT(TX_WRAP); + bp->queues[0].tx_ring[bp->tx_ring_size - 1].ctrl |= MACB_BIT(TX_WRAP);
bp->rx_tail = 0; } @@@ -2106,9 -1986,19 +2106,9 @@@ static int macb_close(struct net_devic
static int macb_change_mtu(struct net_device *dev, int new_mtu) { - struct macb *bp = netdev_priv(dev); - u32 max_mtu; - if (netif_running(dev)) return -EBUSY;
- max_mtu = ETH_DATA_LEN; - if (bp->caps & MACB_CAPS_JUMBO) - max_mtu = gem_readl(bp, JML) - ETH_HLEN - ETH_FCS_LEN; - - if ((new_mtu > max_mtu) || (new_mtu < GEM_MTU_MIN_SIZE)) - return -EINVAL; - dev->mtu = new_mtu;
return 0; @@@ -2268,8 -2158,8 +2268,8 @@@ static void macb_get_regs(struct net_de regs->version = (macb_readl(bp, MID) & ((1 << MACB_REV_SIZE) - 1)) | MACB_GREGS_VERSION;
- tail = macb_tx_ring_wrap(bp->queues[0].tx_tail); - head = macb_tx_ring_wrap(bp->queues[0].tx_head); + tail = macb_tx_ring_wrap(bp, bp->queues[0].tx_tail); + head = macb_tx_ring_wrap(bp, bp->queues[0].tx_head);
regs_buff[0] = macb_readl(bp, NCR); regs_buff[1] = macb_or_gem_readl(bp, NCFGR); @@@ -2324,56 -2214,6 +2324,56 @@@ static int macb_set_wol(struct net_devi return 0; }
+static void macb_get_ringparam(struct net_device *netdev, + struct ethtool_ringparam *ring) +{ + struct macb *bp = netdev_priv(netdev); + + ring->rx_max_pending = MAX_RX_RING_SIZE; + ring->tx_max_pending = MAX_TX_RING_SIZE; + + ring->rx_pending = bp->rx_ring_size; + ring->tx_pending = bp->tx_ring_size; +} + +static int macb_set_ringparam(struct net_device *netdev, + struct ethtool_ringparam *ring) +{ + struct macb *bp = netdev_priv(netdev); + u32 new_rx_size, new_tx_size; + unsigned int reset = 0; + + if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending)) + return -EINVAL; + + new_rx_size = clamp_t(u32, ring->rx_pending, + MIN_RX_RING_SIZE, MAX_RX_RING_SIZE); + new_rx_size = roundup_pow_of_two(new_rx_size); + + new_tx_size = clamp_t(u32, ring->tx_pending, + MIN_TX_RING_SIZE, MAX_TX_RING_SIZE); + new_tx_size = roundup_pow_of_two(new_tx_size); + + if ((new_tx_size == bp->tx_ring_size) && + (new_rx_size == bp->rx_ring_size)) { + /* nothing to do */ + return 0; + } + + if (netif_running(bp->dev)) { + reset = 1; + macb_close(bp->dev); + } + + bp->rx_ring_size = new_rx_size; + bp->tx_ring_size = new_tx_size; + + if (reset) + macb_open(bp->dev); + + return 0; +} + static const struct ethtool_ops macb_ethtool_ops = { .get_regs_len = macb_get_regs_len, .get_regs = macb_get_regs, @@@ -2383,8 -2223,6 +2383,8 @@@ .set_wol = macb_set_wol, .get_link_ksettings = phy_ethtool_get_link_ksettings, .set_link_ksettings = phy_ethtool_set_link_ksettings, + .get_ringparam = macb_get_ringparam, + .set_ringparam = macb_set_ringparam, };
static const struct ethtool_ops gem_ethtool_ops = { @@@ -2397,8 -2235,6 +2397,8 @@@ .get_sset_count = gem_get_sset_count, .get_link_ksettings = phy_ethtool_get_link_ksettings, .set_link_ksettings = phy_ethtool_set_link_ksettings, + .get_ringparam = macb_get_ringparam, + .set_ringparam = macb_set_ringparam, };
static int macb_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) @@@ -2462,7 -2298,6 +2462,7 @@@ static const struct net_device_ops macb .ndo_poll_controller = macb_poll_controller, #endif .ndo_set_features = macb_set_features, + .ndo_features_check = macb_features_check, };
/* Configure peripheral capabilities according to device tree @@@ -2594,9 -2429,6 +2594,9 @@@ static int macb_init(struct platform_de int err; u32 val;
+ bp->tx_ring_size = DEFAULT_TX_RING_SIZE; + bp->rx_ring_size = DEFAULT_RX_RING_SIZE; + /* set the queue register mapping once for all: queue0 has a special * register mapping but we don't want to test the queue index then * compute the corresponding register offset at run time. @@@ -2669,11 -2501,6 +2669,11 @@@
/* Set features */ dev->hw_features = NETIF_F_SG; + + /* Check LSO capability */ + if (GEM_BFEXT(PBUF_LSO, gem_readl(bp, DCFG6))) + dev->hw_features |= MACB_NETIF_LSO; + /* Checksum offload is only available on gem with packet buffer */ if (macb_is_gem(bp) && !(bp->caps & MACB_CAPS_FIFO_MODE)) dev->hw_features |= NETIF_F_HW_CSUM | NETIF_F_RXCSUM; @@@ -2846,6 -2673,12 +2846,12 @@@ static int at91ether_start_xmit(struct lp->skb_length = skb->len; lp->skb_physaddr = dma_map_single(NULL, skb->data, skb->len, DMA_TO_DEVICE); + if (dma_mapping_error(NULL, lp->skb_physaddr)) { + dev_kfree_skb_any(skb); + dev->stats.tx_dropped++; + netdev_err(dev, "%s: DMA mapping error\n", __func__); + return NETDEV_TX_OK; + }
/* Set address of the data in the Transmit Address register */ macb_writel(lp, TAR, lp->skb_physaddr); @@@ -2966,6 -2799,7 +2972,6 @@@ static const struct net_device_ops at91 .ndo_set_mac_address = eth_mac_addr, .ndo_do_ioctl = macb_ioctl, .ndo_validate_addr = eth_validate_addr, - .ndo_change_mtu = eth_change_mtu, #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_poll_controller = at91ether_poll_controller, #endif @@@ -3200,13 -3034,6 +3206,13 @@@ static int macb_probe(struct platform_d goto err_out_free_netdev; }
+ /* MTU range: 68 - 1500 or 10240 */ + dev->min_mtu = GEM_MTU_MIN_SIZE; + if (bp->caps & MACB_CAPS_JUMBO) + dev->max_mtu = gem_readl(bp, JML) - ETH_HLEN - ETH_FCS_LEN; + else + dev->max_mtu = ETH_DATA_LEN; + mac = of_get_mac_address(np); if (mac) ether_addr_copy(bp->dev->dev_addr, mac); diff --combined drivers/net/ethernet/cavium/thunder/nicvf_main.c index b192712,8a37012..7c2c373 --- a/drivers/net/ethernet/cavium/thunder/nicvf_main.c +++ b/drivers/net/ethernet/cavium/thunder/nicvf_main.c @@@ -69,25 -69,6 +69,6 @@@ static inline u8 nicvf_netdev_qidx(stru return qidx; }
- static inline void nicvf_set_rx_frame_cnt(struct nicvf *nic, - struct sk_buff *skb) - { - if (skb->len <= 64) - nic->drv_stats.rx_frames_64++; - else if (skb->len <= 127) - nic->drv_stats.rx_frames_127++; - else if (skb->len <= 255) - nic->drv_stats.rx_frames_255++; - else if (skb->len <= 511) - nic->drv_stats.rx_frames_511++; - else if (skb->len <= 1023) - nic->drv_stats.rx_frames_1023++; - else if (skb->len <= 1518) - nic->drv_stats.rx_frames_1518++; - else - nic->drv_stats.rx_frames_jumbo++; - } - /* The Cavium ThunderX network controller can *only* be found in SoCs * containing the ThunderX ARM64 CPU implementation. All accesses to the device * registers on this platform are implicitly strongly ordered with respect @@@ -492,9 -473,6 +473,6 @@@ int nicvf_set_real_num_queues(struct ne static int nicvf_init_resources(struct nicvf *nic) { int err; - union nic_mbx mbx = {}; - - mbx.msg.msg = NIC_MBOX_MSG_CFG_DONE;
/* Enable Qset */ nicvf_qset_config(nic, true); @@@ -507,14 -485,10 +485,10 @@@ return err; }
- /* Send VF config done msg to PF */ - nicvf_write_to_mbx(nic, &mbx); - return 0; }
static void nicvf_snd_pkt_handler(struct net_device *netdev, - struct cmp_queue *cq, struct cqe_send_t *cqe_tx, int cqe_type, int budget, unsigned int *tx_pkts, unsigned int *tx_bytes) @@@ -536,7 -510,7 +510,7 @@@ __func__, cqe_tx->sq_qs, cqe_tx->sq_idx, cqe_tx->sqe_ptr, hdr->subdesc_cnt);
- nicvf_check_cqe_tx_errs(nic, cq, cqe_tx); + nicvf_check_cqe_tx_errs(nic, cqe_tx); skb = (struct sk_buff *)sq->skbuff[cqe_tx->sqe_ptr]; if (skb) { /* Check for dummy descriptor used for HW TSO offload on 88xx */ @@@ -630,8 -604,6 +604,6 @@@ static void nicvf_rcv_pkt_handler(struc return; }
- nicvf_set_rx_frame_cnt(nic, skb); - nicvf_set_rxhash(netdev, cqe_rx, skb);
skb_record_rx_queue(skb, rq_idx); @@@ -703,7 -675,7 +675,7 @@@ loop work_done++; break; case CQE_TYPE_SEND: - nicvf_snd_pkt_handler(netdev, cq, + nicvf_snd_pkt_handler(netdev, (void *)cq_desc, CQE_TYPE_SEND, budget, &tx_pkts, &tx_bytes); tx_done++; @@@ -740,7 -712,7 +712,7 @@@ done nic = nic->pnicvf; if (netif_tx_queue_stopped(txq) && netif_carrier_ok(netdev)) { netif_tx_start_queue(txq); - nic->drv_stats.txq_wake++; + this_cpu_inc(nic->drv_stats->txq_wake); if (netif_msg_tx_err(nic)) netdev_warn(netdev, "%s: Transmit queue wakeup SQ%d\n", @@@ -1084,7 -1056,7 +1056,7 @@@ static netdev_tx_t nicvf_xmit(struct sk
if (!netif_tx_queue_stopped(txq) && !nicvf_sq_append_skb(nic, skb)) { netif_tx_stop_queue(txq); - nic->drv_stats.txq_stop++; + this_cpu_inc(nic->drv_stats->txq_stop); if (netif_msg_tx_err(nic)) netdev_warn(netdev, "%s: Transmit ring full, stopping SQ%d\n", @@@ -1189,14 -1161,24 +1161,24 @@@ int nicvf_stop(struct net_device *netde return 0; }
+ static int nicvf_update_hw_max_frs(struct nicvf *nic, int mtu) + { + union nic_mbx mbx = {}; + + mbx.frs.msg = NIC_MBOX_MSG_SET_MAX_FRS; + mbx.frs.max_frs = mtu; + mbx.frs.vf_id = nic->vf_id; + + return nicvf_send_msg_to_pf(nic, &mbx); + } + int nicvf_open(struct net_device *netdev) { - int err, qidx; + int cpu, err, qidx; struct nicvf *nic = netdev_priv(netdev); struct queue_set *qs = nic->qs; struct nicvf_cq_poll *cq_poll = NULL; - - nic->mtu = netdev->mtu; + union nic_mbx mbx = {};
netif_carrier_off(netdev);
@@@ -1248,9 -1230,17 +1230,17 @@@ if (nic->sqs_mode) nicvf_get_primary_vf_struct(nic);
- /* Configure receive side scaling */ - if (!nic->sqs_mode) + /* Configure receive side scaling and MTU */ + if (!nic->sqs_mode) { nicvf_rss_init(nic); + if (nicvf_update_hw_max_frs(nic, netdev->mtu)) + goto cleanup; + + /* Clear percpu stats */ + for_each_possible_cpu(cpu) + memset(per_cpu_ptr(nic->drv_stats, cpu), 0, + sizeof(struct nicvf_drv_stats)); + }
err = nicvf_register_interrupts(nic); if (err) @@@ -1276,8 -1266,9 +1266,9 @@@ for (qidx = 0; qidx < qs->rbdr_cnt; qidx++) nicvf_enable_intr(nic, NICVF_INTR_RBDR, qidx);
- nic->drv_stats.txq_stop = 0; - nic->drv_stats.txq_wake = 0; + /* Send VF config done msg to PF */ + mbx.msg.msg = NIC_MBOX_MSG_CFG_DONE; + nicvf_write_to_mbx(nic, &mbx);
return 0; cleanup: @@@ -1297,25 -1288,23 +1288,20 @@@ napi_del return err; }
- static int nicvf_update_hw_max_frs(struct nicvf *nic, int mtu)
- {
-	union nic_mbx mbx = {};
-
-	mbx.frs.msg = NIC_MBOX_MSG_SET_MAX_FRS;
-	mbx.frs.max_frs = mtu;
-	mbx.frs.vf_id = nic->vf_id;
-
-	return nicvf_send_msg_to_pf(nic, &mbx);
- }
-
 static int nicvf_change_mtu(struct net_device *netdev, int new_mtu)
 {
 	struct nicvf *nic = netdev_priv(netdev);
-
-	if (new_mtu > NIC_HW_MAX_FRS)
-		return -EINVAL;
-
-	if (new_mtu < NIC_HW_MIN_FRS)
-		return -EINVAL;
++	int orig_mtu = netdev->mtu;

-	if (nicvf_update_hw_max_frs(nic, new_mtu))
-		return -EINVAL;
 	netdev->mtu = new_mtu;
-	nic->mtu = new_mtu;
+
+	if (!netif_running(netdev))
+		return 0;
+
-	if (nicvf_update_hw_max_frs(nic, new_mtu))
++	if (nicvf_update_hw_max_frs(nic, new_mtu)) {
++		netdev->mtu = orig_mtu;
+		return -EINVAL;
++	}
return 0; } @@@ -1373,9 -1362,10 +1359,10 @@@ void nicvf_update_lmac_stats(struct nic
void nicvf_update_stats(struct nicvf *nic) { - int qidx; + int qidx, cpu; + u64 tmp_stats = 0; struct nicvf_hw_stats *stats = &nic->hw_stats; - struct nicvf_drv_stats *drv_stats = &nic->drv_stats; + struct nicvf_drv_stats *drv_stats; struct queue_set *qs = nic->qs;
#define GET_RX_STATS(reg) \ @@@ -1398,21 -1388,33 +1385,33 @@@ stats->rx_drop_l3_bcast = GET_RX_STATS(RX_DRP_L3BCAST); stats->rx_drop_l3_mcast = GET_RX_STATS(RX_DRP_L3MCAST);
- stats->tx_bytes_ok = GET_TX_STATS(TX_OCTS); - stats->tx_ucast_frames_ok = GET_TX_STATS(TX_UCAST); - stats->tx_bcast_frames_ok = GET_TX_STATS(TX_BCAST); - stats->tx_mcast_frames_ok = GET_TX_STATS(TX_MCAST); + stats->tx_bytes = GET_TX_STATS(TX_OCTS); + stats->tx_ucast_frames = GET_TX_STATS(TX_UCAST); + stats->tx_bcast_frames = GET_TX_STATS(TX_BCAST); + stats->tx_mcast_frames = GET_TX_STATS(TX_MCAST); stats->tx_drops = GET_TX_STATS(TX_DROP);
- drv_stats->tx_frames_ok = stats->tx_ucast_frames_ok + - stats->tx_bcast_frames_ok + - stats->tx_mcast_frames_ok; - drv_stats->rx_frames_ok = stats->rx_ucast_frames + - stats->rx_bcast_frames + - stats->rx_mcast_frames; - drv_stats->rx_drops = stats->rx_drop_red + - stats->rx_drop_overrun; - drv_stats->tx_drops = stats->tx_drops; + /* On T88 pass 2.0, the dummy SQE added for TSO notification + * via CQE has 'dont_send' set. Hence HW drops the pkt pointed + * pointed by dummy SQE and results in tx_drops counter being + * incremented. Subtracting it from tx_tso counter will give + * exact tx_drops counter. + */ + if (nic->t88 && nic->hw_tso) { + for_each_possible_cpu(cpu) { + drv_stats = per_cpu_ptr(nic->drv_stats, cpu); + tmp_stats += drv_stats->tx_tso; + } + stats->tx_drops = tmp_stats - stats->tx_drops; + } + stats->tx_frames = stats->tx_ucast_frames + + stats->tx_bcast_frames + + stats->tx_mcast_frames; + stats->rx_frames = stats->rx_ucast_frames + + stats->rx_bcast_frames + + stats->rx_mcast_frames; + stats->rx_drops = stats->rx_drop_red + + stats->rx_drop_overrun;
/* Update RQ and SQ stats */ for (qidx = 0; qidx < qs->rq_cnt; qidx++) @@@ -1426,18 -1428,17 +1425,17 @@@ static struct rtnl_link_stats64 *nicvf_ { struct nicvf *nic = netdev_priv(netdev); struct nicvf_hw_stats *hw_stats = &nic->hw_stats; - struct nicvf_drv_stats *drv_stats = &nic->drv_stats;
nicvf_update_stats(nic);
stats->rx_bytes = hw_stats->rx_bytes; - stats->rx_packets = drv_stats->rx_frames_ok; - stats->rx_dropped = drv_stats->rx_drops; + stats->rx_packets = hw_stats->rx_frames; + stats->rx_dropped = hw_stats->rx_drops; stats->multicast = hw_stats->rx_mcast_frames;
- stats->tx_bytes = hw_stats->tx_bytes_ok; - stats->tx_packets = drv_stats->tx_frames_ok; - stats->tx_dropped = drv_stats->tx_drops; + stats->tx_bytes = hw_stats->tx_bytes; + stats->tx_packets = hw_stats->tx_frames; + stats->tx_dropped = hw_stats->tx_drops;
return stats; } @@@ -1450,7 -1451,7 +1448,7 @@@ static void nicvf_tx_timeout(struct net netdev_warn(dev, "%s: Transmit timed out, resetting\n", dev->name);
- nic->drv_stats.tx_timeout++; + this_cpu_inc(nic->drv_stats->tx_timeout); schedule_work(&nic->reset_task); }
@@@ -1584,6 -1585,12 +1582,12 @@@ static int nicvf_probe(struct pci_dev * goto err_free_netdev; }
+ nic->drv_stats = netdev_alloc_pcpu_stats(struct nicvf_drv_stats); + if (!nic->drv_stats) { + err = -ENOMEM; + goto err_free_netdev; + } + err = nicvf_set_qset_resources(nic); if (err) goto err_free_netdev; @@@ -1624,10 -1631,6 +1628,10 @@@ netdev->netdev_ops = &nicvf_netdev_ops; netdev->watchdog_timeo = NICVF_TX_TIMEOUT;
+ /* MTU range: 64 - 9200 */ + netdev->min_mtu = NIC_HW_MIN_FRS; + netdev->max_mtu = NIC_HW_MAX_FRS; + INIT_WORK(&nic->reset_task, nicvf_reset_task);
err = register_netdev(netdev); @@@ -1646,6 -1649,8 +1650,8 @@@ err_unregister_interrupts nicvf_unregister_interrupts(nic); err_free_netdev: pci_set_drvdata(pdev, NULL); + if (nic->drv_stats) + free_percpu(nic->drv_stats); free_netdev(netdev); err_release_regions: pci_release_regions(pdev); @@@ -1673,6 -1678,8 +1679,8 @@@ static void nicvf_remove(struct pci_de unregister_netdev(pnetdev); nicvf_unregister_interrupts(nic); pci_set_drvdata(pdev, NULL); + if (nic->drv_stats) + free_percpu(nic->drv_stats); free_netdev(netdev); pci_release_regions(pdev); pci_disable_device(pdev); diff --combined drivers/net/ethernet/chelsio/cxgb4/sge.c index b7d0753,e19a0ca..9f60647 --- a/drivers/net/ethernet/chelsio/cxgb4/sge.c +++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c @@@ -377,8 -377,8 +377,8 @@@ unmap: dma_unmap_page(dev, be64_to_cp * Reclaims Tx descriptors from an SGE Tx queue and frees the associated * Tx buffers. Called with the Tx queue lock held. */ -static void free_tx_desc(struct adapter *adap, struct sge_txq *q, - unsigned int n, bool unmap) +void free_tx_desc(struct adapter *adap, struct sge_txq *q, + unsigned int n, bool unmap) { struct tx_sw_desc *d; unsigned int cidx = q->cidx; @@@ -1543,7 -1543,7 +1543,7 @@@ static inline unsigned int calc_tx_flit * inability to map packets. A periodic timer attempts to restart * queues so marked. */ -static void txq_stop_maperr(struct sge_ofld_txq *q) +static void txq_stop_maperr(struct sge_uld_txq *q) { q->mapping_err++; q->q.stops++; @@@ -1559,7 -1559,7 +1559,7 @@@ * Stops an offload Tx queue that has become full and modifies the packet * being written to request a wakeup. */ -static void ofldtxq_stop(struct sge_ofld_txq *q, struct sk_buff *skb) +static void ofldtxq_stop(struct sge_uld_txq *q, struct sk_buff *skb) { struct fw_wr_hdr *wr = (struct fw_wr_hdr *)skb->data;
@@@ -1586,7 -1586,7 +1586,7 @@@ * boolean "service_ofldq_running" to make sure that only one instance * is ever running at a time ... */ -static void service_ofldq(struct sge_ofld_txq *q) +static void service_ofldq(struct sge_uld_txq *q) { u64 *pos, *before, *end; int credits; @@@ -1706,7 -1706,7 +1706,7 @@@ * * Send an offload packet through an SGE offload queue. */ -static int ofld_xmit(struct sge_ofld_txq *q, struct sk_buff *skb) +static int ofld_xmit(struct sge_uld_txq *q, struct sk_buff *skb) { skb->priority = calc_tx_flits_ofld(skb); /* save for restart */ spin_lock(&q->sendq.lock); @@@ -1735,7 -1735,7 +1735,7 @@@ */ static void restart_ofldq(unsigned long data) { - struct sge_ofld_txq *q = (struct sge_ofld_txq *)data; + struct sge_uld_txq *q = (struct sge_uld_txq *)data;
spin_lock(&q->sendq.lock); q->full = 0; /* the queue actually is completely empty now */ @@@ -1767,23 -1767,17 +1767,23 @@@ static inline unsigned int is_ctrl_pkt( return skb->queue_mapping & 1; }
-static inline int ofld_send(struct adapter *adap, struct sk_buff *skb) +static inline int uld_send(struct adapter *adap, struct sk_buff *skb, + unsigned int tx_uld_type) { + struct sge_uld_txq_info *txq_info; + struct sge_uld_txq *txq; unsigned int idx = skb_txq(skb);
+ txq_info = adap->sge.uld_txq_info[tx_uld_type]; + txq = &txq_info->uldtxq[idx]; + if (unlikely(is_ctrl_pkt(skb))) { /* Single ctrl queue is a requirement for LE workaround path */ if (adap->tids.nsftids) idx = 0; return ctrl_xmit(&adap->sge.ctrlq[idx], skb); } - return ofld_xmit(&adap->sge.ofldtxq[idx], skb); + return ofld_xmit(txq, skb); }
/** @@@ -1800,7 -1794,7 +1800,7 @@@ int t4_ofld_send(struct adapter *adap, int ret;
local_bh_disable(); - ret = ofld_send(adap, skb); + ret = uld_send(adap, skb, CXGB4_TX_OFLD); local_bh_enable(); return ret; } @@@ -1819,39 -1813,6 +1819,39 @@@ int cxgb4_ofld_send(struct net_device * } EXPORT_SYMBOL(cxgb4_ofld_send);
+/** + * t4_crypto_send - send crypto packet + * @adap: the adapter + * @skb: the packet + * + * Sends crypto packet. We use the packet queue_mapping to select the + * appropriate Tx queue as follows: bit 0 indicates whether the packet + * should be sent as regular or control, bits 1-15 select the queue. + */ +static int t4_crypto_send(struct adapter *adap, struct sk_buff *skb) +{ + int ret; + + local_bh_disable(); + ret = uld_send(adap, skb, CXGB4_TX_CRYPTO); + local_bh_enable(); + return ret; +} + +/** + * cxgb4_crypto_send - send crypto packet + * @dev: the net device + * @skb: the packet + * + * Sends crypto packet. This is an exported version of @t4_crypto_send, + * intended for ULDs. + */ +int cxgb4_crypto_send(struct net_device *dev, struct sk_buff *skb) +{ + return t4_crypto_send(netdev2adap(dev), skb); +} +EXPORT_SYMBOL(cxgb4_crypto_send); + static inline void copy_frags(struct sk_buff *skb, const struct pkt_gl *gl, unsigned int offset) { @@@ -2518,7 -2479,7 +2518,7 @@@ static void sge_tx_timer_cb(unsigned lo for (i = 0; i < BITS_TO_LONGS(s->egr_sz); i++) for (m = s->txq_maperr[i]; m; m &= m - 1) { unsigned long id = __ffs(m) + i * BITS_PER_LONG; - struct sge_ofld_txq *txq = s->egr_map[id]; + struct sge_uld_txq *txq = s->egr_map[id];
clear_bit(id, s->txq_maperr); tasklet_schedule(&txq->qresume_tsk); @@@ -2838,7 -2799,6 +2838,7 @@@ int t4_sge_alloc_eth_txq(struct adapte return ret; }
+ txq->q.q_type = CXGB4_TXQ_ETH; init_txq(adap, &txq->q, FW_EQ_ETH_CMD_EQID_G(ntohl(c.eqid_pkd))); txq->txq = netdevq; txq->tso = txq->tx_cso = txq->vlan_ins = 0; @@@ -2892,7 -2852,6 +2892,7 @@@ int t4_sge_alloc_ctrl_txq(struct adapte return ret; }
+ txq->q.q_type = CXGB4_TXQ_CTRL; init_txq(adap, &txq->q, FW_EQ_CTRL_CMD_EQID_G(ntohl(c.cmpliqid_eqid))); txq->adap = adap; skb_queue_head_init(&txq->sendq); @@@ -2913,15 -2872,13 +2913,15 @@@ int t4_sge_mod_ctrl_txq(struct adapter return t4_set_params(adap, adap->mbox, adap->pf, 0, 1, ¶m, &val); }
-int t4_sge_alloc_ofld_txq(struct adapter *adap, struct sge_ofld_txq *txq, - struct net_device *dev, unsigned int iqid) +int t4_sge_alloc_uld_txq(struct adapter *adap, struct sge_uld_txq *txq, + struct net_device *dev, unsigned int iqid, + unsigned int uld_type) { int ret, nentries; struct fw_eq_ofld_cmd c; struct sge *s = &adap->sge; struct port_info *pi = netdev_priv(dev); + int cmd = FW_EQ_OFLD_CMD;
/* Add status entries */ nentries = txq->q.size + s->stat_len / sizeof(struct tx_desc); @@@ -2934,9 -2891,7 +2934,9 @@@ return -ENOMEM;
memset(&c, 0, sizeof(c)); - c.op_to_vfn = htonl(FW_CMD_OP_V(FW_EQ_OFLD_CMD) | FW_CMD_REQUEST_F | + if (unlikely(uld_type == CXGB4_TX_CRYPTO)) + cmd = FW_EQ_CTRL_CMD; + c.op_to_vfn = htonl(FW_CMD_OP_V(cmd) | FW_CMD_REQUEST_F | FW_CMD_WRITE_F | FW_CMD_EXEC_F | FW_EQ_OFLD_CMD_PFN_V(adap->pf) | FW_EQ_OFLD_CMD_VFN_V(0)); @@@ -2964,7 -2919,6 +2964,7 @@@ return ret; }
+ txq->q.q_type = CXGB4_TXQ_ULD; init_txq(adap, &txq->q, FW_EQ_OFLD_CMD_EQID_G(ntohl(c.eqid_pkd))); txq->adap = adap; skb_queue_head_init(&txq->sendq); @@@ -2974,7 -2928,7 +2974,7 @@@ return 0; }
-static void free_txq(struct adapter *adap, struct sge_txq *q) +void free_txq(struct adapter *adap, struct sge_txq *q) { struct sge *s = &adap->sge;
@@@ -2997,7 -2951,6 +2997,6 @@@ void free_rspq_fl(struct adapter *adap rq->cntxt_id, fl_id, 0xffff); dma_free_coherent(adap->pdev_dev, (rq->size + 1) * rq->iqe_len, rq->desc, rq->phys_addr); - napi_hash_del(&rq->napi); netif_napi_del(&rq->napi); rq->netdev = NULL; rq->cntxt_id = rq->abs_id = 0; @@@ -3072,6 -3025,21 +3071,6 @@@ void t4_free_sge_resources(struct adapt } }
- /* clean up offload Tx queues */ - for (i = 0; i < ARRAY_SIZE(adap->sge.ofldtxq); i++) { - struct sge_ofld_txq *q = &adap->sge.ofldtxq[i]; - - if (q->q.desc) { - tasklet_kill(&q->qresume_tsk); - t4_ofld_eq_free(adap, adap->mbox, adap->pf, 0, - q->q.cntxt_id); - free_tx_desc(adap, &q->q, q->q.in_use, false); - kfree(q->q.sdesc); - __skb_queue_purge(&q->sendq); - free_txq(adap, &q->q); - } - } - /* clean up control Tx queues */ for (i = 0; i < ARRAY_SIZE(adap->sge.ctrlq); i++) { struct sge_ctrl_txq *cq = &adap->sge.ctrlq[i]; @@@ -3124,34 -3092,12 +3123,34 @@@ void t4_sge_stop(struct adapter *adap if (s->tx_timer.function) del_timer_sync(&s->tx_timer);
- for (i = 0; i < ARRAY_SIZE(s->ofldtxq); i++) { - struct sge_ofld_txq *q = &s->ofldtxq[i]; + if (is_offload(adap)) { + struct sge_uld_txq_info *txq_info; + + txq_info = adap->sge.uld_txq_info[CXGB4_TX_OFLD]; + if (txq_info) { + struct sge_uld_txq *txq = txq_info->uldtxq;
- if (q->q.desc) - tasklet_kill(&q->qresume_tsk); + for_each_ofldtxq(&adap->sge, i) { + if (txq->q.desc) + tasklet_kill(&txq->qresume_tsk); + } + } } + + if (is_pci_uld(adap)) { + struct sge_uld_txq_info *txq_info; + + txq_info = adap->sge.uld_txq_info[CXGB4_TX_CRYPTO]; + if (txq_info) { + struct sge_uld_txq *txq = txq_info->uldtxq; + + for_each_ofldtxq(&adap->sge, i) { + if (txq->q.desc) + tasklet_kill(&txq->qresume_tsk); + } + } + } + for (i = 0; i < ARRAY_SIZE(s->ctrlq); i++) { struct sge_ctrl_txq *cq = &s->ctrlq[i];
diff --combined drivers/net/ethernet/emulex/benet/be_main.c index 3f6152c,93aa293..7e1633b --- a/drivers/net/ethernet/emulex/benet/be_main.c +++ b/drivers/net/ethernet/emulex/benet/be_main.c @@@ -1406,6 -1406,23 +1406,6 @@@ drop return NETDEV_TX_OK; }
-static int be_change_mtu(struct net_device *netdev, int new_mtu) -{ - struct be_adapter *adapter = netdev_priv(netdev); - struct device *dev = &adapter->pdev->dev; - - if (new_mtu < BE_MIN_MTU || new_mtu > BE_MAX_MTU) { - dev_info(dev, "MTU must be between %d and %d bytes\n", - BE_MIN_MTU, BE_MAX_MTU); - return -EINVAL; - } - - dev_info(dev, "MTU changed from %d to %d bytes\n", - netdev->mtu, new_mtu); - netdev->mtu = new_mtu; - return 0; -} - static inline bool be_in_all_promisc(struct be_adapter *adapter) { return (adapter->if_flags & BE_IF_FLAGS_ALL_PROMISCUOUS) == @@@ -2796,7 -2813,6 +2796,6 @@@ static void be_evt_queues_destroy(struc if (eqo->q.created) { be_eq_clean(eqo); be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ); - napi_hash_del(&eqo->napi); netif_napi_del(&eqo->napi); free_cpumask_var(eqo->affinity_mask); } @@@ -5199,6 -5215,7 +5198,6 @@@ static const struct net_device_ops be_n .ndo_start_xmit = be_xmit, .ndo_set_rx_mode = be_set_rx_mode, .ndo_set_mac_address = be_mac_addr_set, - .ndo_change_mtu = be_change_mtu, .ndo_get_stats64 = be_get_stats64, .ndo_validate_addr = eth_validate_addr, .ndo_vlan_rx_add_vid = be_vlan_add_vid, @@@ -5248,10 -5265,6 +5247,10 @@@ static void be_netdev_init(struct net_d netdev->netdev_ops = &be_netdev_ops;
netdev->ethtool_ops = &be_ethtool_ops; + + /* MTU range: 256 - 9000 */ + netdev->min_mtu = BE_MIN_MTU; + netdev->max_mtu = BE_MAX_MTU; }
static void be_cleanup(struct be_adapter *adapter) diff --combined drivers/net/ethernet/marvell/sky2.c index aa60f4d,941c8e2..b60ad0e --- a/drivers/net/ethernet/marvell/sky2.c +++ b/drivers/net/ethernet/marvell/sky2.c @@@ -2398,6 -2398,16 +2398,6 @@@ static int sky2_change_mtu(struct net_d u16 ctl, mode; u32 imask;
- /* MTU size outside the spec */ - if (new_mtu < ETH_ZLEN || new_mtu > ETH_JUMBO_MTU) - return -EINVAL; - - /* MTU > 1500 on yukon FE and FE+ not allowed */ - if (new_mtu > ETH_DATA_LEN && - (hw->chip_id == CHIP_ID_YUKON_FE || - hw->chip_id == CHIP_ID_YUKON_FE_P)) - return -EINVAL; - if (!netif_running(dev)) { dev->mtu = new_mtu; netdev_update_features(dev); @@@ -4798,14 -4808,6 +4798,14 @@@ static struct net_device *sky2_init_net
dev->features |= dev->hw_features;
+ /* MTU range: 60 - 1500 or 9000 */ + dev->min_mtu = ETH_ZLEN; + if (hw->chip_id == CHIP_ID_YUKON_FE || + hw->chip_id == CHIP_ID_YUKON_FE_P) + dev->max_mtu = ETH_DATA_LEN; + else + dev->max_mtu = ETH_JUMBO_MTU; + /* try to get mac address in the following order: * 1) from device tree data * 2) from internal registers set by bootloader @@@ -5218,6 -5220,19 +5218,19 @@@ static SIMPLE_DEV_PM_OPS(sky2_pm_ops, s
static void sky2_shutdown(struct pci_dev *pdev) { + struct sky2_hw *hw = pci_get_drvdata(pdev); + int port; + + for (port = 0; port < hw->ports; port++) { + struct net_device *ndev = hw->dev[port]; + + rtnl_lock(); + if (netif_running(ndev)) { + dev_close(ndev); + netif_device_detach(ndev); + } + rtnl_unlock(); + } sky2_suspend(&pdev->dev); pci_wake_from_d3(pdev, device_may_wakeup(&pdev->dev)); pci_set_power_state(pdev, PCI_D3hot); diff --combined drivers/net/ethernet/stmicro/stmmac/Kconfig index 6e9fcc3,4b78168..d37e32d --- a/drivers/net/ethernet/stmicro/stmmac/Kconfig +++ b/drivers/net/ethernet/stmicro/stmmac/Kconfig @@@ -69,17 -69,6 +69,17 @@@ config DWMAC_MESO the stmmac device driver. This driver is used for Meson6, Meson8, Meson8b and GXBB SoCs.
+config DWMAC_OXNAS + tristate "Oxford Semiconductor OXNAS dwmac support" + default ARCH_OXNAS + depends on OF && COMMON_CLK && (ARCH_OXNAS || COMPILE_TEST) + select MFD_SYSCON + help + Support for Ethernet controller on Oxford Semiconductor OXNAS SoCs. + + This selects the Oxford Semiconductor OXNAS SoC glue layer support for + the stmmac device driver. This driver is used for OX820. + config DWMAC_ROCKCHIP tristate "Rockchip dwmac support" default ARCH_ROCKCHIP @@@ -118,7 -107,7 +118,7 @@@ config DWMAC_ST config DWMAC_STM32 tristate "STM32 DWMAC support" default ARCH_STM32 - depends on OF && HAS_IOMEM + depends on OF && HAS_IOMEM && (ARCH_STM32 || COMPILE_TEST) select MFD_SYSCON ---help--- Support for Ethernet controller on STM32 SoCs. diff --combined drivers/net/ethernet/stmicro/stmmac/descs.h index 4000af4,e3c86d4..faeeef7 --- a/drivers/net/ethernet/stmicro/stmmac/descs.h +++ b/drivers/net/ethernet/stmicro/stmmac/descs.h @@@ -87,7 -87,7 +87,7 @@@ #define TDES0_ERROR_SUMMARY BIT(15) #define TDES0_IP_HEADER_ERROR BIT(16) #define TDES0_TIME_STAMP_STATUS BIT(17) -#define TDES0_OWN BIT(31) +#define TDES0_OWN ((u32)BIT(31)) /* silence sparse */ /* TDES1 */ #define TDES1_BUFFER1_SIZE_MASK GENMASK(10, 0) #define TDES1_BUFFER2_SIZE_MASK GENMASK(21, 11) @@@ -130,7 -130,7 +130,7 @@@ #define ETDES0_FIRST_SEGMENT BIT(28) #define ETDES0_LAST_SEGMENT BIT(29) #define ETDES0_INTERRUPT BIT(30) -#define ETDES0_OWN BIT(31) +#define ETDES0_OWN ((u32)BIT(31)) /* silence sparse */ /* TDES1 */ #define ETDES1_BUFFER1_SIZE_MASK GENMASK(12, 0) #define ETDES1_BUFFER2_SIZE_MASK GENMASK(28, 16) @@@ -155,30 -155,34 +155,34 @@@ #define ERDES4_L3_L4_FILT_NO_MATCH_MASK GENMASK(27, 26)
/* Extended RDES4 message type definitions */ - #define RDES_EXT_NO_PTP 0 - #define RDES_EXT_SYNC 1 - #define RDES_EXT_FOLLOW_UP 2 - #define RDES_EXT_DELAY_REQ 3 - #define RDES_EXT_DELAY_RESP 4 - #define RDES_EXT_PDELAY_REQ 5 - #define RDES_EXT_PDELAY_RESP 6 - #define RDES_EXT_PDELAY_FOLLOW_UP 7 + #define RDES_EXT_NO_PTP 0x0 + #define RDES_EXT_SYNC 0x1 + #define RDES_EXT_FOLLOW_UP 0x2 + #define RDES_EXT_DELAY_REQ 0x3 + #define RDES_EXT_DELAY_RESP 0x4 + #define RDES_EXT_PDELAY_REQ 0x5 + #define RDES_EXT_PDELAY_RESP 0x6 + #define RDES_EXT_PDELAY_FOLLOW_UP 0x7 + #define RDES_PTP_ANNOUNCE 0x8 + #define RDES_PTP_MANAGEMENT 0x9 + #define RDES_PTP_SIGNALING 0xa + #define RDES_PTP_PKT_RESERVED_TYPE 0xf
/* Basic descriptor structure for normal and alternate descriptors */ struct dma_desc { - unsigned int des0; - unsigned int des1; - unsigned int des2; - unsigned int des3; + __le32 des0; + __le32 des1; + __le32 des2; + __le32 des3; };
/* Extended descriptor structure (e.g. >= databook 3.50a) */ struct dma_extended_desc { struct dma_desc basic; /* Basic descriptors */ - unsigned int des4; /* Extended Status */ - unsigned int des5; /* Reserved */ - unsigned int des6; /* Tx/Rx Timestamp Low */ - unsigned int des7; /* Tx/Rx Timestamp High */ + __le32 des4; /* Extended Status */ + __le32 des5; /* Reserved */ + __le32 des6; /* Tx/Rx Timestamp Low */ + __le32 des7; /* Tx/Rx Timestamp High */ };
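The descs.h hunk above retypes every descriptor word as __le32: the DMA engine always reads descriptors in little-endian layout, so on big-endian hosts a plain u32 access would see byte-swapped data, and the annotation lets sparse flag any access that skips conversion. All the follow-on dwmac4_descs.c and enh_desc.c hunks funnel reads through le32_to_cpu() and writes through cpu_to_le32(). A minimal sketch of the two directions, with hypothetical helper names:

/* read: convert once into a CPU-order value, then test bits there */
static inline bool desc_tx_owned_by_dma(const struct dma_desc *p)
{
	return !!(le32_to_cpu(p->des3) & TDES3_OWN);
}

/* write: convert the mask rather than the stored word, so the
 * read-modify-write stays entirely in __le32 space */
static inline void desc_tx_give_to_dma(struct dma_desc *p)
{
	p->des3 |= cpu_to_le32(TDES3_OWN);
}

On little-endian hosts both helpers compile to the same code as the old direct accesses; the change is purely for big-endian correctness.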
/* Transmit checksum insertion control */ diff --combined drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c index bec72d3,a601f8d..a340fc8 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c @@@ -23,7 -23,7 +23,7 @@@ static int dwmac4_wrback_get_tx_status( unsigned int tdes3; int ret = tx_done;
- tdes3 = p->des3; + tdes3 = le32_to_cpu(p->des3);
/* Get tx owner first */ if (unlikely(tdes3 & TDES3_OWN)) @@@ -77,9 -77,9 +77,9 @@@ static int dwmac4_wrback_get_rx_status( struct dma_desc *p) { struct net_device_stats *stats = (struct net_device_stats *)data; - unsigned int rdes1 = p->des1; - unsigned int rdes2 = p->des2; - unsigned int rdes3 = p->des3; + unsigned int rdes1 = le32_to_cpu(p->des1); + unsigned int rdes2 = le32_to_cpu(p->des2); + unsigned int rdes3 = le32_to_cpu(p->des3); int message_type; int ret = good_frame;
@@@ -123,22 -123,29 +123,29 @@@ x->ipv4_pkt_rcvd++; if (rdes1 & RDES1_IPV6_HEADER) x->ipv6_pkt_rcvd++; - if (message_type == RDES_EXT_SYNC) - x->rx_msg_type_sync++; + + if (message_type == RDES_EXT_NO_PTP) + x->no_ptp_rx_msg_type_ext++; + else if (message_type == RDES_EXT_SYNC) + x->ptp_rx_msg_type_sync++; else if (message_type == RDES_EXT_FOLLOW_UP) - x->rx_msg_type_follow_up++; + x->ptp_rx_msg_type_follow_up++; else if (message_type == RDES_EXT_DELAY_REQ) - x->rx_msg_type_delay_req++; + x->ptp_rx_msg_type_delay_req++; else if (message_type == RDES_EXT_DELAY_RESP) - x->rx_msg_type_delay_resp++; + x->ptp_rx_msg_type_delay_resp++; else if (message_type == RDES_EXT_PDELAY_REQ) - x->rx_msg_type_pdelay_req++; + x->ptp_rx_msg_type_pdelay_req++; else if (message_type == RDES_EXT_PDELAY_RESP) - x->rx_msg_type_pdelay_resp++; + x->ptp_rx_msg_type_pdelay_resp++; else if (message_type == RDES_EXT_PDELAY_FOLLOW_UP) - x->rx_msg_type_pdelay_follow_up++; - else - x->rx_msg_type_ext_no_ptp++; + x->ptp_rx_msg_type_pdelay_follow_up++; + else if (message_type == RDES_PTP_ANNOUNCE) + x->ptp_rx_msg_type_announce++; + else if (message_type == RDES_PTP_MANAGEMENT) + x->ptp_rx_msg_type_management++; + else if (message_type == RDES_PTP_PKT_RESERVED_TYPE) + x->ptp_rx_msg_pkt_reserved_type++;
if (rdes1 & RDES1_PTP_PACKET_TYPE) x->ptp_frame_type++; @@@ -169,76 -176,121 +176,122 @@@
static int dwmac4_rd_get_tx_len(struct dma_desc *p) { - return (p->des2 & TDES2_BUFFER1_SIZE_MASK); + return (le32_to_cpu(p->des2) & TDES2_BUFFER1_SIZE_MASK); }
static int dwmac4_get_tx_owner(struct dma_desc *p) { - return (p->des3 & TDES3_OWN) >> TDES3_OWN_SHIFT; + return (le32_to_cpu(p->des3) & TDES3_OWN) >> TDES3_OWN_SHIFT; }
static void dwmac4_set_tx_owner(struct dma_desc *p) { - p->des3 |= TDES3_OWN; + p->des3 |= cpu_to_le32(TDES3_OWN); }
static void dwmac4_set_rx_owner(struct dma_desc *p) { - p->des3 |= RDES3_OWN; + p->des3 |= cpu_to_le32(RDES3_OWN); }
static int dwmac4_get_tx_ls(struct dma_desc *p) { - return (p->des3 & TDES3_LAST_DESCRIPTOR) >> TDES3_LAST_DESCRIPTOR_SHIFT; + return (le32_to_cpu(p->des3) & TDES3_LAST_DESCRIPTOR) + >> TDES3_LAST_DESCRIPTOR_SHIFT; }
static int dwmac4_wrback_get_rx_frame_len(struct dma_desc *p, int rx_coe) { - return (p->des3 & RDES3_PACKET_SIZE_MASK); + return (le32_to_cpu(p->des3) & RDES3_PACKET_SIZE_MASK); }
static void dwmac4_rd_enable_tx_timestamp(struct dma_desc *p) { - p->des2 |= TDES2_TIMESTAMP_ENABLE; + p->des2 |= cpu_to_le32(TDES2_TIMESTAMP_ENABLE); }
static int dwmac4_wrback_get_tx_timestamp_status(struct dma_desc *p) { - return (le32_to_cpu(p->des3) & TDES3_TIMESTAMP_STATUS) - >> TDES3_TIMESTAMP_STATUS_SHIFT; + /* Context type from W/B descriptor must be zero */ - if (p->des3 & TDES3_CONTEXT_TYPE) ++ if (le32_to_cpu(p->des3) & TDES3_CONTEXT_TYPE) + return -EINVAL; + + /* Tx Timestamp Status is 1, so des0 and des1 will hold valid values */ - if (p->des3 & TDES3_TIMESTAMP_STATUS) ++ if (le32_to_cpu(p->des3) & TDES3_TIMESTAMP_STATUS) + return 0; + + return 1; }
- /* NOTE: For RX CTX bit has to be checked before - * HAVE a specific function for TX and another one for RX - */ - static u64 dwmac4_wrback_get_timestamp(void *desc, u32 ats) + static inline u64 dwmac4_get_timestamp(void *desc, u32 ats) { struct dma_desc *p = (struct dma_desc *)desc; u64 ns;
- ns = p->des0; + ns = le32_to_cpu(p->des0); /* convert high/sec time stamp value to nanosecond */ - ns += p->des1 * 1000000000ULL; + ns += le32_to_cpu(p->des1) * 1000000000ULL;
return ns; }
- static int dwmac4_context_get_rx_timestamp_status(void *desc, u32 ats) + static int dwmac4_rx_check_timestamp(void *desc) + { + struct dma_desc *p = (struct dma_desc *)desc; + u32 own, ctxt; + int ret = 1; + + own = p->des3 & RDES3_OWN; + ctxt = ((p->des3 & RDES3_CONTEXT_DESCRIPTOR) + >> RDES3_CONTEXT_DESCRIPTOR_SHIFT); + + if (likely(!own && ctxt)) { + if ((p->des0 == 0xffffffff) && (p->des1 == 0xffffffff)) + /* Corrupted value */ + ret = -EINVAL; + else + /* A valid Timestamp is ready to be read */ + ret = 0; + } + + /* Timestamp not ready */ + return ret; + } + + static int dwmac4_wrback_get_rx_timestamp_status(void *desc, u32 ats) { struct dma_desc *p = (struct dma_desc *)desc; + int ret = -EINVAL; + + /* Get the status from normal w/b descriptor */ + if (likely(p->des3 & TDES3_RS1V)) { - if (likely(p->des1 & RDES1_TIMESTAMP_AVAILABLE)) { ++ if (likely(le32_to_cpu(p->des1) & RDES1_TIMESTAMP_AVAILABLE)) { + int i = 0; + + /* Check if timestamp is OK from context descriptor */ + do { + ret = dwmac4_rx_check_timestamp(desc); + if (ret < 0) + goto exit; + i++;
- return (le32_to_cpu(p->des1) & RDES1_TIMESTAMP_AVAILABLE) - >> RDES1_TIMESTAMP_AVAILABLE_SHIFT; + } while ((ret == 1) && (i < 10)); + + if (i == 10) + ret = -EBUSY; + } + } + exit: + return ret; }
static void dwmac4_rd_init_rx_desc(struct dma_desc *p, int disable_rx_ic, int mode, int end) { - p->des3 = RDES3_OWN | RDES3_BUFFER1_VALID_ADDR; + p->des3 = cpu_to_le32(RDES3_OWN | RDES3_BUFFER1_VALID_ADDR);
if (!disable_rx_ic) - p->des3 |= RDES3_INT_ON_COMPLETION_EN; + p->des3 |= cpu_to_le32(RDES3_INT_ON_COMPLETION_EN); }
static void dwmac4_rd_init_tx_desc(struct dma_desc *p, int mode, int end) @@@ -253,9 -305,9 +306,9 @@@ static void dwmac4_rd_prepare_tx_desc(s bool csum_flag, int mode, bool tx_own, bool ls) { - unsigned int tdes3 = p->des3; + unsigned int tdes3 = le32_to_cpu(p->des3);
- p->des2 |= (len & TDES2_BUFFER1_SIZE_MASK); + p->des2 |= cpu_to_le32(len & TDES2_BUFFER1_SIZE_MASK);
if (is_fs) tdes3 |= TDES3_FIRST_DESCRIPTOR; @@@ -283,7 -335,7 +336,7 @@@ */ wmb();
- p->des3 = tdes3; + p->des3 = cpu_to_le32(tdes3); }
static void dwmac4_rd_prepare_tso_tx_desc(struct dma_desc *p, int is_fs, @@@ -291,14 -343,14 +344,14 @@@ bool ls, unsigned int tcphdrlen, unsigned int tcppayloadlen) { - unsigned int tdes3 = p->des3; + unsigned int tdes3 = le32_to_cpu(p->des3);
if (len1) - p->des2 |= (len1 & TDES2_BUFFER1_SIZE_MASK); + p->des2 |= cpu_to_le32((len1 & TDES2_BUFFER1_SIZE_MASK));
if (len2) - p->des2 |= (len2 << TDES2_BUFFER2_SIZE_MASK_SHIFT) - & TDES2_BUFFER2_SIZE_MASK; + p->des2 |= cpu_to_le32((len2 << TDES2_BUFFER2_SIZE_MASK_SHIFT) + & TDES2_BUFFER2_SIZE_MASK);
if (is_fs) { tdes3 |= TDES3_FIRST_DESCRIPTOR | @@@ -326,7 -378,7 +379,7 @@@ */ wmb();
- p->des3 = tdes3; + p->des3 = cpu_to_le32(tdes3); }
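Both prepare_tx_desc variants above follow the same publication protocol: every field is staged in a local tdes3 image, and the word carrying the OWN bit is stored only after a wmb(), so the DMA engine can never fetch a descriptor whose length or checksum bits are still being written. A compressed sketch of the pattern with a hypothetical function name (in the real code OWN is folded into tdes3 earlier, when tx_own is set):

static void give_tx_desc_to_dma(struct dma_desc *p, unsigned int tdes3)
{
	/* des0/des1/des2 and the non-OWN bits of tdes3 are already filled */
	tdes3 |= TDES3_OWN;

	/* order all prior descriptor writes before OWN becomes visible */
	wmb();

	p->des3 = cpu_to_le32(tdes3);
}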
static void dwmac4_release_tx_desc(struct dma_desc *p, int mode) @@@ -337,7 -389,7 +390,7 @@@
static void dwmac4_rd_set_tx_ic(struct dma_desc *p) { - p->des2 |= TDES2_INTERRUPT_ON_COMPLETION; + p->des2 |= cpu_to_le32(TDES2_INTERRUPT_ON_COMPLETION); }
static void dwmac4_display_ring(void *head, unsigned int size, bool rx) @@@ -350,8 -402,7 +403,8 @@@ for (i = 0; i < size; i++) { pr_info("%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n", i, (unsigned int)virt_to_phys(p), - p->des0, p->des1, p->des2, p->des3); + le32_to_cpu(p->des0), le32_to_cpu(p->des1), + le32_to_cpu(p->des2), le32_to_cpu(p->des3)); p++; } } @@@ -360,8 -411,8 +413,8 @@@ static void dwmac4_set_mss_ctxt(struct { p->des0 = 0; p->des1 = 0; - p->des2 = mss; - p->des3 = TDES3_CONTEXT_TYPE | TDES3_CTXT_TCMSSV; + p->des2 = cpu_to_le32(mss); + p->des3 = cpu_to_le32(TDES3_CONTEXT_TYPE | TDES3_CTXT_TCMSSV); }
const struct stmmac_desc_ops dwmac4_desc_ops = { @@@ -375,8 -426,8 +428,8 @@@ .get_rx_frame_len = dwmac4_wrback_get_rx_frame_len, .enable_tx_timestamp = dwmac4_rd_enable_tx_timestamp, .get_tx_timestamp_status = dwmac4_wrback_get_tx_timestamp_status, - .get_timestamp = dwmac4_wrback_get_timestamp, - .get_rx_timestamp_status = dwmac4_context_get_rx_timestamp_status, + .get_rx_timestamp_status = dwmac4_wrback_get_rx_timestamp_status, + .get_timestamp = dwmac4_get_timestamp, .set_tx_ic = dwmac4_rd_set_tx_ic, .prepare_tx_desc = dwmac4_rd_prepare_tx_desc, .prepare_tso_tx_desc = dwmac4_rd_prepare_tso_tx_desc, diff --combined drivers/net/ethernet/stmicro/stmmac/enh_desc.c index 8295aa9,e755493..ce97e52 --- a/drivers/net/ethernet/stmicro/stmmac/enh_desc.c +++ b/drivers/net/ethernet/stmicro/stmmac/enh_desc.c @@@ -30,7 -30,7 +30,7 @@@ static int enh_desc_get_tx_status(void struct dma_desc *p, void __iomem *ioaddr) { struct net_device_stats *stats = (struct net_device_stats *)data; - unsigned int tdes0 = p->des0; + unsigned int tdes0 = le32_to_cpu(p->des0); int ret = tx_done;
/* Get tx owner first */ @@@ -95,7 -95,7 +95,7 @@@
static int enh_desc_get_tx_len(struct dma_desc *p) { - return (p->des1 & ETDES1_BUFFER1_SIZE_MASK); + return (le32_to_cpu(p->des1) & ETDES1_BUFFER1_SIZE_MASK); }
static int enh_desc_coe_rdes0(int ipc_err, int type, int payload_err) @@@ -134,8 -134,8 +134,8 @@@ static void enh_desc_get_ext_status(void *data, struct stmmac_extra_stats *x, struct dma_extended_desc *p) { - unsigned int rdes0 = p->basic.des0; - unsigned int rdes4 = p->des4; + unsigned int rdes0 = le32_to_cpu(p->basic.des0); + unsigned int rdes4 = le32_to_cpu(p->des4);
if (unlikely(rdes0 & ERDES0_RX_MAC_ADDR)) { int message_type = (rdes4 & ERDES4_MSG_TYPE_MASK) >> 8; @@@ -150,22 -150,30 +150,30 @@@ x->ipv4_pkt_rcvd++; if (rdes4 & ERDES4_IPV6_PKT_RCVD) x->ipv6_pkt_rcvd++; - if (message_type == RDES_EXT_SYNC) - x->rx_msg_type_sync++; + + if (message_type == RDES_EXT_NO_PTP) + x->no_ptp_rx_msg_type_ext++; + else if (message_type == RDES_EXT_SYNC) + x->ptp_rx_msg_type_sync++; else if (message_type == RDES_EXT_FOLLOW_UP) - x->rx_msg_type_follow_up++; + x->ptp_rx_msg_type_follow_up++; else if (message_type == RDES_EXT_DELAY_REQ) - x->rx_msg_type_delay_req++; + x->ptp_rx_msg_type_delay_req++; else if (message_type == RDES_EXT_DELAY_RESP) - x->rx_msg_type_delay_resp++; + x->ptp_rx_msg_type_delay_resp++; else if (message_type == RDES_EXT_PDELAY_REQ) - x->rx_msg_type_pdelay_req++; + x->ptp_rx_msg_type_pdelay_req++; else if (message_type == RDES_EXT_PDELAY_RESP) - x->rx_msg_type_pdelay_resp++; + x->ptp_rx_msg_type_pdelay_resp++; else if (message_type == RDES_EXT_PDELAY_FOLLOW_UP) - x->rx_msg_type_pdelay_follow_up++; - else - x->rx_msg_type_ext_no_ptp++; + x->ptp_rx_msg_type_pdelay_follow_up++; + else if (message_type == RDES_PTP_ANNOUNCE) + x->ptp_rx_msg_type_announce++; + else if (message_type == RDES_PTP_MANAGEMENT) + x->ptp_rx_msg_type_management++; + else if (message_type == RDES_PTP_PKT_RESERVED_TYPE) + x->ptp_rx_msg_pkt_reserved_type++; + if (rdes4 & ERDES4_PTP_FRAME_TYPE) x->ptp_frame_type++; if (rdes4 & ERDES4_PTP_VER) @@@ -191,7 -199,7 +199,7 @@@ static int enh_desc_get_rx_status(void struct dma_desc *p) { struct net_device_stats *stats = (struct net_device_stats *)data; - unsigned int rdes0 = p->des0; + unsigned int rdes0 = le32_to_cpu(p->des0); int ret = good_frame;
if (unlikely(rdes0 & RDES0_OWN)) @@@ -257,8 -265,8 +265,8 @@@ static void enh_desc_init_rx_desc(struct dma_desc *p, int disable_rx_ic, int mode, int end) { - p->des0 |= RDES0_OWN; - p->des1 |= ((BUF_SIZE_8KiB - 1) & ERDES1_BUFFER1_SIZE_MASK); + p->des0 |= cpu_to_le32(RDES0_OWN); + p->des1 |= cpu_to_le32((BUF_SIZE_8KiB - 1) & ERDES1_BUFFER1_SIZE_MASK);
if (mode == STMMAC_CHAIN_MODE) ehn_desc_rx_set_on_chain(p); @@@ -266,12 -274,12 +274,12 @@@ ehn_desc_rx_set_on_ring(p, end);
if (disable_rx_ic) - p->des1 |= ERDES1_DISABLE_IC; + p->des1 |= cpu_to_le32(ERDES1_DISABLE_IC); }
static void enh_desc_init_tx_desc(struct dma_desc *p, int mode, int end) { - p->des0 &= ~ETDES0_OWN; + p->des0 &= cpu_to_le32(~ETDES0_OWN); if (mode == STMMAC_CHAIN_MODE) enh_desc_end_tx_desc_on_chain(p); else @@@ -280,27 -288,27 +288,27 @@@
static int enh_desc_get_tx_owner(struct dma_desc *p) { - return (p->des0 & ETDES0_OWN) >> 31; + return (le32_to_cpu(p->des0) & ETDES0_OWN) >> 31; }
static void enh_desc_set_tx_owner(struct dma_desc *p) { - p->des0 |= ETDES0_OWN; + p->des0 |= cpu_to_le32(ETDES0_OWN); }
static void enh_desc_set_rx_owner(struct dma_desc *p) { - p->des0 |= RDES0_OWN; + p->des0 |= cpu_to_le32(RDES0_OWN); }
static int enh_desc_get_tx_ls(struct dma_desc *p) { - return (p->des0 & ETDES0_LAST_SEGMENT) >> 29; + return (le32_to_cpu(p->des0) & ETDES0_LAST_SEGMENT) >> 29; }
static void enh_desc_release_tx_desc(struct dma_desc *p, int mode) { - int ter = (p->des0 & ETDES0_END_RING) >> 21; + int ter = (le32_to_cpu(p->des0) & ETDES0_END_RING) >> 21;
memset(p, 0, offsetof(struct dma_desc, des2)); if (mode == STMMAC_CHAIN_MODE) @@@ -313,7 -321,7 +321,7 @@@ static void enh_desc_prepare_tx_desc(st bool csum_flag, int mode, bool tx_own, bool ls) { - unsigned int tdes0 = p->des0; + unsigned int tdes0 = le32_to_cpu(p->des0);
if (mode == STMMAC_CHAIN_MODE) enh_set_tx_desc_len_on_chain(p, len); @@@ -344,12 -352,12 +352,12 @@@ */ wmb();
- p->des0 = tdes0; + p->des0 = cpu_to_le32(tdes0); }
static void enh_desc_set_tx_ic(struct dma_desc *p) { - p->des0 |= ETDES0_INTERRUPT; + p->des0 |= cpu_to_le32(ETDES0_INTERRUPT); }
static int enh_desc_get_rx_frame_len(struct dma_desc *p, int rx_coe_type) @@@ -364,18 -372,18 +372,18 @@@ if (rx_coe_type == STMMAC_RX_COE_TYPE1) csum = 2;
- return (((p->des0 & RDES0_FRAME_LEN_MASK) >> RDES0_FRAME_LEN_SHIFT) - - csum); + return (((le32_to_cpu(p->des0) & RDES0_FRAME_LEN_MASK) + >> RDES0_FRAME_LEN_SHIFT) - csum); }
static void enh_desc_enable_tx_timestamp(struct dma_desc *p) { - p->des0 |= ETDES0_TIME_STAMP_ENABLE; + p->des0 |= cpu_to_le32(ETDES0_TIME_STAMP_ENABLE); }
static int enh_desc_get_tx_timestamp_status(struct dma_desc *p) { - return (p->des0 & ETDES0_TIME_STAMP_STATUS) >> 17; + return (le32_to_cpu(p->des0) & ETDES0_TIME_STAMP_STATUS) >> 17; }
static u64 enh_desc_get_timestamp(void *desc, u32 ats) @@@ -384,13 -392,13 +392,13 @@@
if (ats) { struct dma_extended_desc *p = (struct dma_extended_desc *)desc; - ns = p->des6; + ns = le32_to_cpu(p->des6); /* convert high/sec time stamp value to nanosecond */ - ns += p->des7 * 1000000000ULL; + ns += le32_to_cpu(p->des7) * 1000000000ULL; } else { struct dma_desc *p = (struct dma_desc *)desc; - ns = p->des2; - ns += p->des3 * 1000000000ULL; + ns = le32_to_cpu(p->des2); + ns += le32_to_cpu(p->des3) * 1000000000ULL; }
return ns; @@@ -400,11 -408,10 +408,11 @@@ static int enh_desc_get_rx_timestamp_st { if (ats) { struct dma_extended_desc *p = (struct dma_extended_desc *)desc; - return (p->basic.des0 & RDES0_IPC_CSUM_ERROR) >> 7; + return (le32_to_cpu(p->basic.des0) & RDES0_IPC_CSUM_ERROR) >> 7; } else { struct dma_desc *p = (struct dma_desc *)desc; - if ((p->des2 == 0xffffffff) && (p->des3 == 0xffffffff)) + if ((le32_to_cpu(p->des2) == 0xffffffff) && + (le32_to_cpu(p->des3) == 0xffffffff)) /* timestamp is corrupted, hence don't store it */ return 0; else diff --combined drivers/net/ethernet/stmicro/stmmac/stmmac.h index 758b4e2,4d2a759..dbacb80 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h @@@ -90,6 -90,7 +90,6 @@@ struct stmmac_priv struct mac_device_info *hw; spinlock_t lock;
- struct phy_device *phydev ____cacheline_aligned_in_smp; int oldlink; int speed; int oldduplex; @@@ -128,6 -129,7 +128,7 @@@ int irq_wake; spinlock_t ptp_lock; void __iomem *mmcaddr; + void __iomem *ptpaddr; u32 rx_tail_addr; u32 tx_tail_addr; u32 mss; diff --combined drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c index 0290d52,c5d0142..d5a8122 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c @@@ -115,14 -115,17 +115,17 @@@ static const struct stmmac_stats stmmac STMMAC_STAT(ip_csum_bypassed), STMMAC_STAT(ipv4_pkt_rcvd), STMMAC_STAT(ipv6_pkt_rcvd), - STMMAC_STAT(rx_msg_type_ext_no_ptp), - STMMAC_STAT(rx_msg_type_sync), - STMMAC_STAT(rx_msg_type_follow_up), - STMMAC_STAT(rx_msg_type_delay_req), - STMMAC_STAT(rx_msg_type_delay_resp), - STMMAC_STAT(rx_msg_type_pdelay_req), - STMMAC_STAT(rx_msg_type_pdelay_resp), - STMMAC_STAT(rx_msg_type_pdelay_follow_up), + STMMAC_STAT(no_ptp_rx_msg_type_ext), + STMMAC_STAT(ptp_rx_msg_type_sync), + STMMAC_STAT(ptp_rx_msg_type_follow_up), + STMMAC_STAT(ptp_rx_msg_type_delay_req), + STMMAC_STAT(ptp_rx_msg_type_delay_resp), + STMMAC_STAT(ptp_rx_msg_type_pdelay_req), + STMMAC_STAT(ptp_rx_msg_type_pdelay_resp), + STMMAC_STAT(ptp_rx_msg_type_pdelay_follow_up), + STMMAC_STAT(ptp_rx_msg_type_announce), + STMMAC_STAT(ptp_rx_msg_type_management), + STMMAC_STAT(ptp_rx_msg_pkt_reserved_type), STMMAC_STAT(ptp_frame_type), STMMAC_STAT(ptp_ver), STMMAC_STAT(timestamp_dropped), @@@ -269,26 -272,25 +272,26 @@@ static void stmmac_ethtool_getdrvinfo(s strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version)); }
-static int stmmac_ethtool_getsettings(struct net_device *dev, - struct ethtool_cmd *cmd) +static int stmmac_ethtool_get_link_ksettings(struct net_device *dev, + struct ethtool_link_ksettings *cmd) { struct stmmac_priv *priv = netdev_priv(dev); - struct phy_device *phy = priv->phydev; + struct phy_device *phy = dev->phydev; int rc;
if (priv->hw->pcs & STMMAC_PCS_RGMII || priv->hw->pcs & STMMAC_PCS_SGMII) { struct rgmii_adv adv; + u32 supported, advertising, lp_advertising;
if (!priv->xstats.pcs_link) { - ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN); - cmd->duplex = DUPLEX_UNKNOWN; + cmd->base.speed = SPEED_UNKNOWN; + cmd->base.duplex = DUPLEX_UNKNOWN; return 0; } - cmd->duplex = priv->xstats.pcs_duplex; + cmd->base.duplex = priv->xstats.pcs_duplex;
- ethtool_cmd_speed_set(cmd, priv->xstats.pcs_speed); + cmd->base.speed = priv->xstats.pcs_speed;
/* Get and convert ADV/LP_ADV from the HW AN registers */ if (!priv->hw->mac->pcs_get_adv_lp) @@@ -298,59 -300,45 +301,59 @@@
/* Encoding of PSE bits is defined in 802.3z, 37.2.1.4 */
+ ethtool_convert_link_mode_to_legacy_u32( + &supported, cmd->link_modes.supported); + ethtool_convert_link_mode_to_legacy_u32( + &advertising, cmd->link_modes.advertising); + ethtool_convert_link_mode_to_legacy_u32( + &lp_advertising, cmd->link_modes.lp_advertising); + if (adv.pause & STMMAC_PCS_PAUSE) - cmd->advertising |= ADVERTISED_Pause; + advertising |= ADVERTISED_Pause; if (adv.pause & STMMAC_PCS_ASYM_PAUSE) - cmd->advertising |= ADVERTISED_Asym_Pause; + advertising |= ADVERTISED_Asym_Pause; if (adv.lp_pause & STMMAC_PCS_PAUSE) - cmd->lp_advertising |= ADVERTISED_Pause; + lp_advertising |= ADVERTISED_Pause; if (adv.lp_pause & STMMAC_PCS_ASYM_PAUSE) - cmd->lp_advertising |= ADVERTISED_Asym_Pause; + lp_advertising |= ADVERTISED_Asym_Pause;
/* Reg49[3] always set because ANE is always supported */ - cmd->autoneg = ADVERTISED_Autoneg; - cmd->supported |= SUPPORTED_Autoneg; - cmd->advertising |= ADVERTISED_Autoneg; - cmd->lp_advertising |= ADVERTISED_Autoneg; + cmd->base.autoneg = ADVERTISED_Autoneg; + supported |= SUPPORTED_Autoneg; + advertising |= ADVERTISED_Autoneg; + lp_advertising |= ADVERTISED_Autoneg;
if (adv.duplex) { - cmd->supported |= (SUPPORTED_1000baseT_Full | - SUPPORTED_100baseT_Full | - SUPPORTED_10baseT_Full); - cmd->advertising |= (ADVERTISED_1000baseT_Full | - ADVERTISED_100baseT_Full | - ADVERTISED_10baseT_Full); + supported |= (SUPPORTED_1000baseT_Full | + SUPPORTED_100baseT_Full | + SUPPORTED_10baseT_Full); + advertising |= (ADVERTISED_1000baseT_Full | + ADVERTISED_100baseT_Full | + ADVERTISED_10baseT_Full); } else { - cmd->supported |= (SUPPORTED_1000baseT_Half | - SUPPORTED_100baseT_Half | - SUPPORTED_10baseT_Half); - cmd->advertising |= (ADVERTISED_1000baseT_Half | - ADVERTISED_100baseT_Half | - ADVERTISED_10baseT_Half); + supported |= (SUPPORTED_1000baseT_Half | + SUPPORTED_100baseT_Half | + SUPPORTED_10baseT_Half); + advertising |= (ADVERTISED_1000baseT_Half | + ADVERTISED_100baseT_Half | + ADVERTISED_10baseT_Half); } if (adv.lp_duplex) - cmd->lp_advertising |= (ADVERTISED_1000baseT_Full | - ADVERTISED_100baseT_Full | - ADVERTISED_10baseT_Full); + lp_advertising |= (ADVERTISED_1000baseT_Full | + ADVERTISED_100baseT_Full | + ADVERTISED_10baseT_Full); else - cmd->lp_advertising |= (ADVERTISED_1000baseT_Half | - ADVERTISED_100baseT_Half | - ADVERTISED_10baseT_Half); - cmd->port = PORT_OTHER; + lp_advertising |= (ADVERTISED_1000baseT_Half | + ADVERTISED_100baseT_Half | + ADVERTISED_10baseT_Half); + cmd->base.port = PORT_OTHER; + + ethtool_convert_legacy_u32_to_link_mode( + cmd->link_modes.supported, supported); + ethtool_convert_legacy_u32_to_link_mode( + cmd->link_modes.advertising, advertising); + ethtool_convert_legacy_u32_to_link_mode( + cmd->link_modes.lp_advertising, lp_advertising);
return 0; } @@@ -365,16 -353,16 +368,16 @@@ "link speed / duplex setting\n", dev->name); return -EBUSY; } - cmd->transceiver = XCVR_INTERNAL; - rc = phy_ethtool_gset(phy, cmd); + rc = phy_ethtool_ksettings_get(phy, cmd); return rc; }
-static int stmmac_ethtool_setsettings(struct net_device *dev, - struct ethtool_cmd *cmd) +static int +stmmac_ethtool_set_link_ksettings(struct net_device *dev, + const struct ethtool_link_ksettings *cmd) { struct stmmac_priv *priv = netdev_priv(dev); - struct phy_device *phy = priv->phydev; + struct phy_device *phy = dev->phydev; int rc;
if (priv->hw->pcs & STMMAC_PCS_RGMII || @@@ -382,7 -370,7 +385,7 @@@ u32 mask = ADVERTISED_Autoneg | ADVERTISED_Pause;
/* Only support ANE */ - if (cmd->autoneg != AUTONEG_ENABLE) + if (cmd->base.autoneg != AUTONEG_ENABLE) return -EINVAL;
mask &= (ADVERTISED_1000baseT_Half | @@@ -404,7 -392,7 +407,7 @@@ }
spin_lock(&priv->lock); - rc = phy_ethtool_sset(phy, cmd); + rc = phy_ethtool_ksettings_set(phy, cmd); spin_unlock(&priv->lock);
return rc; @@@ -483,12 -471,12 +486,12 @@@ stmmac_get_pauseparam(struct net_devic if (!adv_lp.pause) return; } else { - if (!(priv->phydev->supported & SUPPORTED_Pause) || - !(priv->phydev->supported & SUPPORTED_Asym_Pause)) + if (!(netdev->phydev->supported & SUPPORTED_Pause) || + !(netdev->phydev->supported & SUPPORTED_Asym_Pause)) return; }
- pause->autoneg = priv->phydev->autoneg; + pause->autoneg = netdev->phydev->autoneg;
if (priv->flow_ctrl & FLOW_RX) pause->rx_pause = 1; @@@ -502,7 -490,7 +505,7 @@@ stmmac_set_pauseparam(struct net_devic struct ethtool_pauseparam *pause) { struct stmmac_priv *priv = netdev_priv(netdev); - struct phy_device *phy = priv->phydev; + struct phy_device *phy = netdev->phydev; int new_pause = FLOW_OFF;
if (priv->hw->pcs && priv->hw->mac->pcs_get_adv_lp) { @@@ -562,7 -550,7 +565,7 @@@ static void stmmac_get_ethtool_stats(st } } if (priv->eee_enabled) { - int val = phy_get_eee_err(priv->phydev); + int val = phy_get_eee_err(dev->phydev); if (val) priv->xstats.phy_eee_wakeup_error_n = val; } @@@ -681,7 -669,7 +684,7 @@@ static int stmmac_ethtool_op_get_eee(st edata->eee_active = priv->eee_active; edata->tx_lpi_timer = priv->tx_lpi_timer;
- return phy_ethtool_get_eee(priv->phydev, edata); + return phy_ethtool_get_eee(dev->phydev, edata); }
static int stmmac_ethtool_op_set_eee(struct net_device *dev, @@@ -706,7 -694,7 +709,7 @@@ priv->tx_lpi_timer = edata->tx_lpi_timer; }
- return phy_ethtool_set_eee(priv->phydev, edata); + return phy_ethtool_set_eee(dev->phydev, edata); }
static u32 stmmac_usec2riwt(u32 usec, struct stmmac_priv *priv) @@@ -865,12 -853,13 +868,12 @@@ static int stmmac_set_tunable(struct ne static const struct ethtool_ops stmmac_ethtool_ops = { .begin = stmmac_check_if_running, .get_drvinfo = stmmac_ethtool_getdrvinfo, - .get_settings = stmmac_ethtool_getsettings, - .set_settings = stmmac_ethtool_setsettings, .get_msglevel = stmmac_ethtool_getmsglevel, .set_msglevel = stmmac_ethtool_setmsglevel, .get_regs = stmmac_ethtool_gregs, .get_regs_len = stmmac_ethtool_get_regs_len, .get_link = ethtool_op_get_link, + .nway_reset = phy_ethtool_nway_reset, .get_pauseparam = stmmac_get_pauseparam, .set_pauseparam = stmmac_set_pauseparam, .get_ethtool_stats = stmmac_get_ethtool_stats, @@@ -885,8 -874,6 +888,8 @@@ .set_coalesce = stmmac_set_coalesce, .get_tunable = stmmac_get_tunable, .set_tunable = stmmac_set_tunable, + .get_link_ksettings = stmmac_ethtool_get_link_ksettings, + .set_link_ksettings = stmmac_ethtool_set_link_ksettings, };
void stmmac_set_ethtool_ops(struct net_device *netdev) diff --combined drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index fbd1cd7,1f9ec02..29557d2 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@@ -221,8 -221,7 +221,8 @@@ static inline u32 stmmac_rx_dirty(struc */ static inline void stmmac_hw_fix_mac_speed(struct stmmac_priv *priv) { - struct phy_device *phydev = priv->phydev; + struct net_device *ndev = priv->dev; + struct phy_device *phydev = ndev->phydev;
if (likely(priv->plat->fix_mac_speed)) priv->plat->fix_mac_speed(priv->plat->bsp_priv, phydev->speed); @@@ -280,7 -279,6 +280,7 @@@ static void stmmac_eee_ctrl_timer(unsig */ bool stmmac_eee_init(struct stmmac_priv *priv) { + struct net_device *ndev = priv->dev; unsigned long flags; bool ret = false;
@@@ -297,7 -295,7 +297,7 @@@ int tx_lpi_timer = priv->tx_lpi_timer;
/* Check if the PHY supports EEE */ - if (phy_init_eee(priv->phydev, 1)) { + if (phy_init_eee(ndev->phydev, 1)) { /* To manage at run-time if the EEE cannot be supported * anymore (for example because the lp caps have been * changed). @@@ -305,7 -303,7 +305,7 @@@ */ spin_lock_irqsave(&priv->lock, flags); if (priv->eee_active) { - pr_debug("stmmac: disable EEE\n"); + netdev_dbg(priv->dev, "disable EEE\n"); del_timer_sync(&priv->eee_ctrl_timer); priv->hw->mac->set_eee_timer(priv->hw, 0, tx_lpi_timer); @@@ -329,12 -327,12 +329,12 @@@ tx_lpi_timer); } /* Set HW EEE according to the speed */ - priv->hw->mac->set_eee_pls(priv->hw, priv->phydev->link); + priv->hw->mac->set_eee_pls(priv->hw, ndev->phydev->link);
ret = true; spin_unlock_irqrestore(&priv->lock, flags);
- pr_debug("stmmac: Energy-Efficient Ethernet initialized\n"); + netdev_dbg(priv->dev, "Energy-Efficient Ethernet initialized\n"); } out: return ret; @@@ -342,18 -340,17 +342,17 @@@
/* stmmac_get_tx_hwtstamp - get HW TX timestamps * @priv: driver private structure - * @entry : descriptor index to be used. + * @p : descriptor pointer * @skb : the socket buffer * Description : * This function will read the timestamp from the descriptor and pass it to * the stack. It also performs some sanity checks. */ static void stmmac_get_tx_hwtstamp(struct stmmac_priv *priv, - unsigned int entry, struct sk_buff *skb) + struct dma_desc *p, struct sk_buff *skb) { struct skb_shared_hwtstamps shhwtstamp; u64 ns; - void *desc = NULL;
if (!priv->hwts_tx_en) return; @@@ -362,58 -359,55 +361,55 @@@ if (likely(!skb || !(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS))) return;
- if (priv->adv_ts) - desc = (priv->dma_etx + entry); - else - desc = (priv->dma_tx + entry); - /* check tx tstamp status */ - if (!priv->hw->desc->get_tx_timestamp_status((struct dma_desc *)desc)) - return; + if (!priv->hw->desc->get_tx_timestamp_status(p)) { + /* get the valid tstamp */ + ns = priv->hw->desc->get_timestamp(p, priv->adv_ts);
- /* get the valid tstamp */ - ns = priv->hw->desc->get_timestamp(desc, priv->adv_ts); + memset(&shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps)); + shhwtstamp.hwtstamp = ns_to_ktime(ns);
- memset(&shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps)); - shhwtstamp.hwtstamp = ns_to_ktime(ns); - /* pass tstamp to stack */ - skb_tstamp_tx(skb, &shhwtstamp); + netdev_info(priv->dev, "get valid TX hw timestamp %llu\n", ns); + /* pass tstamp to stack */ + skb_tstamp_tx(skb, &shhwtstamp); + }
return; }
/* stmmac_get_rx_hwtstamp - get HW RX timestamps * @priv: driver private structure - * @entry : descriptor index to be used. + * @p : descriptor pointer + * @np : next descriptor pointer * @skb : the socket buffer * Description : * This function will read the received packet's timestamp from the descriptor * and pass it to the stack. It also performs some sanity checks. */ - static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv, - unsigned int entry, struct sk_buff *skb) + static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv, struct dma_desc *p, + struct dma_desc *np, struct sk_buff *skb) { struct skb_shared_hwtstamps *shhwtstamp = NULL; u64 ns; - void *desc = NULL;
if (!priv->hwts_rx_en) return;
- if (priv->adv_ts) - desc = (priv->dma_erx + entry); - else - desc = (priv->dma_rx + entry); - - /* exit if rx tstamp is not valid */ - if (!priv->hw->desc->get_rx_timestamp_status(desc, priv->adv_ts)) - return; + /* Check if timestamp is available */ + if (!priv->hw->desc->get_rx_timestamp_status(p, priv->adv_ts)) { + /* For GMAC4, the valid timestamp is from CTX next desc. */ + if (priv->plat->has_gmac4) + ns = priv->hw->desc->get_timestamp(np, priv->adv_ts); + else + ns = priv->hw->desc->get_timestamp(p, priv->adv_ts);
- /* get valid tstamp */ - ns = priv->hw->desc->get_timestamp(desc, priv->adv_ts); - shhwtstamp = skb_hwtstamps(skb); - memset(shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps)); - shhwtstamp->hwtstamp = ns_to_ktime(ns); + netdev_info(priv->dev, "get valid RX hw timestamp %llu\n", ns); + shhwtstamp = skb_hwtstamps(skb); + memset(shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps)); + shhwtstamp->hwtstamp = ns_to_ktime(ns); + } else { + netdev_err(priv->dev, "cannot get RX hw timestamp\n"); + } }
/** @@@ -456,8 -450,8 +452,8 @@@ static int stmmac_hwtstamp_ioctl(struc sizeof(struct hwtstamp_config))) return -EFAULT;
- pr_debug("%s config flags:0x%x, tx_type:0x%x, rx_filter:0x%x\n", - __func__, config.flags, config.tx_type, config.rx_filter); + netdev_dbg(priv->dev, "%s config flags:0x%x, tx_type:0x%x, rx_filter:0x%x\n", + __func__, config.flags, config.tx_type, config.rx_filter);
/* reserved for future extensions */ if (config.flags) @@@ -600,17 -594,18 +596,18 @@@ priv->hwts_tx_en = config.tx_type == HWTSTAMP_TX_ON;
if (!priv->hwts_tx_en && !priv->hwts_rx_en) - priv->hw->ptp->config_hw_tstamping(priv->ioaddr, 0); + priv->hw->ptp->config_hw_tstamping(priv->ptpaddr, 0); else { value = (PTP_TCR_TSENA | PTP_TCR_TSCFUPDT | PTP_TCR_TSCTRLSSR | tstamp_all | ptp_v2 | ptp_over_ethernet | ptp_over_ipv6_udp | ptp_over_ipv4_udp | ts_event_en | ts_master_en | snap_type_sel); - priv->hw->ptp->config_hw_tstamping(priv->ioaddr, value); + priv->hw->ptp->config_hw_tstamping(priv->ptpaddr, value);
/* program Sub Second Increment reg */ sec_inc = priv->hw->ptp->config_sub_second_increment( - priv->ioaddr, priv->clk_ptp_rate); + priv->ptpaddr, priv->clk_ptp_rate, + priv->plat->has_gmac4); temp = div_u64(1000000000ULL, sec_inc);
/* calculate default added value: @@@ -620,14 -615,14 +617,14 @@@ */ temp = (u64)(temp << 32); priv->default_addend = div_u64(temp, priv->clk_ptp_rate); - priv->hw->ptp->config_addend(priv->ioaddr, + priv->hw->ptp->config_addend(priv->ptpaddr, priv->default_addend);
/* initialize system time */ ktime_get_real_ts64(&now);
/* lower 32 bits of tv_sec are safe until y2106 */ - priv->hw->ptp->init_systime(priv->ioaddr, (u32)now.tv_sec, + priv->hw->ptp->init_systime(priv->ptpaddr, (u32)now.tv_sec, now.tv_nsec); }
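The hwtstamp setup above programs the clock in two steps: config_sub_second_increment() picks how many nanoseconds each counter tick represents, and default_addend then scales the accumulator as 2^32 times the ratio of the implied counter frequency to the PTP reference clock, which is what the two div_u64() calls compute. A standalone arithmetic check of that formula, with example clock values (both assumed for illustration only):

#include <inttypes.h>
#include <stdio.h>

int main(void)
{
	uint64_t clk_ptp_rate = 62500000ULL;	/* example reference, 62.5 MHz */
	uint64_t sec_inc = 20;			/* example increment, ns/tick */

	/* frequency the chosen increment implies: 1e9 / 20 ns = 50 MHz */
	uint64_t freq = 1000000000ULL / sec_inc;

	/* default_addend = (freq << 32) / clk_ptp_rate, as in the hunk */
	uint64_t addend = (freq << 32) / clk_ptp_rate;

	/* 2^32 * 50 / 62.5 = 0xcccccccc: the 32-bit accumulator wraps on
	 * 80% of reference cycles, giving a 50 MHz tick rate on average */
	printf("default_addend = 0x%" PRIx64 "\n", addend);
	return 0;
}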
@@@ -702,7 -697,7 +699,7 @@@ static void stmmac_release_ptp(struct s static void stmmac_adjust_link(struct net_device *dev) { struct stmmac_priv *priv = netdev_priv(dev); - struct phy_device *phydev = priv->phydev; + struct phy_device *phydev = dev->phydev; unsigned long flags; int new_state = 0; unsigned int fc = priv->flow_ctrl, pause_time = priv->pause; @@@ -755,9 -750,9 +752,9 @@@ stmmac_hw_fix_mac_speed(priv); break; default: - if (netif_msg_link(priv)) - pr_warn("%s: Speed (%d) not 10/100\n", - dev->name, phydev->speed); + netif_warn(priv, link, priv->dev, + "Speed (%d) not 10/100\n", + phydev->speed); break; }
@@@ -810,10 -805,10 +807,10 @@@ static void stmmac_check_pcs_mode(struc (interface == PHY_INTERFACE_MODE_RGMII_ID) || (interface == PHY_INTERFACE_MODE_RGMII_RXID) || (interface == PHY_INTERFACE_MODE_RGMII_TXID)) { - pr_debug("STMMAC: PCS RGMII support enable\n"); + netdev_dbg(priv->dev, "PCS RGMII support enabled\n"); priv->hw->pcs = STMMAC_PCS_RGMII; } else if (interface == PHY_INTERFACE_MODE_SGMII) { - pr_debug("STMMAC: PCS SGMII support enable\n"); + netdev_dbg(priv->dev, "PCS SGMII support enabled\n"); priv->hw->pcs = STMMAC_PCS_SGMII; } } @@@ -848,15 -843,15 +845,15 @@@ static int stmmac_init_phy(struct net_d
snprintf(phy_id_fmt, MII_BUS_ID_SIZE + 3, PHY_ID_FMT, bus_id, priv->plat->phy_addr); - pr_debug("stmmac_init_phy: trying to attach to %s\n", - phy_id_fmt); + netdev_dbg(priv->dev, "%s: trying to attach to %s\n", __func__, + phy_id_fmt);
phydev = phy_connect(dev, phy_id_fmt, &stmmac_adjust_link, interface); }
if (IS_ERR_OR_NULL(phydev)) { - pr_err("%s: Could not attach to PHY\n", dev->name); + netdev_err(priv->dev, "Could not attach to PHY\n"); if (!phydev) return -ENODEV;
@@@ -889,8 -884,10 +886,8 @@@ if (phydev->is_pseudo_fixed_link) phydev->irq = PHY_POLL;
- pr_debug("stmmac_init_phy: %s: attached to PHY (UID 0x%x)" - " Link = %d\n", dev->name, phydev->phy_id, phydev->link); - - priv->phydev = phydev; + netdev_dbg(priv->dev, "%s: attached to PHY (UID 0x%x) Link = %d\n", + __func__, phydev->phy_id, phydev->link);
return 0; } @@@ -976,8 -973,7 +973,8 @@@ static int stmmac_init_rx_buffers(struc
skb = __netdev_alloc_skb_ip_align(priv->dev, priv->dma_buf_sz, flags); if (!skb) { - pr_err("%s: Rx init fails; skb is NULL\n", __func__); + netdev_err(priv->dev, + "%s: Rx init fails; skb is NULL\n", __func__); return -ENOMEM; } priv->rx_skbuff[i] = skb; @@@ -985,15 -981,15 +982,15 @@@ priv->dma_buf_sz, DMA_FROM_DEVICE); if (dma_mapping_error(priv->device, priv->rx_skbuff_dma[i])) { - pr_err("%s: DMA mapping error\n", __func__); + netdev_err(priv->dev, "%s: DMA mapping error\n", __func__); dev_kfree_skb_any(skb); return -EINVAL; }
if (priv->synopsys_id >= DWMAC_CORE_4_00) - p->des0 = priv->rx_skbuff_dma[i]; + p->des0 = cpu_to_le32(priv->rx_skbuff_dma[i]); else - p->des2 = priv->rx_skbuff_dma[i]; + p->des2 = cpu_to_le32(priv->rx_skbuff_dma[i]);
if ((priv->hw->mode->init_desc3) && (priv->dma_buf_sz == BUF_SIZE_16KiB)) @@@ -1035,14 -1031,13 +1032,14 @@@ static int init_dma_desc_rings(struct n
priv->dma_buf_sz = bfsize;
- if (netif_msg_probe(priv)) { - pr_debug("(%s) dma_rx_phy=0x%08x dma_tx_phy=0x%08x\n", __func__, - (u32) priv->dma_rx_phy, (u32) priv->dma_tx_phy); + netif_dbg(priv, probe, priv->dev, + "(%s) dma_rx_phy=0x%08x dma_tx_phy=0x%08x\n", + __func__, (u32)priv->dma_rx_phy, (u32)priv->dma_tx_phy); + + /* RX INITIALIZATION */ + netif_dbg(priv, probe, priv->dev, + "SKB addresses:\nskb\t\tskb data\tdma data\n");
- /* RX INITIALIZATION */ - pr_debug("\tSKB addresses:\nskb\t\tskb data\tdma data\n"); - } for (i = 0; i < DMA_RX_SIZE; i++) { struct dma_desc *p; if (priv->extend_desc) @@@ -1054,9 -1049,10 +1051,9 @@@ if (ret) goto err_init_rx_buffers;
- if (netif_msg_probe(priv)) - pr_debug("[%p]\t[%p]\t[%x]\n", priv->rx_skbuff[i], - priv->rx_skbuff[i]->data, - (unsigned int)priv->rx_skbuff_dma[i]); + netif_dbg(priv, probe, priv->dev, "[%p]\t[%p]\t[%x]\n", + priv->rx_skbuff[i], priv->rx_skbuff[i]->data, + (unsigned int)priv->rx_skbuff_dma[i]); } priv->cur_rx = 0; priv->dirty_rx = (unsigned int)(i - DMA_RX_SIZE); @@@ -1341,7 -1337,7 +1338,7 @@@ static void stmmac_tx_clean(struct stmm priv->dev->stats.tx_packets++; priv->xstats.tx_pkt_n++; } - stmmac_get_tx_hwtstamp(priv, entry, skb); + stmmac_get_tx_hwtstamp(priv, p, skb); }
if (likely(priv->tx_skbuff_dma[entry].buf)) { @@@ -1386,8 -1382,8 +1383,8 @@@ netif_tx_lock(priv->dev); if (netif_queue_stopped(priv->dev) && stmmac_tx_avail(priv) > STMMAC_TX_THRESH) { - if (netif_msg_tx_done(priv)) - pr_debug("%s: restart transmit\n", __func__); + netif_dbg(priv, tx_done, priv->dev, + "%s: restart transmit\n", __func__); netif_wake_queue(priv->dev); } netif_tx_unlock(priv->dev); @@@ -1487,10 -1483,13 +1484,13 @@@ static void stmmac_mmc_setup(struct stm unsigned int mode = MMC_CNTRL_RESET_ON_READ | MMC_CNTRL_COUNTER_RESET | MMC_CNTRL_PRESET | MMC_CNTRL_FULL_HALF_PRESET;
- if (priv->synopsys_id >= DWMAC_CORE_4_00) + if (priv->synopsys_id >= DWMAC_CORE_4_00) { + priv->ptpaddr = priv->ioaddr + PTP_GMAC4_OFFSET; priv->mmcaddr = priv->ioaddr + MMC_GMAC4_OFFSET; - else + } else { + priv->ptpaddr = priv->ioaddr + PTP_GMAC3_X_OFFSET; priv->mmcaddr = priv->ioaddr + MMC_GMAC3_X_OFFSET; + }
dwmac_mmc_intr_all_mask(priv->mmcaddr);
@@@ -1498,7 -1497,7 +1498,7 @@@ dwmac_mmc_ctrl(priv->mmcaddr, mode); memset(&priv->mmc, 0, sizeof(struct stmmac_counters)); } else - pr_info(" No MAC Management Counters available\n"); + netdev_info(priv->dev, "No MAC Management Counters available\n"); }
/** @@@ -1511,18 -1510,18 +1511,18 @@@ static void stmmac_selec_desc_mode(struct stmmac_priv *priv) { if (priv->plat->enh_desc) { - pr_info(" Enhanced/Alternate descriptors\n"); + dev_info(priv->device, "Enhanced/Alternate descriptors\n");
/* GMAC older than 3.50 has no extended descriptors */ if (priv->synopsys_id >= DWMAC_CORE_3_50) { - pr_info("\tEnabled extended descriptors\n"); + dev_info(priv->device, "Enabled extended descriptors\n"); priv->extend_desc = 1; } else - pr_warn("Extended descriptors not supported\n"); + dev_warn(priv->device, "Extended descriptors not supported\n");
priv->hw->desc = &enh_desc_ops; } else { - pr_info(" Normal descriptors\n"); + dev_info(priv->device, "Normal descriptors\n"); priv->hw->desc = &ndesc_ops; } } @@@ -1563,8 -1562,8 +1563,8 @@@ static void stmmac_check_ether_addr(str priv->dev->dev_addr, 0); if (!is_valid_ether_addr(priv->dev->dev_addr)) eth_hw_addr_random(priv->dev); - pr_info("%s: device MAC address %pM\n", priv->dev->name, - priv->dev->dev_addr); + netdev_info(priv->dev, "device MAC address %pM\n", + priv->dev->dev_addr); } }
@@@ -1672,8 -1671,7 +1672,8 @@@ static int stmmac_hw_setup(struct net_d /* DMA initialization and SW reset */ ret = stmmac_init_dma_engine(priv); if (ret < 0) { - pr_err("%s: DMA engine initialization failed\n", __func__); + netdev_err(priv->dev, "%s: DMA engine initialization failed\n", + __func__); return ret; }
@@@ -1702,7 -1700,7 +1702,7 @@@
ret = priv->hw->mac->rx_ipc(priv->hw); if (!ret) { - pr_warn(" RX IPC Checksum Offload disabled\n"); + netdev_warn(priv->dev, "RX IPC Checksum Offload disabled\n"); priv->plat->rx_coe = STMMAC_RX_COE_NONE; priv->hw->rx_csum = 0; } @@@ -1727,11 -1725,10 +1727,11 @@@ #ifdef CONFIG_DEBUG_FS ret = stmmac_init_fs(dev); if (ret < 0) - pr_warn("%s: failed debugFS registration\n", __func__); + netdev_warn(priv->dev, "%s: failed debugFS registration\n", + __func__); #endif /* Start the ball rolling... */ - pr_debug("%s: DMA RX/TX processes started...\n", dev->name); + netdev_dbg(priv->dev, "DMA RX/TX processes started...\n"); priv->hw->dma->start_tx(priv->ioaddr); priv->hw->dma->start_rx(priv->ioaddr);
@@@ -1786,9 -1783,8 +1786,9 @@@ static int stmmac_open(struct net_devic priv->hw->pcs != STMMAC_PCS_RTBI) { ret = stmmac_init_phy(dev); if (ret) { - pr_err("%s: Cannot attach to PHY (error: %d)\n", - __func__, ret); + netdev_err(priv->dev, + "%s: Cannot attach to PHY (error: %d)\n", + __func__, ret); return ret; } } @@@ -1802,36 -1798,33 +1802,36 @@@
ret = alloc_dma_desc_resources(priv); if (ret < 0) { - pr_err("%s: DMA descriptors allocation failed\n", __func__); + netdev_err(priv->dev, "%s: DMA descriptors allocation failed\n", + __func__); goto dma_desc_error; }
ret = init_dma_desc_rings(dev, GFP_KERNEL); if (ret < 0) { - pr_err("%s: DMA descriptors initialization failed\n", __func__); + netdev_err(priv->dev, "%s: DMA descriptors initialization failed\n", + __func__); goto init_error; }
ret = stmmac_hw_setup(dev, true); if (ret < 0) { - pr_err("%s: Hw setup failed\n", __func__); + netdev_err(priv->dev, "%s: Hw setup failed\n", __func__); goto init_error; }
stmmac_init_tx_coalesce(priv);
- if (priv->phydev) - phy_start(priv->phydev); + if (dev->phydev) + phy_start(dev->phydev);
/* Request the IRQ lines */ ret = request_irq(dev->irq, stmmac_interrupt, IRQF_SHARED, dev->name, dev); if (unlikely(ret < 0)) { - pr_err("%s: ERROR: allocating the IRQ %d (error: %d)\n", - __func__, dev->irq, ret); + netdev_err(priv->dev, + "%s: ERROR: allocating the IRQ %d (error: %d)\n", + __func__, dev->irq, ret); goto init_error; }
@@@ -1840,9 -1833,8 +1840,9 @@@ ret = request_irq(priv->wol_irq, stmmac_interrupt, IRQF_SHARED, dev->name, dev); if (unlikely(ret < 0)) { - pr_err("%s: ERROR: allocating the WoL IRQ %d (%d)\n", - __func__, priv->wol_irq, ret); + netdev_err(priv->dev, + "%s: ERROR: allocating the WoL IRQ %d (%d)\n", + __func__, priv->wol_irq, ret); goto wolirq_error; } } @@@ -1852,9 -1844,8 +1852,9 @@@ ret = request_irq(priv->lpi_irq, stmmac_interrupt, IRQF_SHARED, dev->name, dev); if (unlikely(ret < 0)) { - pr_err("%s: ERROR: allocating the LPI IRQ %d (%d)\n", - __func__, priv->lpi_irq, ret); + netdev_err(priv->dev, + "%s: ERROR: allocating the LPI IRQ %d (%d)\n", + __func__, priv->lpi_irq, ret); goto lpiirq_error; } } @@@ -1873,8 -1864,8 +1873,8 @@@ wolirq_error init_error: free_dma_desc_resources(priv); dma_desc_error: - if (priv->phydev) - phy_disconnect(priv->phydev); + if (dev->phydev) + phy_disconnect(dev->phydev);
return ret; } @@@ -1893,9 -1884,10 +1893,9 @@@ static int stmmac_release(struct net_de del_timer_sync(&priv->eee_ctrl_timer);
/* Stop and disconnect the PHY */ - if (priv->phydev) { - phy_stop(priv->phydev); - phy_disconnect(priv->phydev); - priv->phydev = NULL; + if (dev->phydev) { + phy_stop(dev->phydev); + phy_disconnect(dev->phydev); }
netif_stop_queue(dev); @@@ -1955,7 -1947,7 +1955,7 @@@ static void stmmac_tso_allocator(struc priv->cur_tx = STMMAC_GET_ENTRY(priv->cur_tx, DMA_TX_SIZE); desc = priv->dma_tx + priv->cur_tx;
- desc->des0 = des + (total_len - tmp_len); + desc->des0 = cpu_to_le32(des + (total_len - tmp_len)); buff_size = tmp_len >= TSO_MAX_BUFF_SIZE ? TSO_MAX_BUFF_SIZE : tmp_len;
@@@ -2017,9 -2009,7 +2017,9 @@@ static netdev_tx_t stmmac_tso_xmit(stru if (!netif_queue_stopped(dev)) { netif_stop_queue(dev); /* This is a hard error, log it. */ - pr_err("%s: Tx Ring full when queue awake\n", __func__); + netdev_err(priv->dev, + "%s: Tx Ring full when queue awake\n", + __func__); } spin_unlock(&priv->tx_lock); return NETDEV_TX_BUSY; @@@ -2059,11 -2049,11 +2059,11 @@@ priv->tx_skbuff_dma[first_entry].len = skb_headlen(skb); priv->tx_skbuff[first_entry] = skb;
- first->des0 = des; + first->des0 = cpu_to_le32(des);
/* Fill start of payload in buff2 of first descriptor */ if (pay_len) - first->des1 = des + proto_hdr_len; + first->des1 = cpu_to_le32(des + proto_hdr_len);
/* If needed take extra descriptors to fill the remaining payload */ tmp_pay_len = pay_len - TSO_MAX_BUFF_SIZE; @@@ -2092,8 -2082,8 +2092,8 @@@ priv->cur_tx = STMMAC_GET_ENTRY(priv->cur_tx, DMA_TX_SIZE);
if (unlikely(stmmac_tx_avail(priv) <= (MAX_SKB_FRAGS + 1))) { - if (netif_msg_hw(priv)) - pr_debug("%s: stop transmitted packets\n", __func__); + netif_dbg(priv, hw, priv->dev, "%s: stop transmitting packets\n", + __func__); netif_stop_queue(dev); }
@@@ -2199,9 -2189,7 +2199,9 @@@ static netdev_tx_t stmmac_xmit(struct s if (!netif_queue_stopped(dev)) { netif_stop_queue(dev); /* This is a hard error, log it. */ - pr_err("%s: Tx Ring full when queue awake\n", __func__); + netdev_err(priv->dev, + "%s: Tx Ring full when queue awake\n", + __func__); } return NETDEV_TX_BUSY; } @@@ -2254,11 -2242,13 +2254,11 @@@
priv->tx_skbuff[entry] = NULL;
- if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00)) { - desc->des0 = des; - priv->tx_skbuff_dma[entry].buf = desc->des0; - } else { - desc->des2 = des; - priv->tx_skbuff_dma[entry].buf = desc->des2; - } + priv->tx_skbuff_dma[entry].buf = des; + if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00)) + desc->des0 = cpu_to_le32(des); + else + desc->des2 = cpu_to_le32(des);
priv->tx_skbuff_dma[entry].map_as_page = true; priv->tx_skbuff_dma[entry].len = len; @@@ -2276,10 -2266,9 +2276,10 @@@ if (netif_msg_pktdata(priv)) { void *tx_head;
- pr_debug("%s: curr=%d dirty=%d f=%d, e=%d, first=%p, nfrags=%d", - __func__, priv->cur_tx, priv->dirty_tx, first_entry, - entry, first, nfrags); + netdev_dbg(priv->dev, + "%s: curr=%d dirty=%d f=%d, e=%d, first=%p, nfrags=%d", + __func__, priv->cur_tx, priv->dirty_tx, first_entry, + entry, first, nfrags);
if (priv->extend_desc) tx_head = (void *)priv->dma_etx; @@@ -2288,13 -2277,13 +2288,13 @@@
priv->hw->desc->display_ring(tx_head, DMA_TX_SIZE, false);
- pr_debug(">>> frame to be transmitted: "); + netdev_dbg(priv->dev, ">>> frame to be transmitted: "); print_pkt(skb->data, skb->len); }
if (unlikely(stmmac_tx_avail(priv) <= (MAX_SKB_FRAGS + 1))) { - if (netif_msg_hw(priv)) - pr_debug("%s: stop transmitted packets\n", __func__); + netif_dbg(priv, hw, priv->dev, "%s: stop transmitting packets\n", + __func__); netif_stop_queue(dev); }
@@@ -2330,11 -2319,13 +2330,11 @@@ if (dma_mapping_error(priv->device, des)) goto dma_map_err;
- if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00)) { - first->des0 = des; - priv->tx_skbuff_dma[first_entry].buf = first->des0; - } else { - first->des2 = des; - priv->tx_skbuff_dma[first_entry].buf = first->des2; - } + priv->tx_skbuff_dma[first_entry].buf = des; + if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00)) + first->des0 = cpu_to_le32(des); + else + first->des2 = cpu_to_le32(des);
priv->tx_skbuff_dma[first_entry].len = nopaged_len; priv->tx_skbuff_dma[first_entry].last_segment = last_segment; @@@ -2371,7 -2362,7 +2371,7 @@@
dma_map_err: spin_unlock(&priv->tx_lock); - dev_err(priv->device, "Tx dma map failed\n"); + netdev_err(priv->dev, "Tx DMA map failed\n"); dev_kfree_skb(skb); priv->dev->stats.tx_dropped++; return NETDEV_TX_OK; @@@ -2442,16 -2433,16 +2442,16 @@@ static inline void stmmac_rx_refill(str DMA_FROM_DEVICE); if (dma_mapping_error(priv->device, priv->rx_skbuff_dma[entry])) { - dev_err(priv->device, "Rx dma map failed\n"); + netdev_err(priv->dev, "Rx DMA map failed\n"); dev_kfree_skb(skb); break; }
if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00)) { - p->des0 = priv->rx_skbuff_dma[entry]; + p->des0 = cpu_to_le32(priv->rx_skbuff_dma[entry]); p->des1 = 0; } else { - p->des2 = priv->rx_skbuff_dma[entry]; + p->des2 = cpu_to_le32(priv->rx_skbuff_dma[entry]); } if (priv->hw->mode->refill_desc3) priv->hw->mode->refill_desc3(priv, p); @@@ -2459,8 -2450,8 +2459,8 @@@ if (priv->rx_zeroc_thresh > 0) priv->rx_zeroc_thresh--;
- if (netif_msg_rx_status(priv)) - pr_debug("\trefill entry #%d\n", entry); + netif_dbg(priv, rx_status, priv->dev, + "refill entry #%d\n", entry); } wmb();
@@@ -2493,7 -2484,7 +2493,7 @@@ static int stmmac_rx(struct stmmac_pri if (netif_msg_rx_status(priv)) { void *rx_head;
- pr_info(">>>>>> %s: descriptor ring:\n", __func__); + netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__); if (priv->extend_desc) rx_head = (void *)priv->dma_erx; else @@@ -2504,6 -2495,7 +2504,7 @@@ while (count < limit) { int status; struct dma_desc *p; + struct dma_desc *np;
if (priv->extend_desc) p = (struct dma_desc *)(priv->dma_erx + entry); @@@ -2523,9 -2515,11 +2524,11 @@@ next_entry = priv->cur_rx;
if (priv->extend_desc) - prefetch(priv->dma_erx + next_entry); + np = (struct dma_desc *)(priv->dma_erx + next_entry); else - prefetch(priv->dma_rx + next_entry); + np = priv->dma_rx + next_entry; + + prefetch(np);
if ((priv->extend_desc) && (priv->hw->desc->rx_extended_status)) priv->hw->desc->rx_extended_status(&priv->dev->stats, @@@ -2552,9 -2546,9 +2555,9 @@@ unsigned int des;
if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00)) - des = p->des0; + des = le32_to_cpu(p->des0); else - des = p->des2; + des = le32_to_cpu(p->des2);
frame_len = priv->hw->desc->get_rx_frame_len(p, coe);
@@@ -2563,9 -2557,9 +2566,9 @@@ * ignored */ if (frame_len > priv->dma_buf_sz) { - pr_err("%s: len %d larger than size (%d)\n", - priv->dev->name, frame_len, - priv->dma_buf_sz); + netdev_err(priv->dev, + "len %d larger than size (%d)\n", + frame_len, priv->dma_buf_sz); priv->dev->stats.rx_length_errors++; break; } @@@ -2577,11 -2571,11 +2580,11 @@@ frame_len -= ETH_FCS_LEN;
if (netif_msg_rx_status(priv)) { - pr_info("\tdesc: %p [entry %d] buff=0x%x\n", - p, entry, des); + netdev_dbg(priv->dev, "\tdesc: %p [entry %d] buff=0x%x\n", + p, entry, des); if (frame_len > ETH_FRAME_LEN) - pr_debug("\tframe size %d, COE: %d\n", - frame_len, status); + netdev_dbg(priv->dev, "frame size %d, COE: %d\n", + frame_len, status); }
/* The zero-copy is always used for all the sizes @@@ -2618,9 -2612,8 +2621,9 @@@ } else { skb = priv->rx_skbuff[entry]; if (unlikely(!skb)) { - pr_err("%s: Inconsistent Rx chain\n", - priv->dev->name); + netdev_err(priv->dev, + "%s: Inconsistent Rx chain\n", + priv->dev->name); priv->dev->stats.rx_dropped++; break; } @@@ -2635,14 -2628,13 +2638,14 @@@ DMA_FROM_DEVICE); }
- stmmac_get_rx_hwtstamp(priv, entry, skb); - if (netif_msg_pktdata(priv)) { - pr_debug("frame received (%dbytes)", frame_len); + netdev_dbg(priv->dev, "frame received (%dbytes)", + frame_len); print_pkt(skb->data, frame_len); }
+ stmmac_get_rx_hwtstamp(priv, p, np, skb); + stmmac_rx_vlan(priv->dev, skb);
skb->protocol = eth_type_trans(skb, priv->dev); @@@ -2737,12 -2729,26 +2740,12 @@@ static void stmmac_set_rx_mode(struct n static int stmmac_change_mtu(struct net_device *dev, int new_mtu) { struct stmmac_priv *priv = netdev_priv(dev); - int max_mtu;
if (netif_running(dev)) { - pr_err("%s: must be stopped to change its MTU\n", dev->name); + netdev_err(priv->dev, "must be stopped to change its MTU\n"); return -EBUSY; }
- if ((priv->plat->enh_desc) || (priv->synopsys_id >= DWMAC_CORE_4_00)) - max_mtu = JUMBO_LEN; - else - max_mtu = SKB_MAX_HEAD(NET_SKB_PAD + NET_IP_ALIGN); - - if (priv->plat->maxmtu < max_mtu) - max_mtu = priv->plat->maxmtu; - - if ((new_mtu < 46) || (new_mtu > max_mtu)) { - pr_err("%s: invalid MTU, max MTU is: %d\n", dev->name, max_mtu); - return -EINVAL; - } - dev->mtu = new_mtu;
netdev_update_features(dev); @@@ -2818,7 -2824,7 +2821,7 @@@ static irqreturn_t stmmac_interrupt(in pm_wakeup_event(priv->device, 0);
if (unlikely(!dev)) { - pr_err("%s: invalid dev pointer\n", __func__); + netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__); return IRQ_NONE; }
@@@ -2876,6 -2882,7 +2879,6 @@@ static void stmmac_poll_controller(stru */ static int stmmac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) { - struct stmmac_priv *priv = netdev_priv(dev); int ret = -EOPNOTSUPP;
if (!netif_running(dev)) @@@ -2885,9 -2892,9 +2888,9 @@@ case SIOCGMIIPHY: case SIOCGMIIREG: case SIOCSMIIREG: - if (!priv->phydev) + if (!dev->phydev) return -EINVAL; - ret = phy_mii_ioctl(priv->phydev, rq, cmd); + ret = phy_mii_ioctl(dev->phydev, rq, cmd); break; case SIOCSHWTSTAMP: ret = stmmac_hwtstamp_ioctl(dev, rq); @@@ -2915,17 -2922,14 +2918,17 @@@ static void sysfs_display_ring(void *he x = *(u64 *) ep; seq_printf(seq, "%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n", i, (unsigned int)virt_to_phys(ep), - ep->basic.des0, ep->basic.des1, - ep->basic.des2, ep->basic.des3); + le32_to_cpu(ep->basic.des0), + le32_to_cpu(ep->basic.des1), + le32_to_cpu(ep->basic.des2), + le32_to_cpu(ep->basic.des3)); ep++; } else { x = *(u64 *) p; seq_printf(seq, "%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n", i, (unsigned int)virt_to_phys(ep), - p->des0, p->des1, p->des2, p->des3); + le32_to_cpu(p->des0), le32_to_cpu(p->des1), + le32_to_cpu(p->des2), le32_to_cpu(p->des3)); p++; } seq_printf(seq, "\n"); @@@ -3050,7 -3054,8 +3053,7 @@@ static int stmmac_init_fs(struct net_de priv->dbgfs_dir = debugfs_create_dir(dev->name, stmmac_fs_dir);
if (!priv->dbgfs_dir || IS_ERR(priv->dbgfs_dir)) { - pr_err("ERROR %s/%s, debugfs create directory failed\n", - STMMAC_RESOURCE_NAME, dev->name); + netdev_err(priv->dev, "ERROR failed to create debugfs directory\n");
return -ENOMEM; } @@@ -3062,7 -3067,7 +3065,7 @@@ &stmmac_rings_status_fops);
if (!priv->dbgfs_rings_status || IS_ERR(priv->dbgfs_rings_status)) { - pr_info("ERROR creating stmmac ring debugfs file\n"); + netdev_err(priv->dev, "ERROR creating stmmac ring debugfs file\n"); debugfs_remove_recursive(priv->dbgfs_dir);
return -ENOMEM; @@@ -3074,7 -3079,7 +3077,7 @@@ dev, &stmmac_dma_cap_fops);
if (!priv->dbgfs_dma_cap || IS_ERR(priv->dbgfs_dma_cap)) { - pr_info("ERROR creating stmmac MMC debugfs file\n"); + netdev_err(priv->dev, "ERROR creating stmmac MMC debugfs file\n"); debugfs_remove_recursive(priv->dbgfs_dir);
return -ENOMEM; @@@ -3146,11 -3151,11 +3149,11 @@@ static int stmmac_hw_init(struct stmmac } else { if (chain_mode) { priv->hw->mode = &chain_mode_ops; - pr_info(" Chain mode enabled\n"); + dev_info(priv->device, "Chain mode enabled\n"); priv->mode = STMMAC_CHAIN_MODE; } else { priv->hw->mode = &ring_mode_ops; - pr_info(" Ring mode enabled\n"); + dev_info(priv->device, "Ring mode enabled\n"); priv->mode = STMMAC_RING_MODE; } } @@@ -3158,7 -3163,7 +3161,7 @@@ /* Get the HW capability (new GMAC newer than 3.50a) */ priv->hw_cap_support = stmmac_get_hw_features(priv); if (priv->hw_cap_support) { - pr_info(" DMA HW capability register supported"); + dev_info(priv->device, "DMA HW capability register supported\n");
/* We can override some gmac/dma configuration fields: e.g. * enh_desc, tx_coe (e.g. that are passed through the @@@ -3183,9 -3188,8 +3186,9 @@@ else if (priv->dma_cap.rx_coe_type1) priv->plat->rx_coe = STMMAC_RX_COE_TYPE1;
- } else - pr_info(" No HW DMA feature register supported"); + } else { + dev_info(priv->device, "No HW DMA feature register supported\n"); + }
/* To use alternate (extended), normal or GMAC4 descriptor structures */ if (priv->synopsys_id >= DWMAC_CORE_4_00) @@@ -3195,20 -3199,20 +3198,20 @@@
if (priv->plat->rx_coe) { priv->hw->rx_csum = priv->plat->rx_coe; - pr_info(" RX Checksum Offload Engine supported\n"); + dev_info(priv->device, "RX Checksum Offload Engine supported\n"); if (priv->synopsys_id < DWMAC_CORE_4_00) - pr_info("\tCOE Type %d\n", priv->hw->rx_csum); + dev_info(priv->device, "COE Type %d\n", priv->hw->rx_csum); } if (priv->plat->tx_coe) - pr_info(" TX Checksum insertion supported\n"); + dev_info(priv->device, "TX Checksum insertion supported\n");
if (priv->plat->pmt) { - pr_info(" Wake-Up On Lan supported\n"); + dev_info(priv->device, "Wake-Up On Lan supported\n"); device_set_wakeup_capable(priv->device, 1); }
if (priv->dma_cap.tsoen) - pr_info(" TSO supported\n"); + dev_info(priv->device, "TSO supported\n");
return 0; } @@@ -3267,8 -3271,8 +3270,8 @@@ int stmmac_dvr_probe(struct device *dev
priv->stmmac_clk = devm_clk_get(priv->device, STMMAC_RESOURCE_NAME); if (IS_ERR(priv->stmmac_clk)) { - dev_warn(priv->device, "%s: warning: cannot get CSR clock\n", - __func__); + netdev_warn(priv->dev, "%s: warning: cannot get CSR clock\n", + __func__); /* If failed to obtain stmmac_clk and specific clk_csr value * is NOT passed from the platform, probe fail. */ @@@ -3317,7 -3321,7 +3320,7 @@@ if ((priv->plat->tso_en) && (priv->dma_cap.tsoen)) { ndev->hw_features |= NETIF_F_TSO; priv->tso = true; - pr_info(" TSO feature enabled\n"); + dev_info(priv->device, "TSO feature enabled\n"); } ndev->features |= ndev->hw_features | NETIF_F_HIGHDMA; ndev->watchdog_timeo = msecs_to_jiffies(watchdog); @@@ -3327,15 -3331,6 +3330,15 @@@ #endif priv->msg_enable = netif_msg_init(debug, default_msg_level);
+ /* MTU range: 46 - hw-specific max */ + ndev->min_mtu = ETH_ZLEN - ETH_HLEN; + if ((priv->plat->enh_desc) || (priv->synopsys_id >= DWMAC_CORE_4_00)) + ndev->max_mtu = JUMBO_LEN; + else + ndev->max_mtu = SKB_MAX_HEAD(NET_SKB_PAD + NET_IP_ALIGN); + if (priv->plat->maxmtu < ndev->max_mtu) + ndev->max_mtu = priv->plat->maxmtu; + if (flow_ctrl) priv->flow_ctrl = FLOW_AUTO; /* RX/TX pause on */
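The stmmac hunk just above is the shape of the generic MTU handling this merge rolls out across drivers: the driver only publishes min_mtu and max_mtu, and the core rejects out-of-range requests before any ndo_change_mtu is consulted. A minimal standalone model of that core-side clamp, assuming (as a simplification, not the real dev_set_mtu() source) that the check is a plain range test:

#include <stdio.h>

struct toy_netdev {
	int mtu;
	int min_mtu;	/* stmmac: ETH_ZLEN - ETH_HLEN = 46 */
	int max_mtu;	/* hw-specific: JUMBO_LEN, SKB_MAX_HEAD(), or plat->maxmtu */
};

/* Model of the generic check: a driver with pure range limits no longer
 * needs an ndo_change_mtu method at all.
 */
static int toy_set_mtu(struct toy_netdev *dev, int new_mtu)
{
	if (new_mtu < dev->min_mtu || new_mtu > dev->max_mtu)
		return -22;	/* -EINVAL, as the core would return */
	dev->mtu = new_mtu;
	return 0;
}

int main(void)
{
	struct toy_netdev dev = { .mtu = 1500, .min_mtu = 46, .max_mtu = 9000 };

	printf("9216 -> %d (rejected)\n", toy_set_mtu(&dev, 9216));
	printf("9000 -> %d, mtu = %d\n", toy_set_mtu(&dev, 9000), dev.mtu);
	return 0;
}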
@@@ -3346,7 -3341,7 +3349,7 @@@ */ if ((priv->synopsys_id >= DWMAC_CORE_3_50) && (!priv->plat->riwt_off)) { priv->use_riwt = 1; - pr_info(" Enable RX Mitigation via HW Watchdog Timer\n"); + netdev_info(priv->dev, "Enable RX Mitigation via HW Watchdog Timer\n"); }
netif_napi_add(ndev, &priv->napi, stmmac_poll, 64); @@@ -3356,8 -3351,7 +3359,8 @@@
ret = register_netdev(ndev); if (ret) { - pr_err("%s: ERROR %i registering the device\n", __func__, ret); + netdev_err(priv->dev, "%s: ERROR %i registering the device\n", + __func__, ret); goto error_netdev_register; }
@@@ -3380,9 -3374,8 +3383,9 @@@ /* MDIO bus Registration */ ret = stmmac_mdio_register(ndev); if (ret < 0) { - pr_debug("%s: MDIO bus (id: %d) registration failed", - __func__, priv->plat->bus_id); + netdev_err(priv->dev, + "%s: MDIO bus (id: %d) registration failed", + __func__, priv->plat->bus_id); goto error_mdio_register; } } @@@ -3415,7 -3408,7 +3418,7 @@@ int stmmac_dvr_remove(struct device *de struct net_device *ndev = dev_get_drvdata(dev); struct stmmac_priv *priv = netdev_priv(ndev);
- pr_info("%s:\n\tremoving driver", __func__); + netdev_info(priv->dev, "%s: removing driver", __func__);
priv->hw->dma->stop_rx(priv->ioaddr); priv->hw->dma->stop_tx(priv->ioaddr); @@@ -3454,8 -3447,8 +3457,8 @@@ int stmmac_suspend(struct device *dev if (!ndev || !netif_running(ndev)) return 0;
- if (priv->phydev) - phy_stop(priv->phydev); + if (ndev->phydev) + phy_stop(ndev->phydev);
spin_lock_irqsave(&priv->lock, flags);
@@@ -3549,8 -3542,8 +3552,8 @@@ int stmmac_resume(struct device *dev
spin_unlock_irqrestore(&priv->lock, flags);
- if (priv->phydev) - phy_start(priv->phydev); + if (ndev->phydev) + phy_start(ndev->phydev);
return 0; } diff --combined drivers/net/ethernet/sun/sunbmac.c index ea89ef3,02f4527..c4caf48 --- a/drivers/net/ethernet/sun/sunbmac.c +++ b/drivers/net/ethernet/sun/sunbmac.c @@@ -623,6 -623,7 +623,7 @@@ static int bigmac_init_hw(struct bigma void __iomem *gregs = bp->gregs; void __iomem *cregs = bp->creg; void __iomem *bregs = bp->bregs; + __u32 bblk_dvma = (__u32)bp->bblock_dvma; unsigned char *e = &bp->dev->dev_addr[0];
/* Latch current counters into statistics. */ @@@ -671,9 -672,9 +672,9 @@@ bregs + BMAC_XIFCFG);
/* Tell the QEC where the ring descriptors are. */ - sbus_writel(bp->bblock_dvma + bib_offset(be_rxd, 0), + sbus_writel(bblk_dvma + bib_offset(be_rxd, 0), cregs + CREG_RXDS); - sbus_writel(bp->bblock_dvma + bib_offset(be_txd, 0), + sbus_writel(bblk_dvma + bib_offset(be_txd, 0), cregs + CREG_TXDS);
/* Setup the FIFO pointers into QEC local memory. */ @@@ -1064,6 -1065,7 +1065,6 @@@ static const struct net_device_ops bigm .ndo_get_stats = bigmac_get_stats, .ndo_set_rx_mode = bigmac_set_multicast, .ndo_tx_timeout = bigmac_tx_timeout, - .ndo_change_mtu = eth_change_mtu, .ndo_set_mac_address = eth_mac_addr, .ndo_validate_addr = eth_validate_addr, }; diff --combined drivers/net/ethernet/sun/sunqe.c index c5ef711,9582948..a6bcdcd --- a/drivers/net/ethernet/sun/sunqe.c +++ b/drivers/net/ethernet/sun/sunqe.c @@@ -124,7 -124,7 +124,7 @@@ static void qe_init_rings(struct sunqe { struct qe_init_block *qb = qep->qe_block; struct sunqe_buffers *qbufs = qep->buffers; - __u32 qbufs_dvma = qep->buffers_dvma; + __u32 qbufs_dvma = (__u32)qep->buffers_dvma; int i;
qep->rx_new = qep->rx_old = qep->tx_new = qep->tx_old = 0; @@@ -144,6 -144,7 +144,7 @@@ static int qe_init(struct sunqe *qep, i void __iomem *mregs = qep->mregs; void __iomem *gregs = qecp->gregs; unsigned char *e = &qep->dev->dev_addr[0]; + __u32 qblk_dvma = (__u32)qep->qblock_dvma; u32 tmp; int i;
@@@ -152,8 -153,8 +153,8 @@@ return -EAGAIN;
/* Setup initial rx/tx init block pointers. */ - sbus_writel(qep->qblock_dvma + qib_offset(qe_rxd, 0), cregs + CREG_RXDS); - sbus_writel(qep->qblock_dvma + qib_offset(qe_txd, 0), cregs + CREG_TXDS); + sbus_writel(qblk_dvma + qib_offset(qe_rxd, 0), cregs + CREG_RXDS); + sbus_writel(qblk_dvma + qib_offset(qe_txd, 0), cregs + CREG_TXDS);
/* Enable/mask the various irq's. */ sbus_writel(0, cregs + CREG_RIMASK); @@@ -413,7 -414,7 +414,7 @@@ static void qe_rx(struct sunqe *qep struct net_device *dev = qep->dev; struct qe_rxd *this; struct sunqe_buffers *qbufs = qep->buffers; - __u32 qbufs_dvma = qep->buffers_dvma; + __u32 qbufs_dvma = (__u32)qep->buffers_dvma; int elem = qep->rx_new; u32 flags;
@@@ -572,7 -573,7 +573,7 @@@ static int qe_start_xmit(struct sk_buf { struct sunqe *qep = netdev_priv(dev); struct sunqe_buffers *qbufs = qep->buffers; - __u32 txbuf_dvma, qbufs_dvma = qep->buffers_dvma; + __u32 txbuf_dvma, qbufs_dvma = (__u32)qep->buffers_dvma; unsigned char *txbuf; int len, entry;
@@@ -823,6 -824,7 +824,6 @@@ static const struct net_device_ops qec_ .ndo_start_xmit = qe_start_xmit, .ndo_set_rx_mode = qe_set_multicast, .ndo_tx_timeout = qe_tx_timeout, - .ndo_change_mtu = eth_change_mtu, .ndo_set_mac_address = eth_mac_addr, .ndo_validate_addr = eth_validate_addr, }; diff --combined drivers/net/ethernet/ti/cpsw.c index 39d06e8,58947aa..da40ea5 --- a/drivers/net/ethernet/ti/cpsw.c +++ b/drivers/net/ethernet/ti/cpsw.c @@@ -1376,6 -1376,10 +1376,6 @@@ static int cpsw_ndo_open(struct net_dev ALE_ALL_PORTS, ALE_ALL_PORTS, 0, 0);
if (!cpsw_common_res_usage_state(cpsw)) { - /* setup tx dma to fixed prio and zero offset */ - cpdma_control_set(cpsw->dma, CPDMA_TX_PRIO_FIXED, 1); - cpdma_control_set(cpsw->dma, CPDMA_RX_BUFFER_OFFSET, 0); - /* disable priority elevation */ __raw_writel(0, &cpsw->regs->ptype);
@@@ -1879,6 -1883,7 +1879,6 @@@ static const struct net_device_ops cpsw .ndo_set_mac_address = cpsw_ndo_set_mac_address, .ndo_do_ioctl = cpsw_ndo_ioctl, .ndo_validate_addr = eth_validate_addr, - .ndo_change_mtu = eth_change_mtu, .ndo_tx_timeout = cpsw_ndo_tx_timeout, .ndo_set_rx_mode = cpsw_ndo_set_rx_mode, #ifdef CONFIG_NET_POLL_CONTROLLER @@@ -1962,30 -1967,27 +1962,30 @@@ static int cpsw_get_ts_info(struct net_ return 0; }
-static int cpsw_get_settings(struct net_device *ndev, - struct ethtool_cmd *ecmd) +static int cpsw_get_link_ksettings(struct net_device *ndev, + struct ethtool_link_ksettings *ecmd) { struct cpsw_priv *priv = netdev_priv(ndev); struct cpsw_common *cpsw = priv->cpsw; int slave_no = cpsw_slave_index(cpsw, priv);
if (cpsw->slaves[slave_no].phy) - return phy_ethtool_gset(cpsw->slaves[slave_no].phy, ecmd); + return phy_ethtool_ksettings_get(cpsw->slaves[slave_no].phy, + ecmd); else return -EOPNOTSUPP; }
-static int cpsw_set_settings(struct net_device *ndev, struct ethtool_cmd *ecmd) +static int cpsw_set_link_ksettings(struct net_device *ndev, + const struct ethtool_link_ksettings *ecmd) { struct cpsw_priv *priv = netdev_priv(ndev); struct cpsw_common *cpsw = priv->cpsw; int slave_no = cpsw_slave_index(cpsw, priv);
if (cpsw->slaves[slave_no].phy) - return phy_ethtool_sset(cpsw->slaves[slave_no].phy, ecmd); + return phy_ethtool_ksettings_set(cpsw->slaves[slave_no].phy, + ecmd); else return -EOPNOTSUPP; } @@@ -2243,6 -2245,8 +2243,6 @@@ static const struct ethtool_ops cpsw_et .set_msglevel = cpsw_set_msglevel, .get_link = ethtool_op_get_link, .get_ts_info = cpsw_get_ts_info, - .get_settings = cpsw_get_settings, - .set_settings = cpsw_set_settings, .get_coalesce = cpsw_get_coalesce, .set_coalesce = cpsw_set_coalesce, .get_sset_count = cpsw_get_sset_count, @@@ -2258,8 -2262,6 +2258,8 @@@ .complete = cpsw_ethtool_op_complete, .get_channels = cpsw_get_channels, .set_channels = cpsw_set_channels, + .get_link_ksettings = cpsw_get_link_ksettings, + .set_link_ksettings = cpsw_set_link_ksettings, };
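These cpsw hunks retire the legacy ethtool_cmd get_settings/set_settings callbacks in favor of the link_ksettings pair. For a phylib-backed driver the conversion is mechanical; below is a hedged, kernel-style sketch of the resulting shape (hypothetical foo_* names, not buildable outside the tree, and assuming the 4.9-era int-returning phy_ethtool_ksettings_get()):

static int foo_get_link_ksettings(struct net_device *ndev,
				  struct ethtool_link_ksettings *cmd)
{
	if (!ndev->phydev)
		return -EOPNOTSUPP;
	return phy_ethtool_ksettings_get(ndev->phydev, cmd);
}

static int foo_set_link_ksettings(struct net_device *ndev,
				  const struct ethtool_link_ksettings *cmd)
{
	if (!ndev->phydev)
		return -EOPNOTSUPP;
	return phy_ethtool_ksettings_set(ndev->phydev, cmd);
}

static const struct ethtool_ops foo_ethtool_ops = {
	/* .get_settings/.set_settings dropped, as in the cpsw hunk above */
	.get_link_ksettings = foo_get_link_ksettings,
	.set_link_ksettings = foo_set_link_ksettings,
};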
static void cpsw_slave_init(struct cpsw_slave *slave, struct cpsw_common *cpsw, @@@ -2373,8 -2375,11 +2373,11 @@@ static int cpsw_probe_dt(struct cpsw_pl * to the PHY is the Ethernet MAC DT node. */ ret = of_phy_register_fixed_link(slave_node); - if (ret) + if (ret) { + if (ret != -EPROBE_DEFER) + dev_err(&pdev->dev, "failed to register fixed-link phy: %d\n", ret); return ret; + } slave_data->phy_node = of_node_get(slave_node); } else if (parp) { u32 phyid; @@@ -2395,6 -2400,7 +2398,7 @@@ } snprintf(slave_data->phy_id, sizeof(slave_data->phy_id), PHY_ID_FMT, mdio->name, phyid); + put_device(&mdio->dev); } else { dev_err(&pdev->dev, "No slave[%d] phy_id, phy-handle, or fixed-link property\n", @@@ -2438,6 -2444,46 +2442,46 @@@ no_phy_slave return 0; }
+ static void cpsw_remove_dt(struct platform_device *pdev) + { + struct net_device *ndev = platform_get_drvdata(pdev); + struct cpsw_common *cpsw = ndev_to_cpsw(ndev); + struct cpsw_platform_data *data = &cpsw->data; + struct device_node *node = pdev->dev.of_node; + struct device_node *slave_node; + int i = 0; + + for_each_available_child_of_node(node, slave_node) { + struct cpsw_slave_data *slave_data = &data->slave_data[i]; + + if (strcmp(slave_node->name, "slave")) + continue; + + if (of_phy_is_fixed_link(slave_node)) { + struct phy_device *phydev; + + phydev = of_phy_find_device(slave_node); + if (phydev) { + fixed_phy_unregister(phydev); + /* Put references taken by + * of_phy_find_device() and + * of_phy_register_fixed_link(). + */ + phy_device_free(phydev); + phy_device_free(phydev); + } + } + + of_node_put(slave_data->phy_node); + + i++; + if (i == data->slaves) + break; + } + + of_platform_depopulate(&pdev->dev); + } + static int cpsw_probe_dual_emac(struct cpsw_priv *priv) { struct cpsw_common *cpsw = priv->cpsw; @@@ -2545,6 -2591,9 +2589,9 @@@ static int cpsw_probe(struct platform_d int irq;
cpsw = devm_kzalloc(&pdev->dev, sizeof(struct cpsw_common), GFP_KERNEL); + if (!cpsw) + return -ENOMEM; + cpsw->dev = &pdev->dev;
ndev = alloc_etherdev_mq(sizeof(struct cpsw_priv), CPSW_MAX_QUEUES); @@@ -2582,11 -2631,19 +2629,19 @@@ /* Select default pin state */ pinctrl_pm_select_default_state(&pdev->dev);
- if (cpsw_probe_dt(&cpsw->data, pdev)) { - dev_err(&pdev->dev, "cpsw: platform data missing\n"); - ret = -ENODEV; + /* Need to enable clocks with runtime PM api to access module + * registers + */ + ret = pm_runtime_get_sync(&pdev->dev); + if (ret < 0) { + pm_runtime_put_noidle(&pdev->dev); goto clean_runtime_disable_ret; } + + ret = cpsw_probe_dt(&cpsw->data, pdev); + if (ret) + goto clean_dt_ret; + data = &cpsw->data; cpsw->rx_ch_num = 1; cpsw->tx_ch_num = 1; @@@ -2606,7 -2663,7 +2661,7 @@@ GFP_KERNEL); if (!cpsw->slaves) { ret = -ENOMEM; - goto clean_runtime_disable_ret; + goto clean_dt_ret; } for (i = 0; i < data->slaves; i++) cpsw->slaves[i].slave_num = i; @@@ -2618,7 -2675,7 +2673,7 @@@ if (IS_ERR(clk)) { dev_err(priv->dev, "fck is not found\n"); ret = -ENODEV; - goto clean_runtime_disable_ret; + goto clean_dt_ret; } cpsw->bus_freq_mhz = clk_get_rate(clk) / 1000000;
@@@ -2626,26 -2683,17 +2681,17 @@@ ss_regs = devm_ioremap_resource(&pdev->dev, ss_res); if (IS_ERR(ss_regs)) { ret = PTR_ERR(ss_regs); - goto clean_runtime_disable_ret; + goto clean_dt_ret; } cpsw->regs = ss_regs;
- /* Need to enable clocks with runtime PM api to access module - * registers - */ - ret = pm_runtime_get_sync(&pdev->dev); - if (ret < 0) { - pm_runtime_put_noidle(&pdev->dev); - goto clean_runtime_disable_ret; - } cpsw->version = readl(&cpsw->regs->id_ver); - pm_runtime_put_sync(&pdev->dev);
res = platform_get_resource(pdev, IORESOURCE_MEM, 1); cpsw->wr_regs = devm_ioremap_resource(&pdev->dev, res); if (IS_ERR(cpsw->wr_regs)) { ret = PTR_ERR(cpsw->wr_regs); - goto clean_runtime_disable_ret; + goto clean_dt_ret; }
memset(&dma_params, 0, sizeof(dma_params)); @@@ -2682,7 -2730,7 +2728,7 @@@ default: dev_err(priv->dev, "unknown version 0x%08x\n", cpsw->version); ret = -ENODEV; - goto clean_runtime_disable_ret; + goto clean_dt_ret; } for (i = 0; i < cpsw->data.slaves; i++) { struct cpsw_slave *slave = &cpsw->slaves[i]; @@@ -2711,7 -2759,7 +2757,7 @@@ if (!cpsw->dma) { dev_err(priv->dev, "error initializing dma\n"); ret = -ENOMEM; - goto clean_runtime_disable_ret; + goto clean_dt_ret; }
cpsw->txch[0] = cpdma_chan_create(cpsw->dma, 0, cpsw_tx_handler, 0); @@@ -2809,16 -2857,23 +2855,23 @@@ ret = cpsw_probe_dual_emac(priv); if (ret) { cpsw_err(priv, probe, "error probe slave 2 emac interface\n"); - goto clean_ale_ret; + goto clean_unregister_netdev_ret; } }
+ pm_runtime_put(&pdev->dev); + return 0;
+ clean_unregister_netdev_ret: + unregister_netdev(ndev); clean_ale_ret: cpsw_ale_destroy(cpsw->ale); clean_dma_ret: cpdma_ctlr_destroy(cpsw->dma); + clean_dt_ret: + cpsw_remove_dt(pdev); + pm_runtime_put_sync(&pdev->dev); clean_runtime_disable_ret: pm_runtime_disable(&pdev->dev); clean_ndev_ret: @@@ -2844,7 -2899,7 +2897,7 @@@ static int cpsw_remove(struct platform_
cpsw_ale_destroy(cpsw->ale); cpdma_ctlr_destroy(cpsw->dma); - of_platform_depopulate(&pdev->dev); + cpsw_remove_dt(pdev); pm_runtime_put_sync(&pdev->dev); pm_runtime_disable(&pdev->dev); if (cpsw->data.dual_emac) diff --combined drivers/net/virtio_net.c index ca5239a,7276d5a..d4ac7a6 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c @@@ -1419,6 -1419,17 +1419,6 @@@ static const struct ethtool_ops virtnet .set_settings = virtnet_set_settings, };
-#define MIN_MTU 68 -#define MAX_MTU 65535 - -static int virtnet_change_mtu(struct net_device *dev, int new_mtu) -{ - if (new_mtu < MIN_MTU || new_mtu > MAX_MTU) - return -EINVAL; - dev->mtu = new_mtu; - return 0; -} - static const struct net_device_ops virtnet_netdev = { .ndo_open = virtnet_open, .ndo_stop = virtnet_close, @@@ -1426,6 -1437,7 +1426,6 @@@ .ndo_validate_addr = eth_validate_addr, .ndo_set_mac_address = virtnet_set_mac_address, .ndo_set_rx_mode = virtnet_set_rx_mode, - .ndo_change_mtu = virtnet_change_mtu, .ndo_get_stats64 = virtnet_stats, .ndo_vlan_rx_add_vid = virtnet_vlan_rx_add_vid, .ndo_vlan_rx_kill_vid = virtnet_vlan_rx_kill_vid, @@@ -1485,6 -1497,11 +1485,11 @@@ static void virtnet_free_queues(struct netif_napi_del(&vi->rq[i].napi); }
+ /* We called napi_hash_del() before netif_napi_del(), + * we need to respect an RCU grace period before freeing vi->rq + */ + synchronize_net(); + kfree(vi->rq); kfree(vi->sq); } @@@ -1736,9 -1753,6 +1741,9 @@@ static bool virtnet_validate_features(s return true; }
+#define MIN_MTU ETH_MIN_MTU +#define MAX_MTU ETH_MAX_MTU + static int virtnet_probe(struct virtio_device *vdev) { int i, err; @@@ -1812,10 -1826,6 +1817,10 @@@
dev->vlan_features = dev->features;
+ /* MTU range: 68 - 65535 */ + dev->min_mtu = MIN_MTU; + dev->max_mtu = MAX_MTU; + /* Configuration may specify what MAC to use. Otherwise random. */ if (virtio_has_feature(vdev, VIRTIO_NET_F_MAC)) virtio_cread_bytes(vdev, @@@ -1870,12 -1880,8 +1875,12 @@@ mtu = virtio_cread16(vdev, offsetof(struct virtio_net_config, mtu)); - if (virtnet_change_mtu(dev, mtu)) + if (mtu < dev->min_mtu) { __virtio_clear_bit(vdev, VIRTIO_NET_F_MTU); + } else { + dev->mtu = mtu; + dev->max_mtu = mtu; + } }
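virtio_net above also drops its private ndo_change_mtu, but adds a twist when the device advertises VIRTIO_NET_F_MTU: a value below the floor clears the feature bit, while a sane value both sets dev->mtu and lowers dev->max_mtu so the device-promised ceiling cannot be raised later. A small standalone model of that decision (simplified; the real code reads the value from virtio config space):

#include <stdio.h>
#include <stdbool.h>

#define MIN_MTU 68
#define MAX_MTU 65535

struct toy_vnet {
	int mtu, min_mtu, max_mtu;
	bool mtu_feature;
};

static void toy_apply_device_mtu(struct toy_vnet *d, int dev_mtu)
{
	if (dev_mtu < d->min_mtu) {
		d->mtu_feature = false;	/* __virtio_clear_bit(VIRTIO_NET_F_MTU) */
	} else {
		d->mtu = dev_mtu;	/* adopt the advertised value ... */
		d->max_mtu = dev_mtu;	/* ... and forbid exceeding it later */
	}
}

int main(void)
{
	struct toy_vnet d = { 1500, MIN_MTU, MAX_MTU, true };

	toy_apply_device_mtu(&d, 9000);
	printf("mtu=%d max=%d feature=%d\n", d.mtu, d.max_mtu, d.mtu_feature);
	return 0;
}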
if (vi->any_header_sg) diff --combined drivers/net/wireless/mac80211_hwsim.c index 1293f84,d3bad57..1620a5d --- a/drivers/net/wireless/mac80211_hwsim.c +++ b/drivers/net/wireless/mac80211_hwsim.c @@@ -250,7 -250,7 +250,7 @@@ static inline void hwsim_clear_chanctx_ cp->magic = 0; }
-static int hwsim_net_id; +static unsigned int hwsim_net_id;
static int hwsim_netgroup;
@@@ -587,8 -587,15 +587,8 @@@ struct hwsim_radiotap_ack_hdr __le16 rt_chbitmask; } __packed;
-/* MAC80211_HWSIM netlinf family */ -static struct genl_family hwsim_genl_family = { - .id = GENL_ID_GENERATE, - .hdrsize = 0, - .name = "MAC80211_HWSIM", - .version = 1, - .maxattr = HWSIM_ATTR_MAX, - .netnsok = true, -}; +/* MAC80211_HWSIM netlink family */ +static struct genl_family hwsim_genl_family;
enum hwsim_multicast_groups { HWSIM_MCGRP_CONFIG, @@@ -819,7 -826,7 +819,7 @@@ static void mac80211_hwsim_set_tsf(stru data->bcn_delta = do_div(delta, bcn_int); } else { data->tsf_offset -= delta; - data->bcn_delta = -do_div(delta, bcn_int); + data->bcn_delta = -(s64)do_div(delta, bcn_int); } }
@@@ -2249,51 -2256,35 +2249,51 @@@ static void mac80211_hwsim_get_et_stats WARN_ON(i != MAC80211_HWSIM_SSTATS_LEN); }
+#define HWSIM_COMMON_OPS \ + .tx = mac80211_hwsim_tx, \ + .start = mac80211_hwsim_start, \ + .stop = mac80211_hwsim_stop, \ + .add_interface = mac80211_hwsim_add_interface, \ + .change_interface = mac80211_hwsim_change_interface, \ + .remove_interface = mac80211_hwsim_remove_interface, \ + .config = mac80211_hwsim_config, \ + .configure_filter = mac80211_hwsim_configure_filter, \ + .bss_info_changed = mac80211_hwsim_bss_info_changed, \ + .sta_add = mac80211_hwsim_sta_add, \ + .sta_remove = mac80211_hwsim_sta_remove, \ + .sta_notify = mac80211_hwsim_sta_notify, \ + .set_tim = mac80211_hwsim_set_tim, \ + .conf_tx = mac80211_hwsim_conf_tx, \ + .get_survey = mac80211_hwsim_get_survey, \ + CFG80211_TESTMODE_CMD(mac80211_hwsim_testmode_cmd) \ + .ampdu_action = mac80211_hwsim_ampdu_action, \ + .flush = mac80211_hwsim_flush, \ + .get_tsf = mac80211_hwsim_get_tsf, \ + .set_tsf = mac80211_hwsim_set_tsf, \ + .get_et_sset_count = mac80211_hwsim_get_et_sset_count, \ + .get_et_stats = mac80211_hwsim_get_et_stats, \ + .get_et_strings = mac80211_hwsim_get_et_strings, + static const struct ieee80211_ops mac80211_hwsim_ops = { - .tx = mac80211_hwsim_tx, - .start = mac80211_hwsim_start, - .stop = mac80211_hwsim_stop, - .add_interface = mac80211_hwsim_add_interface, - .change_interface = mac80211_hwsim_change_interface, - .remove_interface = mac80211_hwsim_remove_interface, - .config = mac80211_hwsim_config, - .configure_filter = mac80211_hwsim_configure_filter, - .bss_info_changed = mac80211_hwsim_bss_info_changed, - .sta_add = mac80211_hwsim_sta_add, - .sta_remove = mac80211_hwsim_sta_remove, - .sta_notify = mac80211_hwsim_sta_notify, - .set_tim = mac80211_hwsim_set_tim, - .conf_tx = mac80211_hwsim_conf_tx, - .get_survey = mac80211_hwsim_get_survey, - CFG80211_TESTMODE_CMD(mac80211_hwsim_testmode_cmd) - .ampdu_action = mac80211_hwsim_ampdu_action, + HWSIM_COMMON_OPS .sw_scan_start = mac80211_hwsim_sw_scan, .sw_scan_complete = mac80211_hwsim_sw_scan_complete, - .flush = mac80211_hwsim_flush, - .get_tsf = mac80211_hwsim_get_tsf, - .set_tsf = mac80211_hwsim_set_tsf, - .get_et_sset_count = mac80211_hwsim_get_et_sset_count, - .get_et_stats = mac80211_hwsim_get_et_stats, - .get_et_strings = mac80211_hwsim_get_et_strings, };
-static struct ieee80211_ops mac80211_hwsim_mchan_ops; +static const struct ieee80211_ops mac80211_hwsim_mchan_ops = { + HWSIM_COMMON_OPS + .hw_scan = mac80211_hwsim_hw_scan, + .cancel_hw_scan = mac80211_hwsim_cancel_hw_scan, + .sw_scan_start = NULL, + .sw_scan_complete = NULL, + .remain_on_channel = mac80211_hwsim_roc, + .cancel_remain_on_channel = mac80211_hwsim_croc, + .add_chanctx = mac80211_hwsim_add_chanctx, + .remove_chanctx = mac80211_hwsim_remove_chanctx, + .change_chanctx = mac80211_hwsim_change_chanctx, + .assign_vif_chanctx = mac80211_hwsim_assign_vif_chanctx, + .unassign_vif_chanctx = mac80211_hwsim_unassign_vif_chanctx, +};
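The HWSIM_COMMON_OPS macro exists so that both ops tables can be const: the old code copied mac80211_hwsim_ops at init time and patched the multi-channel hooks in, which forced the struct to stay writable. Sharing designated initializers through a macro is a general C idiom; a minimal runnable sketch with a made-up ops struct:

#include <stdio.h>

struct toy_ops {
	int (*start)(void);
	int (*scan)(void);
};

static int common_start(void) { return 0; }
static int fancy_scan(void)   { return 1; }

/* Shared initializers; the trailing comma lets users append their own. */
#define TOY_COMMON_OPS \
	.start = common_start,

static const struct toy_ops plain_ops = {
	TOY_COMMON_OPS
};

static const struct toy_ops mchan_ops = {
	TOY_COMMON_OPS
	.scan = fancy_scan,	/* extra hook, no runtime patching needed */
};

int main(void)
{
	printf("%d %d\n", plain_ops.start(), mchan_ops.scan());
	return 0;
}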
struct hwsim_new_radio_params { unsigned int channels; @@@ -2800,6 -2791,7 +2800,6 @@@ static void mac80211_hwsim_free(void
static const struct net_device_ops hwsim_netdev_ops = { .ndo_start_xmit = hwsim_mon_xmit, - .ndo_change_mtu = eth_change_mtu, .ndo_set_mac_address = eth_mac_addr, .ndo_validate_addr = eth_validate_addr, }; @@@ -3244,18 -3236,6 +3244,18 @@@ static const struct genl_ops hwsim_ops[ }, };
+static struct genl_family hwsim_genl_family __ro_after_init = { + .name = "MAC80211_HWSIM", + .version = 1, + .maxattr = HWSIM_ATTR_MAX, + .netnsok = true, + .module = THIS_MODULE, + .ops = hwsim_ops, + .n_ops = ARRAY_SIZE(hwsim_ops), + .mcgrps = hwsim_mcgrps, + .n_mcgrps = ARRAY_SIZE(hwsim_mcgrps), +}; + static void destroy_radio(struct work_struct *work) { struct mac80211_hwsim_data *data = @@@ -3303,13 -3283,15 +3303,13 @@@ static struct notifier_block hwsim_netl .notifier_call = mac80211_hwsim_netlink_notify, };
-static int hwsim_init_netlink(void) +static int __init hwsim_init_netlink(void) { int rc;
printk(KERN_INFO "mac80211_hwsim: initializing netlink\n");
- rc = genl_register_family_with_ops_groups(&hwsim_genl_family, - hwsim_ops, - hwsim_mcgrps); + rc = genl_register_family(&hwsim_genl_family); if (rc) goto failure;
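The registration change pairs with the static family definition earlier in this diff: the ops array, multicast groups, and owning module are now members of genl_family, the id is assigned at registration time instead of via GENL_ID_GENERATE, and genl_register_family() becomes the single entry point. A hedged, kernel-style sketch of the new shape for a hypothetical family, using only fields visible in the hwsim conversion:

static const struct genl_ops foo_genl_ops[] = {
	/* { .cmd = ..., .doit = ... }, */
};

static struct genl_family foo_genl_family __ro_after_init = {
	.name    = "FOO_FAMILY",
	.version = 1,
	.maxattr = 0,
	.module  = THIS_MODULE,
	.ops     = foo_genl_ops,
	.n_ops   = ARRAY_SIZE(foo_genl_ops),
};

static int __init foo_init(void)
{
	/* one call now registers ops and mcast groups with the family */
	return genl_register_family(&foo_genl_family);
}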
@@@ -3378,6 -3360,21 +3378,6 @@@ static int __init init_mac80211_hwsim(v if (channels < 1) return -EINVAL;
- mac80211_hwsim_mchan_ops = mac80211_hwsim_ops; - mac80211_hwsim_mchan_ops.hw_scan = mac80211_hwsim_hw_scan; - mac80211_hwsim_mchan_ops.cancel_hw_scan = mac80211_hwsim_cancel_hw_scan; - mac80211_hwsim_mchan_ops.sw_scan_start = NULL; - mac80211_hwsim_mchan_ops.sw_scan_complete = NULL; - mac80211_hwsim_mchan_ops.remain_on_channel = mac80211_hwsim_roc; - mac80211_hwsim_mchan_ops.cancel_remain_on_channel = mac80211_hwsim_croc; - mac80211_hwsim_mchan_ops.add_chanctx = mac80211_hwsim_add_chanctx; - mac80211_hwsim_mchan_ops.remove_chanctx = mac80211_hwsim_remove_chanctx; - mac80211_hwsim_mchan_ops.change_chanctx = mac80211_hwsim_change_chanctx; - mac80211_hwsim_mchan_ops.assign_vif_chanctx = - mac80211_hwsim_assign_vif_chanctx; - mac80211_hwsim_mchan_ops.unassign_vif_chanctx = - mac80211_hwsim_unassign_vif_chanctx; - spin_lock_init(&hwsim_radio_lock);
err = register_pernet_device(&hwsim_net_ops); diff --combined include/linux/bpf_verifier.h index ac5b393,6aaf425..7453c12 --- a/include/linux/bpf_verifier.h +++ b/include/linux/bpf_verifier.h @@@ -14,7 -14,7 +14,7 @@@ * are obviously wrong for any sort of memory access. */ #define BPF_REGISTER_MAX_RANGE (1024 * 1024 * 1024) - #define BPF_REGISTER_MIN_RANGE -(1024 * 1024 * 1024) + #define BPF_REGISTER_MIN_RANGE -1
struct bpf_reg_state { enum bpf_reg_type type; @@@ -22,14 -22,15 +22,15 @@@ * Used to determine if any memory access using this register will * result in a bad access. */ - u64 min_value, max_value; + s64 min_value; + u64 max_value; + u32 id; union { /* valid when type == CONST_IMM | PTR_TO_STACK | UNKNOWN_VALUE */ s64 imm;
/* valid when type == PTR_TO_PACKET* */ struct { - u32 id; u16 off; u16 range; }; diff --combined include/net/net_namespace.h index d7149e9,0940598..af8fe8a --- a/include/net/net_namespace.h +++ b/include/net/net_namespace.h @@@ -170,7 -170,7 +170,7 @@@ static inline struct net *copy_net_ns(u extern struct list_head net_namespace_list;
struct net *get_net_ns_by_pid(pid_t pid); - struct net *get_net_ns_by_fd(int pid); + struct net *get_net_ns_by_fd(int fd);
#ifdef CONFIG_SYSCTL void ipx_register_sysctl(void); @@@ -291,7 -291,7 +291,7 @@@ struct pernet_operations int (*init)(struct net *net); void (*exit)(struct net *net); void (*exit_batch)(struct list_head *net_exit_list); - int *id; + unsigned int *id; size_t size; };
diff --combined kernel/bpf/verifier.c index 89f787c,6a93615..8740c5f --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@@ -19,7 -19,6 +19,7 @@@ #include <net/netlink.h> #include <linux/file.h> #include <linux/vmalloc.h> +#include <linux/stringify.h>
/* bpf_check() is a static code analyzer that walks eBPF program * instruction by instruction and updates register/stack state. @@@ -191,22 -190,6 +191,22 @@@ static const char * const reg_type_str[ [PTR_TO_PACKET_END] = "pkt_end", };
+#define __BPF_FUNC_STR_FN(x) [BPF_FUNC_ ## x] = __stringify(bpf_ ## x) +static const char * const func_id_str[] = { + __BPF_FUNC_MAPPER(__BPF_FUNC_STR_FN) +}; +#undef __BPF_FUNC_STR_FN + +static const char *func_id_name(int id) +{ + BUILD_BUG_ON(ARRAY_SIZE(func_id_str) != __BPF_FUNC_MAX_ID); + + if (id >= 0 && id < __BPF_FUNC_MAX_ID && func_id_str[id]) + return func_id_str[id]; + else + return "unknown"; +} + static void print_verifier_state(struct bpf_verifier_state *state) { struct bpf_reg_state *reg; @@@ -229,13 -212,12 +229,13 @@@ else if (t == CONST_PTR_TO_MAP || t == PTR_TO_MAP_VALUE || t == PTR_TO_MAP_VALUE_OR_NULL || t == PTR_TO_MAP_VALUE_ADJ) - verbose("(ks=%d,vs=%d)", + verbose("(ks=%d,vs=%d,id=%u)", reg->map_ptr->key_size, - reg->map_ptr->value_size); + reg->map_ptr->value_size, + reg->id); if (reg->min_value != BPF_REGISTER_MIN_RANGE) - verbose(",min_value=%llu", - (unsigned long long)reg->min_value); + verbose(",min_value=%lld", + (long long)reg->min_value); if (reg->max_value != BPF_REGISTER_MAX_RANGE) verbose(",max_value=%llu", (unsigned long long)reg->max_value); @@@ -371,8 -353,7 +371,8 @@@ static void print_bpf_insn(struct bpf_i u8 opcode = BPF_OP(insn->code);
if (opcode == BPF_CALL) { - verbose("(%02x) call %d\n", insn->code, insn->imm); + verbose("(%02x) call %s#%d\n", insn->code, + func_id_name(insn->imm), insn->imm); } else if (insn->code == (BPF_JMP | BPF_JA)) { verbose("(%02x) goto pc%+d\n", insn->code, insn->off); @@@ -466,7 -447,6 +466,7 @@@ static void mark_reg_unknown_value(stru { BUG_ON(regno >= MAX_BPF_REG); regs[regno].type = UNKNOWN_VALUE; + regs[regno].id = 0; regs[regno].imm = 0; }
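func_id_name() above leans on an X-macro: __BPF_FUNC_MAPPER expands a caller-supplied macro once per helper, and __BPF_FUNC_STR_FN turns each expansion into a designated array entry holding the stringified name. The same trick works anywhere an enum and a name table must stay in sync; a self-contained sketch (toy mapper, not the real __BPF_FUNC_MAPPER):

#include <stdio.h>

/* One list, expanded twice: once for the enum, once for the strings. */
#define TOY_FUNC_MAPPER(FN) \
	FN(map_lookup)	\
	FN(map_update)	\
	FN(trace_printk)

#define TOY_ENUM_FN(x) TOY_FUNC_ ## x,
enum { TOY_FUNC_MAPPER(TOY_ENUM_FN) __TOY_FUNC_MAX_ID };

#define TOY_STR_FN(x) [TOY_FUNC_ ## x] = #x,
static const char * const toy_func_str[] = { TOY_FUNC_MAPPER(TOY_STR_FN) };

static const char *toy_func_name(int id)
{
	if (id >= 0 && id < __TOY_FUNC_MAX_ID && toy_func_str[id])
		return toy_func_str[id];
	return "unknown";
}

int main(void)
{
	printf("%s %s\n", toy_func_name(TOY_FUNC_map_update), toy_func_name(99));
	return 0;
}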
@@@ -778,7 -758,7 +778,7 @@@ static int check_mem_access(struct bpf_ * index'es we need to make sure that whatever we use * will have a set floor within our range. */ - if ((s64)reg->min_value < 0) { + if (reg->min_value < 0) { verbose("R%d min value is negative, either use unsigned index or do a if (index >=0) check.\n", regno); return -EACCES; @@@ -1132,8 -1112,8 +1132,8 @@@ static int check_map_func_compatibility
return 0; error: - verbose("cannot pass map_type %d into func %d\n", - map->map_type, func_id); + verbose("cannot pass map_type %d into func %s#%d\n", + map->map_type, func_id_name(func_id), func_id); return -EINVAL; }
@@@ -1190,7 -1170,7 +1190,7 @@@ static int check_call(struct bpf_verifi
/* find function prototype */ if (func_id < 0 || func_id >= __BPF_FUNC_MAX_ID) { - verbose("invalid func %d\n", func_id); + verbose("invalid func %s#%d\n", func_id_name(func_id), func_id); return -EINVAL; }
@@@ -1198,7 -1178,7 +1198,7 @@@ fn = env->prog->aux->ops->get_func_proto(func_id);
if (!fn) { - verbose("unknown func %d\n", func_id); + verbose("unknown func %s#%d\n", func_id_name(func_id), func_id); return -EINVAL; }
@@@ -1218,8 -1198,7 +1218,8 @@@ */ err = check_raw_mode(fn); if (err) { - verbose("kernel subsystem misconfigured func %d\n", func_id); + verbose("kernel subsystem misconfigured func %s#%d\n", + func_id_name(func_id), func_id); return err; }
@@@ -1273,10 -1252,9 +1273,10 @@@ return -EINVAL; } regs[BPF_REG_0].map_ptr = meta.map_ptr; + regs[BPF_REG_0].id = ++env->id_gen; } else { - verbose("unknown return type %d of func %d\n", - fn->ret_type, func_id); + verbose("unknown return type %d of func %s#%d\n", + fn->ret_type, func_id_name(func_id), func_id); return -EINVAL; }
@@@ -1490,7 -1468,8 +1490,8 @@@ static void check_reg_overflow(struct b { if (reg->max_value > BPF_REGISTER_MAX_RANGE) reg->max_value = BPF_REGISTER_MAX_RANGE; - if ((s64)reg->min_value < BPF_REGISTER_MIN_RANGE) + if (reg->min_value < BPF_REGISTER_MIN_RANGE || + reg->min_value > BPF_REGISTER_MAX_RANGE) reg->min_value = BPF_REGISTER_MIN_RANGE; }
@@@ -1498,7 -1477,9 +1499,8 @@@ static void adjust_reg_min_max_vals(str struct bpf_insn *insn) { struct bpf_reg_state *regs = env->cur_state.regs, *dst_reg; - u64 min_val = BPF_REGISTER_MIN_RANGE, max_val = BPF_REGISTER_MAX_RANGE; + s64 min_val = BPF_REGISTER_MIN_RANGE; + u64 max_val = BPF_REGISTER_MAX_RANGE; - bool min_set = false, max_set = false; u8 opcode = BPF_OP(insn->code);
dst_reg = ®s[insn->dst_reg]; @@@ -1521,6 -1502,7 +1523,6 @@@ } else if (insn->imm < BPF_REGISTER_MAX_RANGE && (s64)insn->imm > BPF_REGISTER_MIN_RANGE) { min_val = max_val = insn->imm; - min_set = max_set = true; }
/* We don't know anything about what was done to this register, mark it @@@ -1532,22 -1514,43 +1534,43 @@@ return; }
+ /* If one of our values was at the end of our ranges then we can't just + * do our normal operations to the register, we need to set the values + * to the min/max since they are undefined. + */ + if (min_val == BPF_REGISTER_MIN_RANGE) + dst_reg->min_value = BPF_REGISTER_MIN_RANGE; + if (max_val == BPF_REGISTER_MAX_RANGE) + dst_reg->max_value = BPF_REGISTER_MAX_RANGE; + switch (opcode) { case BPF_ADD: - dst_reg->min_value += min_val; - dst_reg->max_value += max_val; + if (dst_reg->min_value != BPF_REGISTER_MIN_RANGE) + dst_reg->min_value += min_val; + if (dst_reg->max_value != BPF_REGISTER_MAX_RANGE) + dst_reg->max_value += max_val; break; case BPF_SUB: - dst_reg->min_value -= min_val; - dst_reg->max_value -= max_val; + if (dst_reg->min_value != BPF_REGISTER_MIN_RANGE) + dst_reg->min_value -= min_val; + if (dst_reg->max_value != BPF_REGISTER_MAX_RANGE) + dst_reg->max_value -= max_val; break; case BPF_MUL: - dst_reg->min_value *= min_val; - dst_reg->max_value *= max_val; + if (dst_reg->min_value != BPF_REGISTER_MIN_RANGE) + dst_reg->min_value *= min_val; + if (dst_reg->max_value != BPF_REGISTER_MAX_RANGE) + dst_reg->max_value *= max_val; break; case BPF_AND: - /* & is special since it could end up with 0 bits set. */ - dst_reg->min_value &= min_val; + /* Disallow AND'ing of negative numbers, ain't nobody got time + * for that. Otherwise the minimum is 0 and the max is the max + * value we could AND against. + */ + if (min_val < 0) + dst_reg->min_value = BPF_REGISTER_MIN_RANGE; + else + dst_reg->min_value = 0; dst_reg->max_value = max_val; break; case BPF_LSH: @@@ -1557,24 -1560,25 +1580,25 @@@ */ if (min_val > ilog2(BPF_REGISTER_MAX_RANGE)) dst_reg->min_value = BPF_REGISTER_MIN_RANGE; - else + else if (dst_reg->min_value != BPF_REGISTER_MIN_RANGE) dst_reg->min_value <<= min_val;
if (max_val > ilog2(BPF_REGISTER_MAX_RANGE)) dst_reg->max_value = BPF_REGISTER_MAX_RANGE; - else + else if (dst_reg->max_value != BPF_REGISTER_MAX_RANGE) dst_reg->max_value <<= max_val; break; case BPF_RSH: - dst_reg->min_value >>= min_val; - dst_reg->max_value >>= max_val; - break; - case BPF_MOD: - /* % is special since it is an unsigned modulus, so the floor - * will always be 0. + /* RSH by a negative number is undefined, and the BPF_RSH is an + * unsigned shift, so make the appropriate casts. */ - dst_reg->min_value = 0; - dst_reg->max_value = max_val - 1; + if (min_val < 0 || dst_reg->min_value < 0) + dst_reg->min_value = BPF_REGISTER_MIN_RANGE; + else + dst_reg->min_value = + (u64)(dst_reg->min_value) >> min_val; + if (dst_reg->max_value != BPF_REGISTER_MAX_RANGE) + dst_reg->max_value >>= max_val; break; default: reset_reg_range_values(regs, insn->dst_reg); @@@ -1664,7 -1668,8 +1688,7 @@@ static int check_alu_op(struct bpf_veri insn->src_reg); return -EACCES; } - regs[insn->dst_reg].type = UNKNOWN_VALUE; - regs[insn->dst_reg].map_ptr = NULL; + mark_reg_unknown_value(regs, insn->dst_reg); } } else { /* case: R = imm @@@ -1926,38 -1931,6 +1950,38 @@@ static void reg_set_min_max_inv(struct check_reg_overflow(true_reg); }
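The rewritten switch treats BPF_REGISTER_MIN_RANGE/MAX_RANGE as sticky "unknown" sentinels rather than ordinary operands: once a bound is unknown it must stay pinned, because doing arithmetic on the sentinel would manufacture a bogus tight bound. A small standalone model of the BPF_ADD case, under the simplifying assumption of plain 64-bit bounds:

#include <stdio.h>
#include <stdint.h>

#define TOY_MIN_RANGE INT64_MIN	/* stand-in for BPF_REGISTER_MIN_RANGE */
#define TOY_MAX_RANGE INT64_MAX	/* stand-in for BPF_REGISTER_MAX_RANGE */

struct toy_reg { int64_t min, max; };

/* Mirror of the BPF_ADD branch: a sentinel in the source pins the
 * destination bound, and only still-known bounds get updated.
 */
static void toy_add(struct toy_reg *dst, int64_t min_val, int64_t max_val)
{
	if (min_val == TOY_MIN_RANGE)
		dst->min = TOY_MIN_RANGE;
	if (max_val == TOY_MAX_RANGE)
		dst->max = TOY_MAX_RANGE;
	if (dst->min != TOY_MIN_RANGE)
		dst->min += min_val;
	if (dst->max != TOY_MAX_RANGE)
		dst->max += max_val;
}

int main(void)
{
	struct toy_reg r = { 0, 10 };

	toy_add(&r, 1, TOY_MAX_RANGE);	/* unknown upper bound in the source */
	printf("min=%lld max_unknown=%d\n",
	       (long long)r.min, r.max == TOY_MAX_RANGE);
	return 0;
}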
+static void mark_map_reg(struct bpf_reg_state *regs, u32 regno, u32 id, + enum bpf_reg_type type) +{ + struct bpf_reg_state *reg = ®s[regno]; + + if (reg->type == PTR_TO_MAP_VALUE_OR_NULL && reg->id == id) { + reg->type = type; + if (type == UNKNOWN_VALUE) + mark_reg_unknown_value(regs, regno); + } +} + +/* The logic is similar to find_good_pkt_pointers(), both could eventually + * be folded together at some point. + */ +static void mark_map_regs(struct bpf_verifier_state *state, u32 regno, + enum bpf_reg_type type) +{ + struct bpf_reg_state *regs = state->regs; + int i; + + for (i = 0; i < MAX_BPF_REG; i++) + mark_map_reg(regs, i, regs[regno].id, type); + + for (i = 0; i < MAX_BPF_STACK; i += BPF_REG_SIZE) { + if (state->stack_slot_type[i] != STACK_SPILL) + continue; + mark_map_reg(state->spilled_regs, i / BPF_REG_SIZE, + regs[regno].id, type); + } +} + static int check_cond_jmp_op(struct bpf_verifier_env *env, struct bpf_insn *insn, int *insn_idx) { @@@ -2045,13 -2018,18 +2069,13 @@@ if (BPF_SRC(insn->code) == BPF_K && insn->imm == 0 && (opcode == BPF_JEQ || opcode == BPF_JNE) && dst_reg->type == PTR_TO_MAP_VALUE_OR_NULL) { - if (opcode == BPF_JEQ) { - /* next fallthrough insn can access memory via - * this register - */ - regs[insn->dst_reg].type = PTR_TO_MAP_VALUE; - /* branch targer cannot access it, since reg == 0 */ - mark_reg_unknown_value(other_branch->regs, - insn->dst_reg); - } else { - other_branch->regs[insn->dst_reg].type = PTR_TO_MAP_VALUE; - mark_reg_unknown_value(regs, insn->dst_reg); - } + /* Mark all identical map registers in each branch as either + * safe or unknown depending R == 0 or R != 0 conditional. + */ + mark_map_regs(this_branch, insn->dst_reg, + opcode == BPF_JEQ ? PTR_TO_MAP_VALUE : UNKNOWN_VALUE); + mark_map_regs(other_branch, insn->dst_reg, + opcode == BPF_JEQ ? UNKNOWN_VALUE : PTR_TO_MAP_VALUE); } else if (BPF_SRC(insn->code) == BPF_X && opcode == BPF_JGT && dst_reg->type == PTR_TO_PACKET && regs[insn->src_reg].type == PTR_TO_PACKET_END) { diff --combined net/batman-adv/hard-interface.c index 672150b4,08ce361..61a431a --- a/net/batman-adv/hard-interface.c +++ b/net/batman-adv/hard-interface.c @@@ -92,8 -92,8 +92,8 @@@ out * * Return: result of rtnl_link_ops->get_link_net or @fallback_net */ -static const struct net *batadv_getlink_net(const struct net_device *netdev, - const struct net *fallback_net) +static struct net *batadv_getlink_net(const struct net_device *netdev, + struct net *fallback_net) { if (!netdev->rtnl_link_ops) return fallback_net; @@@ -116,9 -116,9 +116,9 @@@ * Return: true if the devices are each others parent, otherwise false */ static bool batadv_mutual_parents(const struct net_device *dev1, - const struct net *net1, + struct net *net1, const struct net_device *dev2, - const struct net *net2) + struct net *net2) { int dev1_parent_iflink = dev_get_iflink(dev1); int dev2_parent_iflink = dev_get_iflink(dev2); @@@ -154,7 -154,7 +154,7 @@@ static bool batadv_is_on_batman_iface(c { struct net *net = dev_net(net_dev); struct net_device *parent_dev; - const struct net *parent_net; + struct net *parent_net; bool ret;
/* check if this is a batman-adv mesh interface */ @@@ -202,77 -202,13 +202,77 @@@ static bool batadv_is_valid_iface(cons }
/** - * batadv_is_wifi_netdev - check if the given net_device struct is a wifi - * interface + * batadv_get_real_netdevice - check if the given netdev struct is a virtual + * interface on top of another 'real' interface + * @netdev: the device to check + * + * Callers must hold the rtnl semaphore. You may want batadv_get_real_netdev() + * instead of this. + * + * Return: the 'real' net device or the original net device and NULL in case + * of an error. + */ +static struct net_device *batadv_get_real_netdevice(struct net_device *netdev) +{ + struct batadv_hard_iface *hard_iface = NULL; + struct net_device *real_netdev = NULL; + struct net *real_net; + struct net *net; + int ifindex; + + ASSERT_RTNL(); + + if (!netdev) + return NULL; + + if (netdev->ifindex == dev_get_iflink(netdev)) { + dev_hold(netdev); + return netdev; + } + + hard_iface = batadv_hardif_get_by_netdev(netdev); + if (!hard_iface || !hard_iface->soft_iface) + goto out; + + net = dev_net(hard_iface->soft_iface); + ifindex = dev_get_iflink(netdev); + real_net = batadv_getlink_net(netdev, net); + real_netdev = dev_get_by_index(real_net, ifindex); + +out: + if (hard_iface) + batadv_hardif_put(hard_iface); + return real_netdev; +} + +/** + * batadv_get_real_netdev - check if the given net_device struct is a virtual + * interface on top of another 'real' interface * @net_device: the device to check * - * Return: true if the net device is a 802.11 wireless device, false otherwise. + * Return: the 'real' net device or the original net device and NULL in case + * of an error. */ -bool batadv_is_wifi_netdev(struct net_device *net_device) +struct net_device *batadv_get_real_netdev(struct net_device *net_device) +{ + struct net_device *real_netdev; + + rtnl_lock(); + real_netdev = batadv_get_real_netdevice(net_device); + rtnl_unlock(); + + return real_netdev; +} + +/** + * batadv_is_wext_netdev - check if the given net_device struct is a + * wext wifi interface + * @net_device: the device to check + * + * Return: true if the net device is a wext wireless device, false + * otherwise. + */ +static bool batadv_is_wext_netdev(struct net_device *net_device) { if (!net_device) return false; @@@ -285,22 -221,6 +285,22 @@@ return true; #endif
+ return false; +} + +/** + * batadv_is_cfg80211_netdev - check if the given net_device struct is a + * cfg80211 wifi interface + * @net_device: the device to check + * + * Return: true if the net device is a cfg80211 wireless device, false + * otherwise. + */ +static bool batadv_is_cfg80211_netdev(struct net_device *net_device) +{ + if (!net_device) + return false; + /* cfg80211 drivers have to set ieee80211_ptr */ if (net_device->ieee80211_ptr) return true; @@@ -308,125 -228,6 +308,125 @@@ return false; }
+/** + * batadv_wifi_flags_evaluate - calculate wifi flags for net_device + * @net_device: the device to check + * + * Return: batadv_hard_iface_wifi_flags flags of the device + */ +static u32 batadv_wifi_flags_evaluate(struct net_device *net_device) +{ + u32 wifi_flags = 0; + struct net_device *real_netdev; + + if (batadv_is_wext_netdev(net_device)) + wifi_flags |= BATADV_HARDIF_WIFI_WEXT_DIRECT; + + if (batadv_is_cfg80211_netdev(net_device)) + wifi_flags |= BATADV_HARDIF_WIFI_CFG80211_DIRECT; + + real_netdev = batadv_get_real_netdevice(net_device); + if (!real_netdev) + return wifi_flags; + + if (real_netdev == net_device) + goto out; + + if (batadv_is_wext_netdev(real_netdev)) + wifi_flags |= BATADV_HARDIF_WIFI_WEXT_INDIRECT; + + if (batadv_is_cfg80211_netdev(real_netdev)) + wifi_flags |= BATADV_HARDIF_WIFI_CFG80211_INDIRECT; + +out: + dev_put(real_netdev); + return wifi_flags; +} + +/** + * batadv_is_cfg80211_hardif - check if the given hardif is a cfg80211 wifi + * interface + * @hard_iface: the device to check + * + * Return: true if the net device is a cfg80211 wireless device, false + * otherwise. + */ +bool batadv_is_cfg80211_hardif(struct batadv_hard_iface *hard_iface) +{ + u32 allowed_flags = 0; + + allowed_flags |= BATADV_HARDIF_WIFI_CFG80211_DIRECT; + allowed_flags |= BATADV_HARDIF_WIFI_CFG80211_INDIRECT; + + return !!(hard_iface->wifi_flags & allowed_flags); +} + +/** + * batadv_is_wifi_hardif - check if the given hardif is a wifi interface + * @hard_iface: the device to check + * + * Return: true if the net device is a 802.11 wireless device, false otherwise. + */ +bool batadv_is_wifi_hardif(struct batadv_hard_iface *hard_iface) +{ + if (!hard_iface) + return false; + + return hard_iface->wifi_flags != 0; +} + +/** + * batadv_hardif_no_broadcast - check whether (re)broadcast is necessary + * @if_outgoing: the outgoing interface checked and considered for (re)broadcast + * @orig_addr: the originator of this packet + * @orig_neigh: originator address of the forwarder we just got the packet from + * (NULL if we originated) + * + * Checks whether a packet needs to be (re)broadcasted on the given interface. 
+ * + * Return: + * BATADV_HARDIF_BCAST_NORECIPIENT: No neighbor on interface + * BATADV_HARDIF_BCAST_DUPFWD: Just one neighbor, but it is the forwarder + * BATADV_HARDIF_BCAST_DUPORIG: Just one neighbor, but it is the originator + * BATADV_HARDIF_BCAST_OK: Several neighbors, must broadcast + */ +int batadv_hardif_no_broadcast(struct batadv_hard_iface *if_outgoing, + u8 *orig_addr, u8 *orig_neigh) +{ + struct batadv_hardif_neigh_node *hardif_neigh; + struct hlist_node *first; + int ret = BATADV_HARDIF_BCAST_OK; + + rcu_read_lock(); + + /* 0 neighbors -> no (re)broadcast */ + first = rcu_dereference(hlist_first_rcu(&if_outgoing->neigh_list)); + if (!first) { + ret = BATADV_HARDIF_BCAST_NORECIPIENT; + goto out; + } + + /* >1 neighbors -> (re)brodcast */ + if (rcu_dereference(hlist_next_rcu(first))) + goto out; + + hardif_neigh = hlist_entry(first, struct batadv_hardif_neigh_node, + list); + + /* 1 neighbor, is the originator -> no rebroadcast */ + if (orig_addr && batadv_compare_eth(hardif_neigh->orig, orig_addr)) { + ret = BATADV_HARDIF_BCAST_DUPORIG; + /* 1 neighbor, is the one we received from -> no rebroadcast */ + } else if (orig_neigh && + batadv_compare_eth(hardif_neigh->orig, orig_neigh)) { + ret = BATADV_HARDIF_BCAST_DUPFWD; + } + +out: + rcu_read_unlock(); + return ret; +} + static struct batadv_hard_iface * batadv_hardif_get_active(const struct net_device *soft_iface) { @@@ -851,6 -652,7 +851,7 @@@ void batadv_hardif_disable_interface(st batadv_softif_destroy_sysfs(hard_iface->soft_iface); }
+ hard_iface->soft_iface = NULL; batadv_hardif_put(hard_iface);
out: @@@ -895,8 -697,7 +896,8 @@@ batadv_hardif_add_interface(struct net_ kref_init(&hard_iface->refcount);
hard_iface->num_bcasts = BATADV_NUM_BCASTS_DEFAULT; - if (batadv_is_wifi_netdev(net_dev)) + hard_iface->wifi_flags = batadv_wifi_flags_evaluate(net_dev); + if (batadv_is_wifi_hardif(hard_iface)) hard_iface->num_bcasts = BATADV_NUM_BCASTS_WIRELESS;
batadv_v_hardif_init(hard_iface); @@@ -1005,11 -806,6 +1006,11 @@@ static int batadv_hard_if_event(struct if (hard_iface == primary_if) batadv_primary_if_update_addr(bat_priv, NULL); break; + case NETDEV_CHANGEUPPER: + hard_iface->wifi_flags = batadv_wifi_flags_evaluate(net_dev); + if (batadv_is_wifi_hardif(hard_iface)) + hard_iface->num_bcasts = BATADV_NUM_BCASTS_WIRELESS; + break; default: break; } diff --combined net/batman-adv/tp_meter.c index f156452,8af1611..981e8c5 --- a/net/batman-adv/tp_meter.c +++ b/net/batman-adv/tp_meter.c @@@ -615,6 -615,9 +615,6 @@@ static int batadv_tp_send_msg(struct ba batadv_tp_fill_prerandom(tp_vars, data, data_len);
r = batadv_send_skb_to_orig(skb, orig_node, NULL); - if (r == -1) - kfree_skb(skb); - if (r == NET_XMIT_SUCCESS) return 0;
@@@ -834,6 -837,7 +834,7 @@@ static int batadv_tp_send(void *arg primary_if = batadv_primary_if_get_selected(bat_priv); if (unlikely(!primary_if)) { err = BATADV_TP_REASON_DST_UNREACHABLE; + tp_vars->reason = err; goto out; }
@@@ -1203,6 -1207,9 +1204,6 @@@ static int batadv_tp_send_ack(struct ba
/* send the ack */ r = batadv_send_skb_to_orig(skb, orig_node, NULL); - if (r == -1) - kfree_skb(skb); - if (unlikely(r < 0) || (r == NET_XMIT_DROP)) { ret = BATADV_TP_REASON_DST_UNREACHABLE; goto out; diff --combined net/core/net_namespace.c index 35d37b1,7001da9..a38feac --- a/net/core/net_namespace.c +++ b/net/core/net_namespace.c @@@ -55,7 -55,7 +55,7 @@@ static struct net_generic *net_alloc_ge return ng; }
-static int net_assign_generic(struct net *net, int id, void *data) +static int net_assign_generic(struct net *net, unsigned int id, void *data) { struct net_generic *ng, *old_ng;
@@@ -122,7 -122,8 +122,7 @@@ out static void ops_free(const struct pernet_operations *ops, struct net *net) { if (ops->id && ops->size) { - int id = *ops->id; - kfree(net_generic(net, id)); + kfree(net_generic(net, *ops->id)); } }
@@@ -218,6 -219,8 +218,8 @@@ int peernet2id_alloc(struct net *net, s bool alloc; int id;
+ if (atomic_read(&net->count) == 0) + return NETNSA_NSID_NOT_ASSIGNED; spin_lock_irqsave(&net->nsid_lock, flags); alloc = atomic_read(&peer->count) == 0 ? false : true; id = __peernet2id_alloc(net, peer, &alloc); @@@ -381,14 -384,7 +383,14 @@@ struct net *copy_net_ns(unsigned long f
get_user_ns(user_ns);
- mutex_lock(&net_mutex); + rv = mutex_lock_killable(&net_mutex); + if (rv < 0) { + net_free(net); + dec_net_namespaces(ucounts); + put_user_ns(user_ns); + return ERR_PTR(rv); + } + net->ucounts = ucounts; rv = setup_net(net, user_ns); if (rv == 0) { @@@ -880,7 -876,7 +882,7 @@@ again } return error; } - max_gen_ptrs = max_t(unsigned int, max_gen_ptrs, *ops->id); + max_gen_ptrs = max(max_gen_ptrs, *ops->id); } error = __register_pernet_operations(list, ops); if (error) { diff --combined net/ipv4/fib_frontend.c index d93eea8,161fc0f..121384b --- a/net/ipv4/fib_frontend.c +++ b/net/ipv4/fib_frontend.c @@@ -151,7 -151,7 +151,7 @@@ static void fib_replace_table(struct ne
int fib_unmerge(struct net *net) { - struct fib_table *old, *new; + struct fib_table *old, *new, *main_table;
/* attempt to fetch local table if it has been allocated */ old = fib_get_table(net, RT_TABLE_LOCAL); @@@ -162,11 -162,21 +162,21 @@@ if (!new) return -ENOMEM;
+ /* table is already unmerged */ + if (new == old) + return 0; + /* replace merged table with clean table */ - if (new != old) { - fib_replace_table(net, old, new); - fib_free_table(old); - } + fib_replace_table(net, old, new); + fib_free_table(old); + + /* attempt to fetch main table if it has been allocated */ + main_table = fib_get_table(net, RT_TABLE_MAIN); + if (!main_table) + return 0; + + /* flush local entries from main table */ + fib_table_flush_external(main_table);
return 0; } @@@ -610,7 -620,6 +620,7 @@@ const struct nla_policy rtm_ipv4_policy [RTA_FLOW] = { .type = NLA_U32 }, [RTA_ENCAP_TYPE] = { .type = NLA_U16 }, [RTA_ENCAP] = { .type = NLA_NESTED }, + [RTA_UID] = { .type = NLA_U32 }, };
static int rtm_to_fib_config(struct net *net, struct sk_buff *skb, diff --combined net/ipv4/tcp_cong.c index 38905ec,f9038d6b..79c4817 --- a/net/ipv4/tcp_cong.c +++ b/net/ipv4/tcp_cong.c @@@ -68,9 -68,8 +68,9 @@@ int tcp_register_congestion_control(str { int ret = 0;
- /* all algorithms must implement ssthresh and cong_avoid ops */ - if (!ca->ssthresh || !(ca->cong_avoid || ca->cong_control)) { + /* all algorithms must implement these */ + if (!ca->ssthresh || !ca->undo_cwnd || + !(ca->cong_avoid || ca->cong_control)) { pr_err("%s does not implement required ops\n", ca->name); return -EINVAL; } @@@ -201,8 -200,10 +201,10 @@@ static void tcp_reinit_congestion_contr icsk->icsk_ca_ops = ca; icsk->icsk_ca_setsockopt = 1;
- if (sk->sk_state != TCP_CLOSE) + if (sk->sk_state != TCP_CLOSE) { + memset(icsk->icsk_ca_priv, 0, sizeof(icsk->icsk_ca_priv)); tcp_init_congestion_control(sk); + } }
/* Manage refcounts on socket close. */ @@@ -442,19 -443,10 +444,19 @@@ u32 tcp_reno_ssthresh(struct sock *sk } EXPORT_SYMBOL_GPL(tcp_reno_ssthresh);
+u32 tcp_reno_undo_cwnd(struct sock *sk) +{ + const struct tcp_sock *tp = tcp_sk(sk); + + return max(tp->snd_cwnd, tp->snd_ssthresh << 1); +} +EXPORT_SYMBOL_GPL(tcp_reno_undo_cwnd); + struct tcp_congestion_ops tcp_reno = { .flags = TCP_CONG_NON_RESTRICTED, .name = "reno", .owner = THIS_MODULE, .ssthresh = tcp_reno_ssthresh, .cong_avoid = tcp_reno_cong_avoid, + .undo_cwnd = tcp_reno_undo_cwnd, }; diff --combined net/ipv4/udp.c index b949770,0de9d5d..b3b6bc5 --- a/net/ipv4/udp.c +++ b/net/ipv4/udp.c @@@ -580,8 -580,7 +580,8 @@@ EXPORT_SYMBOL_GPL(udp4_lib_lookup_skb) * Does increment socket refcount. */ #if IS_ENABLED(CONFIG_NETFILTER_XT_MATCH_SOCKET) || \ - IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TPROXY) + IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TPROXY) || \ + IS_ENABLED(CONFIG_NF_SOCKET_IPV4) struct sock *udp4_lib_lookup(struct net *net, __be32 saddr, __be16 sport, __be32 daddr, __be16 dport, int dif) { @@@ -1020,8 -1019,7 +1020,8 @@@ int udp_sendmsg(struct sock *sk, struc flowi4_init_output(fl4, ipc.oif, sk->sk_mark, tos, RT_SCOPE_UNIVERSE, sk->sk_protocol, flow_flags, - faddr, saddr, dport, inet->inet_sport); + faddr, saddr, dport, inet->inet_sport, + sk->sk_uid);
security_sk_classify_flow(sk, flowi4_to_flowi(fl4)); rt = ip_route_output_flow(net, fl4, sk); @@@ -1174,120 -1172,6 +1174,120 @@@ out return ret; }
+/* fully reclaim rmem/fwd memory allocated for skb */ +static void udp_rmem_release(struct sock *sk, int size, int partial) +{ + int amt; + + atomic_sub(size, &sk->sk_rmem_alloc); + sk->sk_forward_alloc += size; + amt = (sk->sk_forward_alloc - partial) & ~(SK_MEM_QUANTUM - 1); + sk->sk_forward_alloc -= amt; + + if (amt) + __sk_mem_reduce_allocated(sk, amt >> SK_MEM_QUANTUM_SHIFT); +} + +/* Note: called with sk_receive_queue.lock held */ +void udp_skb_destructor(struct sock *sk, struct sk_buff *skb) +{ + udp_rmem_release(sk, skb->truesize, 1); +} +EXPORT_SYMBOL(udp_skb_destructor); + +int __udp_enqueue_schedule_skb(struct sock *sk, struct sk_buff *skb) +{ + struct sk_buff_head *list = &sk->sk_receive_queue; + int rmem, delta, amt, err = -ENOMEM; + int size = skb->truesize; + + /* try to avoid the costly atomic add/sub pair when the receive + * queue is full; always allow at least a packet + */ + rmem = atomic_read(&sk->sk_rmem_alloc); + if (rmem && (rmem + size > sk->sk_rcvbuf)) + goto drop; + + /* we drop only if the receive buf is full and the receive + * queue contains some other skb + */ + rmem = atomic_add_return(size, &sk->sk_rmem_alloc); + if ((rmem > sk->sk_rcvbuf) && (rmem > size)) + goto uncharge_drop; + + spin_lock(&list->lock); + if (size >= sk->sk_forward_alloc) { + amt = sk_mem_pages(size); + delta = amt << SK_MEM_QUANTUM_SHIFT; + if (!__sk_mem_raise_allocated(sk, delta, amt, SK_MEM_RECV)) { + err = -ENOBUFS; + spin_unlock(&list->lock); + goto uncharge_drop; + } + + sk->sk_forward_alloc += delta; + } + + sk->sk_forward_alloc -= size; + + /* no need to setup a destructor, we will explicitly release the + * forward allocated memory on dequeue + */ + skb->dev = NULL; + sock_skb_set_dropcount(sk, skb); + + __skb_queue_tail(list, skb); + spin_unlock(&list->lock); + + if (!sock_flag(sk, SOCK_DEAD)) + sk->sk_data_ready(sk); + + return 0; + +uncharge_drop: + atomic_sub(skb->truesize, &sk->sk_rmem_alloc); + +drop: + atomic_inc(&sk->sk_drops); + return err; +} +EXPORT_SYMBOL_GPL(__udp_enqueue_schedule_skb); + +void udp_destruct_sock(struct sock *sk) +{ + /* reclaim completely the forward allocated memory */ + unsigned int total = 0; + struct sk_buff *skb; + + while ((skb = __skb_dequeue(&sk->sk_receive_queue)) != NULL) { + total += skb->truesize; + kfree_skb(skb); + } + udp_rmem_release(sk, total, 0); + + inet_sock_destruct(sk); +} +EXPORT_SYMBOL_GPL(udp_destruct_sock); + +int udp_init_sock(struct sock *sk) +{ + sk->sk_destruct = udp_destruct_sock; + return 0; +} +EXPORT_SYMBOL_GPL(udp_init_sock); + +void skb_consume_udp(struct sock *sk, struct sk_buff *skb, int len) +{ + if (unlikely(READ_ONCE(sk->sk_peek_off) >= 0)) { + bool slow = lock_sock_fast(sk); + + sk_peek_offset_bwd(sk, len); + unlock_sock_fast(sk, slow); + } + consume_skb(skb); +} +EXPORT_SYMBOL_GPL(skb_consume_udp); + /** * first_packet_length - return length of first packet in receive queue * @sk: socket @@@ -1297,11 -1181,12 +1297,11 @@@ */ static int first_packet_length(struct sock *sk) { - struct sk_buff_head list_kill, *rcvq = &sk->sk_receive_queue; + struct sk_buff_head *rcvq = &sk->sk_receive_queue; struct sk_buff *skb; + int total = 0; int res;
- __skb_queue_head_init(&list_kill); - spin_lock_bh(&rcvq->lock); while ((skb = skb_peek(rcvq)) != NULL && udp_lib_checksum_complete(skb)) { @@@ -1311,13 -1196,18 +1311,13 @@@ IS_UDPLITE(sk)); atomic_inc(&sk->sk_drops); __skb_unlink(skb, rcvq); - __skb_queue_tail(&list_kill, skb); + total += skb->truesize; + kfree_skb(skb); } res = skb ? skb->len : -1; + if (total) + udp_rmem_release(sk, total, 1); spin_unlock_bh(&rcvq->lock); - - if (!skb_queue_empty(&list_kill)) { - bool slow = lock_sock_fast(sk); - - __skb_queue_purge(&list_kill); - sk_mem_reclaim_partial(sk); - unlock_sock_fast(sk, slow); - } return res; }
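The new accounting above replaces per-skb destructors with explicit bookkeeping: udp_rmem_release() refunds an skb's truesize to sk_forward_alloc, then hands back to the memory allocator only whole SK_MEM_QUANTUM chunks, keeping the partial remainder cached for the next packet. A standalone model of that rounding, assuming the usual 4 KiB quantum (SK_MEM_QUANTUM derives from PAGE_SIZE):

#include <stdio.h>

#define SK_MEM_QUANTUM       4096	/* assumption: PAGE_SIZE */
#define SK_MEM_QUANTUM_SHIFT 12

struct toy_sock { int rmem_alloc, fwd_alloc, allocated_pages; };

/* Mirror of udp_rmem_release(): callers pass partial=1 to keep the last
 * partially-used quantum cached, partial=0 to reclaim everything.
 */
static void toy_rmem_release(struct toy_sock *sk, int size, int partial)
{
	int amt;

	sk->rmem_alloc -= size;
	sk->fwd_alloc += size;
	amt = (sk->fwd_alloc - partial) & ~(SK_MEM_QUANTUM - 1);
	sk->fwd_alloc -= amt;
	if (amt)
		sk->allocated_pages -= amt >> SK_MEM_QUANTUM_SHIFT;
}

int main(void)
{
	struct toy_sock sk = { .rmem_alloc = 10000, .fwd_alloc = 300,
			       .allocated_pages = 3 };

	toy_rmem_release(&sk, 10000, 1);
	printf("fwd=%d pages=%d\n", sk.fwd_alloc, sk.allocated_pages);
	return 0;
}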
@@@ -1366,13 -1256,15 +1366,13 @@@ int udp_recvmsg(struct sock *sk, struc int err; int is_udplite = IS_UDPLITE(sk); bool checksum_valid = false; - bool slow;
if (flags & MSG_ERRQUEUE) return ip_recv_error(sk, msg, len, addr_len);
try_again: peeking = off = sk_peek_offset(sk, flags); - skb = __skb_recv_datagram(sk, flags | (noblock ? MSG_DONTWAIT : 0), - &peeked, &off, &err); + skb = __skb_recv_udp(sk, flags, noblock, &peeked, &off, &err); if (!skb) return err;
@@@ -1389,8 -1281,7 +1389,8 @@@ * coverage checksum (UDP-Lite), do it before the copy. */
- if (copied < ulen || UDP_SKB_CB(skb)->partial_cov || peeking) { + if (copied < ulen || peeking || + (is_udplite && UDP_SKB_CB(skb)->partial_cov)) { checksum_valid = !udp_lib_checksum_complete(skb); if (!checksum_valid) goto csum_copy_err; @@@ -1406,12 -1297,13 +1406,12 @@@ }
if (unlikely(err)) { - trace_kfree_skb(skb, udp_recvmsg); if (!peeked) { atomic_inc(&sk->sk_drops); UDP_INC_STATS(sock_net(sk), UDP_MIB_INERRORS, is_udplite); } - skb_free_datagram_locked(sk, skb); + kfree_skb(skb); return err; }
@@@ -1430,21 -1322,22 +1430,21 @@@ *addr_len = sizeof(*sin); } if (inet->cmsg_flags) - ip_cmsg_recv_offset(msg, skb, sizeof(struct udphdr), off); + ip_cmsg_recv_offset(msg, sk, skb, sizeof(struct udphdr), off);
err = copied; if (flags & MSG_TRUNC) err = ulen;
- __skb_free_datagram_locked(sk, skb, peeking ? -err : err); + skb_consume_udp(sk, skb, peeking ? -err : err); return err;
csum_copy_err: - slow = lock_sock_fast(sk); - if (!skb_kill_datagram(sk, skb, flags)) { + if (!__sk_queue_drop_skb(sk, skb, flags)) { UDP_INC_STATS(sock_net(sk), UDP_MIB_CSUMERRORS, is_udplite); UDP_INC_STATS(sock_net(sk), UDP_MIB_INERRORS, is_udplite); } - unlock_sock_fast(sk, slow); + kfree_skb(skb);
/* starting over for a new packet, but check if we need to yield */ cond_resched(); @@@ -1570,11 -1463,9 +1570,11 @@@ static int __udp_queue_rcv_skb(struct s sock_rps_save_rxhash(sk, skb); sk_mark_napi_id(sk, skb); sk_incoming_cpu_update(sk); + } else { + sk_mark_napi_id_once(sk, skb); }
- rc = __sock_queue_rcv_skb(sk, skb); + rc = __udp_enqueue_schedule_skb(sk, skb); if (rc < 0) { int is_udplite = IS_UDPLITE(sk);
@@@ -1589,6 -1480,7 +1589,6 @@@ }
return 0; - }
static struct static_key udp_encap_needed __read_mostly; @@@ -1610,6 -1502,7 +1610,6 @@@ EXPORT_SYMBOL(udp_encap_enable) int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) { struct udp_sock *up = udp_sk(sk); - int rc; int is_udplite = IS_UDPLITE(sk);
/* @@@ -1696,9 -1589,25 +1696,9 @@@ goto drop;
udp_csum_pull_header(skb); - if (sk_rcvqueues_full(sk, sk->sk_rcvbuf)) { - __UDP_INC_STATS(sock_net(sk), UDP_MIB_RCVBUFERRORS, - is_udplite); - goto drop; - } - - rc = 0;
ipv4_pktinfo_prepare(sk, skb); - bh_lock_sock(sk); - if (!sock_owned_by_user(sk)) - rc = __udp_queue_rcv_skb(sk, skb); - else if (sk_add_backlog(sk, skb, sk->sk_rcvbuf)) { - bh_unlock_sock(sk); - goto drop; - } - bh_unlock_sock(sk); - - return rc; + return __udp_queue_rcv_skb(sk, skb);
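With the change above, udp_queue_rcv_skb() no longer bounces packets through the socket backlog under the socket lock; everything funnels into __udp_enqueue_schedule_skb(), which starts with a plain atomic_read() so a clearly full queue is rejected without paying for the atomic add/sub pair. A sketch of that cheap-rejection idiom (simplified to a single counter, using C11 atomics):

#include <stdio.h>
#include <stdatomic.h>

static atomic_int rmem_alloc;
static int rcvbuf = 65536;

/* The first check may race; the add_return below re-validates. It only
 * saves the add/sub round trip when the queue is obviously full.
 */
static int toy_enqueue(int size)
{
	int rmem = atomic_load(&rmem_alloc);

	if (rmem && rmem + size > rcvbuf)
		return -1;			/* drop without touching the counter */

	rmem = atomic_fetch_add(&rmem_alloc, size) + size;
	if (rmem > rcvbuf && rmem > size) {	/* always allow at least one skb */
		atomic_fetch_sub(&rmem_alloc, size);
		return -1;
	}
	return 0;
}

int main(void)
{
	printf("%d\n", toy_enqueue(1500));	/* 0: accepted */
	atomic_store(&rmem_alloc, 70000);
	printf("%d\n", toy_enqueue(1500));	/* -1: rejected cheaply */
	return 0;
}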
csum_error: __UDP_INC_STATS(sock_net(sk), UDP_MIB_CSUMERRORS, is_udplite); @@@ -1743,10 -1652,10 +1743,10 @@@ static int __udp4_lib_mcast_deliver(str
if (use_hash2) { hash2_any = udp4_portaddr_hash(net, htonl(INADDR_ANY), hnum) & - udp_table.mask; - hash2 = udp4_portaddr_hash(net, daddr, hnum) & udp_table.mask; + udptable->mask; + hash2 = udp4_portaddr_hash(net, daddr, hnum) & udptable->mask; start_lookup: - hslot = &udp_table.hash2[hash2]; + hslot = &udptable->hash2[hash2]; offset = offsetof(typeof(*sk), __sk_common.skc_portaddr_node); }
@@@ -2308,13 -2217,13 +2308,13 @@@ struct proto udp_prot = .connect = ip4_datagram_connect, .disconnect = udp_disconnect, .ioctl = udp_ioctl, + .init = udp_init_sock, .destroy = udp_destroy_sock, .setsockopt = udp_setsockopt, .getsockopt = udp_getsockopt, .sendmsg = udp_sendmsg, .recvmsg = udp_recvmsg, .sendpage = udp_sendpage, - .backlog_rcv = __udp_queue_rcv_skb, .release_cb = ip4_datagram_release_cb, .hash = udp_lib_hash, .unhash = udp_lib_unhash, diff --combined net/ipv6/ip6_tunnel.c index d3c619e,0a4759b..1f49fb1 --- a/net/ipv6/ip6_tunnel.c +++ b/net/ipv6/ip6_tunnel.c @@@ -83,7 -83,7 +83,7 @@@ static int ip6_tnl_dev_init(struct net_ static void ip6_tnl_dev_setup(struct net_device *dev); static struct rtnl_link_ops ip6_link_ops __read_mostly;
-static int ip6_tnl_net_id __read_mostly; +static unsigned int ip6_tnl_net_id __read_mostly; struct ip6_tnl_net { /* the IPv6 tunnel fallback device */ struct net_device *fb_tnl_dev; @@@ -1034,6 -1034,7 +1034,7 @@@ int ip6_tnl_xmit(struct sk_buff *skb, s int mtu; unsigned int psh_hlen = sizeof(struct ipv6hdr) + t->encap_hlen; unsigned int max_headroom = psh_hlen; + bool use_cache = false; u8 hop_limit; int err = -1;
@@@ -1066,7 -1067,15 +1067,15 @@@
memcpy(&fl6->daddr, addr6, sizeof(fl6->daddr)); neigh_release(neigh); - } else if (!fl6->flowi6_mark) + } else if (!(t->parms.flags & + (IP6_TNL_F_USE_ORIG_TCLASS | IP6_TNL_F_USE_ORIG_FWMARK))) { + /* enable the cache only if the routing decision does + * not depend on the current inner header value + */ + use_cache = true; + } + + if (use_cache) dst = dst_cache_get(&t->dst_cache);
if (!ip6_tnl_xmit_ctl(t, &fl6->saddr, &fl6->daddr)) @@@ -1150,14 -1159,14 +1159,14 @@@ route_lookup if (t->encap.type != TUNNEL_ENCAP_NONE) goto tx_err_dst_release; } else { - if (!fl6->flowi6_mark && ndst) + if (use_cache && ndst) dst_cache_set_ip6(&t->dst_cache, ndst, &fl6->saddr); } skb_dst_set(skb, dst);
if (encap_limit >= 0) { init_tel_txopt(&opt, encap_limit); - ipv6_push_nfrag_opts(skb, &opt.ops, &proto, NULL); + ipv6_push_nfrag_opts(skb, &opt.ops, &proto, NULL, NULL); }
/* Calculate max headroom for all the headers and adjust @@@ -1240,8 -1249,6 +1249,8 @@@ ip4ip6_tnl_xmit(struct sk_buff *skb, st fl6.flowi6_mark = skb->mark; }
+ fl6.flowi6_uid = sock_net_uid(dev_net(dev), NULL); + if (iptunnel_handle_offloads(skb, SKB_GSO_IPXIP6)) return -1;
@@@ -1320,8 -1327,6 +1329,8 @@@ ip6ip6_tnl_xmit(struct sk_buff *skb, st fl6.flowi6_mark = skb->mark; }
+ fl6.flowi6_uid = sock_net_uid(dev_net(dev), NULL); + if (iptunnel_handle_offloads(skb, SKB_GSO_IPXIP6)) return -1;
@@@ -1641,7 -1646,7 +1650,7 @@@ int ip6_tnl_change_mtu(struct net_devic struct ip6_tnl *tnl = netdev_priv(dev);
if (tnl->parms.proto == IPPROTO_IPIP) { - if (new_mtu < 68) + if (new_mtu < ETH_MIN_MTU) return -EINVAL; } else { if (new_mtu < IPV6_MIN_MTU) @@@ -1794,8 -1799,6 +1803,8 @@@ ip6_tnl_dev_init_gen(struct net_device dev->mtu = ETH_DATA_LEN - t_hlen; if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT)) dev->mtu -= 8; + dev->min_mtu = ETH_MIN_MTU; + dev->max_mtu = 0xFFF8 - dev->hard_header_len;
return 0;
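Editor's note: ip6_tnl_change_mtu() keeps an explicit lower-bound check, while the new dev->min_mtu / dev->max_mtu fields feed the generic range validation in the core. A stand-alone model of that generic check, under the assumption (matching this era's core code) that max_mtu == 0 means "no upper bound"; the tunnel bounds below are illustrative:

    #include <stdio.h>

    struct fake_dev { unsigned int min_mtu, max_mtu; };

    static int change_mtu(const struct fake_dev *dev, unsigned int new_mtu)
    {
            if (new_mtu < dev->min_mtu)
                    return -22;                       /* -EINVAL */
            if (dev->max_mtu > 0 && new_mtu > dev->max_mtu)
                    return -22;
            return 0;
    }

    int main(void)
    {
            /* ETH_MIN_MTU is 68; 40 stands in for a sample header len */
            struct fake_dev tnl = { .min_mtu = 68, .max_mtu = 0xFFF8 - 40 };

            printf("%d %d %d\n", change_mtu(&tnl, 60),
                   change_mtu(&tnl, 1500), change_mtu(&tnl, 0x10000));
            return 0;
    }

With range checks centralized, a driver only needs an ndo_change_mtu when it has extra work to do, such as notifying hardware.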
diff --combined net/ipv6/udp.c index 8fd4d89,e5056d4..ba25ec2 --- a/net/ipv6/udp.c +++ b/net/ipv6/udp.c @@@ -302,8 -302,7 +302,8 @@@ EXPORT_SYMBOL_GPL(udp6_lib_lookup_skb) * Does increment socket refcount. */ #if IS_ENABLED(CONFIG_NETFILTER_XT_MATCH_SOCKET) || \ - IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TPROXY) + IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TPROXY) || \ + IS_ENABLED(CONFIG_NF_SOCKET_IPV6) struct sock *udp6_lib_lookup(struct net *net, const struct in6_addr *saddr, __be16 sport, const struct in6_addr *daddr, __be16 dport, int dif) { @@@ -335,6 -334,7 +335,6 @@@ int udpv6_recvmsg(struct sock *sk, stru int is_udplite = IS_UDPLITE(sk); bool checksum_valid = false; int is_udp4; - bool slow;
if (flags & MSG_ERRQUEUE) return ipv6_recv_error(sk, msg, len, addr_len); @@@ -344,7 -344,8 +344,7 @@@
try_again: peeking = off = sk_peek_offset(sk, flags); - skb = __skb_recv_datagram(sk, flags | (noblock ? MSG_DONTWAIT : 0), - &peeked, &off, &err); + skb = __skb_recv_udp(sk, flags, noblock, &peeked, &off, &err); if (!skb) return err;
@@@ -363,8 -364,7 +363,8 @@@ * coverage checksum (UDP-Lite), do it before the copy. */
- if (copied < ulen || UDP_SKB_CB(skb)->partial_cov || peeking) { + if (copied < ulen || peeking || + (is_udplite && UDP_SKB_CB(skb)->partial_cov)) { checksum_valid = !udp_lib_checksum_complete(skb); if (!checksum_valid) goto csum_copy_err; @@@ -378,6 -378,7 +378,6 @@@ goto csum_copy_err; } if (unlikely(err)) { - trace_kfree_skb(skb, udpv6_recvmsg); if (!peeked) { atomic_inc(&sk->sk_drops); if (is_udp4) @@@ -387,7 -388,7 +387,7 @@@ UDP6_INC_STATS(sock_net(sk), UDP_MIB_INERRORS, is_udplite); } - skb_free_datagram_locked(sk, skb); + kfree_skb(skb); return err; } if (!peeked) { @@@ -426,7 -427,7 +426,7 @@@
if (is_udp4) { if (inet->cmsg_flags) - ip_cmsg_recv_offset(msg, skb, + ip_cmsg_recv_offset(msg, sk, skb, sizeof(struct udphdr), off); } else { if (np->rxopt.all) @@@ -437,11 -438,12 +437,11 @@@ if (flags & MSG_TRUNC) err = ulen;
- __skb_free_datagram_locked(sk, skb, peeking ? -err : err); + skb_consume_udp(sk, skb, peeking ? -err : err); return err;
csum_copy_err: - slow = lock_sock_fast(sk); - if (!skb_kill_datagram(sk, skb, flags)) { + if (!__sk_queue_drop_skb(sk, skb, flags)) { if (is_udp4) { UDP_INC_STATS(sock_net(sk), UDP_MIB_CSUMERRORS, is_udplite); @@@ -454,7 -456,7 +454,7 @@@ UDP_MIB_INERRORS, is_udplite); } } - unlock_sock_fast(sk, slow); + kfree_skb(skb);
/* starting over for a new packet, but check if we need to yield */ cond_resched(); @@@ -520,11 -522,9 +520,11 @@@ static int __udpv6_queue_rcv_skb(struc sock_rps_save_rxhash(sk, skb); sk_mark_napi_id(sk, skb); sk_incoming_cpu_update(sk); + } else { + sk_mark_napi_id_once(sk, skb); }
- rc = __sock_queue_rcv_skb(sk, skb); + rc = __udp_enqueue_schedule_skb(sk, skb); if (rc < 0) { int is_udplite = IS_UDPLITE(sk);
@@@ -536,7 -536,6 +536,7 @@@ kfree_skb(skb); return -1; } + return 0; }
@@@ -558,6 -557,7 +558,6 @@@ EXPORT_SYMBOL(udpv6_encap_enable) int udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) { struct udp_sock *up = udp_sk(sk); - int rc; int is_udplite = IS_UDPLITE(sk);
if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) @@@ -623,10 -623,25 +623,10 @@@ goto drop;
udp_csum_pull_header(skb); - if (sk_rcvqueues_full(sk, sk->sk_rcvbuf)) { - __UDP6_INC_STATS(sock_net(sk), - UDP_MIB_RCVBUFERRORS, is_udplite); - goto drop; - }
skb_dst_drop(skb);
- bh_lock_sock(sk); - rc = 0; - if (!sock_owned_by_user(sk)) - rc = __udpv6_queue_rcv_skb(sk, skb); - else if (sk_add_backlog(sk, skb, sk->sk_rcvbuf)) { - bh_unlock_sock(sk); - goto drop; - } - bh_unlock_sock(sk); - - return rc; + return __udpv6_queue_rcv_skb(sk, skb);
csum_error: __UDP6_INC_STATS(sock_net(sk), UDP_MIB_CSUMERRORS, is_udplite); @@@ -691,10 -706,10 +691,10 @@@ static int __udp6_lib_mcast_deliver(str
if (use_hash2) { hash2_any = udp6_portaddr_hash(net, &in6addr_any, hnum) & - udp_table.mask; - hash2 = udp6_portaddr_hash(net, daddr, hnum) & udp_table.mask; + udptable->mask; + hash2 = udp6_portaddr_hash(net, daddr, hnum) & udptable->mask; start_lookup: - hslot = &udp_table.hash2[hash2]; + hslot = &udptable->hash2[hash2]; offset = offsetof(typeof(*sk), __sk_common.skc_portaddr_node); }
@@@ -1141,7 -1156,6 +1141,7 @@@ do_udp_sendmsg fl6.flowi6_oif = np->sticky_pktinfo.ipi6_ifindex;
fl6.flowi6_mark = sk->sk_mark; + fl6.flowi6_uid = sk->sk_uid; sockc.tsflags = sk->sk_tsflags;
if (msg->msg_controllen) { @@@ -1420,12 -1434,12 +1420,12 @@@ struct proto udpv6_prot = .connect = ip6_datagram_connect, .disconnect = udp_disconnect, .ioctl = udp_ioctl, + .init = udp_init_sock, .destroy = udpv6_destroy_sock, .setsockopt = udpv6_setsockopt, .getsockopt = udpv6_getsockopt, .sendmsg = udpv6_sendmsg, .recvmsg = udpv6_recvmsg, - .backlog_rcv = __udpv6_queue_rcv_skb, .release_cb = ip6_datagram_release_cb, .hash = udp_lib_hash, .unhash = udp_lib_unhash, diff --combined net/l2tp/l2tp_eth.c index e2c6ae0,3dc97b4..5b90eb6 --- a/net/l2tp/l2tp_eth.c +++ b/net/l2tp/l2tp_eth.c @@@ -97,7 -97,7 +97,7 @@@ static int l2tp_eth_dev_xmit(struct sk_ unsigned int len = skb->len; int ret = l2tp_xmit_skb(session, skb, session->hdr_len);
- if (likely(ret == NET_XMIT_SUCCESS)) { + if (likely(ret == NET_XMIT_SUCCESS || ret == NET_XMIT_CN)) { atomic_long_add(len, &priv->tx_bytes); atomic_long_inc(&priv->tx_packets); } else { @@@ -259,8 -259,6 +259,8 @@@ static int l2tp_eth_create(struct net * session->mtu = dev->mtu - session->hdr_len; dev->mtu = session->mtu; dev->needed_headroom += session->hdr_len; + dev->min_mtu = 0; + dev->max_mtu = ETH_MAX_MTU;
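Editor's note: the first hunk above counts NET_XMIT_CN as a successful transmit. NET_XMIT_CN signals congestion, but the frame is typically still sent, so treating it as a drop would inflate tx_dropped. A stand-alone sketch of the rule (return codes mirror include/linux/netdevice.h):

    #include <stdio.h>

    #define NET_XMIT_SUCCESS 0x00
    #define NET_XMIT_DROP    0x01
    #define NET_XMIT_CN      0x02

    struct stats { long tx_packets, tx_dropped; };

    static void account(struct stats *s, int ret)
    {
            /* congestion notification is not a drop */
            if (ret == NET_XMIT_SUCCESS || ret == NET_XMIT_CN)
                    s->tx_packets++;
            else
                    s->tx_dropped++;
    }

    int main(void)
    {
            struct stats s = { 0, 0 };

            account(&s, NET_XMIT_SUCCESS);
            account(&s, NET_XMIT_CN);
            account(&s, NET_XMIT_DROP);
            printf("tx=%ld dropped=%ld\n", s.tx_packets, s.tx_dropped);
            return 0;
    }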
priv = netdev_priv(dev); priv->dev = dev; diff --combined net/l2tp/l2tp_ip6.c index 1cea54f,9978d01..667ec90 --- a/net/l2tp/l2tp_ip6.c +++ b/net/l2tp/l2tp_ip6.c @@@ -269,8 -269,6 +269,6 @@@ static int l2tp_ip6_bind(struct sock *s int addr_type; int err;
- if (!sock_flag(sk, SOCK_ZAPPED)) - return -EINVAL; if (addr->l2tp_family != AF_INET6) return -EINVAL; if (addr_len < sizeof(*addr)) @@@ -296,6 -294,9 +294,9 @@@ lock_sock(sk);
err = -EINVAL; + if (!sock_flag(sk, SOCK_ZAPPED)) + goto out_unlock; + if (sk->sk_state != TCP_CLOSE) goto out_unlock;
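Editor's note: the hunks above move the SOCK_ZAPPED ("not yet bound") test from before lock_sock() to under it, so two threads binding the same socket cannot both observe the unbound state. A stand-alone illustration of why the check must sit under the same lock that publishes the binding:

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t sk_lock = PTHREAD_MUTEX_INITIALIZER;
    static int bound;                 /* stands in for !SOCK_ZAPPED */

    static int sk_bind(void)
    {
            int err = 0;

            pthread_mutex_lock(&sk_lock);     /* lock_sock(sk)   */
            if (bound)
                    err = -22;                /* -EINVAL         */
            else
                    bound = 1;                /* commit binding  */
            pthread_mutex_unlock(&sk_lock);   /* release_sock()  */
            return err;
    }

    int main(void)
    {
            printf("%d %d\n", sk_bind(), sk_bind());  /* 0 -22 */
            return 0;
    }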
@@@ -519,7 -520,6 +520,7 @@@ static int l2tp_ip6_sendmsg(struct soc memset(&fl6, 0, sizeof(fl6));
fl6.flowi6_mark = sk->sk_mark; + fl6.flowi6_uid = sk->sk_uid;
ipc6.hlimit = -1; ipc6.tclass = -1; diff --combined net/mac80211/sta_info.c index 236d47e,8e05032..1711bae --- a/net/mac80211/sta_info.c +++ b/net/mac80211/sta_info.c @@@ -688,7 -688,7 +688,7 @@@ static void __sta_info_recalc_tim(struc }
/* No need to do anything if the driver does all */ - if (!local->ops->set_tim) + if (ieee80211_hw_check(&local->hw, AP_LINK_PS)) return;
if (sta->dead) @@@ -709,7 -709,7 +709,7 @@@ for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) { unsigned long tids;
- if (ignore_for_tim & BIT(ac)) + if (ignore_for_tim & ieee80211_ac_to_qos_mask[ac]) continue;
indicate_tim |= !skb_queue_empty(&sta->tx_filtered[ac]) || @@@ -1389,7 -1389,7 +1389,7 @@@ ieee80211_sta_ps_more_data(struct sta_i return true;
for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) { - if (ignored_acs & BIT(ac)) + if (ignored_acs & ieee80211_ac_to_qos_mask[ac]) continue;
if (!skb_queue_empty(&sta->tx_filtered[ac]) || @@@ -1414,7 -1414,7 +1414,7 @@@ ieee80211_sta_ps_get_frames(struct sta_ for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) { unsigned long tids;
- if (ignored_acs & BIT(ac)) + if (ignored_acs & ieee80211_ac_to_qos_mask[ac]) continue;
tids = ieee80211_tids_for_ac(ac); @@@ -1482,7 -1482,7 +1482,7 @@@ ieee80211_sta_ps_deliver_response(struc BIT(find_highest_prio_tid(driver_release_tids));
if (skb_queue_empty(&frames) && !driver_release_tids) { - int tid; + int tid, ac;
/* * For PS-Poll, this can only happen due to a race condition @@@ -1500,10 -1500,7 +1500,10 @@@ */
/* This will evaluate to 1, 3, 5 or 7. */ - tid = 7 - ((ffs(~ignored_acs) - 1) << 1); + for (ac = IEEE80211_AC_VO; ac < IEEE80211_NUM_ACS; ac++) + if (!(ignored_acs & ieee80211_ac_to_qos_mask[ac])) + break; + tid = 7 - 2 * ac;
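Editor's note: the loop above finds the highest-priority access category that is not ignored and maps it to an odd TID, replacing the earlier ffs() expression. The arithmetic is easy to verify stand-alone:

    /* AC 0 (VO) .. 3 (BK) map to TIDs 7, 5, 3, 1 via tid = 7 - 2 * ac,
     * always an odd value, as the in-code comment promises.
     */
    #include <stdio.h>

    int main(void)
    {
            static const char *const ac_name[] = { "VO", "VI", "BE", "BK" };
            int ac;

            for (ac = 0; ac < 4; ac++)
                    printf("AC %s -> TID %d\n", ac_name[ac], 7 - 2 * ac);
            return 0;
    }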
ieee80211_send_null_response(sta, tid, reason, true, false); } else if (!driver_release_tids) { @@@ -1874,7 -1871,10 +1874,7 @@@ int sta_info_move_state(struct sta_inf if (!sta->sta.support_p2p_ps) ieee80211_recalc_p2p_go_ps_allowed(sta->sdata); } else if (sta->sta_state == IEEE80211_STA_AUTHORIZED) { - if (sta->sdata->vif.type == NL80211_IFTYPE_AP || - (sta->sdata->vif.type == NL80211_IFTYPE_AP_VLAN && - !sta->sdata->u.vlan.sta)) - atomic_dec(&sta->sdata->bss->num_mcast_sta); + ieee80211_vif_dec_num_mcast(sta->sdata); clear_bit(WLAN_STA_AUTHORIZED, &sta->_flags); ieee80211_clear_fast_xmit(sta); ieee80211_clear_fast_rx(sta); @@@ -1882,7 -1882,10 +1882,7 @@@ break; case IEEE80211_STA_AUTHORIZED: if (sta->sta_state == IEEE80211_STA_ASSOC) { - if (sta->sdata->vif.type == NL80211_IFTYPE_AP || - (sta->sdata->vif.type == NL80211_IFTYPE_AP_VLAN && - !sta->sdata->u.vlan.sta)) - atomic_inc(&sta->sdata->bss->num_mcast_sta); + ieee80211_vif_inc_num_mcast(sta->sdata); set_bit(WLAN_STA_AUTHORIZED, &sta->_flags); ieee80211_check_fast_xmit(sta); ieee80211_check_fast_rx(sta); diff --combined net/mac80211/tx.c index 62ccaf6,bd5f4be..2c21b70 --- a/net/mac80211/tx.c +++ b/net/mac80211/tx.c @@@ -331,8 -331,9 +331,8 @@@ ieee80211_tx_h_check_assoc(struct ieee8 I802_DEBUG_INC(tx->local->tx_handlers_drop_not_assoc); return TX_DROP; } - } else if (unlikely(tx->sdata->vif.type == NL80211_IFTYPE_AP && - ieee80211_is_data(hdr->frame_control) && - !atomic_read(&tx->sdata->u.ap.num_mcast_sta))) { + } else if (unlikely(ieee80211_is_data(hdr->frame_control) && + ieee80211_vif_get_num_mcast_if(tx->sdata) == 0)) { /* * No associated STAs - no need to send multicast * frames. @@@ -934,7 -935,7 +934,7 @@@ ieee80211_tx_h_fragment(struct ieee8021 if (info->flags & IEEE80211_TX_CTL_DONTFRAG) return TX_CONTINUE;
- if (tx->local->ops->set_frag_threshold) + if (ieee80211_hw_check(&tx->local->hw, SUPPORTS_TX_FRAG)) return TX_CONTINUE;
/* @@@ -1500,7 -1501,6 +1500,6 @@@ static bool ieee80211_queue_skb(struct struct sta_info *sta, struct sk_buff *skb) { - struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); struct fq *fq = &local->fq; struct ieee80211_vif *vif; struct txq_info *txqi; @@@ -1525,8 -1525,6 +1524,6 @@@ if (!txqi) return false;
- info->control.vif = vif; - spin_lock_bh(&fq->lock); ieee80211_txq_enqueue(local, txqi, skb); spin_unlock_bh(&fq->lock); @@@ -2800,7 -2798,7 +2797,7 @@@ void ieee80211_check_fast_xmit(struct s
/* fast-xmit doesn't handle fragmentation at all */ if (local->hw.wiphy->frag_threshold != (u32)-1 && - !local->ops->set_frag_threshold) + !ieee80211_hw_check(&local->hw, SUPPORTS_TX_FRAG)) goto out;
rcu_read_lock(); @@@ -3059,12 -3057,11 +3056,12 @@@ static bool ieee80211_amsdu_prepare_hea struct ieee80211_local *local = sdata->local; struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); struct ieee80211_hdr *hdr; - struct ethhdr amsdu_hdr; + struct ethhdr *amsdu_hdr; int hdr_len = fast_tx->hdr_len - sizeof(rfc1042_header); int subframe_len = skb->len - hdr_len; void *data; - u8 *qc; + u8 *qc, *h_80211_src, *h_80211_dst; + const u8 *bssid;
if (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE) return false; @@@ -3072,44 -3069,19 +3069,44 @@@ if (info->control.flags & IEEE80211_TX_CTRL_AMSDU) return true;
- if (!ieee80211_amsdu_realloc_pad(local, skb, sizeof(amsdu_hdr), + if (!ieee80211_amsdu_realloc_pad(local, skb, sizeof(*amsdu_hdr), &subframe_len)) return false;
- amsdu_hdr.h_proto = cpu_to_be16(subframe_len); - memcpy(amsdu_hdr.h_source, skb->data + fast_tx->sa_offs, ETH_ALEN); - memcpy(amsdu_hdr.h_dest, skb->data + fast_tx->da_offs, ETH_ALEN); + data = skb_push(skb, sizeof(*amsdu_hdr)); + memmove(data, data + sizeof(*amsdu_hdr), hdr_len); + hdr = data; + amsdu_hdr = data + hdr_len; + /* h_80211_src/dst is addr* field within hdr */ + h_80211_src = data + fast_tx->sa_offs; + h_80211_dst = data + fast_tx->da_offs; + + amsdu_hdr->h_proto = cpu_to_be16(subframe_len); + ether_addr_copy(amsdu_hdr->h_source, h_80211_src); + ether_addr_copy(amsdu_hdr->h_dest, h_80211_dst); + + /* according to IEEE 802.11-2012 8.3.2 table 8-19, the outer SA/DA + * fields need to be changed to BSSID for A-MSDU frames depending + * on FromDS/ToDS values. + */ + switch (sdata->vif.type) { + case NL80211_IFTYPE_STATION: + bssid = sdata->u.mgd.bssid; + break; + case NL80211_IFTYPE_AP: + case NL80211_IFTYPE_AP_VLAN: + bssid = sdata->vif.addr; + break; + default: + bssid = NULL; + }
- data = skb_push(skb, sizeof(amsdu_hdr)); - memmove(data, data + sizeof(amsdu_hdr), hdr_len); - memcpy(data + hdr_len, &amsdu_hdr, sizeof(amsdu_hdr)); + if (bssid && ieee80211_has_fromds(hdr->frame_control)) + ether_addr_copy(h_80211_src, bssid); + + if (bssid && ieee80211_has_tods(hdr->frame_control)) + ether_addr_copy(h_80211_dst, bssid);
- hdr = data; qc = ieee80211_get_qos_ctl(hdr); *qc |= IEEE80211_QOS_CTL_A_MSDU_PRESENT;
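Editor's note: the rework above builds the A-MSDU subframe header in place and substitutes the BSSID into the outer SA or DA depending on FromDS/ToDS. The header itself has Ethernet layout but its h_proto field carries the subframe length, not an ethertype. A stand-alone sketch of that construction:

    #include <arpa/inet.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    struct amsdu_hdr {                 /* mirrors struct ethhdr layout */
            uint8_t  h_dest[6];
            uint8_t  h_source[6];
            uint16_t h_proto;          /* subframe length, big-endian  */
    } __attribute__((packed));

    int main(void)
    {
            const uint8_t da[6] = { 0x02, 0, 0, 0, 0, 1 };
            const uint8_t sa[6] = { 0x02, 0, 0, 0, 0, 2 };
            struct amsdu_hdr hdr;
            uint16_t subframe_len = 1400;

            memcpy(hdr.h_dest, da, 6);
            memcpy(hdr.h_source, sa, 6);
            hdr.h_proto = htons(subframe_len); /* cpu_to_be16() analogue */
            printf("length field = 0x%04x\n", ntohs(hdr.h_proto));
            return 0;
    }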
@@@ -3238,7 -3210,6 +3235,6 @@@ static void ieee80211_xmit_fast_finish(
if (hdr->frame_control & cpu_to_le16(IEEE80211_STYPE_QOS_DATA)) { tid = skb->priority & IEEE80211_QOS_CTL_TAG1D_MASK; - *ieee80211_get_qos_ctl(hdr) = tid; hdr->seq_ctrl = ieee80211_tx_next_seq(sta, tid); } else { info->flags |= IEEE80211_TX_CTL_ASSIGN_SEQ; @@@ -3363,6 -3334,11 +3359,11 @@@ static bool ieee80211_xmit_fast(struct (tid_tx ? IEEE80211_TX_CTL_AMPDU : 0); info->control.flags = IEEE80211_TX_CTRL_FAST_XMIT;
+ if (hdr->frame_control & cpu_to_le16(IEEE80211_STYPE_QOS_DATA)) { + tid = skb->priority & IEEE80211_QOS_CTL_TAG1D_MASK; + *ieee80211_get_qos_ctl(hdr) = tid; + } + __skb_queue_head_init(&tx.skbs);
tx.flags = IEEE80211_TX_UNICAST; @@@ -3451,6 -3427,11 +3452,11 @@@ begin goto begin; }
+ if (test_bit(IEEE80211_TXQ_AMPDU, &txqi->flags)) + info->flags |= IEEE80211_TX_CTL_AMPDU; + else + info->flags &= ~IEEE80211_TX_CTL_AMPDU; + if (info->control.flags & IEEE80211_TX_CTRL_FAST_XMIT) { struct sta_info *sta = container_of(txq->sta, struct sta_info, sta); diff --combined net/socket.c index f9e26c6,73dc69f..e2584c5 --- a/net/socket.c +++ b/net/socket.c @@@ -341,8 -341,23 +341,23 @@@ static const struct xattr_handler sockf .get = sockfs_xattr_get, };
+ static int sockfs_security_xattr_set(const struct xattr_handler *handler, + struct dentry *dentry, struct inode *inode, + const char *suffix, const void *value, + size_t size, int flags) + { + /* Handled by LSM. */ + return -EAGAIN; + } + + static const struct xattr_handler sockfs_security_xattr_handler = { + .prefix = XATTR_SECURITY_PREFIX, + .set = sockfs_security_xattr_set, + }; + static const struct xattr_handler *sockfs_xattr_handlers[] = { &sockfs_xattr_handler, + &sockfs_security_xattr_handler, NULL };
@@@ -518,22 -533,8 +533,22 @@@ static ssize_t sockfs_listxattr(struct return used; }
+int sockfs_setattr(struct dentry *dentry, struct iattr *iattr) +{ + int err = simple_setattr(dentry, iattr); + + if (!err) { + struct socket *sock = SOCKET_I(d_inode(dentry)); + + sock->sk->sk_uid = iattr->ia_uid; + } + + return err; +} + static const struct inode_operations sockfs_inode_ops = { .listxattr = sockfs_listxattr, + .setattr = sockfs_setattr, };
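Editor's note: with sockfs_setattr() wired into the socket inode operations, a chown()/fchown() on a socket file descriptor now updates sk->sk_uid, the value that the flowi6_uid assignments elsewhere in this merge feed into routing. A userspace sketch, assuming a kernel with this series and sufficient privilege:

    #include <stdio.h>
    #include <sys/socket.h>
    #include <unistd.h>

    int main(void)
    {
            int fd = socket(AF_INET6, SOCK_DGRAM, 0);

            /* reaches sockfs_setattr(), which copies ia_uid into sk_uid */
            if (fd < 0 || fchown(fd, 1000, (gid_t)-1) < 0) {
                    perror("socket/fchown");
                    return 1;
            }
            puts("socket now routes with UID 1000");
            close(fd);
            return 0;
    }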
/** @@@ -891,11 -892,6 +906,11 @@@ static long sock_do_ioctl(struct net *n * what to do with it - that's up to the protocol still. */
+static struct ns_common *get_net_ns(struct ns_common *ns) +{ + return &get_net(container_of(ns, struct net, ns))->ns; +} + static long sock_ioctl(struct file *file, unsigned cmd, unsigned long arg) { struct socket *sock; @@@ -964,13 -960,6 +979,13 @@@ err = dlci_ioctl_hook(cmd, argp); mutex_unlock(&dlci_ioctl_mutex); break; + case SIOCGSKNS: + err = -EPERM; + if (!ns_capable(net->user_ns, CAP_NET_ADMIN)) + break; + + err = open_related_ns(&net->ns, get_net_ns); + break; default: err = sock_do_ioctl(net, sock, cmd, arg); break; @@@ -3121,7 -3110,6 +3136,7 @@@ static int compat_sock_ioctl_trans(stru case SIOCSIFVLAN: case SIOCADDDLCI: case SIOCDELDLCI: + case SIOCGSKNS: return sock_ioctl(file, cmd, arg);
case SIOCGIFFLAGS: diff --combined net/sunrpc/svcsock.c index 78da4ae,a4bc982..135ec2c --- a/net/sunrpc/svcsock.c +++ b/net/sunrpc/svcsock.c @@@ -39,7 -39,6 +39,7 @@@ #include <net/checksum.h> #include <net/ip.h> #include <net/ipv6.h> +#include <net/udp.h> #include <net/tcp.h> #include <net/tcp_states.h> #include <asm/uaccess.h> @@@ -130,18 -129,6 +130,18 @@@ static void svc_release_skb(struct svc_ } }
+static void svc_release_udp_skb(struct svc_rqst *rqstp) +{ + struct sk_buff *skb = rqstp->rq_xprt_ctxt; + + if (skb) { + rqstp->rq_xprt_ctxt = NULL; + + dprintk("svc: service %p, releasing skb %p\n", rqstp, skb); + consume_skb(skb); + } +} + union svc_pktinfo_u { struct in_pktinfo pkti; struct in6_pktinfo pkti6; @@@ -451,6 -438,21 +451,21 @@@ static int svc_tcp_has_wspace(struct sv return !test_bit(SOCK_NOSPACE, &svsk->sk_sock->flags); }
+ static void svc_tcp_kill_temp_xprt(struct svc_xprt *xprt) + { + struct svc_sock *svsk; + struct socket *sock; + struct linger no_linger = { + .l_onoff = 1, + .l_linger = 0, + }; + + svsk = container_of(xprt, struct svc_sock, sk_xprt); + sock = svsk->sk_sock; + kernel_setsockopt(sock, SOL_SOCKET, SO_LINGER, + (char *)&no_linger, sizeof(no_linger)); + } + /* * See net/ipv6/ip_sockglue.c : ip_cmsg_recv_pktinfo */ @@@ -547,7 -549,7 +562,7 @@@ static int svc_udp_recvfrom(struct svc_ err = kernel_recvmsg(svsk->sk_sock, &msg, NULL, 0, 0, MSG_PEEK | MSG_DONTWAIT); if (err >= 0) - skb = skb_recv_datagram(svsk->sk_sk, 0, 1, &err); + skb = skb_recv_udp(svsk->sk_sk, 0, 1, &err);
if (skb == NULL) { if (err != -EAGAIN) { @@@ -588,7 -590,7 +603,7 @@@ goto out_free; } local_bh_enable(); - skb_free_datagram_locked(svsk->sk_sk, skb); + consume_skb(skb); } else { /* we can use it in-place */ rqstp->rq_arg.head[0].iov_base = skb->data; @@@ -615,7 -617,8 +630,7 @@@
return len; out_free: - trace_kfree_skb(skb, svc_udp_recvfrom); - skb_free_datagram_locked(svsk->sk_sk, skb); + kfree_skb(skb); return 0; }
@@@ -660,6 -663,10 +675,10 @@@ static struct svc_xprt *svc_udp_accept( return NULL; }
+ static void svc_udp_kill_temp_xprt(struct svc_xprt *xprt) + { + } + static struct svc_xprt *svc_udp_create(struct svc_serv *serv, struct net *net, struct sockaddr *sa, int salen, @@@ -672,13 -679,14 +691,14 @@@ static struct svc_xprt_ops svc_udp_ops .xpo_create = svc_udp_create, .xpo_recvfrom = svc_udp_recvfrom, .xpo_sendto = svc_udp_sendto, - .xpo_release_rqst = svc_release_skb, + .xpo_release_rqst = svc_release_udp_skb, .xpo_detach = svc_sock_detach, .xpo_free = svc_sock_free, .xpo_prep_reply_hdr = svc_udp_prep_reply_hdr, .xpo_has_wspace = svc_udp_has_wspace, .xpo_accept = svc_udp_accept, .xpo_secure_port = svc_sock_secure_port, + .xpo_kill_temp_xprt = svc_udp_kill_temp_xprt, };
static struct svc_xprt_class svc_udp_class = { @@@ -1254,6 -1262,7 +1274,7 @@@ static struct svc_xprt_ops svc_tcp_ops .xpo_has_wspace = svc_tcp_has_wspace, .xpo_accept = svc_tcp_accept, .xpo_secure_port = svc_sock_secure_port, + .xpo_kill_temp_xprt = svc_tcp_kill_temp_xprt, };
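Editor's note: the svc_tcp_kill_temp_xprt() registered just above sets SO_LINGER with l_onoff=1 and l_linger=0, so that closing the transport aborts the connection with an RST instead of a graceful shutdown. The userspace equivalent of the same trick:

    #include <stdio.h>
    #include <sys/socket.h>
    #include <unistd.h>

    int main(void)
    {
            struct linger no_linger = { .l_onoff = 1, .l_linger = 0 };
            int fd = socket(AF_INET, SOCK_STREAM, 0);

            if (fd < 0 || setsockopt(fd, SOL_SOCKET, SO_LINGER,
                                     &no_linger, sizeof(no_linger)) < 0) {
                    perror("socket/setsockopt");
                    return 1;
            }
            close(fd);   /* would RST an established connection */
            return 0;
    }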
static struct svc_xprt_class svc_tcp_class = { diff --combined net/tipc/socket.c index 22d92f0,db32777..4916d8f --- a/net/tipc/socket.c +++ b/net/tipc/socket.c @@@ -1,7 -1,7 +1,7 @@@ /* * net/tipc/socket.c: TIPC socket API * - * Copyright (c) 2001-2007, 2012-2015, Ericsson AB + * Copyright (c) 2001-2007, 2012-2016, Ericsson AB * Copyright (c) 2004-2008, 2010-2013, Wind River Systems * All rights reserved. * @@@ -44,43 -44,44 +44,43 @@@ #include "bcast.h" #include "netlink.h"
-#define SS_LISTENING -1 /* socket is listening */ -#define SS_READY -2 /* socket is connectionless */ - #define CONN_TIMEOUT_DEFAULT 8000 /* default connect timeout = 8s */ #define CONN_PROBING_INTERVAL msecs_to_jiffies(3600000) /* [ms] => 1 h */ #define TIPC_FWD_MSG 1 -#define TIPC_CONN_OK 0 -#define TIPC_CONN_PROBING 1 #define TIPC_MAX_PORT 0xffffffff #define TIPC_MIN_PORT 1
+enum { + TIPC_LISTEN = TCP_LISTEN, + TIPC_ESTABLISHED = TCP_ESTABLISHED, + TIPC_OPEN = TCP_CLOSE, + TIPC_DISCONNECTING = TCP_CLOSE_WAIT, + TIPC_CONNECTING = TCP_SYN_SENT, +}; + /** * struct tipc_sock - TIPC socket structure * @sk: socket - interacts with 'port' and with user via the socket API - * @connected: non-zero if port is currently connected to a peer port * @conn_type: TIPC type used when connection was established * @conn_instance: TIPC instance used when connection was established * @published: non-zero if port has one or more associated names * @max_pkt: maximum packet size "hint" used when building messages sent by port * @portid: unique port identity in TIPC socket hash table * @phdr: preformatted message header used when sending messages - * @port_list: adjacent ports in TIPC's global list of ports * @publications: list of publications for port * @pub_count: total # of publications port has made during its lifetime * @probing_state: - * @probing_intv: * @conn_timeout: the time we can wait for an unresponded setup request * @dupl_rcvcnt: number of bytes counted twice, in both backlog and rcv queue * @link_cong: non-zero if owner must sleep because of link congestion * @sent_unacked: # messages sent by socket, and not yet acked by peer * @rcv_unacked: # messages read by user, but not yet acked back to peer - * @remote: 'connected' peer for dgram/rdm + * @peer: 'connected' peer for dgram/rdm * @node: hash table node * @rcu: rcu struct for tipc_sock */ struct tipc_sock { struct sock sk; - int connected; u32 conn_type; u32 conn_instance; int published; @@@ -90,16 -91,17 +90,16 @@@ struct list_head sock_list; struct list_head publications; u32 pub_count; - u32 probing_state; - unsigned long probing_intv; uint conn_timeout; atomic_t dupl_rcvcnt; + bool probe_unacked; bool link_cong; u16 snt_unacked; u16 snd_win; u16 peer_caps; u16 rcv_unacked; u16 rcv_win; - struct sockaddr_tipc remote; + struct sockaddr_tipc peer; struct rhash_head node; struct rcu_head rcu; }; @@@ -127,54 -129,8 +127,8 @@@ static const struct proto_ops packet_op static const struct proto_ops stream_ops; static const struct proto_ops msg_ops; static struct proto tipc_proto; - static const struct rhashtable_params tsk_rht_params;
- /* - * Revised TIPC socket locking policy: - * - * Most socket operations take the standard socket lock when they start - * and hold it until they finish (or until they need to sleep). Acquiring - * this lock grants the owner exclusive access to the fields of the socket - * data structures, with the exception of the backlog queue. A few socket - * operations can be done without taking the socket lock because they only - * read socket information that never changes during the life of the socket. - * - * Socket operations may acquire the lock for the associated TIPC port if they - * need to perform an operation on the port. If any routine needs to acquire - * both the socket lock and the port lock it must take the socket lock first - * to avoid the risk of deadlock. - * - * The dispatcher handling incoming messages cannot grab the socket lock in - * the standard fashion, since invoked it runs at the BH level and cannot block. - * Instead, it checks to see if the socket lock is currently owned by someone, - * and either handles the message itself or adds it to the socket's backlog - * queue; in the latter case the queued message is processed once the process - * owning the socket lock releases it. - * - * NOTE: Releasing the socket lock while an operation is sleeping overcomes - * the problem of a blocked socket operation preventing any other operations - * from occurring. However, applications must be careful if they have - * multiple threads trying to send (or receive) on the same socket, as these - * operations might interfere with each other. For example, doing a connect - * and a receive at the same time might allow the receive to consume the - * ACK message meant for the connect. While additional work could be done - * to try and overcome this, it doesn't seem to be worthwhile at the present. - * - * NOTE: Releasing the socket lock while an operation is sleeping also ensures - * that another operation that must be performed in a non-blocking manner is - * not delayed for very long because the lock has already been taken. - * - * NOTE: This code assumes that certain fields of a port/socket pair are - * constant over its lifetime; such fields can be examined without taking - * the socket lock and/or port lock, and do not need to be re-read even - * after resuming processing after waiting. These fields include: - * - socket type - * - pointer to socket sk structure (aka tipc_sock structure) - * - pointer to port structure - * - port reference - */ - static u32 tsk_own_node(struct tipc_sock *tsk) { return msg_prevnode(&tsk->phdr); @@@ -292,21 -248,6 +246,21 @@@ static void tsk_rej_rx_queue(struct soc tipc_sk_respond(sk, skb, TIPC_ERR_NO_PORT); }
+static bool tipc_sk_connected(struct sock *sk) +{ + return sk->sk_state == TIPC_ESTABLISHED; +} + +/* tipc_sk_type_connectionless - check if the socket is a datagram socket + * @sk: socket + * + * Returns true if connectionless, false otherwise + */ +static bool tipc_sk_type_connectionless(struct sock *sk) +{ + return sk->sk_type == SOCK_RDM || sk->sk_type == SOCK_DGRAM; +} + /* tsk_peer_msg - verify if message was sent by connected port's peer * * Handles cases where the node's network address has changed from @@@ -314,13 -255,12 +268,13 @@@ */
- if (unlikely(!tsk->connected)) + if (unlikely(!tipc_sk_connected(sk))) return false;
if (unlikely(msg_origport(msg) != peer_port)) @@@ -341,45 -281,6 +295,45 @@@ return false; }
+/* tipc_set_sk_state - set the sk_state of the socket + * @sk: socket + * + * Caller must hold socket lock + * + * Returns 0 on success, errno otherwise + */ +static int tipc_set_sk_state(struct sock *sk, int state) +{ + int oldsk_state = sk->sk_state; + int res = -EINVAL; + + switch (state) { + case TIPC_OPEN: + res = 0; + break; + case TIPC_LISTEN: + case TIPC_CONNECTING: + if (oldsk_state == TIPC_OPEN) + res = 0; + break; + case TIPC_ESTABLISHED: + if (oldsk_state == TIPC_CONNECTING || + oldsk_state == TIPC_OPEN) + res = 0; + break; + case TIPC_DISCONNECTING: + if (oldsk_state == TIPC_CONNECTING || + oldsk_state == TIPC_ESTABLISHED) + res = 0; + break; + } + + if (!res) + sk->sk_state = state; + + return res; +} + /** * tipc_sk_create - create a TIPC socket * @net: network namespace (must be default network) @@@ -397,6 -298,7 +351,6 @@@ static int tipc_sk_create(struct net *n { struct tipc_net *tn; const struct proto_ops *ops; - socket_state state; struct sock *sk; struct tipc_sock *tsk; struct tipc_msg *msg; @@@ -408,13 -310,16 +362,13 @@@ switch (sock->type) { case SOCK_STREAM: ops = &stream_ops; - state = SS_UNCONNECTED; break; case SOCK_SEQPACKET: ops = &packet_ops; - state = SS_UNCONNECTED; break; case SOCK_DGRAM: case SOCK_RDM: ops = &msg_ops; - state = SS_READY; break; default: return -EPROTOTYPE; @@@ -435,15 -340,14 +389,15 @@@
/* Finish initializing socket data structures */ sock->ops = ops; - sock->state = state; sock_init_data(sock, sk); + tipc_set_sk_state(sk, TIPC_OPEN); if (tipc_sk_insert(tsk)) { pr_warn("Socket create failed; port number exhausted\n"); return -EINVAL; } msg_set_origport(msg, tsk->portid); setup_timer(&sk->sk_timer, tipc_sk_timeout, (unsigned long)tsk); + sk->sk_shutdown = 0; sk->sk_backlog_rcv = tipc_backlog_rcv; sk->sk_rcvbuf = sysctl_tipc_rmem[1]; sk->sk_data_ready = tipc_data_ready; @@@ -456,12 -360,11 +410,12 @@@ tsk->snd_win = tsk_adv_blocks(RCVBUF_MIN); tsk->rcv_win = tsk->snd_win;
- if (sock->state == SS_READY) { + if (tipc_sk_type_connectionless(sk)) { tsk_set_unreturnable(tsk, true); if (sock->type == SOCK_DGRAM) tsk_set_unreliable(tsk, true); } + return 0; }
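Editor's note: tipc_set_sk_state(), added earlier in this file, replaces the old ad-hoc sock->state handling with an explicit transition table. A stand-alone replica of those rules, using local constants in place of the TCP_*-based enum values:

    #include <stdio.h>

    enum { OPEN, LISTEN, CONNECTING, ESTABLISHED, DISCONNECTING };

    static int set_state(int *state, int next)
    {
            int ok = 0;

            switch (next) {
            case OPEN:
                    ok = 1;                                  /* always legal */
                    break;
            case LISTEN:
            case CONNECTING:
                    ok = (*state == OPEN);
                    break;
            case ESTABLISHED:
                    ok = (*state == CONNECTING || *state == OPEN);
                    break;
            case DISCONNECTING:
                    ok = (*state == CONNECTING || *state == ESTABLISHED);
                    break;
            }
            if (ok)
                    *state = next;
            return ok ? 0 : -22;                             /* -EINVAL */
    }

    int main(void)
    {
            int st = OPEN;

            printf("%d",   set_state(&st, LISTEN));       /* 0: legal    */
            printf(" %d\n", set_state(&st, ESTABLISHED)); /* -22: illegal */
            return 0;
    }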
@@@ -472,44 -375,6 +426,44 @@@ static void tipc_sk_callback(struct rcu sock_put(&tsk->sk); }
+/* Caller should hold socket lock for the socket. */ +static void __tipc_shutdown(struct socket *sock, int error) +{ + struct sock *sk = sock->sk; + struct tipc_sock *tsk = tipc_sk(sk); + struct net *net = sock_net(sk); + u32 dnode = tsk_peer_node(tsk); + struct sk_buff *skb; + + /* Reject all unreceived messages, except on an active connection + * (which disconnects locally & sends a 'FIN+' to peer). + */ + while ((skb = __skb_dequeue(&sk->sk_receive_queue)) != NULL) { + if (TIPC_SKB_CB(skb)->bytes_read) { + kfree_skb(skb); + } else { + if (!tipc_sk_type_connectionless(sk) && + sk->sk_state != TIPC_DISCONNECTING) { + tipc_set_sk_state(sk, TIPC_DISCONNECTING); + tipc_node_remove_conn(net, dnode, tsk->portid); + } + tipc_sk_respond(sk, skb, error); + } + } + if (sk->sk_state != TIPC_DISCONNECTING) { + skb = tipc_msg_create(TIPC_CRITICAL_IMPORTANCE, + TIPC_CONN_MSG, SHORT_H_SIZE, 0, dnode, + tsk_own_node(tsk), tsk_peer_port(tsk), + tsk->portid, error); + if (skb) + tipc_node_xmit_skb(net, skb, dnode, tsk->portid); + if (!tipc_sk_type_connectionless(sk)) { + tipc_node_remove_conn(net, dnode, tsk->portid); + tipc_set_sk_state(sk, TIPC_DISCONNECTING); + } + } +} + /** * tipc_release - destroy a TIPC socket * @sock: socket to destroy @@@ -529,7 -394,10 +483,7 @@@ static int tipc_release(struct socket *sock) { struct sock *sk = sock->sk; - struct net *net; struct tipc_sock *tsk; - struct sk_buff *skb; - u32 dnode;
/* * Exit if socket isn't fully initialized (occurs when a failed accept() @@@ -538,16 -406,47 +492,16 @@@ if (sk == NULL) return 0;
- net = sock_net(sk); tsk = tipc_sk(sk); lock_sock(sk);
- /* - * Reject all unreceived messages, except on an active connection - * (which disconnects locally & sends a 'FIN+' to peer) - */ - dnode = tsk_peer_node(tsk); - while (sock->state != SS_DISCONNECTING) { - skb = __skb_dequeue(&sk->sk_receive_queue); - if (skb == NULL) - break; - if (TIPC_SKB_CB(skb)->handle != NULL) - kfree_skb(skb); - else { - if ((sock->state == SS_CONNECTING) || - (sock->state == SS_CONNECTED)) { - sock->state = SS_DISCONNECTING; - tsk->connected = 0; - tipc_node_remove_conn(net, dnode, tsk->portid); - } - tipc_sk_respond(sk, skb, TIPC_ERR_NO_PORT); - } - } - + __tipc_shutdown(sock, TIPC_ERR_NO_PORT); + sk->sk_shutdown = SHUTDOWN_MASK; tipc_sk_withdraw(tsk, 0, NULL); sk_stop_timer(sk, &sk->sk_timer); tipc_sk_remove(tsk); - if (tsk->connected) { - skb = tipc_msg_create(TIPC_CRITICAL_IMPORTANCE, - TIPC_CONN_MSG, SHORT_H_SIZE, 0, dnode, - tsk_own_node(tsk), tsk_peer_port(tsk), - tsk->portid, TIPC_ERR_NO_PORT); - if (skb) - tipc_node_xmit_skb(net, skb, dnode, tsk->portid); - tipc_node_remove_conn(net, dnode, tsk->portid); - }
/* Reject any messages that accumulated in backlog queue */ - sock->state = SS_DISCONNECTING; release_sock(sk);
call_rcu(&tsk->rcu, tipc_sk_callback); @@@ -633,14 -532,13 +587,14 @@@ static int tipc_getname(struct socket * int *uaddr_len, int peer) { struct sockaddr_tipc *addr = (struct sockaddr_tipc *)uaddr; - struct tipc_sock *tsk = tipc_sk(sock->sk); + struct sock *sk = sock->sk; + struct tipc_sock *tsk = tipc_sk(sk); struct tipc_net *tn = net_generic(sock_net(sock->sk), tipc_net_id);
memset(addr, 0, sizeof(*addr)); if (peer) { - if ((sock->state != SS_CONNECTED) && - ((peer != 2) || (sock->state != SS_DISCONNECTING))) + if ((!tipc_sk_connected(sk)) && + ((peer != 2) || (sk->sk_state != TIPC_DISCONNECTING))) return -ENOTCONN; addr->addr.id.ref = tsk_peer_port(tsk); addr->addr.id.node = tsk_peer_node(tsk); @@@ -672,6 -570,28 +626,6 @@@ * exits. TCP and other protocols seem to rely on higher level poll routines * to handle any preventable race conditions, so TIPC will do the same ... * - * TIPC sets the returned events as follows: - * - * socket state flags set - * ------------ --------- - * unconnected no read flags - * POLLOUT if port is not congested - * - * connecting POLLIN/POLLRDNORM if ACK/NACK in rx queue - * no write flags - * - * connected POLLIN/POLLRDNORM if data in rx queue - * POLLOUT if port is not congested - * - * disconnecting POLLIN/POLLRDNORM/POLLHUP - * no write flags - * - * listening POLLIN if SYN in rx queue - * no write flags - * - * ready POLLIN/POLLRDNORM if data in rx queue - * [connectionless] POLLOUT (since port cannot be congested) - * * IMPORTANT: The fact that a read or write operation is indicated does NOT * imply that the operation will succeed, merely that it should be performed * and will not block. @@@ -685,29 -605,22 +639,29 @@@ static unsigned int tipc_poll(struct fi
sock_poll_wait(file, sk_sleep(sk), wait);
- switch ((int)sock->state) { - case SS_UNCONNECTED: - if (!tsk->link_cong) - mask |= POLLOUT; - break; - case SS_READY: - case SS_CONNECTED: + if (sk->sk_shutdown & RCV_SHUTDOWN) + mask |= POLLRDHUP | POLLIN | POLLRDNORM; + if (sk->sk_shutdown == SHUTDOWN_MASK) + mask |= POLLHUP; + + switch (sk->sk_state) { + case TIPC_ESTABLISHED: if (!tsk->link_cong && !tsk_conn_cong(tsk)) mask |= POLLOUT; /* fall thru' */ - case SS_CONNECTING: - case SS_LISTENING: + case TIPC_LISTEN: + case TIPC_CONNECTING: if (!skb_queue_empty(&sk->sk_receive_queue)) mask |= (POLLIN | POLLRDNORM); break; - case SS_DISCONNECTING: + case TIPC_OPEN: + if (!tsk->link_cong) + mask |= POLLOUT; + if (tipc_sk_type_connectionless(sk) && + (!skb_queue_empty(&sk->sk_receive_queue))) + mask |= (POLLIN | POLLRDNORM); + break; + case TIPC_DISCONNECTING: mask = (POLLIN | POLLRDNORM | POLLHUP); break; } @@@ -738,9 -651,6 +692,9 @@@ static int tipc_sendmcast(struct socke uint mtu; int rc;
+ if (!timeo && tsk->link_cong) + return -ELINKCONG; + msg_set_type(mhdr, TIPC_MCAST_MSG); msg_set_lookup_scope(mhdr, TIPC_CLUSTER_SCOPE); msg_set_destport(mhdr, 0); @@@ -853,7 -763,7 +807,7 @@@ static void tipc_sk_proto_rcv(struct ti if (!tsk_peer_msg(tsk, hdr)) goto exit;
- tsk->probing_state = TIPC_CONN_OK; + tsk->probe_unacked = false;
if (mtyp == CONN_PROBE) { msg_set_type(hdr, CONN_PROBE_REPLY); @@@ -876,25 -786,25 +830,25 @@@ exit
static int tipc_wait_for_sndmsg(struct socket *sock, long *timeo_p) { + DEFINE_WAIT_FUNC(wait, woken_wake_function); struct sock *sk = sock->sk; struct tipc_sock *tsk = tipc_sk(sk); - DEFINE_WAIT(wait); int done;
do { int err = sock_error(sk); if (err) return err; - if (sock->state == SS_DISCONNECTING) + if (sk->sk_shutdown & SEND_SHUTDOWN) return -EPIPE; if (!*timeo_p) return -EAGAIN; if (signal_pending(current)) return sock_intr_errno(*timeo_p);
- prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); - done = sk_wait_event(sk, timeo_p, !tsk->link_cong); - finish_wait(sk_sleep(sk), &wait); + add_wait_queue(sk_sleep(sk), &wait); + done = sk_wait_event(sk, timeo_p, !tsk->link_cong, &wait); + remove_wait_queue(sk_sleep(sk), &wait); } while (!done); return 0; } @@@ -934,7 -844,6 +888,7 @@@ static int __tipc_sendmsg(struct socke struct tipc_msg *mhdr = &tsk->phdr; u32 dnode, dport; struct sk_buff_head pktchain; + bool is_connectionless = tipc_sk_type_connectionless(sk); struct sk_buff *skb; struct tipc_name_seq *seq; struct iov_iter save; @@@ -945,18 -854,18 +899,18 @@@ if (dsz > TIPC_MAX_USER_MSG_SIZE) return -EMSGSIZE; if (unlikely(!dest)) { - if (tsk->connected && sock->state == SS_READY) - dest = &tsk->remote; + if (is_connectionless && tsk->peer.family == AF_TIPC) + dest = &tsk->peer; else return -EDESTADDRREQ; } else if (unlikely(m->msg_namelen < sizeof(*dest)) || dest->family != AF_TIPC) { return -EINVAL; } - if (unlikely(sock->state != SS_READY)) { - if (sock->state == SS_LISTENING) + if (!is_connectionless) { + if (sk->sk_state == TIPC_LISTEN) return -EPIPE; - if (sock->state != SS_UNCONNECTED) + if (sk->sk_state != TIPC_OPEN) return -EISCONN; if (tsk->published) return -EOPNOTSUPP; @@@ -1008,8 -917,8 +962,8 @@@ new_mtu TIPC_SKB_CB(skb)->wakeup_pending = tsk->link_cong; rc = tipc_node_xmit(net, &pktchain, dnode, tsk->portid); if (likely(!rc)) { - if (sock->state != SS_READY) - sock->state = SS_CONNECTING; + if (!is_connectionless) + tipc_set_sk_state(sk, TIPC_CONNECTING); return dsz; } if (rc == -ELINKCONG) { @@@ -1031,30 -940,30 +985,30 @@@
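Editor's note: the wait-helper conversions above (here and in tipc_wait_for_sndpkt() and tipc_wait_for_connect() below) all move from prepare_to_wait()/finish_wait() to DEFINE_WAIT_FUNC(wait, woken_wake_function) with the wait entry passed into sk_wait_event(), so a wakeup arriving between the condition check and the sleep is not lost. A kernel-style sketch (not stand-alone) of the shared shape, with a hypothetical condition callback:

    static int wait_for_cond(struct sock *sk, long *timeo_p,
                             bool (*cond)(struct sock *))
    {
            DEFINE_WAIT_FUNC(wait, woken_wake_function);
            int done;

            do {
                    if (!*timeo_p)
                            return -EAGAIN;
                    if (signal_pending(current))
                            return sock_intr_errno(*timeo_p);

                    /* the wait entry is on the queue before the condition
                     * is re-tested inside sk_wait_event(), closing the race
                     */
                    add_wait_queue(sk_sleep(sk), &wait);
                    done = sk_wait_event(sk, timeo_p, cond(sk), &wait);
                    remove_wait_queue(sk_sleep(sk), &wait);
            } while (!done);
            return 0;
    }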
static int tipc_wait_for_sndpkt(struct socket *sock, long *timeo_p) { + DEFINE_WAIT_FUNC(wait, woken_wake_function); struct sock *sk = sock->sk; struct tipc_sock *tsk = tipc_sk(sk); - DEFINE_WAIT(wait); int done;
do { int err = sock_error(sk); if (err) return err; - if (sock->state == SS_DISCONNECTING) + if (sk->sk_state == TIPC_DISCONNECTING) return -EPIPE; - else if (sock->state != SS_CONNECTED) + else if (!tipc_sk_connected(sk)) return -ENOTCONN; if (!*timeo_p) return -EAGAIN; if (signal_pending(current)) return sock_intr_errno(*timeo_p);
- prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); + add_wait_queue(sk_sleep(sk), &wait); done = sk_wait_event(sk, timeo_p, (!tsk->link_cong && !tsk_conn_cong(tsk)) || - !tsk->connected); - finish_wait(sk_sleep(sk), &wait); + !tipc_sk_connected(sk), &wait); + remove_wait_queue(sk_sleep(sk), &wait); } while (!done); return 0; } @@@ -1109,17 -1018,14 +1063,17 @@@ static int __tipc_send_stream(struct so if (dsz > (uint)INT_MAX) return -EMSGSIZE;
- if (unlikely(sock->state != SS_CONNECTED)) { - if (sock->state == SS_DISCONNECTING) + if (unlikely(!tipc_sk_connected(sk))) { + if (sk->sk_state == TIPC_DISCONNECTING) return -EPIPE; else return -ENOTCONN; }
timeo = sock_sndtimeo(sk, m->msg_flags & MSG_DONTWAIT); + if (!timeo && tsk->link_cong) + return -ELINKCONG; + dnode = tsk_peer_node(tsk); skb_queue_head_init(&pktchain);
@@@ -1193,8 -1099,10 +1147,8 @@@ static void tipc_sk_finish_conn(struct msg_set_lookup_scope(msg, 0); msg_set_hdr_sz(msg, SHORT_H_SIZE);
- tsk->probing_intv = CONN_PROBING_INTERVAL; - tsk->probing_state = TIPC_CONN_OK; - tsk->connected = 1; - sk_reset_timer(sk, &sk->sk_timer, jiffies + tsk->probing_intv); + sk_reset_timer(sk, &sk->sk_timer, jiffies + CONN_PROBING_INTERVAL); + tipc_set_sk_state(sk, TIPC_ESTABLISHED); tipc_node_add_conn(net, peer_node, tsk->portid, peer_port); tsk->max_pkt = tipc_node_get_mtu(net, peer_node, tsk->portid); tsk->peer_caps = tipc_node_get_capabilities(net, peer_node); @@@ -1302,14 -1210,13 +1256,14 @@@ static int tipc_sk_anc_data_recv(struc
static void tipc_sk_send_ack(struct tipc_sock *tsk) { - struct net *net = sock_net(&tsk->sk); + struct sock *sk = &tsk->sk; + struct net *net = sock_net(sk); struct sk_buff *skb = NULL; struct tipc_msg *msg; u32 peer_port = tsk_peer_port(tsk); u32 dnode = tsk_peer_node(tsk);
- if (!tsk->connected) + if (!tipc_sk_connected(sk)) return; skb = tipc_msg_create(CONN_MANAGER, CONN_ACK, INT_H_SIZE, 0, dnode, tsk_own_node(tsk), peer_port, @@@ -1338,7 -1245,7 +1292,7 @@@ static int tipc_wait_for_rcvmsg(struct for (;;) { prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); if (timeo && skb_queue_empty(&sk->sk_receive_queue)) { - if (sock->state == SS_DISCONNECTING) { + if (sk->sk_shutdown & RCV_SHUTDOWN) { err = -ENOTCONN; break; } @@@ -1379,7 -1286,6 +1333,7 @@@ static int tipc_recvmsg(struct socket * struct tipc_sock *tsk = tipc_sk(sk); struct sk_buff *buf; struct tipc_msg *msg; + bool is_connectionless = tipc_sk_type_connectionless(sk); long timeo; unsigned int sz; u32 err; @@@ -1391,7 -1297,7 +1345,7 @@@
lock_sock(sk);
- if (unlikely(sock->state == SS_UNCONNECTED)) { + if (!is_connectionless && unlikely(sk->sk_state == TIPC_OPEN)) { res = -ENOTCONN; goto exit; } @@@ -1436,8 -1342,8 +1390,8 @@@ restart goto exit; res = sz; } else { - if ((sock->state == SS_READY) || - ((err == TIPC_CONN_SHUTDOWN) || m->msg_control)) + if (is_connectionless || err == TIPC_CONN_SHUTDOWN || + m->msg_control) res = 0; else res = -ECONNRESET; @@@ -1446,7 -1352,7 +1400,7 @@@ if (unlikely(flags & MSG_PEEK)) goto exit;
- if (likely(sock->state != SS_READY)) { + if (likely(!is_connectionless)) { tsk->rcv_unacked += tsk_inc(tsk, hlen + sz); if (unlikely(tsk->rcv_unacked >= (tsk->rcv_win / 4))) tipc_sk_send_ack(tsk); @@@ -1477,7 -1383,7 +1431,7 @@@ static int tipc_recv_stream(struct sock struct tipc_msg *msg; long timeo; unsigned int sz; - int sz_to_copy, target, needed; + int target; int sz_copied = 0; u32 err; int res = 0, hlen; @@@ -1488,7 -1394,7 +1442,7 @@@
lock_sock(sk);
- if (unlikely(sock->state == SS_UNCONNECTED)) { + if (unlikely(sk->sk_state == TIPC_OPEN)) { res = -ENOTCONN; goto exit; } @@@ -1525,13 -1431,11 +1479,13 @@@ restart
/* Capture message data (if valid) & compute return value (always) */ if (!err) { - u32 offset = (u32)(unsigned long)(TIPC_SKB_CB(buf)->handle); + u32 offset = TIPC_SKB_CB(buf)->bytes_read; + u32 needed; + int sz_to_copy;
sz -= offset; needed = (buf_len - sz_copied); - sz_to_copy = (sz <= needed) ? sz : needed; + sz_to_copy = min(sz, needed);
res = skb_copy_datagram_msg(buf, hlen + offset, m, sz_to_copy); if (res) @@@ -1541,8 -1445,8 +1495,8 @@@
if (sz_to_copy < sz) { if (!(flags & MSG_PEEK)) - TIPC_SKB_CB(buf)->handle = - (void *)(unsigned long)(offset + sz_to_copy); + TIPC_SKB_CB(buf)->bytes_read = + offset + sz_to_copy; goto exit; } } else { @@@ -1624,31 -1528,49 +1578,31 @@@ static bool filter_connect(struct tipc_ { struct sock *sk = &tsk->sk; struct net *net = sock_net(sk); - struct socket *sock = sk->sk_socket; struct tipc_msg *hdr = buf_msg(skb);
if (unlikely(msg_mcast(hdr))) return false;
- switch ((int)sock->state) { - case SS_CONNECTED: - - /* Accept only connection-based messages sent by peer */ - if (unlikely(!tsk_peer_msg(tsk, hdr))) - return false; - - if (unlikely(msg_errcode(hdr))) { - sock->state = SS_DISCONNECTING; - tsk->connected = 0; - /* Let timer expire on it's own */ - tipc_node_remove_conn(net, tsk_peer_node(tsk), - tsk->portid); - } - return true; - - case SS_CONNECTING: - + switch (sk->sk_state) { + case TIPC_CONNECTING: /* Accept only ACK or NACK message */ if (unlikely(!msg_connected(hdr))) return false;
if (unlikely(msg_errcode(hdr))) { - sock->state = SS_DISCONNECTING; + tipc_set_sk_state(sk, TIPC_DISCONNECTING); sk->sk_err = ECONNREFUSED; return true; }
if (unlikely(!msg_isdata(hdr))) { - sock->state = SS_DISCONNECTING; + tipc_set_sk_state(sk, TIPC_DISCONNECTING); sk->sk_err = EINVAL; return true; }
tipc_sk_finish_conn(tsk, msg_origport(hdr), msg_orignode(hdr)); msg_set_importance(&tsk->phdr, msg_importance(hdr)); - sock->state = SS_CONNECTED;
/* If 'ACK+' message, add to socket receive queue */ if (msg_data_sz(hdr)) @@@ -1662,31 -1584,18 +1616,31 @@@ msg_set_dest_droppable(hdr, 1); return false;
- case SS_LISTENING: - case SS_UNCONNECTED: - + case TIPC_OPEN: + case TIPC_DISCONNECTING: + break; + case TIPC_LISTEN: /* Accept only SYN message */ if (!msg_connected(hdr) && !(msg_errcode(hdr))) return true; break; - case SS_DISCONNECTING: - break; + case TIPC_ESTABLISHED: + /* Accept only connection-based messages sent by peer */ + if (unlikely(!tsk_peer_msg(tsk, hdr))) + return false; + + if (unlikely(msg_errcode(hdr))) { + tipc_set_sk_state(sk, TIPC_DISCONNECTING); + /* Let timer expire on its own */ + tipc_node_remove_conn(net, tsk_peer_node(tsk), + tsk->portid); + sk->sk_state_change(sk); + } + return true; default: - pr_err("Unknown socket state %u\n", sock->state); + pr_err("Unknown sk_state %u\n", sk->sk_state); } + return false; }
@@@ -1737,6 -1646,7 +1691,6 @@@ static unsigned int rcvbuf_limit(struc static bool filter_rcv(struct sock *sk, struct sk_buff *skb, struct sk_buff_head *xmitq) { - struct socket *sock = sk->sk_socket; struct tipc_sock *tsk = tipc_sk(sk); struct tipc_msg *hdr = buf_msg(skb); unsigned int limit = rcvbuf_limit(sk, skb); @@@ -1762,7 -1672,7 +1716,7 @@@ }
/* Reject if wrong message type for current socket state */ - if (unlikely(sock->state == SS_READY)) { + if (tipc_sk_type_connectionless(sk)) { if (msg_connected(hdr)) { err = TIPC_ERR_NO_PORT; goto reject; @@@ -1779,7 -1689,7 +1733,7 @@@ }
/* Enqueue message */ - TIPC_SKB_CB(skb)->handle = NULL; + TIPC_SKB_CB(skb)->bytes_read = 0; __skb_queue_tail(&sk->sk_receive_queue, skb); skb_set_owner_r(skb, sk);
@@@ -1929,8 -1839,8 +1883,8 @@@ xmit
static int tipc_wait_for_connect(struct socket *sock, long *timeo_p) { + DEFINE_WAIT_FUNC(wait, woken_wake_function); struct sock *sk = sock->sk; - DEFINE_WAIT(wait); int done;
do { @@@ -1942,10 -1852,9 +1896,10 @@@ if (signal_pending(current)) return sock_intr_errno(*timeo_p);
- prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); - done = sk_wait_event(sk, timeo_p, sock->state != SS_CONNECTING); - finish_wait(sk_sleep(sk), &wait); + add_wait_queue(sk_sleep(sk), &wait); + done = sk_wait_event(sk, timeo_p, + sk->sk_state != TIPC_CONNECTING, &wait); + remove_wait_queue(sk_sleep(sk), &wait); } while (!done); return 0; } @@@ -1967,19 -1876,21 +1921,19 @@@ static int tipc_connect(struct socket * struct sockaddr_tipc *dst = (struct sockaddr_tipc *)dest; struct msghdr m = {NULL,}; long timeout = (flags & O_NONBLOCK) ? 0 : tsk->conn_timeout; - socket_state previous; + int previous; int res = 0;
lock_sock(sk);
/* DGRAM/RDM connect(), just save the destaddr */ - if (sock->state == SS_READY) { + if (tipc_sk_type_connectionless(sk)) { if (dst->family == AF_UNSPEC) { - memset(&tsk->remote, 0, sizeof(struct sockaddr_tipc)); - tsk->connected = 0; + memset(&tsk->peer, 0, sizeof(struct sockaddr_tipc)); } else if (destlen != sizeof(struct sockaddr_tipc)) { res = -EINVAL; } else { - memcpy(&tsk->remote, dest, destlen); - tsk->connected = 1; + memcpy(&tsk->peer, dest, destlen); } goto exit; } @@@ -1995,10 -1906,9 +1949,10 @@@ goto exit; }
- previous = sock->state; - switch (sock->state) { - case SS_UNCONNECTED: + previous = sk->sk_state; + + switch (sk->sk_state) { + case TIPC_OPEN: /* Send a 'SYN-' to destination */ m.msg_name = dest; m.msg_namelen = destlen; @@@ -2013,29 -1923,27 +1967,29 @@@ if ((res < 0) && (res != -EWOULDBLOCK)) goto exit;
- /* Just entered SS_CONNECTING state; the only + /* Just entered TIPC_CONNECTING state; the only * difference is that return value in non-blocking * case is EINPROGRESS, rather than EALREADY. */ res = -EINPROGRESS; - case SS_CONNECTING: - if (previous == SS_CONNECTING) - res = -EALREADY; - if (!timeout) + /* fall thru' */ + case TIPC_CONNECTING: + if (!timeout) { + if (previous == TIPC_CONNECTING) + res = -EALREADY; goto exit; + } timeout = msecs_to_jiffies(timeout); /* Wait until an 'ACK' or 'RST' arrives, or a timeout occurs */ res = tipc_wait_for_connect(sock, &timeout); break; - case SS_CONNECTED: + case TIPC_ESTABLISHED: res = -EISCONN; break; default: res = -EINVAL; - break; } + exit: release_sock(sk); return res; @@@ -2054,9 -1962,15 +2008,9 @@@ static int tipc_listen(struct socket *s int res;
lock_sock(sk); - - if (sock->state != SS_UNCONNECTED) - res = -EINVAL; - else { - sock->state = SS_LISTENING; - res = 0; - } - + res = tipc_set_sk_state(sk, TIPC_LISTEN); release_sock(sk); + return res; }
@@@ -2082,6 -1996,9 +2036,6 @@@ static int tipc_wait_for_accept(struct err = 0; if (!skb_queue_empty(&sk->sk_receive_queue)) break; - err = -EINVAL; - if (sock->state != SS_LISTENING) - break; err = -EAGAIN; if (!timeo) break; @@@ -2112,7 -2029,7 +2066,7 @@@ static int tipc_accept(struct socket *s
lock_sock(sk);
- if (sock->state != SS_LISTENING) { + if (sk->sk_state != TIPC_LISTEN) { res = -EINVAL; goto exit; } @@@ -2123,7 -2040,7 +2077,7 @@@
buf = skb_peek(&sk->sk_receive_queue);
- res = tipc_sk_create(sock_net(sock->sk), new_sock, 0, 1); + res = tipc_sk_create(sock_net(sock->sk), new_sock, 0, 0); if (res) goto exit; security_sk_clone(sock->sk, new_sock->sk); @@@ -2143,6 -2060,7 +2097,6 @@@
/* Connect new socket to it's peer */ tipc_sk_finish_conn(new_tsock, msg_origport(msg), msg_orignode(msg)); - new_sock->state = SS_CONNECTED;
tsk_set_importance(new_tsock, msg_importance(msg)); if (msg_named(msg)) { @@@ -2182,6 -2100,13 +2136,6 @@@ exit static int tipc_shutdown(struct socket *sock, int how) { struct sock *sk = sock->sk; - struct net *net = sock_net(sk); - struct tipc_sock *tsk = tipc_sk(sk); - struct sk_buff *skb; - u32 dnode = tsk_peer_node(tsk); - u32 dport = tsk_peer_port(tsk); - u32 onode = tipc_own_addr(net); - u32 oport = tsk->portid; int res;
if (how != SHUT_RDWR) @@@ -2189,17 -2114,45 +2143,17 @@@
lock_sock(sk);
- switch (sock->state) { - case SS_CONNECTING: - case SS_CONNECTED: - -restart: - dnode = tsk_peer_node(tsk); - - /* Disconnect and send a 'FIN+' or 'FIN-' message to peer */ - skb = __skb_dequeue(&sk->sk_receive_queue); - if (skb) { - if (TIPC_SKB_CB(skb)->handle != NULL) { - kfree_skb(skb); - goto restart; - } - tipc_sk_respond(sk, skb, TIPC_CONN_SHUTDOWN); - } else { - skb = tipc_msg_create(TIPC_CRITICAL_IMPORTANCE, - TIPC_CONN_MSG, SHORT_H_SIZE, - 0, dnode, onode, dport, oport, - TIPC_CONN_SHUTDOWN); - if (skb) - tipc_node_xmit_skb(net, skb, dnode, tsk->portid); - } - tsk->connected = 0; - sock->state = SS_DISCONNECTING; - tipc_node_remove_conn(net, dnode, tsk->portid); - /* fall through */ - - case SS_DISCONNECTING: + __tipc_shutdown(sock, TIPC_CONN_SHUTDOWN); + sk->sk_shutdown = SEND_SHUTDOWN;
+ if (sk->sk_state == TIPC_DISCONNECTING) { /* Discard any unreceived messages */ __skb_queue_purge(&sk->sk_receive_queue);
/* Wake up anyone sleeping in poll */ sk->sk_state_change(sk); res = 0; - break; - - default: + } else { res = -ENOTCONN; }
@@@ -2216,16 -2169,17 +2170,16 @@@ static void tipc_sk_timeout(unsigned lo u32 own_node = tsk_own_node(tsk);
bh_lock_sock(sk); - if (!tsk->connected) { + if (!tipc_sk_connected(sk)) { bh_unlock_sock(sk); goto exit; } peer_port = tsk_peer_port(tsk); peer_node = tsk_peer_node(tsk);
- if (tsk->probing_state == TIPC_CONN_PROBING) { + if (tsk->probe_unacked) { if (!sock_owned_by_user(sk)) { - sk->sk_socket->state = SS_DISCONNECTING; - tsk->connected = 0; + tipc_set_sk_state(sk, TIPC_DISCONNECTING); tipc_node_remove_conn(sock_net(sk), tsk_peer_node(tsk), tsk_peer_port(tsk)); sk->sk_state_change(sk); @@@ -2234,15 -2188,13 +2188,15 @@@ sk_reset_timer(sk, &sk->sk_timer, (HZ / 20)); }
- } else { - skb = tipc_msg_create(CONN_MANAGER, CONN_PROBE, - INT_H_SIZE, 0, peer_node, own_node, - peer_port, tsk->portid, TIPC_OK); - tsk->probing_state = TIPC_CONN_PROBING; - sk_reset_timer(sk, &sk->sk_timer, jiffies + tsk->probing_intv); + bh_unlock_sock(sk); + goto exit; } + + skb = tipc_msg_create(CONN_MANAGER, CONN_PROBE, + INT_H_SIZE, 0, peer_node, own_node, + peer_port, tsk->portid, TIPC_OK); + tsk->probe_unacked = true; + sk_reset_timer(sk, &sk->sk_timer, jiffies + CONN_PROBING_INTERVAL); bh_unlock_sock(sk); if (skb) tipc_node_xmit_skb(sock_net(sk), skb, peer_node, tsk->portid); @@@ -2253,12 -2205,11 +2207,12 @@@ exit static int tipc_sk_publish(struct tipc_sock *tsk, uint scope, struct tipc_name_seq const *seq) { - struct net *net = sock_net(&tsk->sk); + struct sock *sk = &tsk->sk; + struct net *net = sock_net(sk); struct publication *publ; u32 key;
- if (tsk->connected) + if (tipc_sk_connected(sk)) return -EINVAL; key = tsk->portid + tsk->pub_count + 1; if (key == tsk->portid) @@@ -2716,7 -2667,6 +2670,7 @@@ static int __tipc_nl_add_sk(struct sk_b struct nlattr *attrs; struct net *net = sock_net(skb->sk); struct tipc_net *tn = net_generic(net, tipc_net_id); + struct sock *sk = &tsk->sk;
hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq, &tipc_genl_family, NLM_F_MULTI, TIPC_NL_SOCK_GET); @@@ -2731,7 -2681,7 +2685,7 @@@ if (nla_put_u32(skb, TIPC_NLA_SOCK_ADDR, tn->own_addr)) goto attr_msg_cancel;
- if (tsk->connected) { + if (tipc_sk_connected(sk)) { err = __tipc_nl_add_sk_con(skb, tsk); if (err) goto attr_msg_cancel; diff --combined net/unix/af_unix.c index 6a705d0,2358f26..1752d6b --- a/net/unix/af_unix.c +++ b/net/unix/af_unix.c @@@ -2113,8 -2113,8 +2113,8 @@@ static int unix_dgram_recvmsg(struct so mutex_lock(&u->iolock);
skip = sk_peek_offset(sk, flags); - skb = __skb_try_recv_datagram(sk, flags, &peeked, &skip, &err, - &last); + skb = __skb_try_recv_datagram(sk, flags, NULL, &peeked, &skip, + &err, &last); if (skb) break;
@@@ -2199,7 -2199,8 +2199,8 @@@ out * Sleep until more data has arrived. But check for races.. */ static long unix_stream_data_wait(struct sock *sk, long timeo, - struct sk_buff *last, unsigned int last_len) + struct sk_buff *last, unsigned int last_len, + bool freezable) { struct sk_buff *tail; DEFINE_WAIT(wait); @@@ -2220,7 -2221,10 +2221,10 @@@
sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk); unix_state_unlock(sk); - timeo = freezable_schedule_timeout(timeo); + if (freezable) + timeo = freezable_schedule_timeout(timeo); + else + timeo = schedule_timeout(timeo); unix_state_lock(sk);
if (sock_flag(sk, SOCK_DEAD)) @@@ -2250,7 -2254,8 +2254,8 @@@ struct unix_stream_read_state unsigned int splice_flags; };
- static int unix_stream_read_generic(struct unix_stream_read_state *state) + static int unix_stream_read_generic(struct unix_stream_read_state *state, + bool freezable) { struct scm_cookie scm; struct socket *sock = state->socket; @@@ -2330,7 -2335,7 +2335,7 @@@ again mutex_unlock(&u->iolock);
timeo = unix_stream_data_wait(sk, timeo, last, - last_len); + last_len, freezable);
if (signal_pending(current)) { err = sock_intr_errno(timeo); @@@ -2472,7 -2477,7 +2477,7 @@@ static int unix_stream_recvmsg(struct s .flags = flags };
- return unix_stream_read_generic(&state); + return unix_stream_read_generic(&state, true); }
static int unix_stream_splice_actor(struct sk_buff *skb, @@@ -2503,7 -2508,7 +2508,7 @@@ static ssize_t unix_stream_splice_read( flags & SPLICE_F_NONBLOCK) state.flags = MSG_DONTWAIT;
- return unix_stream_read_generic(&state); + return unix_stream_read_generic(&state, false); }
 static int unix_shutdown(struct socket *sock, int mode)

diff --combined net/wireless/core.h
index fb2fcd5,f0c0c8a..ec5f333
--- a/net/wireless/core.h
+++ b/net/wireless/core.h
@@@ -71,6 -71,7 +71,7 @@@ struct cfg80211_registered_device
 	struct list_head bss_list;
 	struct rb_root bss_tree;
 	u32 bss_generation;
+ 	u32 bss_entries;
 	struct cfg80211_scan_request *scan_req; /* protected by RTNL */
 	struct sk_buff *scan_msg;
 	struct cfg80211_sched_scan_request __rcu *sched_scan_req;

@@@ -345,7 -346,7 +346,7 @@@ int cfg80211_mlme_auth(struct cfg80211_
 		       const u8 *ssid, int ssid_len,
 		       const u8 *ie, int ie_len,
 		       const u8 *key, int key_len, int key_idx,
-		       const u8 *sae_data, int sae_data_len);
+		       const u8 *auth_data, int auth_data_len);
 int cfg80211_mlme_assoc(struct cfg80211_registered_device *rdev,
 			struct net_device *dev,
 			struct ieee80211_channel *chan,

@@@ -475,7 -476,7 +476,7 @@@ int ieee80211_get_ratemask(struct ieee8
 			   u32 *mask);

 int cfg80211_validate_beacon_int(struct cfg80211_registered_device *rdev,
-				 u32 beacon_int);
+				 enum nl80211_iftype iftype, u32 beacon_int);

 void cfg80211_update_iface_num(struct cfg80211_registered_device *rdev,
 			       enum nl80211_iftype iftype, int num);

diff --combined net/wireless/util.c
index 88725f8,659b507..e9d040d
--- a/net/wireless/util.c
+++ b/net/wireless/util.c
@@@ -13,7 -13,6 +13,7 @@@
 #include <net/dsfield.h>
 #include <linux/if_vlan.h>
 #include <linux/mpls.h>
+#include <linux/gcd.h>
 #include "core.h"
 #include "rdev-ops.h"

@@@ -1159,7 -1158,8 +1159,8 @@@ static u32 cfg80211_calculate_bitrate_v
 		  58500000,
 		  65000000,
 		  78000000,
- 		  0,
+ 		  /* not in the spec, but some devices use this: */
+ 		  86500000,
 		},
 		{  13500000,
 		   27000000,

@@@ -1378,25 -1378,6 +1379,25 @@@ static bool ieee80211_id_in_list(const
 	return false;
 }
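The table entry changed above is in the 20 MHz VHT base-rate column: MCS 9 at 20 MHz with one spatial stream is not defined by the spec, but devices report it, so the 0 placeholder becomes 86500000 (86.5 Mbit/s). A standalone, simplified model of the lookup and NSS scaling (the leading table entries and the 100 kbit/s rounding mimic cfg80211's reporting; this is not the kernel function itself):

#include <stdio.h>

/* Base rates in bit/s for one spatial stream at 20 MHz, VHT MCS 0-9;
 * the last entry is the out-of-spec MCS 9 value added above.
 */
static const unsigned long long vht20_base[10] = {
	 6500000, 13000000, 19500000, 26000000, 39000000,
	52000000, 58500000, 65000000, 78000000, 86500000,
};

int main(void)
{
	unsigned int mcs = 9, nss = 2;
	unsigned long long bitrate = vht20_base[mcs] * nss;

	/* round to units of 100 kbit/s: MCS 9, 2 SS -> 1730 */
	printf("VHT MCS%u, 20 MHz, %u SS: %llu x 100 kbit/s\n",
	       mcs, nss, (bitrate + 50000) / 100000);
	return 0;
}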
+static size_t skip_ie(const u8 *ies, size_t ielen, size_t pos)
+{
+	/* we assume a validly formed IEs buffer */
+	u8 len = ies[pos + 1];
+
+	pos += 2 + len;
+
+	/* the IE itself must have 255 bytes for fragments to follow */
+	if (len < 255)
+		return pos;
+
+	while (pos < ielen && ies[pos] == WLAN_EID_FRAGMENT) {
+		len = ies[pos + 1];
+		pos += 2 + len;
+	}
+
+	return pos;
+}
+
 size_t ieee80211_ie_split_ric(const u8 *ies, size_t ielen,
 			      const u8 *ids, int n_ids,
 			      const u8 *after_ric, int n_after_ric,

@@@ -1406,14 -1387,14 +1407,14 @@@

 	while (pos < ielen && ieee80211_id_in_list(ids, n_ids, ies[pos])) {
 		if (ies[pos] == WLAN_EID_RIC_DATA && n_after_ric) {
-			pos += 2 + ies[pos + 1];
+			pos = skip_ie(ies, ielen, pos);

 			while (pos < ielen &&
 			       !ieee80211_id_in_list(after_ric, n_after_ric,
 						     ies[pos]))
-				pos += 2 + ies[pos + 1];
+				pos = skip_ie(ies, ielen, pos);
 		} else {
-			pos += 2 + ies[pos + 1];
+			pos = skip_ie(ies, ielen, pos);
 		}
 	}
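skip_ie() exists because an element whose body is exactly 255 bytes may be continued by one or more Fragment elements, and the old "pos += 2 + ies[pos + 1]" would stop between the lead element and its fragments. A standalone restatement of the same walk (WLAN_EID_FRAGMENT is element ID 242 in the 802.11 numbering; like the kernel helper, this assumes a validly formed buffer):

#include <stddef.h>
#include <stdint.h>

#define WLAN_EID_FRAGMENT 242

/* Advance past one logical element, including any Fragment elements
 * that continue it; each element is an ID byte, a length byte, then
 * the body.
 */
static size_t skip_ie(const uint8_t *ies, size_t ielen, size_t pos)
{
	uint8_t len = ies[pos + 1];

	pos += 2 + len;

	/* only a maximally sized element (255-byte body) is fragmented */
	if (len < 255)
		return pos;

	while (pos < ielen && ies[pos] == WLAN_EID_FRAGMENT)
		pos += 2 + ies[pos + 1];

	return pos;
}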
@@@ -1574,57 -1555,31 +1575,57 @@@ bool ieee80211_chandef_to_operating_cla
 }
 EXPORT_SYMBOL(ieee80211_chandef_to_operating_class);

-int cfg80211_validate_beacon_int(struct cfg80211_registered_device *rdev,
-				 u32 beacon_int)
+static void cfg80211_calculate_bi_data(struct wiphy *wiphy, u32 new_beacon_int,
+				       u32 *beacon_int_gcd,
+				       bool *beacon_int_different)
 {
 	struct wireless_dev *wdev;
-	int res = 0;

-	if (beacon_int < 10 || beacon_int > 10000)
-		return -EINVAL;
+	*beacon_int_gcd = 0;
+	*beacon_int_different = false;

-	list_for_each_entry(wdev, &rdev->wiphy.wdev_list, list) {
+	list_for_each_entry(wdev, &wiphy->wdev_list, list) {
 		if (!wdev->beacon_interval)
 			continue;
-		if (wdev->beacon_interval != beacon_int) {
-			res = -EINVAL;
-			break;
+
+		if (!*beacon_int_gcd) {
+			*beacon_int_gcd = wdev->beacon_interval;
+			continue;
 		}
+
+		if (wdev->beacon_interval == *beacon_int_gcd)
+			continue;
+
+		*beacon_int_different = true;
+		*beacon_int_gcd = gcd(*beacon_int_gcd, wdev->beacon_interval);
 	}

-	return res;
+	if (new_beacon_int && *beacon_int_gcd != new_beacon_int) {
+		if (*beacon_int_gcd)
+			*beacon_int_different = true;
+		*beacon_int_gcd = gcd(*beacon_int_gcd, new_beacon_int);
+	}
+}
+
+int cfg80211_validate_beacon_int(struct cfg80211_registered_device *rdev,
+				 enum nl80211_iftype iftype, u32 beacon_int)
+{
+	/*
+	 * This is just a basic pre-condition check; if interface combinations
+	 * are possible the driver must already be checking those with a call
+	 * to cfg80211_check_combinations(), in which case we'll validate more
+	 * through the cfg80211_calculate_bi_data() call and code in
+	 * cfg80211_iter_combinations().
+	 */
+
+	if (beacon_int < 10 || beacon_int > 10000)
+		return -EINVAL;
+
+	return 0;
 }
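The replacement no longer insists that every interface beacon at the same interval: it records the GCD of all intervals in use (folding in the one about to be added) plus a flag saying whether any two differ, and defers the policy decision to the combination check below. A runnable sketch of the same bookkeeping, with made-up interval values:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

static unsigned int gcd(unsigned int a, unsigned int b)
{
	while (b) {
		unsigned int t = a % b;
		a = b;
		b = t;
	}
	return a;
}

int main(void)
{
	/* three beaconing interfaces plus a new one being brought up */
	const unsigned int in_use[] = { 100, 200, 300 };
	unsigned int new_beacon_int = 150;
	unsigned int beacon_int_gcd = 0;
	bool beacon_int_different = false;

	for (size_t i = 0; i < sizeof(in_use) / sizeof(in_use[0]); i++) {
		if (!beacon_int_gcd) {
			beacon_int_gcd = in_use[i];
			continue;
		}
		if (in_use[i] == beacon_int_gcd)
			continue;
		beacon_int_different = true;
		beacon_int_gcd = gcd(beacon_int_gcd, in_use[i]);
	}

	if (new_beacon_int && beacon_int_gcd != new_beacon_int) {
		if (beacon_int_gcd)
			beacon_int_different = true;
		beacon_int_gcd = gcd(beacon_int_gcd, new_beacon_int);
	}

	/* prints: gcd=50 different=1 */
	printf("gcd=%u different=%d\n", beacon_int_gcd, beacon_int_different);
	return 0;
}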
 int cfg80211_iter_combinations(struct wiphy *wiphy,
-			       const int num_different_channels,
-			       const u8 radar_detect,
-			       const int iftype_num[NUM_NL80211_IFTYPES],
+			       struct iface_combination_params *params,
 			       void (*iter)(const struct ieee80211_iface_combination *c,
 					    void *data),
 			       void *data)

@@@ -1634,23 -1589,8 +1635,23 @@@
 	int i, j, iftype;
 	int num_interfaces = 0;
 	u32 used_iftypes = 0;
+	u32 beacon_int_gcd;
+	bool beacon_int_different;
+
+	/*
+	 * This is a bit strange, since the iteration used to rely only on
+	 * the data given by the driver, but here it now relies on context,
+	 * in form of the currently operating interfaces.
+	 * This is OK for all current users, and saves us from having to
+	 * push the GCD calculations into all the drivers.
+	 * In the future, this should probably rely more on data that's in
+	 * cfg80211 already - the only thing not would appear to be any new
+	 * interfaces (while being brought up) and channel/radar data.
+	 */
+	cfg80211_calculate_bi_data(wiphy, params->new_beacon_int,
+				   &beacon_int_gcd, &beacon_int_different);

-	if (radar_detect) {
+	if (params->radar_detect) {
 		rcu_read_lock();
 		regdom = rcu_dereference(cfg80211_regdomain);
 		if (regdom)

@@@ -1659,8 -1599,8 +1660,8 @@@
 	}

 	for (iftype = 0; iftype < NUM_NL80211_IFTYPES; iftype++) {
-		num_interfaces += iftype_num[iftype];
-		if (iftype_num[iftype] > 0 &&
+		num_interfaces += params->iftype_num[iftype];
+		if (params->iftype_num[iftype] > 0 &&
 		    !(wiphy->software_iftypes & BIT(iftype)))
 			used_iftypes |= BIT(iftype);
 	}

@@@ -1674,7 -1614,7 +1675,7 @@@

 		if (num_interfaces > c->max_interfaces)
 			continue;
-		if (num_different_channels > c->num_different_channels)
+		if (params->num_different_channels > c->num_different_channels)
 			continue;

 		limits = kmemdup(c->limits, sizeof(limits[0]) * c->n_limits,

@@@ -1689,17 -1629,16 +1690,17 @@@
 				all_iftypes |= limits[j].types;
 				if (!(limits[j].types & BIT(iftype)))
 					continue;
-				if (limits[j].max < iftype_num[iftype])
+				if (limits[j].max < params->iftype_num[iftype])
 					goto cont;
-				limits[j].max -= iftype_num[iftype];
+				limits[j].max -= params->iftype_num[iftype];
 			}
 		}

-		if (radar_detect != (c->radar_detect_widths & radar_detect))
+		if (params->radar_detect !=
+		    (c->radar_detect_widths & params->radar_detect))
 			goto cont;

-		if (radar_detect && c->radar_detect_regions &&
+		if (params->radar_detect && c->radar_detect_regions &&
 		    !(c->radar_detect_regions & BIT(region)))
 			goto cont;

@@@ -1711,14 -1650,6 +1712,14 @@@
 		if ((all_iftypes & used_iftypes) != used_iftypes)
 			goto cont;

+		if (beacon_int_gcd) {
+			if (c->beacon_int_min_gcd &&
+			    beacon_int_gcd < c->beacon_int_min_gcd)
+				goto cont;
+			if (!c->beacon_int_min_gcd && beacon_int_different)
+				goto cont;
+		}
+
 		/* This combination covered all interface types and
 		 * supported the requested numbers, so we're good.
 		 */

@@@ -1741,11 -1672,14 +1742,11 @@@ cfg80211_iter_sum_ifcombs(const struct
 }
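The new block gives the computed GCD a policy: a combination advertising beacon_int_min_gcd accepts any mix of intervals whose GCD stays at or above that floor, while a combination without it keeps the old all-intervals-equal rule. With the numbers from the sketch above (GCD 50, intervals differing), only combinations advertising beacon_int_min_gcd of 50 or less survive. A hedged restatement of the check as a standalone predicate:

#include <stdbool.h>

/* min_gcd is the combination's beacon_int_min_gcd (0 when the driver
 * does not set it); gcd_in_use/different come from the GCD bookkeeping.
 */
static bool beacon_intervals_ok(unsigned int gcd_in_use, bool different,
				unsigned int min_gcd)
{
	if (!gcd_in_use)	/* nothing is beaconing yet */
		return true;
	if (min_gcd)		/* floor advertised: enforce it */
		return gcd_in_use >= min_gcd;
	return !different;	/* legacy rule: all intervals equal */
}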
 int cfg80211_check_combinations(struct wiphy *wiphy,
-				const int num_different_channels,
-				const u8 radar_detect,
-				const int iftype_num[NUM_NL80211_IFTYPES])
+				struct iface_combination_params *params)
 {
 	int err, num = 0;

-	err = cfg80211_iter_combinations(wiphy, num_different_channels,
-					 radar_detect, iftype_num,
+	err = cfg80211_iter_combinations(wiphy, params,
 					 cfg80211_iter_sum_ifcombs, &num);
 	if (err)
 		return err;
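With the argument list folded into a struct, a caller now fills in struct iface_combination_params and hands it over; the fields below are exactly the ones this diff reads (the wrapper itself is a hedged sketch, not code from the commit):

/* Sketch: ask cfg80211 whether one AP on one channel, beaconing at
 * beacon_int, fits the device's advertised interface combinations.
 */
static int check_one_ap(struct wiphy *wiphy, u32 beacon_int)
{
	struct iface_combination_params params = {
		.num_different_channels = 1,
		.new_beacon_int = beacon_int,
	};

	params.iftype_num[NL80211_IFTYPE_AP] = 1;

	return cfg80211_check_combinations(wiphy, &params);
}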