The following commit has been merged in the master branch:

commit c0b458a9463bd6be165374a8e9e3235800ee132e
Merge: 859a59352e926315b6384c5fd895b00a30659a12 b5dbc28762fd3fd40ba76303be0c7f707826f982
Author: David S. Miller <davem@davemloft.net>
Date:   Sun Apr 1 19:49:34 2018 -0400
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Minor conflicts in drivers/net/ethernet/mellanox/mlx5/core/en_rep.c; we had some overlapping changes (an illustrative sketch of the resolved lines follows the list):
1) In 'net' MLX5E_PARAMS_LOG_{SQ,RQ}_SIZE --> MLX5E_REP_PARAMS_LOG_{SQ,RQ}_SIZE
2) In 'net-next' params->log_rq_size is renamed to be params->log_rq_mtu_frames.
3) In 'net-next' params->hard_mtu is added.
Signed-off-by: David S. Miller <davem@davemloft.net>
diff --combined MAINTAINERS index 22ef8d64fa59,6e950b8b4a41..cbffcd4b1320 --- a/MAINTAINERS +++ b/MAINTAINERS @@@ -1060,41 -1060,42 +1060,42 @@@ ARM POR M: Russell King linux@armlinux.org.uk L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) W: http://www.armlinux.org.uk/ - S: Maintained + S: Odd Fixes T: git git://git.armlinux.org.uk/~rmk/linux-arm.git F: arch/arm/ + X: arch/arm/boot/dts/
ARM PRIMECELL AACI PL041 DRIVER M: Russell King linux@armlinux.org.uk - S: Maintained + S: Odd Fixes F: sound/arm/aaci.*
ARM PRIMECELL BUS SUPPORT M: Russell King linux@armlinux.org.uk - S: Maintained + S: Odd Fixes F: drivers/amba/ F: include/linux/amba/bus.h
ARM PRIMECELL CLCD PL110 DRIVER M: Russell King linux@armlinux.org.uk - S: Maintained + S: Odd Fixes F: drivers/video/fbdev/amba-clcd.*
ARM PRIMECELL KMI PL050 DRIVER M: Russell King linux@armlinux.org.uk - S: Maintained + S: Odd Fixes F: drivers/input/serio/ambakmi.* F: include/linux/amba/kmi.h
ARM PRIMECELL MMCI PL180/1 DRIVER M: Russell King linux@armlinux.org.uk - S: Maintained + S: Odd Fixes F: drivers/mmc/host/mmci.* F: include/linux/amba/mmci.h
ARM PRIMECELL UART PL010 AND PL011 DRIVERS M: Russell King linux@armlinux.org.uk - S: Maintained + S: Odd Fixes F: drivers/tty/serial/amba-pl01*.c F: include/linux/amba/serial.h
@@@ -1152,7 -1153,7 +1153,7 @@@ S: Maintaine F: drivers/clk/sunxi/
ARM/Allwinner sunXi SoC support - M: Maxime Ripard maxime.ripard@free-electrons.com + M: Maxime Ripard maxime.ripard@bootlin.com M: Chen-Yu Tsai wens@csie.org L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) S: Maintained @@@ -4626,7 -4627,7 +4627,7 @@@ F: include/uapi/drm/drm F: include/linux/vga*
DRM DRIVERS FOR ALLWINNER A10 - M: Maxime Ripard maxime.ripard@free-electrons.com + M: Maxime Ripard maxime.ripard@bootlin.com L: dri-devel@lists.freedesktop.org S: Supported F: drivers/gpu/drm/sun4i/ @@@ -5637,7 -5638,7 +5638,7 @@@ S: Maintaine F: drivers/dma/fsldma.*
FREESCALE eTSEC ETHERNET DRIVER (GIANFAR) -M: Claudiu Manoil claudiu.manoil@freescale.com +M: Claudiu Manoil claudiu.manoil@nxp.com L: netdev@vger.kernel.org S: Maintained F: drivers/net/ethernet/freescale/gianfar* @@@ -7063,7 -7064,6 +7064,7 @@@ F: Documentation/networking/ixgbe.tx F: Documentation/networking/ixgbevf.txt F: Documentation/networking/i40e.txt F: Documentation/networking/i40evf.txt +F: Documentation/networking/ice.txt F: drivers/net/ethernet/intel/ F: drivers/net/ethernet/intel/*/ F: include/linux/avf/virtchnl.h @@@ -8435,7 -8435,7 +8436,7 @@@ S: Orpha F: drivers/net/wireless/marvell/libertas/
MARVELL MACCHIATOBIN SUPPORT - M: Russell King rmk@armlinux.org.uk + M: Russell King linux@armlinux.org.uk L: linux-arm-kernel@lists.infradead.org S: Maintained F: arch/arm64/boot/dts/marvell/armada-8040-mcbin.dts @@@ -8448,7 -8448,7 +8449,7 @@@ F: drivers/net/ethernet/marvell/mv643xx F: include/linux/mv643xx.h
MARVELL MV88X3310 PHY DRIVER - M: Russell King rmk@armlinux.org.uk + M: Russell King linux@armlinux.org.uk L: netdev@vger.kernel.org S: Maintained F: drivers/net/phy/marvell10g.c @@@ -8597,15 -8597,6 +8598,15 @@@ S: Maintaine F: Documentation/ABI/testing/sysfs-bus-iio-potentiometer-mcp4531 F: drivers/iio/potentiometer/mcp4531.c
+MCR20A IEEE-802.15.4 RADIO DRIVER +M: Xue Liu liuxuenetmail@gmail.com +L: linux-wpan@vger.kernel.org +W: https://github.com/xueliu/mcr20a-linux +S: Maintained +F: drivers/net/ieee802154/mcr20a.c +F: drivers/net/ieee802154/mcr20a.h +F: Documentation/devicetree/bindings/net/ieee802154/mcr20a.txt + MEASUREMENT COMPUTING CIO-DAC IIO DRIVER M: William Breathitt Gray vilhelm.gray@gmail.com L: linux-iio@vger.kernel.org @@@ -9162,13 -9153,6 +9163,13 @@@ F: drivers/net/dsa/microchip/ F: include/linux/platform_data/microchip-ksz.h F: Documentation/devicetree/bindings/net/dsa/ksz.txt
+MICROCHIP LAN743X ETHERNET DRIVER +M: Bryan Whitehead bryan.whitehead@microchip.com +M: Microchip Linux Driver Support UNGLinuxDriver@microchip.com +L: netdev@vger.kernel.org +S: Maintained +F: drivers/net/ethernet/microchip/lan743x_* + MICROCHIP USB251XB DRIVER M: Richard Leitner richard.leitner@skidata.com L: linux-usb@vger.kernel.org @@@ -12892,6 -12876,19 +12893,19 @@@ S: Maintaine F: drivers/net/ethernet/socionext/netsec.c F: Documentation/devicetree/bindings/net/socionext-netsec.txt
+ SOLIDRUN CLEARFOG SUPPORT + M: Russell King linux@armlinux.org.uk + S: Maintained + F: arch/arm/boot/dts/armada-388-clearfog* + F: arch/arm/boot/dts/armada-38x-solidrun-* + + SOLIDRUN CUBOX-I/HUMMINGBOARD SUPPORT + M: Russell King linux@armlinux.org.uk + S: Maintained + F: arch/arm/boot/dts/imx6*-cubox-i* + F: arch/arm/boot/dts/imx6*-hummingboard* + F: arch/arm/boot/dts/imx6*-sr-* + SONIC NETWORK DRIVER M: Thomas Bogendoerfer tsbogend@alpha.franken.de L: netdev@vger.kernel.org @@@ -13661,7 -13658,8 +13675,8 @@@ S: Supporte F: drivers/i2c/busses/i2c-tegra.c
TEGRA IOMMU DRIVERS - M: Hiroshi Doyu hdoyu@nvidia.com + M: Thierry Reding thierry.reding@gmail.com + L: linux-tegra@vger.kernel.org S: Supported F: drivers/iommu/tegra*
@@@ -14940,7 -14938,7 +14955,7 @@@ F: drivers/input/mouse/vmmouse. F: drivers/input/mouse/vmmouse.h
VMWARE VMXNET3 ETHERNET DRIVER -M: Shrikrishna Khare skhare@vmware.com +M: Ronak Doshi doshir@vmware.com M: "VMware, Inc." pv-drivers@vmware.com L: netdev@vger.kernel.org S: Maintained diff --combined drivers/infiniband/hw/mlx5/main.c index 390e4375647e,7f8bda3a2005..071fd9a7b919 --- a/drivers/infiniband/hw/mlx5/main.c +++ b/drivers/infiniband/hw/mlx5/main.c @@@ -57,9 -57,7 +57,9 @@@ #include <linux/in.h> #include <linux/etherdevice.h> #include "mlx5_ib.h" +#include "ib_rep.h" #include "cmd.h" +#include <linux/mlx5/fs_helpers.h>
#define DRIVER_NAME "mlx5_ib" #define DRIVER_VERSION "5.0-0" @@@ -132,7 -130,7 +132,7 @@@ static int get_port_state(struct ib_dev int ret;
memset(&attr, 0, sizeof(attr)); - ret = mlx5_ib_query_port(ibdev, port_num, &attr); + ret = ibdev->query_port(ibdev, port_num, &attr); if (!ret) *state = attr.state; return ret; @@@ -156,19 -154,10 +156,19 @@@ static int mlx5_netdev_event(struct not case NETDEV_REGISTER: case NETDEV_UNREGISTER: write_lock(&roce->netdev_lock); - - if (ndev->dev.parent == &mdev->pdev->dev) - roce->netdev = (event == NETDEV_UNREGISTER) ? + if (ibdev->rep) { + struct mlx5_eswitch *esw = ibdev->mdev->priv.eswitch; + struct net_device *rep_ndev; + + rep_ndev = mlx5_ib_get_rep_netdev(esw, + ibdev->rep->vport); + if (rep_ndev == ndev) + roce->netdev = (event == NETDEV_UNREGISTER) ? NULL : ndev; + } else if (ndev->dev.parent == &ibdev->mdev->pdev->dev) { + roce->netdev = (event == NETDEV_UNREGISTER) ? + NULL : ndev; + } write_unlock(&roce->netdev_lock); break;
@@@ -1283,22 -1272,6 +1283,22 @@@ int mlx5_ib_query_port(struct ib_devic return ret; }
+static int mlx5_ib_rep_query_port(struct ib_device *ibdev, u8 port, + struct ib_port_attr *props) +{ + int ret; + + /* Only link layer == ethernet is valid for representors */ + ret = mlx5_query_port_roce(ibdev, port, props); + if (ret || !props) + return ret; + + /* We don't support GIDS */ + props->gid_tbl_len = 0; + + return ret; +} + static int mlx5_ib_query_gid(struct ib_device *ibdev, u8 port, int index, union ib_gid *gid) { @@@ -2317,9 -2290,11 +2317,9 @@@ static void set_tos(void *outer_c, voi offsetof(typeof(filter), field) -\ sizeof(filter.field))
-#define IPV4_VERSION 4 -#define IPV6_VERSION 6 static int parse_flow_attr(struct mlx5_core_dev *mdev, u32 *match_c, u32 *match_v, const union ib_flow_spec *ib_spec, - u32 *tag_id, bool *is_drop) + struct mlx5_flow_act *action) { void *misc_params_c = MLX5_ADDR_OF(fte_match_param, match_c, misc_parameters); @@@ -2402,7 -2377,7 +2402,7 @@@ MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_version, 0xf); MLX5_SET(fte_match_set_lyr_2_4, headers_v, - ip_version, IPV4_VERSION); + ip_version, MLX5_FS_IPV4_VERSION); } else { MLX5_SET(fte_match_set_lyr_2_4, headers_c, ethertype, 0xffff); @@@ -2441,7 -2416,7 +2441,7 @@@ MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_version, 0xf); MLX5_SET(fte_match_set_lyr_2_4, headers_v, - ip_version, IPV6_VERSION); + ip_version, MLX5_FS_IPV6_VERSION); } else { MLX5_SET(fte_match_set_lyr_2_4, headers_c, ethertype, 0xffff); @@@ -2537,14 -2512,13 +2537,14 @@@ if (ib_spec->flow_tag.tag_id >= BIT(24)) return -EINVAL;
- *tag_id = ib_spec->flow_tag.tag_id; + action->flow_tag = ib_spec->flow_tag.tag_id; + action->has_flow_tag = true; break; case IB_FLOW_SPEC_ACTION_DROP: if (FIELDS_NOT_SUPPORTED(ib_spec->drop, LAST_DROP_FIELD)) return -EOPNOTSUPP; - *is_drop = true; + action->action |= MLX5_FLOW_CONTEXT_ACTION_DROP; break; default: return -EINVAL; @@@ -2661,7 -2635,7 +2661,7 @@@ static int mlx5_ib_destroy_flow(struct ibflow); struct mlx5_ib_flow_handler *iter, *tmp;
- mutex_lock(&dev->flow_db.lock); + mutex_lock(&dev->flow_db->lock);
list_for_each_entry_safe(iter, tmp, &handler->list, list) { mlx5_del_flow_rules(iter->rule); @@@ -2672,7 -2646,7 +2672,7 @@@
mlx5_del_flow_rules(handler->rule); put_flow_table(dev, handler->prio, true); - mutex_unlock(&dev->flow_db.lock); + mutex_unlock(&dev->flow_db->lock);
kfree(handler);
@@@ -2721,7 -2695,7 +2721,7 @@@ static struct mlx5_ib_flow_prio *get_fl MLX5_FLOW_NAMESPACE_BYPASS); num_entries = MLX5_FS_MAX_ENTRIES; num_groups = MLX5_FS_MAX_TYPES; - prio = &dev->flow_db.prios[priority]; + prio = &dev->flow_db->prios[priority]; } else if (flow_attr->type == IB_FLOW_ATTR_ALL_DEFAULT || flow_attr->type == IB_FLOW_ATTR_MC_DEFAULT) { ns = mlx5_get_flow_namespace(dev->mdev, @@@ -2729,7 -2703,7 +2729,7 @@@ build_leftovers_ft_param(&priority, &num_entries, &num_groups); - prio = &dev->flow_db.prios[MLX5_IB_FLOW_LEFTOVERS_PRIO]; + prio = &dev->flow_db->prios[MLX5_IB_FLOW_LEFTOVERS_PRIO]; } else if (flow_attr->type == IB_FLOW_ATTR_SNIFFER) { if (!MLX5_CAP_FLOWTABLE(dev->mdev, allow_sniffer_and_nic_rx_shared_tir)) @@@ -2739,7 -2713,7 +2739,7 @@@ MLX5_FLOW_NAMESPACE_SNIFFER_RX : MLX5_FLOW_NAMESPACE_SNIFFER_TX);
- prio = &dev->flow_db.sniffer[ft_type]; + prio = &dev->flow_db->sniffer[ft_type]; priority = 0; num_entries = 1; num_groups = 1; @@@ -2797,11 -2771,13 +2797,11 @@@ static struct mlx5_ib_flow_handler *_cr { struct mlx5_flow_table *ft = ft_prio->flow_table; struct mlx5_ib_flow_handler *handler; - struct mlx5_flow_act flow_act = {0}; + struct mlx5_flow_act flow_act = {.flow_tag = MLX5_FS_DEFAULT_FLOW_TAG}; struct mlx5_flow_spec *spec; struct mlx5_flow_destination *rule_dst = dst; const void *ib_flow = (const void *)flow_attr + sizeof(*flow_attr); unsigned int spec_index; - u32 flow_tag = MLX5_FS_DEFAULT_FLOW_TAG; - bool is_drop = false; int err = 0; int dest_num = 1;
@@@ -2820,7 -2796,7 +2820,7 @@@ for (spec_index = 0; spec_index < flow_attr->num_of_specs; spec_index++) { err = parse_flow_attr(dev->mdev, spec->match_criteria, spec->match_value, - ib_flow, &flow_tag, &is_drop); + ib_flow, &flow_act); if (err < 0) goto free;
@@@ -2830,20 -2806,9 +2830,20 @@@ if (!flow_is_multicast_only(flow_attr)) set_underlay_qp(dev, spec, underlay_qpn);
+ if (dev->rep) { + void *misc; + + misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, + misc_parameters); + MLX5_SET(fte_match_set_misc, misc, source_port, + dev->rep->vport); + misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, + misc_parameters); + MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port); + } + spec->match_criteria_enable = get_match_criteria_enable(spec->match_criteria); - if (is_drop) { - flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP; + if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_DROP) { rule_dst = NULL; dest_num = 0; } else { @@@ -2851,14 -2816,15 +2851,14 @@@ MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO; }
- if (flow_tag != MLX5_FS_DEFAULT_FLOW_TAG && + if (flow_act.has_flow_tag && (flow_attr->type == IB_FLOW_ATTR_ALL_DEFAULT || flow_attr->type == IB_FLOW_ATTR_MC_DEFAULT)) { mlx5_ib_warn(dev, "Flow tag %u and attribute type %x isn't allowed in leftovers\n", - flow_tag, flow_attr->type); + flow_act.flow_tag, flow_attr->type); err = -EINVAL; goto free; } - flow_act.flow_tag = flow_tag; handler->rule = mlx5_add_flow_rules(ft, spec, &flow_act, rule_dst, dest_num); @@@ -3037,7 -3003,7 +3037,7 @@@ static struct ib_flow *mlx5_ib_create_f if (!dst) return ERR_PTR(-ENOMEM);
- mutex_lock(&dev->flow_db.lock); + mutex_lock(&dev->flow_db->lock);
ft_prio = get_flow_table(dev, flow_attr, MLX5_IB_FT_RX); if (IS_ERR(ft_prio)) { @@@ -3086,7 -3052,7 +3086,7 @@@ goto destroy_ft; }
- mutex_unlock(&dev->flow_db.lock); + mutex_unlock(&dev->flow_db->lock); kfree(dst);
return &handler->ibflow; @@@ -3096,7 -3062,7 +3096,7 @@@ destroy_ft if (ft_prio_tx) put_flow_table(dev, ft_prio_tx, false); unlock: - mutex_unlock(&dev->flow_db.lock); + mutex_unlock(&dev->flow_db->lock); kfree(dst); kfree(handler); return ERR_PTR(err); @@@ -3482,9 -3448,12 +3482,12 @@@ static void destroy_umrc_res(struct mlx if (err) mlx5_ib_warn(dev, "mr cache cleanup failed\n");
- mlx5_ib_destroy_qp(dev->umrc.qp); - ib_free_cq(dev->umrc.cq); - ib_dealloc_pd(dev->umrc.pd); + if (dev->umrc.qp) + mlx5_ib_destroy_qp(dev->umrc.qp); + if (dev->umrc.cq) + ib_free_cq(dev->umrc.cq); + if (dev->umrc.pd) + ib_dealloc_pd(dev->umrc.pd); }
enum { @@@ -3586,12 -3555,15 +3589,15 @@@ static int create_umr_res(struct mlx5_i
error_4: mlx5_ib_destroy_qp(qp); + dev->umrc.qp = NULL;
error_3: ib_free_cq(cq); + dev->umrc.cq = NULL;
error_2: ib_dealloc_pd(pd); + dev->umrc.pd = NULL;
error_0: kfree(attr); @@@ -3803,25 -3775,6 +3809,25 @@@ static int mlx5_port_immutable(struct i return 0; }
+static int mlx5_port_rep_immutable(struct ib_device *ibdev, u8 port_num, + struct ib_port_immutable *immutable) +{ + struct ib_port_attr attr; + int err; + + immutable->core_cap_flags = RDMA_CORE_PORT_RAW_PACKET; + + err = ib_query_port(ibdev, port_num, &attr); + if (err) + return err; + + immutable->pkey_tbl_len = attr.pkey_tbl_len; + immutable->gid_tbl_len = attr.gid_tbl_len; + immutable->core_cap_flags = RDMA_CORE_PORT_RAW_PACKET; + + return 0; +} + static void get_dev_fw_str(struct ib_device *ibdev, char *str) { struct mlx5_ib_dev *dev = @@@ -3852,7 -3805,7 +3858,7 @@@ static int mlx5_eth_lag_init(struct mlx goto err_destroy_vport_lag; }
- dev->flow_db.lag_demux_ft = ft; + dev->flow_db->lag_demux_ft = ft; return 0;
err_destroy_vport_lag: @@@ -3864,9 -3817,9 +3870,9 @@@ static void mlx5_eth_lag_cleanup(struc { struct mlx5_core_dev *mdev = dev->mdev;
- if (dev->flow_db.lag_demux_ft) { - mlx5_destroy_flow_table(dev->flow_db.lag_demux_ft); - dev->flow_db.lag_demux_ft = NULL; + if (dev->flow_db->lag_demux_ft) { + mlx5_destroy_flow_table(dev->flow_db->lag_demux_ft); + dev->flow_db->lag_demux_ft = NULL;
mlx5_cmd_destroy_vport_lag(mdev); } @@@ -3898,10 -3851,14 +3904,10 @@@ static int mlx5_enable_eth(struct mlx5_ { int err;
- err = mlx5_add_netdev_notifier(dev, port_num); - if (err) - return err; - if (MLX5_CAP_GEN(dev->mdev, roce)) { err = mlx5_nic_vport_enable_roce(dev->mdev); if (err) - goto err_unregister_netdevice_notifier; + return err; }
err = mlx5_eth_lag_init(dev); @@@ -3914,6 -3871,8 +3920,6 @@@ err_disable_roce if (MLX5_CAP_GEN(dev->mdev, roce)) mlx5_nic_vport_disable_roce(dev->mdev);
-err_unregister_netdevice_notifier: - mlx5_remove_netdev_notifier(dev, port_num); return err; }
@@@ -4547,7 -4506,7 +4553,7 @@@ static void mlx5_ib_cleanup_multiport_m mlx5_nic_vport_disable_roce(dev->mdev); }
-static void mlx5_ib_stage_init_cleanup(struct mlx5_ib_dev *dev) +void mlx5_ib_stage_init_cleanup(struct mlx5_ib_dev *dev) { mlx5_ib_cleanup_multiport_master(dev); #ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING @@@ -4556,7 -4515,7 +4562,7 @@@ kfree(dev->port); }
-static int mlx5_ib_stage_init_init(struct mlx5_ib_dev *dev) +int mlx5_ib_stage_init_init(struct mlx5_ib_dev *dev) { struct mlx5_core_dev *mdev = dev->mdev; const char *name; @@@ -4578,6 -4537,8 +4584,6 @@@ goto err_free_port;
if (!mlx5_core_mp_enabled(mdev)) { - int i; - for (i = 1; i <= dev->num_ports; i++) { err = get_port_caps(dev, i); if (err) @@@ -4606,6 -4567,7 +4612,6 @@@ dev->mdev->priv.eq_table.num_comp_vectors; dev->ib_dev.dev.parent = &mdev->pdev->dev;
- mutex_init(&dev->flow_db.lock); mutex_init(&dev->cap_mask_mutex); INIT_LIST_HEAD(&dev->qp_list); spin_lock_init(&dev->reset_flow_resource_lock); @@@ -4626,38 -4588,7 +4632,38 @@@ err_free_port return -ENOMEM; }
-static int mlx5_ib_stage_caps_init(struct mlx5_ib_dev *dev) +static int mlx5_ib_stage_flow_db_init(struct mlx5_ib_dev *dev) +{ + dev->flow_db = kzalloc(sizeof(*dev->flow_db), GFP_KERNEL); + + if (!dev->flow_db) + return -ENOMEM; + + mutex_init(&dev->flow_db->lock); + + return 0; +} + +int mlx5_ib_stage_rep_flow_db_init(struct mlx5_ib_dev *dev) +{ + struct mlx5_ib_dev *nic_dev; + + nic_dev = mlx5_ib_get_uplink_ibdev(dev->mdev->priv.eswitch); + + if (!nic_dev) + return -EINVAL; + + dev->flow_db = nic_dev->flow_db; + + return 0; +} + +static void mlx5_ib_stage_flow_db_cleanup(struct mlx5_ib_dev *dev) +{ + kfree(dev->flow_db); +} + +int mlx5_ib_stage_caps_init(struct mlx5_ib_dev *dev) { struct mlx5_core_dev *mdev = dev->mdev; int err; @@@ -4698,6 -4629,7 +4704,6 @@@ (1ull << IB_USER_VERBS_EX_CMD_MODIFY_CQ);
dev->ib_dev.query_device = mlx5_ib_query_device; - dev->ib_dev.query_port = mlx5_ib_query_port; dev->ib_dev.get_link_layer = mlx5_ib_port_link_layer; dev->ib_dev.query_gid = mlx5_ib_query_gid; dev->ib_dev.add_gid = mlx5_ib_add_gid; @@@ -4740,6 -4672,7 +4746,6 @@@ dev->ib_dev.alloc_mr = mlx5_ib_alloc_mr; dev->ib_dev.map_mr_sg = mlx5_ib_map_mr_sg; dev->ib_dev.check_mr_status = mlx5_ib_check_mr_status; - dev->ib_dev.get_port_immutable = mlx5_port_immutable; dev->ib_dev.get_dev_fw_str = get_dev_fw_str; dev->ib_dev.get_vector_affinity = mlx5_ib_get_vector_affinity; if (MLX5_CAP_GEN(mdev, ipoib_enhanced_offloads)) @@@ -4790,80 -4723,6 +4796,80 @@@ return 0; }
+static int mlx5_ib_stage_non_default_cb(struct mlx5_ib_dev *dev) +{ + dev->ib_dev.get_port_immutable = mlx5_port_immutable; + dev->ib_dev.query_port = mlx5_ib_query_port; + + return 0; +} + +int mlx5_ib_stage_rep_non_default_cb(struct mlx5_ib_dev *dev) +{ + dev->ib_dev.get_port_immutable = mlx5_port_rep_immutable; + dev->ib_dev.query_port = mlx5_ib_rep_query_port; + + return 0; +} + +static int mlx5_ib_stage_common_roce_init(struct mlx5_ib_dev *dev, + u8 port_num) +{ + int i; + + for (i = 0; i < dev->num_ports; i++) { + dev->roce[i].dev = dev; + dev->roce[i].native_port_num = i + 1; + dev->roce[i].last_port_state = IB_PORT_DOWN; + } + + dev->ib_dev.get_netdev = mlx5_ib_get_netdev; + dev->ib_dev.create_wq = mlx5_ib_create_wq; + dev->ib_dev.modify_wq = mlx5_ib_modify_wq; + dev->ib_dev.destroy_wq = mlx5_ib_destroy_wq; + dev->ib_dev.create_rwq_ind_table = mlx5_ib_create_rwq_ind_table; + dev->ib_dev.destroy_rwq_ind_table = mlx5_ib_destroy_rwq_ind_table; + + dev->ib_dev.uverbs_ex_cmd_mask |= + (1ull << IB_USER_VERBS_EX_CMD_CREATE_WQ) | + (1ull << IB_USER_VERBS_EX_CMD_MODIFY_WQ) | + (1ull << IB_USER_VERBS_EX_CMD_DESTROY_WQ) | + (1ull << IB_USER_VERBS_EX_CMD_CREATE_RWQ_IND_TBL) | + (1ull << IB_USER_VERBS_EX_CMD_DESTROY_RWQ_IND_TBL); + + return mlx5_add_netdev_notifier(dev, port_num); +} + +static void mlx5_ib_stage_common_roce_cleanup(struct mlx5_ib_dev *dev) +{ + u8 port_num = mlx5_core_native_port_num(dev->mdev) - 1; + + mlx5_remove_netdev_notifier(dev, port_num); +} + +int mlx5_ib_stage_rep_roce_init(struct mlx5_ib_dev *dev) +{ + struct mlx5_core_dev *mdev = dev->mdev; + enum rdma_link_layer ll; + int port_type_cap; + int err = 0; + u8 port_num; + + port_num = mlx5_core_native_port_num(dev->mdev) - 1; + port_type_cap = MLX5_CAP_GEN(mdev, port_type); + ll = mlx5_port_type_cap_to_rdma_ll(port_type_cap); + + if (ll == IB_LINK_LAYER_ETHERNET) + err = mlx5_ib_stage_common_roce_init(dev, port_num); + + return err; +} + +void mlx5_ib_stage_rep_roce_cleanup(struct mlx5_ib_dev *dev) +{ + mlx5_ib_stage_common_roce_cleanup(dev); +} + static int mlx5_ib_stage_roce_init(struct mlx5_ib_dev *dev) { struct mlx5_core_dev *mdev = dev->mdev; @@@ -4871,26 -4730,37 +4877,26 @@@ int port_type_cap; u8 port_num; int err; - int i;
port_num = mlx5_core_native_port_num(dev->mdev) - 1; port_type_cap = MLX5_CAP_GEN(mdev, port_type); ll = mlx5_port_type_cap_to_rdma_ll(port_type_cap);
if (ll == IB_LINK_LAYER_ETHERNET) { - for (i = 0; i < dev->num_ports; i++) { - dev->roce[i].dev = dev; - dev->roce[i].native_port_num = i + 1; - dev->roce[i].last_port_state = IB_PORT_DOWN; - } + err = mlx5_ib_stage_common_roce_init(dev, port_num); + if (err) + return err;
- dev->ib_dev.get_netdev = mlx5_ib_get_netdev; - dev->ib_dev.create_wq = mlx5_ib_create_wq; - dev->ib_dev.modify_wq = mlx5_ib_modify_wq; - dev->ib_dev.destroy_wq = mlx5_ib_destroy_wq; - dev->ib_dev.create_rwq_ind_table = mlx5_ib_create_rwq_ind_table; - dev->ib_dev.destroy_rwq_ind_table = mlx5_ib_destroy_rwq_ind_table; - dev->ib_dev.uverbs_ex_cmd_mask |= - (1ull << IB_USER_VERBS_EX_CMD_CREATE_WQ) | - (1ull << IB_USER_VERBS_EX_CMD_MODIFY_WQ) | - (1ull << IB_USER_VERBS_EX_CMD_DESTROY_WQ) | - (1ull << IB_USER_VERBS_EX_CMD_CREATE_RWQ_IND_TBL) | - (1ull << IB_USER_VERBS_EX_CMD_DESTROY_RWQ_IND_TBL); err = mlx5_enable_eth(dev, port_num); if (err) - return err; + goto cleanup; }
return 0; +cleanup: + mlx5_ib_stage_common_roce_cleanup(dev); + + return err; }
static void mlx5_ib_stage_roce_cleanup(struct mlx5_ib_dev *dev) @@@ -4906,16 -4776,16 +4912,16 @@@
if (ll == IB_LINK_LAYER_ETHERNET) { mlx5_disable_eth(dev); - mlx5_remove_netdev_notifier(dev, port_num); + mlx5_ib_stage_common_roce_cleanup(dev); } }
-static int mlx5_ib_stage_dev_res_init(struct mlx5_ib_dev *dev) +int mlx5_ib_stage_dev_res_init(struct mlx5_ib_dev *dev) { return create_dev_resources(&dev->devr); }
-static void mlx5_ib_stage_dev_res_cleanup(struct mlx5_ib_dev *dev) +void mlx5_ib_stage_dev_res_cleanup(struct mlx5_ib_dev *dev) { destroy_dev_resources(&dev->devr); } @@@ -4927,7 -4797,7 +4933,7 @@@ static int mlx5_ib_stage_odp_init(struc return mlx5_ib_odp_init_one(dev); }
-static int mlx5_ib_stage_counters_init(struct mlx5_ib_dev *dev) +int mlx5_ib_stage_counters_init(struct mlx5_ib_dev *dev) { if (MLX5_CAP_GEN(dev->mdev, max_qp_cnt)) { dev->ib_dev.get_hw_stats = mlx5_ib_get_hw_stats; @@@ -4939,7 -4809,7 +4945,7 @@@ return 0; }
-static void mlx5_ib_stage_counters_cleanup(struct mlx5_ib_dev *dev) +void mlx5_ib_stage_counters_cleanup(struct mlx5_ib_dev *dev) { if (MLX5_CAP_GEN(dev->mdev, max_qp_cnt)) mlx5_ib_dealloc_counters(dev); @@@ -4970,7 -4840,7 +4976,7 @@@ static void mlx5_ib_stage_uar_cleanup(s mlx5_put_uars_page(dev->mdev, dev->mdev->priv.uar); }
-static int mlx5_ib_stage_bfrag_init(struct mlx5_ib_dev *dev) +int mlx5_ib_stage_bfrag_init(struct mlx5_ib_dev *dev) { int err;
@@@ -4985,28 -4855,28 +4991,28 @@@ return err; }
-static void mlx5_ib_stage_bfrag_cleanup(struct mlx5_ib_dev *dev) +void mlx5_ib_stage_bfrag_cleanup(struct mlx5_ib_dev *dev) { mlx5_free_bfreg(dev->mdev, &dev->fp_bfreg); mlx5_free_bfreg(dev->mdev, &dev->bfreg); }
-static int mlx5_ib_stage_ib_reg_init(struct mlx5_ib_dev *dev) +int mlx5_ib_stage_ib_reg_init(struct mlx5_ib_dev *dev) { return ib_register_device(&dev->ib_dev, NULL); }
-static void mlx5_ib_stage_pre_ib_reg_umr_cleanup(struct mlx5_ib_dev *dev) +void mlx5_ib_stage_pre_ib_reg_umr_cleanup(struct mlx5_ib_dev *dev) { destroy_umrc_res(dev); }
-static void mlx5_ib_stage_ib_reg_cleanup(struct mlx5_ib_dev *dev) +void mlx5_ib_stage_ib_reg_cleanup(struct mlx5_ib_dev *dev) { ib_unregister_device(&dev->ib_dev); }
-static int mlx5_ib_stage_post_ib_reg_umr_init(struct mlx5_ib_dev *dev) +int mlx5_ib_stage_post_ib_reg_umr_init(struct mlx5_ib_dev *dev) { return create_umr_res(dev); } @@@ -5023,7 -4893,7 +5029,7 @@@ static void mlx5_ib_stage_delay_drop_cl cancel_delay_drop(dev); }
-static int mlx5_ib_stage_class_attr_init(struct mlx5_ib_dev *dev) +int mlx5_ib_stage_class_attr_init(struct mlx5_ib_dev *dev) { int err; int i; @@@ -5038,21 -4908,9 +5044,21 @@@ return 0; }
-static void __mlx5_ib_remove(struct mlx5_ib_dev *dev, - const struct mlx5_ib_profile *profile, - int stage) +static int mlx5_ib_stage_rep_reg_init(struct mlx5_ib_dev *dev) +{ + mlx5_ib_register_vport_reps(dev); + + return 0; +} + +static void mlx5_ib_stage_rep_reg_cleanup(struct mlx5_ib_dev *dev) +{ + mlx5_ib_unregister_vport_reps(dev); +} + +void __mlx5_ib_remove(struct mlx5_ib_dev *dev, + const struct mlx5_ib_profile *profile, + int stage) { /* Number of stages to cleanup */ while (stage) { @@@ -5066,14 -4924,23 +5072,14 @@@
static void *mlx5_ib_add_slave_port(struct mlx5_core_dev *mdev, u8 port_num);
-static void *__mlx5_ib_add(struct mlx5_core_dev *mdev, - const struct mlx5_ib_profile *profile) +void *__mlx5_ib_add(struct mlx5_ib_dev *dev, + const struct mlx5_ib_profile *profile) { - struct mlx5_ib_dev *dev; int err; int i;
printk_once(KERN_INFO "%s", mlx5_version);
- dev = (struct mlx5_ib_dev *)ib_alloc_device(sizeof(*dev)); - if (!dev) - return NULL; - - dev->mdev = mdev; - dev->num_ports = max(MLX5_CAP_GEN(mdev, num_ports), - MLX5_CAP_GEN(mdev, num_vhca_ports)); - for (i = 0; i < MLX5_IB_STAGE_MAX; i++) { if (profile->stage[i].init) { err = profile->stage[i].init(dev); @@@ -5097,15 -4964,9 +5103,15 @@@ static const struct mlx5_ib_profile pf_ STAGE_CREATE(MLX5_IB_STAGE_INIT, mlx5_ib_stage_init_init, mlx5_ib_stage_init_cleanup), + STAGE_CREATE(MLX5_IB_STAGE_FLOW_DB, + mlx5_ib_stage_flow_db_init, + mlx5_ib_stage_flow_db_cleanup), STAGE_CREATE(MLX5_IB_STAGE_CAPS, mlx5_ib_stage_caps_init, NULL), + STAGE_CREATE(MLX5_IB_STAGE_NON_DEFAULT_CB, + mlx5_ib_stage_non_default_cb, + NULL), STAGE_CREATE(MLX5_IB_STAGE_ROCE, mlx5_ib_stage_roce_init, mlx5_ib_stage_roce_cleanup), @@@ -5144,51 -5005,6 +5150,51 @@@ NULL), };
+static const struct mlx5_ib_profile nic_rep_profile = { + STAGE_CREATE(MLX5_IB_STAGE_INIT, + mlx5_ib_stage_init_init, + mlx5_ib_stage_init_cleanup), + STAGE_CREATE(MLX5_IB_STAGE_FLOW_DB, + mlx5_ib_stage_flow_db_init, + mlx5_ib_stage_flow_db_cleanup), + STAGE_CREATE(MLX5_IB_STAGE_CAPS, + mlx5_ib_stage_caps_init, + NULL), + STAGE_CREATE(MLX5_IB_STAGE_NON_DEFAULT_CB, + mlx5_ib_stage_rep_non_default_cb, + NULL), + STAGE_CREATE(MLX5_IB_STAGE_ROCE, + mlx5_ib_stage_rep_roce_init, + mlx5_ib_stage_rep_roce_cleanup), + STAGE_CREATE(MLX5_IB_STAGE_DEVICE_RESOURCES, + mlx5_ib_stage_dev_res_init, + mlx5_ib_stage_dev_res_cleanup), + STAGE_CREATE(MLX5_IB_STAGE_COUNTERS, + mlx5_ib_stage_counters_init, + mlx5_ib_stage_counters_cleanup), + STAGE_CREATE(MLX5_IB_STAGE_UAR, + mlx5_ib_stage_uar_init, + mlx5_ib_stage_uar_cleanup), + STAGE_CREATE(MLX5_IB_STAGE_BFREG, + mlx5_ib_stage_bfrag_init, + mlx5_ib_stage_bfrag_cleanup), + STAGE_CREATE(MLX5_IB_STAGE_PRE_IB_REG_UMR, + NULL, + mlx5_ib_stage_pre_ib_reg_umr_cleanup), + STAGE_CREATE(MLX5_IB_STAGE_IB_REG, + mlx5_ib_stage_ib_reg_init, + mlx5_ib_stage_ib_reg_cleanup), + STAGE_CREATE(MLX5_IB_STAGE_POST_IB_REG_UMR, + mlx5_ib_stage_post_ib_reg_umr_init, + NULL), + STAGE_CREATE(MLX5_IB_STAGE_CLASS_ATTR, + mlx5_ib_stage_class_attr_init, + NULL), + STAGE_CREATE(MLX5_IB_STAGE_REP_REG, + mlx5_ib_stage_rep_reg_init, + mlx5_ib_stage_rep_reg_cleanup), +}; + static void *mlx5_ib_add_slave_port(struct mlx5_core_dev *mdev, u8 port_num) { struct mlx5_ib_multiport_info *mpi; @@@ -5234,11 -5050,8 +5240,11 @@@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev) { enum rdma_link_layer ll; + struct mlx5_ib_dev *dev; int port_type_cap;
+ printk_once(KERN_INFO "%s", mlx5_version); + port_type_cap = MLX5_CAP_GEN(mdev, port_type); ll = mlx5_port_type_cap_to_rdma_ll(port_type_cap);
@@@ -5248,22 -5061,7 +5254,22 @@@ return mlx5_ib_add_slave_port(mdev, port_num); }
- return __mlx5_ib_add(mdev, &pf_profile); + dev = (struct mlx5_ib_dev *)ib_alloc_device(sizeof(*dev)); + if (!dev) + return NULL; + + dev->mdev = mdev; + dev->num_ports = max(MLX5_CAP_GEN(mdev, num_ports), + MLX5_CAP_GEN(mdev, num_vhca_ports)); + + if (MLX5_VPORT_MANAGER(mdev) && + mlx5_ib_eswitch_mode(mdev->priv.eswitch) == SRIOV_OFFLOADS) { + dev->rep = mlx5_ib_vport_rep(mdev->priv.eswitch, 0); + + return __mlx5_ib_add(dev, &nic_rep_profile); + } + + return __mlx5_ib_add(dev, &pf_profile); }
static void mlx5_ib_remove(struct mlx5_core_dev *mdev, void *context) diff --combined drivers/infiniband/hw/mlx5/mr.c index 95a36e9ea552,3e0b3f0238d6..654bc31bc428 --- a/drivers/infiniband/hw/mlx5/mr.c +++ b/drivers/infiniband/hw/mlx5/mr.c @@@ -587,7 -587,7 +587,7 @@@ static void clean_keys(struct mlx5_ib_d
static void mlx5_mr_cache_debugfs_cleanup(struct mlx5_ib_dev *dev) { - if (!mlx5_debugfs_root) + if (!mlx5_debugfs_root || dev->rep) return;
debugfs_remove_recursive(dev->cache.root); @@@ -600,7 -600,7 +600,7 @@@ static int mlx5_mr_cache_debugfs_init(s struct mlx5_cache_ent *ent; int i;
- if (!mlx5_debugfs_root) + if (!mlx5_debugfs_root || dev->rep) return 0;
cache->root = debugfs_create_dir("mr_cache", dev->mdev->priv.dbg_root); @@@ -690,7 -690,6 +690,7 @@@ int mlx5_mr_cache_init(struct mlx5_ib_d MLX5_IB_UMR_OCTOWORD; ent->access_mode = MLX5_MKC_ACCESS_MODE_MTT; if ((dev->mdev->profile->mask & MLX5_PROF_MASK_MR_CACHE) && + !dev->rep && mlx5_core_is_pf(dev->mdev)) ent->limit = dev->mdev->profile->mr_cache[i].limit; else @@@ -740,6 -739,9 +740,9 @@@ int mlx5_mr_cache_cleanup(struct mlx5_i { int i;
+ if (!dev->cache.wq) + return 0; + dev->cache.stopped = 1; flush_workqueue(dev->cache.wq);
diff --combined drivers/infiniband/hw/qedr/main.c index eb32abb0099a,0ffb9b93e22d..f9a645c869ce --- a/drivers/infiniband/hw/qedr/main.c +++ b/drivers/infiniband/hw/qedr/main.c @@@ -90,8 -90,8 +90,8 @@@ static struct net_device *qedr_get_netd dev_hold(qdev->ndev);
/* The HW vendor's device driver must guarantee - * that this function returns NULL before the net device reaches - * NETDEV_UNREGISTER_FINAL state. + * that this function returns NULL before the net device has finished + * NETDEV_UNREGISTER state. */ return qdev->ndev; } @@@ -833,7 -833,8 +833,8 @@@ static struct qedr_dev *qedr_add(struc
dev->num_cnq = dev->ops->rdma_get_min_cnq_msix(cdev); if (!dev->num_cnq) { - DP_ERR(dev, "not enough CNQ resources.\n"); + DP_ERR(dev, "Failed. At least one CNQ is required.\n"); + rc = -ENOMEM; goto init_err; }
diff --combined drivers/infiniband/hw/qedr/verbs.c index 7d51ef47667f,419a158e8fca..f9c3cc71f5c0 --- a/drivers/infiniband/hw/qedr/verbs.c +++ b/drivers/infiniband/hw/qedr/verbs.c @@@ -1841,14 -1841,15 +1841,15 @@@ static void qedr_reset_qp_hwq_info(stru
static int qedr_update_qp_state(struct qedr_dev *dev, struct qedr_qp *qp, + enum qed_roce_qp_state cur_state, enum qed_roce_qp_state new_state) { int status = 0;
- if (new_state == qp->state) + if (new_state == cur_state) return 0;
- switch (qp->state) { + switch (cur_state) { case QED_ROCE_QP_STATE_RESET: switch (new_state) { case QED_ROCE_QP_STATE_INIT: @@@ -1955,6 -1956,7 +1956,7 @@@ int qedr_modify_qp(struct ib_qp *ibqp, struct qedr_dev *dev = get_qedr_dev(&qp->dev->ibdev); const struct ib_global_route *grh = rdma_ah_read_grh(&attr->ah_attr); enum ib_qp_state old_qp_state, new_qp_state; + enum qed_roce_qp_state cur_state; int rc = 0;
DP_DEBUG(dev, QEDR_MSG_QP, @@@ -2086,18 -2088,23 +2088,23 @@@ SET_FIELD(qp_params.modify_flags, QED_ROCE_MODIFY_QP_VALID_ACK_TIMEOUT, 1);
- qp_params.ack_timeout = attr->timeout; - if (attr->timeout) { - u32 temp; - - temp = 4096 * (1UL << attr->timeout) / 1000 / 1000; - /* FW requires [msec] */ - qp_params.ack_timeout = temp; - } else { - /* Infinite */ + /* The received timeout value is an exponent used like this: + * "12.7.34 LOCAL ACK TIMEOUT + * Value representing the transport (ACK) timeout for use by + * the remote, expressed as: 4.096 * 2^timeout [usec]" + * The FW expects timeout in msec so we need to divide the usec + * result by 1000. We'll approximate 1000~2^10, and 4.096 ~ 2^2, + * so we get: 2^2 * 2^timeout / 2^10 = 2^(timeout - 8). + * The value of zero means infinite so we use a 'max_t' to make + * sure that sub 1 msec values will be configured as 1 msec. + */ + if (attr->timeout) + qp_params.ack_timeout = + 1 << max_t(int, attr->timeout - 8, 0); + else qp_params.ack_timeout = 0; - } } + if (attr_mask & IB_QP_RETRY_CNT) { SET_FIELD(qp_params.modify_flags, QED_ROCE_MODIFY_QP_VALID_RETRY_CNT, 1); @@@ -2170,13 -2177,25 +2177,25 @@@ qp->dest_qp_num = attr->dest_qp_num; }
+ cur_state = qp->state; + + /* Update the QP state before the actual ramrod to prevent a race with + * fast path. Modifying the QP state to error will cause the device to + * flush the CQEs and while polling the flushed CQEs will considered as + * a potential issue if the QP isn't in error state. + */ + if ((attr_mask & IB_QP_STATE) && qp->qp_type != IB_QPT_GSI && + !udata && qp_params.new_state == QED_ROCE_QP_STATE_ERR) + qp->state = QED_ROCE_QP_STATE_ERR; + if (qp->qp_type != IB_QPT_GSI) rc = dev->ops->rdma_modify_qp(dev->rdma_ctx, qp->qed_qp, &qp_params);
if (attr_mask & IB_QP_STATE) { if ((qp->qp_type != IB_QPT_GSI) && (!udata)) - rc = qedr_update_qp_state(dev, qp, qp_params.new_state); + rc = qedr_update_qp_state(dev, qp, cur_state, + qp_params.new_state); qp->state = qp_params.new_state; }
@@@ -3695,7 -3714,7 +3714,7 @@@ static int process_resp_flush(struct qe static void try_consume_resp_cqe(struct qedr_cq *cq, struct qedr_qp *qp, struct rdma_cqe_responder *resp, int *update) { - if (le16_to_cpu(resp->rq_cons) == qp->rq.wqe_cons) { + if (le16_to_cpu(resp->rq_cons_or_srq_id) == qp->rq.wqe_cons) { consume_cqe(cq); *update |= 1; } @@@ -3710,7 -3729,7 +3729,7 @@@ static int qedr_poll_cq_resp(struct qed
if (resp->status == RDMA_CQE_RESP_STS_WORK_REQUEST_FLUSHED_ERR) { cnt = process_resp_flush(qp, cq, num_entries, wc, - resp->rq_cons); + resp->rq_cons_or_srq_id); try_consume_resp_cqe(cq, qp, resp, update); } else { cnt = process_resp_one(dev, qp, cq, wc, resp); diff --combined drivers/net/dsa/mt7530.c index 511ca134f13f,4e53c5ce23ff..d244c41898dd --- a/drivers/net/dsa/mt7530.c +++ b/drivers/net/dsa/mt7530.c @@@ -604,7 -604,7 +604,7 @@@ mt7530_get_ethtool_stats(struct dsa_swi }
static int -mt7530_get_sset_count(struct dsa_switch *ds) +mt7530_get_sset_count(struct dsa_switch *ds, int port) { return ARRAY_SIZE(mt7530_mib); } @@@ -1409,6 -1409,7 +1409,7 @@@ static const struct of_device_id mt7530 { .compatible = "mediatek,mt7530" }, { /* sentinel */ }, }; + MODULE_DEVICE_TABLE(of, mt7530_of_match);
static struct mdio_driver mt7530_mdio_driver = { .probe = mt7530_probe, @@@ -1424,4 -1425,3 +1425,3 @@@ mdio_module_driver(mt7530_mdio_driver) MODULE_AUTHOR("Sean Wang sean.wang@mediatek.com"); MODULE_DESCRIPTION("Driver for Mediatek MT7530 Switch"); MODULE_LICENSE("GPL"); - MODULE_ALIAS("platform:mediatek-mt7530"); diff --combined drivers/net/ethernet/marvell/mvneta.c index a58acdb5eba3,3f6fb635738c..b26bcdf4cd03 --- a/drivers/net/ethernet/marvell/mvneta.c +++ b/drivers/net/ethernet/marvell/mvneta.c @@@ -1132,6 -1132,7 +1132,7 @@@ static void mvneta_port_up(struct mvnet } mvreg_write(pp, MVNETA_TXQ_CMD, q_map);
+ q_map = 0; /* Enable all initialized RXQs. */ for (queue = 0; queue < rxq_number; queue++) { struct mvneta_rx_queue *rxq = &pp->rxqs[queue]; @@@ -1555,6 -1556,7 +1556,6 @@@ static void mvneta_rx_pkts_coal_set(str { mvreg_write(pp, MVNETA_RXQ_THRESHOLD_REG(rxq->id), value | MVNETA_RXQ_NON_OCCUPIED(0)); - rxq->pkts_coal = value; }
/* Set the time delay in usec before RX interrupt will be generated by @@@ -1570,6 -1572,7 +1571,6 @@@ static void mvneta_rx_time_coal_set(str val = (clk_rate / 1000000) * value;
mvreg_write(pp, MVNETA_RXQ_TIME_COAL_REG(rxq->id), val); - rxq->time_coal = value; }
/* Set threshold for TX_DONE pkts coalescing */ @@@ -1584,6 -1587,8 +1585,6 @@@ static void mvneta_tx_done_pkts_coal_se val |= MVNETA_TXQ_SENT_THRESH_MASK(value);
mvreg_write(pp, MVNETA_TXQ_SIZE_REG(txq->id), val); - - txq->done_pkts_coal = value; }
/* Handle rx descriptor fill by setting buf_cookie and buf_phys_addr */ @@@ -3392,8 -3397,7 +3393,8 @@@ static void mvneta_set_eee(struct mvnet mvreg_write(pp, MVNETA_LPI_CTRL_1, lpi_ctl1); }
-static void mvneta_mac_link_down(struct net_device *ndev, unsigned int mode) +static void mvneta_mac_link_down(struct net_device *ndev, unsigned int mode, + phy_interface_t interface) { struct mvneta_port *pp = netdev_priv(ndev); u32 val; @@@ -3412,7 -3416,6 +3413,7 @@@ }
static void mvneta_mac_link_up(struct net_device *ndev, unsigned int mode, + phy_interface_t interface, struct phy_device *phy) { struct mvneta_port *pp = netdev_priv(ndev); @@@ -4071,6 -4074,22 +4072,6 @@@ static int mvneta_ethtool_set_wol(struc return ret; }
-static int mvneta_ethtool_get_module_info(struct net_device *dev, - struct ethtool_modinfo *modinfo) -{ - struct mvneta_port *pp = netdev_priv(dev); - - return phylink_ethtool_get_module_info(pp->phylink, modinfo); -} - -static int mvneta_ethtool_get_module_eeprom(struct net_device *dev, - struct ethtool_eeprom *ee, u8 *buf) -{ - struct mvneta_port *pp = netdev_priv(dev); - - return phylink_ethtool_get_module_eeprom(pp->phylink, ee, buf); -} - static int mvneta_ethtool_get_eee(struct net_device *dev, struct ethtool_eee *eee) { @@@ -4145,6 -4164,8 +4146,6 @@@ static const struct ethtool_ops mvneta_ .set_link_ksettings = mvneta_ethtool_set_link_ksettings, .get_wol = mvneta_ethtool_get_wol, .set_wol = mvneta_ethtool_set_wol, - .get_module_info = mvneta_ethtool_get_module_info, - .get_module_eeprom = mvneta_ethtool_get_module_eeprom, .get_eee = mvneta_ethtool_get_eee, .set_eee = mvneta_ethtool_set_eee, }; @@@ -4635,8 -4656,8 +4636,8 @@@ MODULE_DESCRIPTION("Marvell NETA Ethern MODULE_AUTHOR("Rami Rosen rosenr@marvell.com, Thomas Petazzoni thomas.petazzoni@free-electrons.com"); MODULE_LICENSE("GPL");
-module_param(rxq_number, int, S_IRUGO); -module_param(txq_number, int, S_IRUGO); +module_param(rxq_number, int, 0444); +module_param(txq_number, int, 0444);
-module_param(rxq_def, int, S_IRUGO); -module_param(rx_copybreak, int, S_IRUGO | S_IWUSR); +module_param(rxq_def, int, 0444); +module_param(rx_copybreak, int, 0644); diff --combined drivers/net/ethernet/mellanox/mlx4/en_ethtool.c index 9a7a2f05ab35,f3302edba8b4..a30a2e95d13f --- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c @@@ -199,10 -199,6 +199,10 @@@ static const char main_strings[][ETH_GS "rx_xdp_drop", "rx_xdp_tx", "rx_xdp_tx_full", + + /* phy statistics */ + "rx_packets_phy", "rx_bytes_phy", + "tx_packets_phy", "tx_bytes_phy", };
static const char mlx4_en_test_names[][ETH_GSTRING_LEN]= { @@@ -415,10 -411,6 +415,10 @@@ static void mlx4_en_get_ethtool_stats(s if (bitmap_iterator_test(&it)) data[index++] = ((unsigned long *)&priv->xdp_stats)[i];
+ for (i = 0; i < NUM_PHY_STATS; i++, bitmap_iterator_inc(&it)) + if (bitmap_iterator_test(&it)) + data[index++] = ((unsigned long *)&priv->phy_stats)[i]; + for (i = 0; i < priv->tx_ring_num[TX]; i++) { data[index++] = priv->tx_ring[TX][i]->packets; data[index++] = priv->tx_ring[TX][i]->bytes; @@@ -498,12 -490,6 +498,12 @@@ static void mlx4_en_get_strings(struct strcpy(data + (index++) * ETH_GSTRING_LEN, main_strings[strings]);
+ for (i = 0; i < NUM_PHY_STATS; i++, strings++, + bitmap_iterator_inc(&it)) + if (bitmap_iterator_test(&it)) + strcpy(data + (index++) * ETH_GSTRING_LEN, + main_strings[strings]); + for (i = 0; i < priv->tx_ring_num[TX]; i++) { sprintf(data + (index++) * ETH_GSTRING_LEN, "tx%d_packets", i); @@@ -1060,27 -1046,32 +1060,32 @@@ static int mlx4_en_set_pauseparam(struc { struct mlx4_en_priv *priv = netdev_priv(dev); struct mlx4_en_dev *mdev = priv->mdev; + u8 tx_pause, tx_ppp, rx_pause, rx_ppp; int err;
if (pause->autoneg) return -EINVAL;
- priv->prof->tx_pause = pause->tx_pause != 0; - priv->prof->rx_pause = pause->rx_pause != 0; + tx_pause = !!(pause->tx_pause); + rx_pause = !!(pause->rx_pause); + rx_ppp = priv->prof->rx_ppp && !(tx_pause || rx_pause); + tx_ppp = priv->prof->tx_ppp && !(tx_pause || rx_pause); + err = mlx4_SET_PORT_general(mdev->dev, priv->port, priv->rx_skb_size + ETH_FCS_LEN, - priv->prof->tx_pause, - priv->prof->tx_ppp, - priv->prof->rx_pause, - priv->prof->rx_ppp); - if (err) - en_err(priv, "Failed setting pause params\n"); - else - mlx4_en_update_pfc_stats_bitmap(mdev->dev, &priv->stats_bitmap, - priv->prof->rx_ppp, - priv->prof->rx_pause, - priv->prof->tx_ppp, - priv->prof->tx_pause); + tx_pause, tx_ppp, rx_pause, rx_ppp); + if (err) { + en_err(priv, "Failed setting pause params, err = %d\n", err); + return err; + } + + mlx4_en_update_pfc_stats_bitmap(mdev->dev, &priv->stats_bitmap, + rx_ppp, rx_pause, tx_ppp, tx_pause); + + priv->prof->tx_pause = tx_pause; + priv->prof->rx_pause = rx_pause; + priv->prof->tx_ppp = tx_ppp; + priv->prof->rx_ppp = rx_ppp;
return err; } diff --combined drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c index a87d46bc2299,59ebfdae6695..37fd0245b6c1 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c @@@ -203,6 -203,9 +203,6 @@@ void mlx5e_ethtool_get_ethtool_stats(st { int i, idx = 0;
- if (!data) - return; - mutex_lock(&priv->state_lock); mlx5e_update_stats(priv); mutex_unlock(&priv->state_lock); @@@ -220,12 -223,60 +220,12 @@@ static void mlx5e_get_ethtool_stats(str mlx5e_ethtool_get_ethtool_stats(priv, stats, data); }
-static u32 mlx5e_rx_wqes_to_packets(struct mlx5e_priv *priv, int rq_wq_type, - int num_wqe) -{ - int packets_per_wqe; - int stride_size; - int num_strides; - int wqe_size; - - if (rq_wq_type != MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ) - return num_wqe; - - stride_size = 1 << priv->channels.params.mpwqe_log_stride_sz; - num_strides = 1 << priv->channels.params.mpwqe_log_num_strides; - wqe_size = stride_size * num_strides; - - packets_per_wqe = wqe_size / - ALIGN(ETH_DATA_LEN, stride_size); - return (1 << (order_base_2(num_wqe * packets_per_wqe) - 1)); -} - -static u32 mlx5e_packets_to_rx_wqes(struct mlx5e_priv *priv, int rq_wq_type, - int num_packets) -{ - int packets_per_wqe; - int stride_size; - int num_strides; - int wqe_size; - int num_wqes; - - if (rq_wq_type != MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ) - return num_packets; - - stride_size = 1 << priv->channels.params.mpwqe_log_stride_sz; - num_strides = 1 << priv->channels.params.mpwqe_log_num_strides; - wqe_size = stride_size * num_strides; - - num_packets = (1 << order_base_2(num_packets)); - - packets_per_wqe = wqe_size / - ALIGN(ETH_DATA_LEN, stride_size); - num_wqes = DIV_ROUND_UP(num_packets, packets_per_wqe); - return 1 << (order_base_2(num_wqes)); -} - void mlx5e_ethtool_get_ringparam(struct mlx5e_priv *priv, struct ethtool_ringparam *param) { - int rq_wq_type = priv->channels.params.rq_wq_type; - - param->rx_max_pending = mlx5e_rx_wqes_to_packets(priv, rq_wq_type, - 1 << mlx5_max_log_rq_size(rq_wq_type)); + param->rx_max_pending = 1 << MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE; param->tx_max_pending = 1 << MLX5E_PARAMS_MAXIMUM_LOG_SQ_SIZE; - param->rx_pending = mlx5e_rx_wqes_to_packets(priv, rq_wq_type, - 1 << priv->channels.params.log_rq_size); + param->rx_pending = 1 << priv->channels.params.log_rq_mtu_frames; param->tx_pending = 1 << priv->channels.params.log_sq_size; }
@@@ -240,9 -291,13 +240,9 @@@ static void mlx5e_get_ringparam(struct int mlx5e_ethtool_set_ringparam(struct mlx5e_priv *priv, struct ethtool_ringparam *param) { - int rq_wq_type = priv->channels.params.rq_wq_type; struct mlx5e_channels new_channels = {}; - u32 rx_pending_wqes; - u32 min_rq_size; u8 log_rq_size; u8 log_sq_size; - u32 num_mtts; int err = 0;
if (param->rx_jumbo_pending) { @@@ -256,10 -311,23 +256,10 @@@ return -EINVAL; }
- min_rq_size = mlx5e_rx_wqes_to_packets(priv, rq_wq_type, - 1 << mlx5_min_log_rq_size(rq_wq_type)); - rx_pending_wqes = mlx5e_packets_to_rx_wqes(priv, rq_wq_type, - param->rx_pending); - - if (param->rx_pending < min_rq_size) { + if (param->rx_pending < (1 << MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE)) { netdev_info(priv->netdev, "%s: rx_pending (%d) < min (%d)\n", __func__, param->rx_pending, - min_rq_size); - return -EINVAL; - } - - num_mtts = MLX5E_REQUIRED_MTTS(rx_pending_wqes); - if (priv->channels.params.rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ && - !MLX5E_VALID_NUM_MTTS(num_mtts)) { - netdev_info(priv->netdev, "%s: rx_pending (%d) request can't be satisfied, try to reduce.\n", - __func__, param->rx_pending); + 1 << MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE); return -EINVAL; }
@@@ -270,17 -338,17 +270,17 @@@ return -EINVAL; }
- log_rq_size = order_base_2(rx_pending_wqes); + log_rq_size = order_base_2(param->rx_pending); log_sq_size = order_base_2(param->tx_pending);
- if (log_rq_size == priv->channels.params.log_rq_size && + if (log_rq_size == priv->channels.params.log_rq_mtu_frames && log_sq_size == priv->channels.params.log_sq_size) return 0;
mutex_lock(&priv->state_lock);
new_channels.params = priv->channels.params; - new_channels.params.log_rq_size = log_rq_size; + new_channels.params.log_rq_mtu_frames = log_rq_size; new_channels.params.log_sq_size = log_sq_size;
if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) { @@@ -409,6 -477,9 +409,9 @@@ static int mlx5e_get_coalesce(struct ne return mlx5e_ethtool_get_coalesce(priv, coal); }
+ #define MLX5E_MAX_COAL_TIME MLX5_MAX_CQ_PERIOD + #define MLX5E_MAX_COAL_FRAMES MLX5_MAX_CQ_COUNT + static void mlx5e_set_priv_channels_coalesce(struct mlx5e_priv *priv, struct ethtool_coalesce *coal) { @@@ -443,6 -514,20 +446,20 @@@ int mlx5e_ethtool_set_coalesce(struct m if (!MLX5_CAP_GEN(mdev, cq_moderation)) return -EOPNOTSUPP;
+ if (coal->tx_coalesce_usecs > MLX5E_MAX_COAL_TIME || + coal->rx_coalesce_usecs > MLX5E_MAX_COAL_TIME) { + netdev_info(priv->netdev, "%s: maximum coalesce time supported is %lu usecs\n", + __func__, MLX5E_MAX_COAL_TIME); + return -ERANGE; + } + + if (coal->tx_max_coalesced_frames > MLX5E_MAX_COAL_FRAMES || + coal->rx_max_coalesced_frames > MLX5E_MAX_COAL_FRAMES) { + netdev_info(priv->netdev, "%s: maximum coalesced frames supported is %lu\n", + __func__, MLX5E_MAX_COAL_FRAMES); + return -ERANGE; + } + mutex_lock(&priv->state_lock); new_channels.params = priv->channels.params;
@@@ -998,66 -1083,16 +1015,66 @@@ static int mlx5e_get_rxnfc(struct net_d return err; }
+#define MLX5E_PFC_PREVEN_AUTO_TOUT_MSEC 100 +#define MLX5E_PFC_PREVEN_TOUT_MAX_MSEC 8000 +#define MLX5E_PFC_PREVEN_MINOR_PRECENT 85 +#define MLX5E_PFC_PREVEN_TOUT_MIN_MSEC 80 +#define MLX5E_DEVICE_STALL_MINOR_WATERMARK(critical_tout) \ + max_t(u16, MLX5E_PFC_PREVEN_TOUT_MIN_MSEC, \ + (critical_tout * MLX5E_PFC_PREVEN_MINOR_PRECENT) / 100) + +static int mlx5e_get_pfc_prevention_tout(struct net_device *netdev, + u16 *pfc_prevention_tout) +{ + struct mlx5e_priv *priv = netdev_priv(netdev); + struct mlx5_core_dev *mdev = priv->mdev; + + if (!MLX5_CAP_PCAM_FEATURE((priv)->mdev, pfcc_mask) || + !MLX5_CAP_DEBUG((priv)->mdev, stall_detect)) + return -EOPNOTSUPP; + + return mlx5_query_port_stall_watermark(mdev, pfc_prevention_tout, NULL); +} + +static int mlx5e_set_pfc_prevention_tout(struct net_device *netdev, + u16 pfc_preven) +{ + struct mlx5e_priv *priv = netdev_priv(netdev); + struct mlx5_core_dev *mdev = priv->mdev; + u16 critical_tout; + u16 minor; + + if (!MLX5_CAP_PCAM_FEATURE((priv)->mdev, pfcc_mask) || + !MLX5_CAP_DEBUG((priv)->mdev, stall_detect)) + return -EOPNOTSUPP; + + critical_tout = (pfc_preven == PFC_STORM_PREVENTION_AUTO) ? + MLX5E_PFC_PREVEN_AUTO_TOUT_MSEC : + pfc_preven; + + if (critical_tout != PFC_STORM_PREVENTION_DISABLE && + (critical_tout > MLX5E_PFC_PREVEN_TOUT_MAX_MSEC || + critical_tout < MLX5E_PFC_PREVEN_TOUT_MIN_MSEC)) { + netdev_info(netdev, "%s: pfc prevention tout not in range (%d-%d)\n", + __func__, MLX5E_PFC_PREVEN_TOUT_MIN_MSEC, + MLX5E_PFC_PREVEN_TOUT_MAX_MSEC); + return -EINVAL; + } + + minor = MLX5E_DEVICE_STALL_MINOR_WATERMARK(critical_tout); + return mlx5_set_port_stall_watermark(mdev, critical_tout, + minor); +} + static int mlx5e_get_tunable(struct net_device *dev, const struct ethtool_tunable *tuna, void *data) { - const struct mlx5e_priv *priv = netdev_priv(dev); - int err = 0; + int err;
switch (tuna->id) { - case ETHTOOL_TX_COPYBREAK: - *(u32 *)data = priv->channels.params.tx_max_inline; + case ETHTOOL_PFC_PREVENTION_TOUT: + err = mlx5e_get_pfc_prevention_tout(dev, data); break; default: err = -EINVAL; @@@ -1072,13 -1107,34 +1089,13 @@@ static int mlx5e_set_tunable(struct net const void *data) { struct mlx5e_priv *priv = netdev_priv(dev); - struct mlx5_core_dev *mdev = priv->mdev; - struct mlx5e_channels new_channels = {}; - int err = 0; - u32 val; + int err;
mutex_lock(&priv->state_lock);
switch (tuna->id) { - case ETHTOOL_TX_COPYBREAK: - val = *(u32 *)data; - if (val > mlx5e_get_max_inline_cap(mdev)) { - err = -EINVAL; - break; - } - - new_channels.params = priv->channels.params; - new_channels.params.tx_max_inline = val; - - if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) { - priv->channels.params = new_channels.params; - break; - } - - err = mlx5e_open_channels(priv, &new_channels); - if (err) - break; - mlx5e_switch_priv_channels(priv, &new_channels, NULL); - + case ETHTOOL_PFC_PREVENTION_TOUT: + err = mlx5e_set_pfc_prevention_tout(dev, *(u16 *)data); break; default: err = -EINVAL; @@@ -1468,6 -1524,11 +1485,6 @@@ int mlx5e_modify_rx_cqe_compression_loc new_channels.params = priv->channels.params; MLX5E_SET_PFLAG(&new_channels.params, MLX5E_PFLAG_RX_CQE_COMPRESS, new_val);
- new_channels.params.mpwqe_log_stride_sz = - MLX5E_MPWQE_STRIDE_SZ(priv->mdev, new_val); - new_channels.params.mpwqe_log_num_strides = - MLX5_MPWRQ_LOG_WQE_SZ - new_channels.params.mpwqe_log_stride_sz; - if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) { priv->channels.params = new_channels.params; return 0; @@@ -1505,38 -1566,6 +1522,38 @@@ static int set_pflag_rx_cqe_compress(st return 0; }
+static int set_pflag_rx_striding_rq(struct net_device *netdev, bool enable) +{ + struct mlx5e_priv *priv = netdev_priv(netdev); + struct mlx5_core_dev *mdev = priv->mdev; + struct mlx5e_channels new_channels = {}; + int err; + + if (enable) { + if (!mlx5e_check_fragmented_striding_rq_cap(mdev)) + return -EOPNOTSUPP; + if (!mlx5e_striding_rq_possible(mdev, &priv->channels.params)) + return -EINVAL; + } + + new_channels.params = priv->channels.params; + + MLX5E_SET_PFLAG(&new_channels.params, MLX5E_PFLAG_RX_STRIDING_RQ, enable); + mlx5e_set_rq_type(mdev, &new_channels.params); + + if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) { + priv->channels.params = new_channels.params; + return 0; + } + + err = mlx5e_open_channels(priv, &new_channels); + if (err) + return err; + + mlx5e_switch_priv_channels(priv, &new_channels, NULL); + return 0; +} + static int mlx5e_handle_pflag(struct net_device *netdev, u32 wanted_flags, enum mlx5e_priv_flag flag, @@@ -1582,12 -1611,6 +1599,12 @@@ static int mlx5e_set_priv_flags(struct err = mlx5e_handle_pflag(netdev, pflags, MLX5E_PFLAG_RX_CQE_COMPRESS, set_pflag_rx_cqe_compress); + if (err) + goto out; + + err = mlx5e_handle_pflag(netdev, pflags, + MLX5E_PFLAG_RX_STRIDING_RQ, + set_pflag_rx_striding_rq);
out: mutex_unlock(&priv->state_lock); diff --combined drivers/net/ethernet/mellanox/mlx5/core/en_main.c index 0339609cfa56,9b4827d36e3e..c71f4f10283b --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@@ -71,145 -71,56 +71,145 @@@ struct mlx5e_channel_param struct mlx5e_cq_param icosq_cq; };
-static bool mlx5e_check_fragmented_striding_rq_cap(struct mlx5_core_dev *mdev) +bool mlx5e_check_fragmented_striding_rq_cap(struct mlx5_core_dev *mdev) { - return MLX5_CAP_GEN(mdev, striding_rq) && + bool striding_rq_umr = MLX5_CAP_GEN(mdev, striding_rq) && MLX5_CAP_GEN(mdev, umr_ptr_rlky) && MLX5_CAP_ETH(mdev, reg_umr_sq); + u16 max_wqe_sz_cap = MLX5_CAP_GEN(mdev, max_wqe_sz_sq); + bool inline_umr = MLX5E_UMR_WQE_INLINE_SZ <= max_wqe_sz_cap; + + if (!striding_rq_umr) + return false; + if (!inline_umr) { + mlx5_core_warn(mdev, "Cannot support Striding RQ: UMR WQE size (%d) exceeds maximum supported (%d).\n", + (int)MLX5E_UMR_WQE_INLINE_SZ, max_wqe_sz_cap); + return false; + } + return true; +} + +static u32 mlx5e_mpwqe_get_linear_frag_sz(struct mlx5e_params *params) +{ + if (!params->xdp_prog) { + u16 hw_mtu = MLX5E_SW2HW_MTU(params, params->sw_mtu); + u16 rq_headroom = MLX5_RX_HEADROOM + NET_IP_ALIGN; + + return MLX5_SKB_FRAG_SZ(rq_headroom + hw_mtu); + } + + return PAGE_SIZE; +} + +static u8 mlx5e_mpwqe_log_pkts_per_wqe(struct mlx5e_params *params) +{ + u32 linear_frag_sz = mlx5e_mpwqe_get_linear_frag_sz(params); + + return MLX5_MPWRQ_LOG_WQE_SZ - order_base_2(linear_frag_sz); +} + +static bool mlx5e_rx_mpwqe_is_linear_skb(struct mlx5_core_dev *mdev, + struct mlx5e_params *params) +{ + u32 frag_sz = mlx5e_mpwqe_get_linear_frag_sz(params); + s8 signed_log_num_strides_param; + u8 log_num_strides; + + if (params->lro_en || frag_sz > PAGE_SIZE) + return false; + + if (MLX5_CAP_GEN(mdev, ext_stride_num_range)) + return true; + + log_num_strides = MLX5_MPWRQ_LOG_WQE_SZ - order_base_2(frag_sz); + signed_log_num_strides_param = + (s8)log_num_strides - MLX5_MPWQE_LOG_NUM_STRIDES_BASE; + + return signed_log_num_strides_param >= 0; +} + +static u8 mlx5e_mpwqe_get_log_rq_size(struct mlx5e_params *params) +{ + if (params->log_rq_mtu_frames < + mlx5e_mpwqe_log_pkts_per_wqe(params) + MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE_MPW) + return MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE_MPW; + + return params->log_rq_mtu_frames - mlx5e_mpwqe_log_pkts_per_wqe(params); +} + +static u8 mlx5e_mpwqe_get_log_stride_size(struct mlx5_core_dev *mdev, + struct mlx5e_params *params) +{ + if (mlx5e_rx_mpwqe_is_linear_skb(mdev, params)) + return order_base_2(mlx5e_mpwqe_get_linear_frag_sz(params)); + + return MLX5E_MPWQE_STRIDE_SZ(mdev, + MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS)); +} + +static u8 mlx5e_mpwqe_get_log_num_strides(struct mlx5_core_dev *mdev, + struct mlx5e_params *params) +{ + return MLX5_MPWRQ_LOG_WQE_SZ - + mlx5e_mpwqe_get_log_stride_size(mdev, params); +} + +static u16 mlx5e_get_rq_headroom(struct mlx5_core_dev *mdev, + struct mlx5e_params *params) +{ + u16 linear_rq_headroom = params->xdp_prog ? + XDP_PACKET_HEADROOM : MLX5_RX_HEADROOM; + + linear_rq_headroom += NET_IP_ALIGN; + + if (params->rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST) + return linear_rq_headroom; + + if (mlx5e_rx_mpwqe_is_linear_skb(mdev, params)) + return linear_rq_headroom; + + return 0; }
void mlx5e_init_rq_type_params(struct mlx5_core_dev *mdev, - struct mlx5e_params *params, u8 rq_type) + struct mlx5e_params *params) { - params->rq_wq_type = rq_type; params->lro_wqe_sz = MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ; + params->log_rq_mtu_frames = is_kdump_kernel() ? + MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE : + MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE; switch (params->rq_wq_type) { case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ: - params->log_rq_size = is_kdump_kernel() ? - MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE_MPW : - MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE_MPW; - params->mpwqe_log_stride_sz = MLX5E_MPWQE_STRIDE_SZ(mdev, - MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS)); - params->mpwqe_log_num_strides = MLX5_MPWRQ_LOG_WQE_SZ - - params->mpwqe_log_stride_sz; break; default: /* MLX5_WQ_TYPE_LINKED_LIST */ - params->log_rq_size = is_kdump_kernel() ? - MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE : - MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE; - params->rq_headroom = params->xdp_prog ? - XDP_PACKET_HEADROOM : MLX5_RX_HEADROOM; - params->rq_headroom += NET_IP_ALIGN; - /* Extra room needed for build_skb */ - params->lro_wqe_sz -= params->rq_headroom + + params->lro_wqe_sz -= mlx5e_get_rq_headroom(mdev, params) + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); }
mlx5_core_info(mdev, "MLX5E: StrdRq(%d) RqSz(%ld) StrdSz(%ld) RxCqeCmprss(%d)\n", params->rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ, - BIT(params->log_rq_size), - BIT(params->mpwqe_log_stride_sz), + params->rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ ? + BIT(mlx5e_mpwqe_get_log_rq_size(params)) : + BIT(params->log_rq_mtu_frames), + BIT(mlx5e_mpwqe_get_log_stride_size(mdev, params)), MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS)); }
-static void mlx5e_set_rq_params(struct mlx5_core_dev *mdev, +bool mlx5e_striding_rq_possible(struct mlx5_core_dev *mdev, struct mlx5e_params *params) { - u8 rq_type = mlx5e_check_fragmented_striding_rq_cap(mdev) && - !params->xdp_prog && !MLX5_IPSEC_DEV(mdev) ? - MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ : - MLX5_WQ_TYPE_LINKED_LIST; - mlx5e_init_rq_type_params(mdev, params, rq_type); + return mlx5e_check_fragmented_striding_rq_cap(mdev) && + !MLX5_IPSEC_DEV(mdev) && + !(params->xdp_prog && !mlx5e_rx_mpwqe_is_linear_skb(mdev, params)); +} + +void mlx5e_set_rq_type(struct mlx5_core_dev *mdev, struct mlx5e_params *params) +{ + params->rq_wq_type = mlx5e_striding_rq_possible(mdev, params) && + MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_STRIDING_RQ) ? + MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ : + MLX5_WQ_TYPE_LINKED_LIST; }
static void mlx5e_update_carrier(struct mlx5e_priv *priv) @@@ -242,6 -153,26 +242,6 @@@ static void mlx5e_update_carrier_work(s mutex_unlock(&priv->state_lock); }
-static void mlx5e_tx_timeout_work(struct work_struct *work) -{ - struct mlx5e_priv *priv = container_of(work, struct mlx5e_priv, - tx_timeout_work); - int err; - - rtnl_lock(); - mutex_lock(&priv->state_lock); - if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) - goto unlock; - mlx5e_close_locked(priv->netdev); - err = mlx5e_open_locked(priv->netdev); - if (err) - netdev_err(priv->netdev, "mlx5e_open_locked failed recovering from a tx_timeout, err(%d).\n", - err); -unlock: - mutex_unlock(&priv->state_lock); - rtnl_unlock(); -} - void mlx5e_update_stats(struct mlx5e_priv *priv) { int i; @@@ -304,38 -235,107 +304,38 @@@ static void mlx5e_disable_async_events( synchronize_irq(pci_irq_vector(priv->mdev->pdev, MLX5_EQ_VEC_ASYNC)); }
-static inline int mlx5e_get_wqe_mtt_sz(void) -{ - /* UMR copies MTTs in units of MLX5_UMR_MTT_ALIGNMENT bytes. - * To avoid copying garbage after the mtt array, we allocate - * a little more. - */ - return ALIGN(MLX5_MPWRQ_PAGES_PER_WQE * sizeof(__be64), - MLX5_UMR_MTT_ALIGNMENT); -} - static inline void mlx5e_build_umr_wqe(struct mlx5e_rq *rq, struct mlx5e_icosq *sq, - struct mlx5e_umr_wqe *wqe, - u16 ix) + struct mlx5e_umr_wqe *wqe) { struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl; struct mlx5_wqe_umr_ctrl_seg *ucseg = &wqe->uctrl; - struct mlx5_wqe_data_seg *dseg = &wqe->data; - struct mlx5e_mpw_info *wi = &rq->mpwqe.info[ix]; - u8 ds_cnt = DIV_ROUND_UP(sizeof(*wqe), MLX5_SEND_WQE_DS); - u32 umr_wqe_mtt_offset = mlx5e_get_wqe_mtt_offset(rq, ix); + u8 ds_cnt = DIV_ROUND_UP(MLX5E_UMR_WQE_INLINE_SZ, MLX5_SEND_WQE_DS);
cseg->qpn_ds = cpu_to_be32((sq->sqn << MLX5_WQE_CTRL_QPN_SHIFT) | ds_cnt); cseg->fm_ce_se = MLX5_WQE_CTRL_CQ_UPDATE; cseg->imm = rq->mkey_be;
- ucseg->flags = MLX5_UMR_TRANSLATION_OFFSET_EN; + ucseg->flags = MLX5_UMR_TRANSLATION_OFFSET_EN | MLX5_UMR_INLINE; ucseg->xlt_octowords = cpu_to_be16(MLX5_MTT_OCTW(MLX5_MPWRQ_PAGES_PER_WQE)); - ucseg->bsf_octowords = - cpu_to_be16(MLX5_MTT_OCTW(umr_wqe_mtt_offset)); ucseg->mkey_mask = cpu_to_be64(MLX5_MKEY_MASK_FREE); - - dseg->lkey = sq->mkey_be; - dseg->addr = cpu_to_be64(wi->umr.mtt_addr); }
static int mlx5e_rq_alloc_mpwqe_info(struct mlx5e_rq *rq, struct mlx5e_channel *c) { int wq_sz = mlx5_wq_ll_get_size(&rq->wq); - int mtt_sz = mlx5e_get_wqe_mtt_sz(); - int mtt_alloc = mtt_sz + MLX5_UMR_ALIGN - 1; - int i;
rq->mpwqe.info = kzalloc_node(wq_sz * sizeof(*rq->mpwqe.info), GFP_KERNEL, cpu_to_node(c->cpu)); if (!rq->mpwqe.info) - goto err_out; - - /* We allocate more than mtt_sz as we will align the pointer */ - rq->mpwqe.mtt_no_align = kzalloc_node(mtt_alloc * wq_sz, GFP_KERNEL, - cpu_to_node(c->cpu)); - if (unlikely(!rq->mpwqe.mtt_no_align)) - goto err_free_wqe_info; - - for (i = 0; i < wq_sz; i++) { - struct mlx5e_mpw_info *wi = &rq->mpwqe.info[i]; - - wi->umr.mtt = PTR_ALIGN(rq->mpwqe.mtt_no_align + i * mtt_alloc, - MLX5_UMR_ALIGN); - wi->umr.mtt_addr = dma_map_single(c->pdev, wi->umr.mtt, mtt_sz, - PCI_DMA_TODEVICE); - if (unlikely(dma_mapping_error(c->pdev, wi->umr.mtt_addr))) - goto err_unmap_mtts; + return -ENOMEM;
- mlx5e_build_umr_wqe(rq, &c->icosq, &wi->umr.wqe, i); - } + mlx5e_build_umr_wqe(rq, &c->icosq, &rq->mpwqe.umr_wqe);
return 0; - -err_unmap_mtts: - while (--i >= 0) { - struct mlx5e_mpw_info *wi = &rq->mpwqe.info[i]; - - dma_unmap_single(c->pdev, wi->umr.mtt_addr, mtt_sz, - PCI_DMA_TODEVICE); - } - kfree(rq->mpwqe.mtt_no_align); -err_free_wqe_info: - kfree(rq->mpwqe.info); - -err_out: - return -ENOMEM; -} - -static void mlx5e_rq_free_mpwqe_info(struct mlx5e_rq *rq) -{ - int wq_sz = mlx5_wq_ll_get_size(&rq->wq); - int mtt_sz = mlx5e_get_wqe_mtt_sz(); - int i; - - for (i = 0; i < wq_sz; i++) { - struct mlx5e_mpw_info *wi = &rq->mpwqe.info[i]; - - dma_unmap_single(rq->pdev, wi->umr.mtt_addr, mtt_sz, - PCI_DMA_TODEVICE); - } - kfree(rq->mpwqe.mtt_no_align); - kfree(rq->mpwqe.info); }
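With MLX5_UMR_INLINE set, the MTT entries travel inside the UMR WQE itself, which is why the separate mtt_no_align allocation and its DMA mapping (and the matching unwind path) can go away. A layout sketch of the difference; the segment sizes and PAGES_PER_WQE value are illustrative stand-ins, not the real mlx5 structures.

#include <stdio.h>
#include <stdint.h>

/* Illustrative segment sizes only; the real mlx5 structures differ. */
struct ctrl_seg     { uint8_t b[16]; };
struct umr_ctrl_seg { uint8_t b[48]; };
struct data_seg     { uint8_t b[16]; };   /* pointer form: lkey + address */
typedef uint64_t mtt_entry;               /* inline form: MTTs in the WQE */

#define PAGES_PER_WQE 16                  /* assumed pages per MPW WQE    */

struct umr_wqe_ptr {      /* old layout: MTTs live in DMA-mapped memory   */
	struct ctrl_seg ctrl;
	struct umr_ctrl_seg uctrl;
	struct data_seg data;
};

struct umr_wqe_inline {   /* new layout: MTTs inlined after the ctrl segs */
	struct ctrl_seg ctrl;
	struct umr_ctrl_seg uctrl;
	mtt_entry mtt[PAGES_PER_WQE];
};

int main(void)
{
	printf("pointer UMR WQE: %zu bytes (+ separately mapped MTT array)\n",
	       sizeof(struct umr_wqe_ptr));
	printf("inline  UMR WQE: %zu bytes, ds_cnt = %zu\n",
	       sizeof(struct umr_wqe_inline),
	       (sizeof(struct umr_wqe_inline) + 15) / 16);
	return 0;
}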
static int mlx5e_create_umr_mkey(struct mlx5_core_dev *mdev, @@@ -347,6 -347,9 +347,6 @@@ u32 *in; int err;
- if (!MLX5E_VALID_NUM_MTTS(npages)) - return -EINVAL; - in = kvzalloc(inlen, GFP_KERNEL); if (!in) return -ENOMEM; @@@ -379,11 -382,6 +379,11 @@@ static int mlx5e_create_rq_umr_mkey(str return mlx5e_create_umr_mkey(mdev, num_mtts, PAGE_SHIFT, &rq->umr_mkey); }
+static inline u64 mlx5e_get_mpwqe_offset(struct mlx5e_rq *rq, u16 wqe_ix) +{ + return (wqe_ix << MLX5E_LOG_ALIGNED_MPWQE_PPW) << PAGE_SHIFT; +} + static int mlx5e_alloc_rq(struct mlx5e_channel *c, struct mlx5e_params *params, struct mlx5e_rq_param *rqp, @@@ -417,7 -415,6 +417,7 @@@ rq->channel = c; rq->ix = c->ix; rq->mdev = mdev; + rq->hw_mtu = MLX5E_SW2HW_MTU(params, params->sw_mtu);
rq->xdp_prog = params->xdp_prog ? bpf_prog_inc(params->xdp_prog) : NULL; if (IS_ERR(rq->xdp_prog)) { @@@ -431,10 -428,11 +431,10 @@@ goto err_rq_wq_destroy;
rq->buff.map_dir = rq->xdp_prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE; - rq->buff.headroom = params->rq_headroom; + rq->buff.headroom = mlx5e_get_rq_headroom(mdev, params);
switch (rq->wq_type) { case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ: - rq->post_wqes = mlx5e_post_rx_mpwqes; rq->dealloc_wqe = mlx5e_dealloc_rx_mpwqe;
@@@ -452,12 -450,8 +452,12 @@@ goto err_rq_wq_destroy; }
- rq->mpwqe.log_stride_sz = params->mpwqe_log_stride_sz; - rq->mpwqe.num_strides = BIT(params->mpwqe_log_num_strides); + rq->mpwqe.skb_from_cqe_mpwrq = + mlx5e_rx_mpwqe_is_linear_skb(mdev, params) ? + mlx5e_skb_from_cqe_mpwrq_linear : + mlx5e_skb_from_cqe_mpwrq_nonlinear; + rq->mpwqe.log_stride_sz = mlx5e_mpwqe_get_log_stride_size(mdev, params); + rq->mpwqe.num_strides = BIT(mlx5e_mpwqe_get_log_num_strides(mdev, params));
byte_count = rq->mpwqe.num_strides << rq->mpwqe.log_stride_sz;
@@@ -496,7 -490,7 +496,7 @@@
byte_count = params->lro_en ? params->lro_wqe_sz : - MLX5E_SW2HW_MTU(c->priv, c->netdev->mtu); + MLX5E_SW2HW_MTU(params, params->sw_mtu); #ifdef CONFIG_MLX5_EN_IPSEC if (MLX5_IPSEC_DEV(mdev)) byte_count += MLX5E_METADATA_ETHER_LEN; @@@ -516,9 -510,9 +516,9 @@@ struct mlx5e_rx_wqe *wqe = mlx5_wq_ll_get_wqe(&rq->wq, i);
if (rq->wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ) { - u64 dma_offset = (u64)mlx5e_get_wqe_mtt_offset(rq, i) << PAGE_SHIFT; + u64 dma_offset = mlx5e_get_mpwqe_offset(rq, i);
- wqe->data.addr = cpu_to_be64(dma_offset); + wqe->data.addr = cpu_to_be64(dma_offset + rq->buff.headroom); }
wqe->data.byte_count = cpu_to_be32(byte_count); @@@ -564,7 -558,7 +564,7 @@@ static void mlx5e_free_rq(struct mlx5e_
switch (rq->wq_type) { case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ: - mlx5e_rq_free_mpwqe_info(rq); + kfree(rq->mpwqe.info); mlx5_core_destroy_mkey(rq->mdev, &rq->umr_mkey); break; default: /* MLX5_WQ_TYPE_LINKED_LIST */ @@@ -621,7 -615,8 +621,7 @@@ static int mlx5e_create_rq(struct mlx5e static int mlx5e_modify_rq_state(struct mlx5e_rq *rq, int curr_state, int next_state) { - struct mlx5e_channel *c = rq->channel; - struct mlx5_core_dev *mdev = c->mdev; + struct mlx5_core_dev *mdev = rq->mdev;
void *in; void *rqc; @@@ -903,6 -898,7 +903,6 @@@ static int mlx5e_alloc_icosq(struct mlx struct mlx5_core_dev *mdev = c->mdev; int err;
- sq->mkey_be = c->mkey_be; sq->channel = c; sq->uar_map = mdev->mlx5e_res.bfreg.map;
@@@ -957,7 -953,6 +957,7 @@@ static int mlx5e_alloc_txqsq_db(struct return 0; }
+static void mlx5e_sq_recover(struct work_struct *work); static int mlx5e_alloc_txqsq(struct mlx5e_channel *c, int txq_ix, struct mlx5e_params *params, @@@ -975,8 -970,8 +975,8 @@@ sq->channel = c; sq->txq_ix = txq_ix; sq->uar_map = mdev->mlx5e_res.bfreg.map; - sq->max_inline = params->tx_max_inline; sq->min_inline_mode = params->tx_min_inline_mode; + INIT_WORK(&sq->recover.recover_work, mlx5e_sq_recover); if (MLX5_IPSEC_DEV(c->priv->mdev)) set_bit(MLX5E_SQ_STATE_IPSEC, &sq->state);
@@@ -1043,7 -1038,6 +1043,7 @@@ static int mlx5e_create_sq(struct mlx5_ MLX5_SET(sqc, sqc, min_wqe_inline_mode, csp->min_inline_mode);
MLX5_SET(sqc, sqc, state, MLX5_SQC_STATE_RST); + MLX5_SET(sqc, sqc, flush_in_error_en, 1);
MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_CYCLIC); MLX5_SET(wq, wq, uar_page, mdev->mlx5e_res.bfreg.index); @@@ -1162,20 -1156,9 +1162,20 @@@ err_free_txqsq return err; }
+static void mlx5e_reset_txqsq_cc_pc(struct mlx5e_txqsq *sq) +{ + WARN_ONCE(sq->cc != sq->pc, + "SQ 0x%x: cc (0x%x) != pc (0x%x)\n", + sq->sqn, sq->cc, sq->pc); + sq->cc = 0; + sq->dma_fifo_cc = 0; + sq->pc = 0; +} + static void mlx5e_activate_txqsq(struct mlx5e_txqsq *sq) { sq->txq = netdev_get_tx_queue(sq->channel->netdev, sq->txq_ix); + clear_bit(MLX5E_SQ_STATE_RECOVERING, &sq->state); set_bit(MLX5E_SQ_STATE_ENABLED, &sq->state); netdev_tx_reset_queue(sq->txq); netif_tx_start_queue(sq->txq); @@@ -1220,107 -1203,6 +1220,107 @@@ static void mlx5e_close_txqsq(struct ml mlx5e_free_txqsq(sq); }
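mlx5e_reset_txqsq_cc_pc() leans on the usual SQ ring invariant: the queue is idle exactly when the consumer counter has caught up with the producer counter, so zeroing both is only legal after a successful flush. A toy ring model of that invariant; the names and the assert are illustrative (the driver WARNs rather than asserting).

#include <assert.h>
#include <stdio.h>
#include <stdint.h>

struct toy_sq {
	uint16_t pc;   /* producer counter: bumped when a WQE is posted    */
	uint16_t cc;   /* consumer counter: bumped when a WQE completes    */
};

static void post(struct toy_sq *sq)     { sq->pc++; }
static void complete(struct toy_sq *sq) { sq->cc++; }

static int sq_is_idle(const struct toy_sq *sq)
{
	return sq->cc == sq->pc;
}

/* Mirrors the reset step: only meaningful once the SQ has drained. */
static void reset_cc_pc(struct toy_sq *sq)
{
	assert(sq_is_idle(sq));
	sq->cc = 0;
	sq->pc = 0;
}

int main(void)
{
	struct toy_sq sq = { 0, 0 };

	post(&sq); post(&sq); post(&sq);      /* three WQEs in flight */
	complete(&sq); complete(&sq); complete(&sq);

	printf("idle before reset: %d\n", sq_is_idle(&sq));
	reset_cc_pc(&sq);
	printf("pc=%u cc=%u after reset\n", sq.pc, sq.cc);
	return 0;
}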
+static int mlx5e_wait_for_sq_flush(struct mlx5e_txqsq *sq) +{ + unsigned long exp_time = jiffies + msecs_to_jiffies(2000); + + while (time_before(jiffies, exp_time)) { + if (sq->cc == sq->pc) + return 0; + + msleep(20); + } + + netdev_err(sq->channel->netdev, + "Wait for SQ 0x%x flush timeout (sq cc = 0x%x, sq pc = 0x%x)\n", + sq->sqn, sq->cc, sq->pc); + + return -ETIMEDOUT; +} + +static int mlx5e_sq_to_ready(struct mlx5e_txqsq *sq, int curr_state) +{ + struct mlx5_core_dev *mdev = sq->channel->mdev; + struct net_device *dev = sq->channel->netdev; + struct mlx5e_modify_sq_param msp = {0}; + int err; + + msp.curr_state = curr_state; + msp.next_state = MLX5_SQC_STATE_RST; + + err = mlx5e_modify_sq(mdev, sq->sqn, &msp); + if (err) { + netdev_err(dev, "Failed to move sq 0x%x to reset\n", sq->sqn); + return err; + } + + memset(&msp, 0, sizeof(msp)); + msp.curr_state = MLX5_SQC_STATE_RST; + msp.next_state = MLX5_SQC_STATE_RDY; + + err = mlx5e_modify_sq(mdev, sq->sqn, &msp); + if (err) { + netdev_err(dev, "Failed to move sq 0x%x to ready\n", sq->sqn); + return err; + } + + return 0; +} + +static void mlx5e_sq_recover(struct work_struct *work) +{ + struct mlx5e_txqsq_recover *recover = + container_of(work, struct mlx5e_txqsq_recover, + recover_work); + struct mlx5e_txqsq *sq = container_of(recover, struct mlx5e_txqsq, + recover); + struct mlx5_core_dev *mdev = sq->channel->mdev; + struct net_device *dev = sq->channel->netdev; + u8 state; + int err; + + err = mlx5_core_query_sq_state(mdev, sq->sqn, &state); + if (err) { + netdev_err(dev, "Failed to query SQ 0x%x state. err = %d\n", + sq->sqn, err); + return; + } + + if (state != MLX5_RQC_STATE_ERR) { + netdev_err(dev, "SQ 0x%x not in ERROR state\n", sq->sqn); + return; + } + + netif_tx_disable_queue(sq->txq); + + if (mlx5e_wait_for_sq_flush(sq)) + return; + + /* If the interval between two consecutive recovers per SQ is too + * short, don't recover to avoid infinite loop of ERR_CQE -> recover. + * If we reached this state, there is probably a bug that needs to be + * fixed. let's keep the queue close and let tx timeout cleanup. + */ + if (jiffies_to_msecs(jiffies - recover->last_recover) < + MLX5E_SQ_RECOVER_MIN_INTERVAL) { + netdev_err(dev, "Recover SQ 0x%x canceled, too many error CQEs\n", + sq->sqn); + return; + } + + /* At this point, no new packets will arrive from the stack as TXQ is + * marked with QUEUE_STATE_DRV_XOFF. In addition, NAPI cleared all + * pending WQEs. SQ can safely reset the SQ. + */ + if (mlx5e_sq_to_ready(sq, state)) + return; + + mlx5e_reset_txqsq_cc_pc(sq); + sq->stats.recover++; + recover->last_recover = jiffies; + mlx5e_activate_txqsq(sq); +} + static int mlx5e_open_icosq(struct mlx5e_channel *c, struct mlx5e_params *params, struct mlx5e_sq_param *param, @@@ -1861,47 -1743,39 +1861,47 @@@ static void mlx5e_build_rq_param(struc struct mlx5e_params *params, struct mlx5e_rq_param *param) { + struct mlx5_core_dev *mdev = priv->mdev; void *rqc = param->rqc; void *wq = MLX5_ADDR_OF(rqc, rqc, wq);
switch (params->rq_wq_type) { case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ: - MLX5_SET(wq, wq, log_wqe_num_of_strides, params->mpwqe_log_num_strides - 9); - MLX5_SET(wq, wq, log_wqe_stride_size, params->mpwqe_log_stride_sz - 6); + MLX5_SET(wq, wq, log_wqe_num_of_strides, + mlx5e_mpwqe_get_log_num_strides(mdev, params) - + MLX5_MPWQE_LOG_NUM_STRIDES_BASE); + MLX5_SET(wq, wq, log_wqe_stride_size, + mlx5e_mpwqe_get_log_stride_size(mdev, params) - + MLX5_MPWQE_LOG_STRIDE_SZ_BASE); MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ); + MLX5_SET(wq, wq, log_wq_sz, mlx5e_mpwqe_get_log_rq_size(params)); break; default: /* MLX5_WQ_TYPE_LINKED_LIST */ MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_LINKED_LIST); + MLX5_SET(wq, wq, log_wq_sz, params->log_rq_mtu_frames); }
MLX5_SET(wq, wq, end_padding_mode, MLX5_WQ_END_PAD_MODE_ALIGN); MLX5_SET(wq, wq, log_wq_stride, ilog2(sizeof(struct mlx5e_rx_wqe))); - MLX5_SET(wq, wq, log_wq_sz, params->log_rq_size); - MLX5_SET(wq, wq, pd, priv->mdev->mlx5e_res.pdn); + MLX5_SET(wq, wq, pd, mdev->mlx5e_res.pdn); MLX5_SET(rqc, rqc, counter_set_id, priv->q_counter); MLX5_SET(rqc, rqc, vsd, params->vlan_strip_disable); MLX5_SET(rqc, rqc, scatter_fcs, params->scatter_fcs_en);
- param->wq.buf_numa_node = dev_to_node(&priv->mdev->pdev->dev); + param->wq.buf_numa_node = dev_to_node(&mdev->pdev->dev); param->wq.linear = 1; }
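In the RQ context above, the removed literals 9 and 6 become the named bases MLX5_MPWQE_LOG_NUM_STRIDES_BASE and MLX5_MPWQE_LOG_STRIDE_SZ_BASE: the firmware fields store each log value as an offset from its base. A small encode/decode sketch; the base values 9 and 6 are taken from the removed literals and the example geometry is made up.

#include <stdio.h>

#define LOG_NUM_STRIDES_BASE 9
#define LOG_STRIDE_SZ_BASE   6

static unsigned int encode(unsigned int log_val, unsigned int base)
{
	return log_val - base;   /* what gets written to the WQ context */
}

static unsigned int decode(unsigned int field, unsigned int base)
{
	return field + base;     /* what the device actually uses */
}

int main(void)
{
	unsigned int log_num_strides = 11;  /* example: 2048 strides per WQE */
	unsigned int log_stride_sz   = 7;   /* example: 128-byte strides     */

	printf("num_strides field = %u, stride_size field = %u\n",
	       encode(log_num_strides, LOG_NUM_STRIDES_BASE),
	       encode(log_stride_sz, LOG_STRIDE_SZ_BASE));
	printf("device sees 2^%u strides of 2^%u bytes\n",
	       decode(encode(log_num_strides, LOG_NUM_STRIDES_BASE),
		      LOG_NUM_STRIDES_BASE),
	       decode(encode(log_stride_sz, LOG_STRIDE_SZ_BASE),
		      LOG_STRIDE_SZ_BASE));
	return 0;
}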
-static void mlx5e_build_drop_rq_param(struct mlx5_core_dev *mdev, +static void mlx5e_build_drop_rq_param(struct mlx5e_priv *priv, struct mlx5e_rq_param *param) { + struct mlx5_core_dev *mdev = priv->mdev; void *rqc = param->rqc; void *wq = MLX5_ADDR_OF(rqc, rqc, wq);
MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_LINKED_LIST); MLX5_SET(wq, wq, log_wq_stride, ilog2(sizeof(struct mlx5e_rx_wqe))); + MLX5_SET(rqc, rqc, counter_set_id, priv->drop_rq_q_counter);
param->wq.buf_numa_node = dev_to_node(&mdev->pdev->dev); } @@@ -1942,17 -1816,15 +1942,17 @@@ static void mlx5e_build_rx_cq_param(str struct mlx5e_params *params, struct mlx5e_cq_param *param) { + struct mlx5_core_dev *mdev = priv->mdev; void *cqc = param->cqc; u8 log_cq_size;
switch (params->rq_wq_type) { case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ: - log_cq_size = params->log_rq_size + params->mpwqe_log_num_strides; + log_cq_size = mlx5e_mpwqe_get_log_rq_size(params) + + mlx5e_mpwqe_get_log_num_strides(mdev, params); break; default: /* MLX5_WQ_TYPE_LINKED_LIST */ - log_cq_size = params->log_rq_size; + log_cq_size = params->log_rq_mtu_frames; }
MLX5_SET(cqc, cqc, log_cq_size, log_cq_size); @@@ -2503,10 -2375,10 +2503,10 @@@ static void mlx5e_build_inner_indir_tir mlx5e_build_indir_tir_ctx_hash(&priv->channels.params, tt, tirc, true); }
-static int mlx5e_set_mtu(struct mlx5e_priv *priv, u16 mtu) +static int mlx5e_set_mtu(struct mlx5_core_dev *mdev, + struct mlx5e_params *params, u16 mtu) { - struct mlx5_core_dev *mdev = priv->mdev; - u16 hw_mtu = MLX5E_SW2HW_MTU(priv, mtu); + u16 hw_mtu = MLX5E_SW2HW_MTU(params, mtu); int err;
err = mlx5_set_port_mtu(mdev, hw_mtu, 1); @@@ -2518,9 -2390,9 +2518,9 @@@ return 0; }
-static void mlx5e_query_mtu(struct mlx5e_priv *priv, u16 *mtu) +static void mlx5e_query_mtu(struct mlx5_core_dev *mdev, + struct mlx5e_params *params, u16 *mtu) { - struct mlx5_core_dev *mdev = priv->mdev; u16 hw_mtu = 0; int err;
@@@ -2528,27 -2400,25 +2528,27 @@@ if (err || !hw_mtu) /* fallback to port oper mtu */ mlx5_query_port_oper_mtu(mdev, &hw_mtu, 1);
- *mtu = MLX5E_HW2SW_MTU(priv, hw_mtu); + *mtu = MLX5E_HW2SW_MTU(params, hw_mtu); }
static int mlx5e_set_dev_port_mtu(struct mlx5e_priv *priv) { + struct mlx5e_params *params = &priv->channels.params; struct net_device *netdev = priv->netdev; + struct mlx5_core_dev *mdev = priv->mdev; u16 mtu; int err;
- err = mlx5e_set_mtu(priv, netdev->mtu); + err = mlx5e_set_mtu(mdev, params, params->sw_mtu); if (err) return err;
- mlx5e_query_mtu(priv, &mtu); - if (mtu != netdev->mtu) + mlx5e_query_mtu(mdev, params, &mtu); + if (mtu != params->sw_mtu) netdev_warn(netdev, "%s: VPort MTU %d is different than netdev mtu %d\n", - __func__, mtu, netdev->mtu); + __func__, mtu, params->sw_mtu);
- netdev->mtu = mtu; + params->sw_mtu = mtu; return 0; }
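mlx5e_set_dev_port_mtu() follows a set-then-verify pattern: program the requested value, read back what the port actually accepted, warn on a mismatch and adopt the effective value so params->sw_mtu never disagrees with the hardware. A generic sketch of that pattern; the toy_* device callbacks are placeholders, not mlx5 APIs.

#include <stdio.h>

struct toy_port {
	unsigned int hw_mtu;   /* what the "hardware" really applied */
	unsigned int hw_max;
};

static int toy_set_mtu(struct toy_port *p, unsigned int mtu)
{
	p->hw_mtu = mtu > p->hw_max ? p->hw_max : mtu;  /* device may clamp */
	return 0;
}

static unsigned int toy_query_mtu(const struct toy_port *p)
{
	return p->hw_mtu;
}

static unsigned int set_dev_port_mtu(struct toy_port *p, unsigned int wanted)
{
	unsigned int effective;

	toy_set_mtu(p, wanted);
	effective = toy_query_mtu(p);
	if (effective != wanted)
		fprintf(stderr, "port MTU %u differs from requested %u\n",
			effective, wanted);

	return effective;   /* caller stores this as the software MTU */
}

int main(void)
{
	struct toy_port p = { .hw_mtu = 1500, .hw_max = 9000 };

	printf("sw_mtu = %u\n", set_dev_port_mtu(&p, 9216));
	return 0;
}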
@@@ -2702,6 -2572,9 +2702,9 @@@ int mlx5e_open(struct net_device *netde mlx5_set_port_admin_status(priv->mdev, MLX5_PORT_UP); mutex_unlock(&priv->state_lock);
+ if (mlx5e_vxlan_allowed(priv->mdev)) + udp_tunnel_get_rx_info(netdev); + return err; }
@@@ -2773,16 -2646,15 +2776,16 @@@ static int mlx5e_alloc_drop_cq(struct m return mlx5e_alloc_cq_common(mdev, param, cq); }
-static int mlx5e_open_drop_rq(struct mlx5_core_dev *mdev, +static int mlx5e_open_drop_rq(struct mlx5e_priv *priv, struct mlx5e_rq *drop_rq) { + struct mlx5_core_dev *mdev = priv->mdev; struct mlx5e_cq_param cq_param = {}; struct mlx5e_rq_param rq_param = {}; struct mlx5e_cq *cq = &drop_rq->cq; int err;
- mlx5e_build_drop_rq_param(mdev, &rq_param); + mlx5e_build_drop_rq_param(priv, &rq_param);
err = mlx5e_alloc_drop_cq(mdev, cq, &cq_param); if (err) @@@ -2800,10 -2672,6 +2803,10 @@@ if (err) goto err_free_rq;
+ err = mlx5e_modify_rq_state(drop_rq, MLX5_RQC_STATE_RST, MLX5_RQC_STATE_RDY); + if (err) + mlx5_core_warn(priv->mdev, "modify_rq_state failed, rx_if_down_packets won't be counted %d\n", err); + return 0;
err_free_rq: @@@ -3229,28 -3097,20 +3232,28 @@@ typedef int (*mlx5e_feature_handler)(st static int set_feature_lro(struct net_device *netdev, bool enable) { struct mlx5e_priv *priv = netdev_priv(netdev); + struct mlx5_core_dev *mdev = priv->mdev; struct mlx5e_channels new_channels = {}; + struct mlx5e_params *old_params; int err = 0; bool reset;
mutex_lock(&priv->state_lock);
- reset = (priv->channels.params.rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST); - reset = reset && test_bit(MLX5E_STATE_OPENED, &priv->state); + old_params = &priv->channels.params; + reset = test_bit(MLX5E_STATE_OPENED, &priv->state);
- new_channels.params = priv->channels.params; + new_channels.params = *old_params; new_channels.params.lro_en = enable;
+ if (old_params->rq_wq_type != MLX5_WQ_TYPE_LINKED_LIST) { + if (mlx5e_rx_mpwqe_is_linear_skb(mdev, old_params) == + mlx5e_rx_mpwqe_is_linear_skb(mdev, &new_channels.params)) + reset = false; + } + if (!reset) { - priv->channels.params = new_channels.params; + *old_params = new_channels.params; err = mlx5e_modify_tirs_lro(priv); goto out; } @@@ -3379,20 -3239,24 +3382,20 @@@ static int mlx5e_set_features(struct ne netdev_features_t features) { netdev_features_t oper_features = netdev->features; - int err; + int err = 0; + +#define MLX5E_HANDLE_FEATURE(feature, handler) \ + mlx5e_handle_feature(netdev, &oper_features, features, feature, handler)
- err = mlx5e_handle_feature(netdev, &oper_features, features, - NETIF_F_LRO, set_feature_lro); - err |= mlx5e_handle_feature(netdev, &oper_features, features, - NETIF_F_HW_VLAN_CTAG_FILTER, + err |= MLX5E_HANDLE_FEATURE(NETIF_F_LRO, set_feature_lro); + err |= MLX5E_HANDLE_FEATURE(NETIF_F_HW_VLAN_CTAG_FILTER, set_feature_cvlan_filter); - err |= mlx5e_handle_feature(netdev, &oper_features, features, - NETIF_F_HW_TC, set_feature_tc_num_filters); - err |= mlx5e_handle_feature(netdev, &oper_features, features, - NETIF_F_RXALL, set_feature_rx_all); - err |= mlx5e_handle_feature(netdev, &oper_features, features, - NETIF_F_RXFCS, set_feature_rx_fcs); - err |= mlx5e_handle_feature(netdev, &oper_features, features, - NETIF_F_HW_VLAN_CTAG_RX, set_feature_rx_vlan); + err |= MLX5E_HANDLE_FEATURE(NETIF_F_HW_TC, set_feature_tc_num_filters); + err |= MLX5E_HANDLE_FEATURE(NETIF_F_RXALL, set_feature_rx_all); + err |= MLX5E_HANDLE_FEATURE(NETIF_F_RXFCS, set_feature_rx_fcs); + err |= MLX5E_HANDLE_FEATURE(NETIF_F_HW_VLAN_CTAG_RX, set_feature_rx_vlan); #ifdef CONFIG_RFS_ACCEL - err |= mlx5e_handle_feature(netdev, &oper_features, features, - NETIF_F_NTUPLE, set_feature_arfs); + err |= MLX5E_HANDLE_FEATURE(NETIF_F_NTUPLE, set_feature_arfs); #endif
if (err) { @@@ -3426,40 -3290,34 +3429,40 @@@ static int mlx5e_change_mtu(struct net_ { struct mlx5e_priv *priv = netdev_priv(netdev); struct mlx5e_channels new_channels = {}; - int curr_mtu; + struct mlx5e_params *params; int err = 0; bool reset;
mutex_lock(&priv->state_lock);
- reset = !priv->channels.params.lro_en && - (priv->channels.params.rq_wq_type != - MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ); + params = &priv->channels.params;
+ reset = !params->lro_en; reset = reset && test_bit(MLX5E_STATE_OPENED, &priv->state);
- curr_mtu = netdev->mtu; - netdev->mtu = new_mtu; + new_channels.params = *params; + new_channels.params.sw_mtu = new_mtu; + + if (params->rq_wq_type != MLX5_WQ_TYPE_LINKED_LIST) { + u8 ppw_old = mlx5e_mpwqe_log_pkts_per_wqe(params); + u8 ppw_new = mlx5e_mpwqe_log_pkts_per_wqe(&new_channels.params); + + reset = reset && (ppw_old != ppw_new); + }
if (!reset) { + params->sw_mtu = new_mtu; mlx5e_set_dev_port_mtu(priv); + netdev->mtu = params->sw_mtu; goto out; }
- new_channels.params = priv->channels.params; err = mlx5e_open_channels(priv, &new_channels); - if (err) { - netdev->mtu = curr_mtu; + if (err) goto out; - }
mlx5e_switch_priv_channels(priv, &new_channels, mlx5e_set_dev_port_mtu); + netdev->mtu = new_channels.params.sw_mtu;
out: mutex_unlock(&priv->state_lock); @@@ -3749,11 -3607,21 +3752,11 @@@ static netdev_features_t mlx5e_features static bool mlx5e_tx_timeout_eq_recover(struct net_device *dev, struct mlx5e_txqsq *sq) { - struct mlx5e_priv *priv = netdev_priv(dev); - struct mlx5_core_dev *mdev = priv->mdev; - int irqn_not_used, eqn; - struct mlx5_eq *eq; + struct mlx5_eq *eq = sq->cq.mcq.eq; u32 eqe_count;
- if (mlx5_vector2eqn(mdev, sq->cq.mcq.vector, &eqn, &irqn_not_used)) - return false; - - eq = mlx5_eqn2eq(mdev, eqn); - if (IS_ERR(eq)) - return false; - netdev_err(dev, "EQ 0x%x: Cons = 0x%x, irqn = 0x%x\n", - eqn, eq->cons_index, eq->irqn); + eq->eqn, eq->cons_index, eq->irqn);
eqe_count = mlx5_eq_poll_irq_disabled(eq); if (!eqe_count) @@@ -3764,19 -3632,13 +3767,19 @@@ return true; }
-static void mlx5e_tx_timeout(struct net_device *dev) +static void mlx5e_tx_timeout_work(struct work_struct *work) { - struct mlx5e_priv *priv = netdev_priv(dev); + struct mlx5e_priv *priv = container_of(work, struct mlx5e_priv, + tx_timeout_work); + struct net_device *dev = priv->netdev; bool reopen_channels = false; - int i; + int i, err;
- netdev_err(dev, "TX timeout detected\n"); + rtnl_lock(); + mutex_lock(&priv->state_lock); + + if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) + goto unlock;
for (i = 0; i < priv->channels.num * priv->channels.params.num_tc; i++) { struct netdev_queue *dev_queue = netdev_get_tx_queue(dev, i); @@@ -3784,9 -3646,7 +3787,9 @@@
if (!netif_xmit_stopped(dev_queue)) continue; - netdev_err(dev, "TX timeout on queue: %d, SQ: 0x%x, CQ: 0x%x, SQ Cons: 0x%x SQ Prod: 0x%x, usecs since last trans: %u\n", + + netdev_err(dev, + "TX timeout on queue: %d, SQ: 0x%x, CQ: 0x%x, SQ Cons: 0x%x SQ Prod: 0x%x, usecs since last trans: %u\n", i, sq->sqn, sq->cq.mcq.cqn, sq->cc, sq->pc, jiffies_to_usecs(jiffies - dev_queue->trans_start));
@@@ -3799,27 -3659,8 +3802,27 @@@ } }
- if (reopen_channels && test_bit(MLX5E_STATE_OPENED, &priv->state)) - schedule_work(&priv->tx_timeout_work); + if (!reopen_channels) + goto unlock; + + mlx5e_close_locked(dev); + err = mlx5e_open_locked(dev); + if (err) + netdev_err(priv->netdev, + "mlx5e_open_locked failed recovering from a tx_timeout, err(%d).\n", + err); + +unlock: + mutex_unlock(&priv->state_lock); + rtnl_unlock(); +} + +static void mlx5e_tx_timeout(struct net_device *dev) +{ + struct mlx5e_priv *priv = netdev_priv(dev); + + netdev_err(dev, "TX timeout detected\n"); + queue_work(priv->wq, &priv->tx_timeout_work); }
static int mlx5e_xdp_set(struct net_device *netdev, struct bpf_prog *prog) @@@ -3869,7 -3710,7 +3872,7 @@@ bpf_prog_put(old_prog);
if (reset) /* change RQ type according to priv->xdp_prog */ - mlx5e_set_rq_params(priv->mdev, &priv->channels.params); + mlx5e_set_rq_type(priv->mdev, &priv->channels.params);
if (was_opened && reset) mlx5e_open_locked(netdev); @@@ -4014,6 -3855,15 +4017,6 @@@ static int mlx5e_check_required_hca_cap return 0; }
-u16 mlx5e_get_max_inline_cap(struct mlx5_core_dev *mdev) -{ - int bf_buf_size = (1 << MLX5_CAP_GEN(mdev, log_bf_reg_size)) / 2; - - return bf_buf_size - - sizeof(struct mlx5e_tx_wqe) + - 2 /*sizeof(mlx5e_tx_wqe.inline_hdr_start)*/; -} - void mlx5e_build_default_indir_rqt(u32 *indirection_rqt, int len, int num_channels) { @@@ -4053,20 -3903,16 +4056,20 @@@ static int mlx5e_get_pci_bw(struct mlx5 return 0; }
-static bool cqe_compress_heuristic(u32 link_speed, u32 pci_bw) +static bool slow_pci_heuristic(struct mlx5_core_dev *mdev) { - return (link_speed && pci_bw && - (pci_bw < 40000) && (pci_bw < link_speed)); -} + u32 link_speed = 0; + u32 pci_bw = 0;
-static bool hw_lro_heuristic(u32 link_speed, u32 pci_bw) -{ - return !(link_speed && pci_bw && - (pci_bw <= 16000) && (pci_bw < link_speed)); + mlx5e_get_max_linkspeed(mdev, &link_speed); + mlx5e_get_pci_bw(mdev, &pci_bw); + mlx5_core_dbg_once(mdev, "Max link speed = %d, PCI BW = %d\n", + link_speed, pci_bw); + +#define MLX5E_SLOW_PCI_RATIO (2) + + return link_speed && pci_bw && + link_speed > MLX5E_SLOW_PCI_RATIO * pci_bw; }
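slow_pci_heuristic() now treats the PCI link as the bottleneck only when the Ethernet link is more than MLX5E_SLOW_PCI_RATIO times faster than the measured PCI bandwidth, and only when both values are known. Worked numbers under assumed example speeds (Mb/s), not measured figures:

#include <stdio.h>
#include <stdbool.h>

#define SLOW_PCI_RATIO 2

static bool slow_pci(unsigned int link_speed, unsigned int pci_bw)
{
	/* unknown (zero) values never trigger the heuristic */
	return link_speed && pci_bw && link_speed > SLOW_PCI_RATIO * pci_bw;
}

int main(void)
{
	struct { unsigned int link, pci; } cases[] = {
		{ 100000, 126000 },   /* 100GbE on a wide PCIe link: not slow */
		{ 100000,  31500 },   /* 100GbE on a narrow PCIe link: slow   */
		{  25000,      0 },   /* PCI bandwidth unknown: not slow      */
	};
	unsigned int i;

	for (i = 0; i < sizeof(cases) / sizeof(cases[0]); i++)
		printf("link %u, pci %u -> %s\n", cases[i].link, cases[i].pci,
		       slow_pci(cases[i].link, cases[i].pci) ? "slow" : "ok");
	return 0;
}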
void mlx5e_set_tx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode) @@@ -4118,7 -3964,7 +4121,7 @@@ void mlx5e_set_rx_cq_mode_params(struc MLX5_CQ_PERIOD_MODE_START_FROM_CQE); }
-u32 mlx5e_choose_lro_timeout(struct mlx5_core_dev *mdev, u32 wanted_timeout) +static u32 mlx5e_choose_lro_timeout(struct mlx5_core_dev *mdev, u32 wanted_timeout) { int i;
@@@ -4132,15 -3978,20 +4135,15 @@@
void mlx5e_build_nic_params(struct mlx5_core_dev *mdev, struct mlx5e_params *params, - u16 max_channels) + u16 max_channels, u16 mtu) { u8 cq_period_mode = 0; - u32 link_speed = 0; - u32 pci_bw = 0;
+ params->sw_mtu = mtu; + params->hard_mtu = MLX5E_ETH_HARD_MTU; params->num_channels = max_channels; params->num_tc = 1;
- mlx5e_get_max_linkspeed(mdev, &link_speed); - mlx5e_get_pci_bw(mdev, &pci_bw); - mlx5_core_dbg(mdev, "Max link speed = %d, PCI BW = %d\n", - link_speed, pci_bw); - /* SQ */ params->log_sq_size = is_kdump_kernel() ? MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE : @@@ -4150,23 -4001,18 +4153,23 @@@ params->rx_cqe_compress_def = false; if (MLX5_CAP_GEN(mdev, cqe_compression) && MLX5_CAP_GEN(mdev, vport_group_manager)) - params->rx_cqe_compress_def = cqe_compress_heuristic(link_speed, pci_bw); + params->rx_cqe_compress_def = slow_pci_heuristic(mdev);
MLX5E_SET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS, params->rx_cqe_compress_def);
/* RQ */ - mlx5e_set_rq_params(mdev, params); + if (mlx5e_striding_rq_possible(mdev, params)) + MLX5E_SET_PFLAG(params, MLX5E_PFLAG_RX_STRIDING_RQ, + !slow_pci_heuristic(mdev)); + mlx5e_set_rq_type(mdev, params); + mlx5e_init_rq_type_params(mdev, params);
/* HW LRO */
/* TODO: && MLX5_CAP_ETH(mdev, lro_cap) */ if (params->rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ) - params->lro_en = hw_lro_heuristic(link_speed, pci_bw); + if (!mlx5e_rx_mpwqe_is_linear_skb(mdev, params)) + params->lro_en = !slow_pci_heuristic(mdev); params->lro_timeout = mlx5e_choose_lro_timeout(mdev, MLX5E_DEFAULT_LRO_TIMEOUT);
/* CQ moderation params */ @@@ -4178,6 -4024,7 +4181,6 @@@ mlx5e_set_tx_cq_mode_params(params, cq_period_mode);
/* TX inline */ - params->tx_max_inline = mlx5e_get_max_inline_cap(mdev); params->tx_min_inline_mode = mlx5e_params_calculate_tx_min_inline(mdev);
/* RSS */ @@@ -4199,9 -4046,9 +4202,9 @@@ static void mlx5e_build_nic_netdev_priv priv->profile = profile; priv->ppriv = ppriv; priv->msglevel = MLX5E_MSG_LEVEL; - priv->hard_mtu = MLX5E_ETH_HARD_MTU;
- mlx5e_build_nic_params(mdev, &priv->channels.params, profile->max_nch(mdev)); + mlx5e_build_nic_params(mdev, &priv->channels.params, + profile->max_nch(mdev), netdev->mtu);
mutex_init(&priv->state_lock);
@@@ -4225,7 -4072,7 +4228,7 @@@ static void mlx5e_set_netdev_dev_addr(s } }
- #if IS_ENABLED(CONFIG_NET_SWITCHDEV) && IS_ENABLED(CONFIG_MLX5_ESWITCH) + #if IS_ENABLED(CONFIG_MLX5_ESWITCH) static const struct switchdev_ops mlx5e_switchdev_ops = { .switchdev_port_attr_get = mlx5e_attr_get, }; @@@ -4260,9 -4107,6 +4263,9 @@@ static void mlx5e_build_nic_netdev(stru netdev->vlan_features |= NETIF_F_RXCSUM; netdev->vlan_features |= NETIF_F_RXHASH;
+ netdev->hw_enc_features |= NETIF_F_HW_VLAN_CTAG_TX; + netdev->hw_enc_features |= NETIF_F_HW_VLAN_CTAG_RX; + if (!!MLX5_CAP_ETH(mdev, lro_cap)) netdev->vlan_features |= NETIF_F_LRO;
@@@ -4334,7 -4178,7 +4337,7 @@@
mlx5e_set_netdev_dev_addr(netdev);
- #if IS_ENABLED(CONFIG_NET_SWITCHDEV) && IS_ENABLED(CONFIG_MLX5_ESWITCH) + #if IS_ENABLED(CONFIG_MLX5_ESWITCH) if (MLX5_VPORT_MANAGER(mdev)) netdev->switchdev_ops = &mlx5e_switchdev_ops; #endif @@@ -4342,7 -4186,7 +4345,7 @@@ mlx5e_ipsec_build_netdev(priv); }
-static void mlx5e_create_q_counter(struct mlx5e_priv *priv) +static void mlx5e_create_q_counters(struct mlx5e_priv *priv) { struct mlx5_core_dev *mdev = priv->mdev; int err; @@@ -4352,21 -4196,14 +4355,21 @@@ mlx5_core_warn(mdev, "alloc queue counter failed, %d\n", err); priv->q_counter = 0; } + + err = mlx5_core_alloc_q_counter(mdev, &priv->drop_rq_q_counter); + if (err) { + mlx5_core_warn(mdev, "alloc drop RQ counter failed, %d\n", err); + priv->drop_rq_q_counter = 0; + } }
-static void mlx5e_destroy_q_counter(struct mlx5e_priv *priv) +static void mlx5e_destroy_q_counters(struct mlx5e_priv *priv) { - if (!priv->q_counter) - return; + if (priv->q_counter) + mlx5_core_dealloc_q_counter(priv->mdev, priv->q_counter);
- mlx5_core_dealloc_q_counter(priv->mdev, priv->q_counter); + if (priv->drop_rq_q_counter) + mlx5_core_dealloc_q_counter(priv->mdev, priv->drop_rq_q_counter); }
static void mlx5e_nic_init(struct mlx5_core_dev *mdev, @@@ -4478,7 -4315,7 +4481,7 @@@ static void mlx5e_nic_enable(struct mlx /* MTU range: 68 - hw-specific max */ netdev->min_mtu = ETH_MIN_MTU; mlx5_query_port_max_mtu(priv->mdev, &max_mtu, 1); - netdev->max_mtu = MLX5E_HW2SW_MTU(priv, max_mtu); + netdev->max_mtu = MLX5E_HW2SW_MTU(&priv->channels.params, max_mtu); mlx5e_set_dev_port_mtu(priv);
mlx5_lag_add(mdev, netdev); @@@ -4493,12 -4330,6 +4496,6 @@@ #ifdef CONFIG_MLX5_CORE_EN_DCB mlx5e_dcbnl_init_app(priv); #endif - /* Device already registered: sync netdev system state */ - if (mlx5e_vxlan_allowed(mdev)) { - rtnl_lock(); - udp_tunnel_get_rx_info(netdev); - rtnl_unlock(); - }
queue_work(priv->wq, &priv->set_rx_mode_work);
@@@ -4605,18 -4436,18 +4602,18 @@@ int mlx5e_attach_netdev(struct mlx5e_pr if (err) goto out;
- err = mlx5e_open_drop_rq(mdev, &priv->drop_rq); + mlx5e_create_q_counters(priv); + + err = mlx5e_open_drop_rq(priv, &priv->drop_rq); if (err) { mlx5_core_err(mdev, "open drop rq failed, %d\n", err); - goto err_cleanup_tx; + goto err_destroy_q_counters; }
err = profile->init_rx(priv); if (err) goto err_close_drop_rq;
- mlx5e_create_q_counter(priv); - if (profile->enable) profile->enable(priv);
@@@ -4625,8 -4456,7 +4622,8 @@@ err_close_drop_rq: mlx5e_close_drop_rq(&priv->drop_rq);
-err_cleanup_tx: +err_destroy_q_counters: + mlx5e_destroy_q_counters(priv); profile->cleanup_tx(priv);
out: @@@ -4643,9 -4473,9 +4640,9 @@@ void mlx5e_detach_netdev(struct mlx5e_p profile->disable(priv); flush_workqueue(priv->wq);
- mlx5e_destroy_q_counter(priv); profile->cleanup_rx(priv); mlx5e_close_drop_rq(&priv->drop_rq); + mlx5e_destroy_q_counters(priv); profile->cleanup_tx(priv); cancel_delayed_work_sync(&priv->update_stats_work); } diff --combined drivers/net/ethernet/mellanox/mlx5/core/en_rep.c index 8e70fa9ef39a,500d817d2b0a..d8f68e4d1018 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c @@@ -44,6 -44,11 +44,11 @@@ #include "en_tc.h" #include "fs_core.h"
+ #define MLX5E_REP_PARAMS_LOG_SQ_SIZE \ + max(0x6, MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE) + #define MLX5E_REP_PARAMS_LOG_RQ_SIZE \ + max(0x6, MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE) + static const char mlx5e_rep_driver_name[] = "mlx5e_rep";
static void mlx5e_rep_get_drvinfo(struct net_device *dev, @@@ -209,7 -214,7 +214,7 @@@ static void mlx5e_sqs2vport_stop(struc
static int mlx5e_sqs2vport_start(struct mlx5_eswitch *esw, struct mlx5_eswitch_rep *rep, - u16 *sqns_array, int sqns_num) + u32 *sqns_array, int sqns_num) { struct mlx5_flow_handle *flow_rule; struct mlx5e_rep_priv *rpriv; @@@ -255,9 -260,9 +260,9 @@@ int mlx5e_add_sqs_fwd_rules(struct mlx5 struct mlx5e_channel *c; int n, tc, num_sqs = 0; int err = -ENOMEM; - u16 *sqs; + u32 *sqs;
- sqs = kcalloc(priv->channels.num * priv->channels.params.num_tc, sizeof(u16), GFP_KERNEL); + sqs = kcalloc(priv->channels.num * priv->channels.params.num_tc, sizeof(*sqs), GFP_KERNEL); if (!sqs) goto out;
@@@ -288,7 -293,7 +293,7 @@@ void mlx5e_remove_sqs_fwd_rules(struct static void mlx5e_rep_neigh_update_init_interval(struct mlx5e_rep_priv *rpriv) { #if IS_ENABLED(CONFIG_IPV6) - unsigned long ipv6_interval = NEIGH_VAR(&ipv6_stub->nd_tbl->parms, + unsigned long ipv6_interval = NEIGH_VAR(&nd_tbl.parms, DELAY_PROBE_TIME); #else unsigned long ipv6_interval = ~0UL; @@@ -424,7 -429,7 +429,7 @@@ static int mlx5e_rep_netevent_event(str case NETEVENT_NEIGH_UPDATE: n = ptr; #if IS_ENABLED(CONFIG_IPV6) - if (n->tbl != ipv6_stub->nd_tbl && n->tbl != &arp_tbl) + if (n->tbl != &nd_tbl && n->tbl != &arp_tbl) #else if (n->tbl != &arp_tbl) #endif @@@ -472,7 -477,7 +477,7 @@@ * done per device delay prob time parameter. */ #if IS_ENABLED(CONFIG_IPV6) - if (!p->dev || (p->tbl != ipv6_stub->nd_tbl && p->tbl != &arp_tbl)) + if (!p->dev || (p->tbl != &nd_tbl && p->tbl != &arp_tbl)) #else if (!p->dev || p->tbl != &arp_tbl) #endif @@@ -668,7 -673,6 +673,6 @@@ static int mlx5e_rep_open(struct net_de struct mlx5e_priv *priv = netdev_priv(dev); struct mlx5e_rep_priv *rpriv = priv->ppriv; struct mlx5_eswitch_rep *rep = rpriv->rep; - struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; int err;
mutex_lock(&priv->state_lock); @@@ -676,8 -680,9 +680,9 @@@ if (err) goto unlock;
- if (!mlx5_eswitch_set_vport_state(esw, rep->vport, - MLX5_ESW_VPORT_ADMIN_STATE_UP)) + if (!mlx5_modify_vport_admin_state(priv->mdev, + MLX5_QUERY_VPORT_STATE_IN_OP_MOD_ESW_VPORT, + rep->vport, MLX5_ESW_VPORT_ADMIN_STATE_UP)) netif_carrier_on(dev);
unlock: @@@ -690,11 -695,12 +695,12 @@@ static int mlx5e_rep_close(struct net_d struct mlx5e_priv *priv = netdev_priv(dev); struct mlx5e_rep_priv *rpriv = priv->ppriv; struct mlx5_eswitch_rep *rep = rpriv->rep; - struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; int ret;
mutex_lock(&priv->state_lock); - (void)mlx5_eswitch_set_vport_state(esw, rep->vport, MLX5_ESW_VPORT_ADMIN_STATE_DOWN); + mlx5_modify_vport_admin_state(priv->mdev, + MLX5_QUERY_VPORT_STATE_IN_OP_MOD_ESW_VPORT, + rep->vport, MLX5_ESW_VPORT_ADMIN_STATE_DOWN); ret = mlx5e_close_locked(dev); mutex_unlock(&priv->state_lock); return ret; @@@ -877,14 -883,14 +883,14 @@@ static void mlx5e_build_rep_params(stru MLX5_CQ_PERIOD_MODE_START_FROM_CQE : MLX5_CQ_PERIOD_MODE_START_FROM_EQE;
+ params->hard_mtu = MLX5E_ETH_HARD_MTU; - params->log_sq_size = MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE; + params->log_sq_size = MLX5E_REP_PARAMS_LOG_SQ_SIZE; params->rq_wq_type = MLX5_WQ_TYPE_LINKED_LIST; - params->log_rq_mtu_frames = MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE; - params->log_rq_size = MLX5E_REP_PARAMS_LOG_RQ_SIZE; ++ params->log_rq_mtu_frames = MLX5E_REP_PARAMS_LOG_RQ_SIZE;
params->rx_dim_enabled = MLX5_CAP_GEN(mdev, cq_moderation); mlx5e_set_rx_cq_mode_params(params, cq_period_mode);
- params->tx_max_inline = mlx5e_get_max_inline_cap(mdev); params->num_tc = 1; params->lro_wqe_sz = MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ;
@@@ -899,9 -905,7 +905,7 @@@ static void mlx5e_build_rep_netdev(stru
netdev->ethtool_ops = &mlx5e_rep_ethtool_ops;
- #ifdef CONFIG_NET_SWITCHDEV netdev->switchdev_ops = &mlx5e_rep_switchdev_ops; - #endif
netdev->features |= NETIF_F_VLAN_CHALLENGED | NETIF_F_HW_TC | NETIF_F_NETNS_LOCAL; netdev->hw_features |= NETIF_F_HW_TC; @@@ -927,6 -931,8 +931,6 @@@ static void mlx5e_init_rep(struct mlx5_
priv->channels.params.num_channels = profile->max_nch(mdev);
- priv->hard_mtu = MLX5E_ETH_HARD_MTU; - mlx5e_build_rep_params(mdev, &priv->channels.params); mlx5e_build_rep_netdev(netdev);
@@@ -1154,15 -1160,6 +1158,15 @@@ mlx5e_vport_rep_unload(struct mlx5_eswi kfree(ppriv); /* mlx5e_rep_priv */ }
+static void *mlx5e_vport_rep_get_proto_dev(struct mlx5_eswitch_rep *rep) +{ + struct mlx5e_rep_priv *rpriv; + + rpriv = mlx5e_rep_to_rep_priv(rep); + + return rpriv->netdev; +} + static void mlx5e_rep_register_vf_vports(struct mlx5e_priv *priv) { struct mlx5_core_dev *mdev = priv->mdev; @@@ -1175,7 -1172,6 +1179,7 @@@
rep_if.load = mlx5e_vport_rep_load; rep_if.unload = mlx5e_vport_rep_unload; + rep_if.get_proto_dev = mlx5e_vport_rep_get_proto_dev; mlx5_eswitch_register_vport_rep(esw, vport, &rep_if, REP_ETH); } } @@@ -1203,7 -1199,6 +1207,7 @@@ void mlx5e_register_vport_reps(struct m
rep_if.load = mlx5e_nic_rep_load; rep_if.unload = mlx5e_nic_rep_unload; + rep_if.get_proto_dev = mlx5e_vport_rep_get_proto_dev; rep_if.priv = rpriv; INIT_LIST_HEAD(&rpriv->vport_sqs_list); mlx5_eswitch_register_vport_rep(esw, 0, &rep_if, REP_ETH); /* UPLINK PF vport*/ diff --combined drivers/net/ethernet/mellanox/mlx5/core/en_tc.c index 3e4a7e81b67f,43234cabf444..4197001f9801 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c @@@ -675,7 -675,6 +675,7 @@@ mlx5e_tc_add_nic_flow(struct mlx5e_pri struct mlx5_flow_destination dest[2] = {}; struct mlx5_flow_act flow_act = { .action = attr->action, + .has_flow_tag = true, .flow_tag = attr->flow_tag, .encap_id = 0, }; @@@ -964,7 -963,7 +964,7 @@@ void mlx5e_tc_update_neigh_used_value(s tbl = &arp_tbl; #if IS_ENABLED(CONFIG_IPV6) else if (m_neigh->family == AF_INET6) - tbl = ipv6_stub->nd_tbl; + tbl = &nd_tbl; #endif else return; @@@ -2530,17 -2529,12 +2530,17 @@@ static int parse_tc_fdb_actions(struct if (tcf_vlan_action(a) == TCA_VLAN_ACT_POP) { attr->action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_POP; } else if (tcf_vlan_action(a) == TCA_VLAN_ACT_PUSH) { - if (tcf_vlan_push_proto(a) != htons(ETH_P_8021Q) || - tcf_vlan_push_prio(a)) - return -EOPNOTSUPP; - attr->action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH; - attr->vlan = tcf_vlan_push_vid(a); + attr->vlan_vid = tcf_vlan_push_vid(a); + if (mlx5_eswitch_vlan_actions_supported(priv->mdev)) { + attr->vlan_prio = tcf_vlan_push_prio(a); + attr->vlan_proto = tcf_vlan_push_proto(a); + if (!attr->vlan_proto) + attr->vlan_proto = htons(ETH_P_8021Q); + } else if (tcf_vlan_push_proto(a) != htons(ETH_P_8021Q) || + tcf_vlan_push_prio(a)) { + return -EOPNOTSUPP; + } } else { /* action is TCA_VLAN_ACT_MODIFY */ return -EOPNOTSUPP; } @@@ -2614,19 -2608,19 +2614,19 @@@ int mlx5e_configure_flower(struct mlx5e if (err != -EAGAIN) flow->flags |= MLX5E_TC_FLOW_OFFLOADED;
+ if (!(flow->flags & MLX5E_TC_FLOW_ESWITCH) || + !(flow->esw_attr->action & MLX5_FLOW_CONTEXT_ACTION_ENCAP)) + kvfree(parse_attr); + err = rhashtable_insert_fast(&tc->ht, &flow->node, tc->ht_params); - if (err) - goto err_del_rule; + if (err) { + mlx5e_tc_del_flow(priv, flow); + kfree(flow); + }
- if (flow->flags & MLX5E_TC_FLOW_ESWITCH && - !(flow->esw_attr->action & MLX5_FLOW_CONTEXT_ACTION_ENCAP)) - kvfree(parse_attr); return err;
- err_del_rule: - mlx5e_tc_del_flow(priv, flow); - err_free: kvfree(parse_attr); kfree(flow); diff --combined drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c index a9ccd974c620,997e24dcb053..1904c0323d39 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c @@@ -1,10 -1,10 +1,10 @@@ /* * drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c - * Copyright (c) 2016-2017 Mellanox Technologies. All rights reserved. + * Copyright (c) 2016-2018 Mellanox Technologies. All rights reserved. * Copyright (c) 2016 Jiri Pirko jiri@mellanox.com * Copyright (c) 2016 Ido Schimmel idosch@mellanox.com * Copyright (c) 2016 Yotam Gigi yotamg@mellanox.com - * Copyright (c) 2017 Petr Machata petrm@mellanox.com + * Copyright (c) 2017-2018 Petr Machata petrm@mellanox.com * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: @@@ -70,7 -70,6 +70,7 @@@ #include "spectrum_mr.h" #include "spectrum_mr_tcam.h" #include "spectrum_router.h" +#include "spectrum_span.h"
struct mlxsw_sp_fib; struct mlxsw_sp_vr; @@@ -467,7 -466,7 +467,7 @@@ struct mlxsw_sp_vr unsigned int rif_count; struct mlxsw_sp_fib *fib4; struct mlxsw_sp_fib *fib6; - struct mlxsw_sp_mr_table *mr4_table; + struct mlxsw_sp_mr_table *mr_table[MLXSW_SP_L3_PROTO_MAX]; };
static const struct rhashtable_params mlxsw_sp_fib_ht_params; @@@ -711,9 -710,7 +711,9 @@@ static void mlxsw_sp_lpm_fini(struct ml
static bool mlxsw_sp_vr_is_used(const struct mlxsw_sp_vr *vr) { - return !!vr->fib4 || !!vr->fib6 || !!vr->mr4_table; + return !!vr->fib4 || !!vr->fib6 || + !!vr->mr_table[MLXSW_SP_L3_PROTO_IPV4] || + !!vr->mr_table[MLXSW_SP_L3_PROTO_IPV6]; }
static struct mlxsw_sp_vr *mlxsw_sp_vr_find_unused(struct mlxsw_sp *mlxsw_sp) @@@ -791,7 -788,7 +791,7 @@@ static struct mlxsw_sp_vr *mlxsw_sp_vr_ u32 tb_id, struct netlink_ext_ack *extack) { - struct mlxsw_sp_mr_table *mr4_table; + struct mlxsw_sp_mr_table *mr4_table, *mr6_table; struct mlxsw_sp_fib *fib4; struct mlxsw_sp_fib *fib6; struct mlxsw_sp_vr *vr; @@@ -799,7 -796,7 +799,7 @@@
vr = mlxsw_sp_vr_find_unused(mlxsw_sp); if (!vr) { - NL_SET_ERR_MSG(extack, "spectrum: Exceeded number of supported virtual routers"); + NL_SET_ERR_MSG_MOD(extack, "Exceeded number of supported virtual routers"); return ERR_PTR(-EBUSY); } fib4 = mlxsw_sp_fib_create(mlxsw_sp, vr, MLXSW_SP_L3_PROTO_IPV4); @@@ -814,25 -811,15 +814,25 @@@ MLXSW_SP_L3_PROTO_IPV4); if (IS_ERR(mr4_table)) { err = PTR_ERR(mr4_table); - goto err_mr_table_create; + goto err_mr4_table_create; } + mr6_table = mlxsw_sp_mr_table_create(mlxsw_sp, vr->id, + MLXSW_SP_L3_PROTO_IPV6); + if (IS_ERR(mr6_table)) { + err = PTR_ERR(mr6_table); + goto err_mr6_table_create; + } + vr->fib4 = fib4; vr->fib6 = fib6; - vr->mr4_table = mr4_table; + vr->mr_table[MLXSW_SP_L3_PROTO_IPV4] = mr4_table; + vr->mr_table[MLXSW_SP_L3_PROTO_IPV6] = mr6_table; vr->tb_id = tb_id; return vr;
-err_mr_table_create: +err_mr6_table_create: + mlxsw_sp_mr_table_destroy(mr4_table); +err_mr4_table_create: mlxsw_sp_fib_destroy(mlxsw_sp, fib6); err_fib6_create: mlxsw_sp_fib_destroy(mlxsw_sp, fib4); @@@ -842,10 -829,8 +842,10 @@@ static void mlxsw_sp_vr_destroy(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_vr *vr) { - mlxsw_sp_mr_table_destroy(vr->mr4_table); - vr->mr4_table = NULL; + mlxsw_sp_mr_table_destroy(vr->mr_table[MLXSW_SP_L3_PROTO_IPV6]); + vr->mr_table[MLXSW_SP_L3_PROTO_IPV6] = NULL; + mlxsw_sp_mr_table_destroy(vr->mr_table[MLXSW_SP_L3_PROTO_IPV4]); + vr->mr_table[MLXSW_SP_L3_PROTO_IPV4] = NULL; mlxsw_sp_fib_destroy(mlxsw_sp, vr->fib6); vr->fib6 = NULL; mlxsw_sp_fib_destroy(mlxsw_sp, vr->fib4); @@@ -868,8 -853,7 +868,8 @@@ static void mlxsw_sp_vr_put(struct mlxs { if (!vr->rif_count && list_empty(&vr->fib4->node_list) && list_empty(&vr->fib6->node_list) && - mlxsw_sp_mr_table_empty(vr->mr4_table)) + mlxsw_sp_mr_table_empty(vr->mr_table[MLXSW_SP_L3_PROTO_IPV4]) && + mlxsw_sp_mr_table_empty(vr->mr_table[MLXSW_SP_L3_PROTO_IPV6])) mlxsw_sp_vr_destroy(mlxsw_sp, vr); }
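The virtual router now carries one multicast routing table per L3 protocol, and mlxsw_sp_vr_create() keeps the usual ladder of error labels: each step that can fail gets a label that tears down everything created before it, in reverse order, with teardown happening in the opposite order in the destroy path. A generic sketch of that create/unwind shape; the resource names are placeholders and a real implementation would store the pointers on success.

#include <stdio.h>
#include <stdlib.h>

static void *create_res(const char *name)
{
	printf("create %s\n", name);
	return malloc(1);
}

static void destroy_res(void *res, const char *name)
{
	printf("destroy %s\n", name);
	free(res);
}

static int vr_create(void)
{
	void *fib4, *fib6, *mr4, *mr6;

	fib4 = create_res("fib4");
	if (!fib4)
		return -1;
	fib6 = create_res("fib6");
	if (!fib6)
		goto err_fib6;
	mr4 = create_res("mr4_table");
	if (!mr4)
		goto err_mr4;
	mr6 = create_res("mr6_table");
	if (!mr6)
		goto err_mr6;
	return 0;        /* sketch only: pointers would be stored here */

err_mr6:
	destroy_res(mr4, "mr4_table");
err_mr4:
	destroy_res(fib6, "fib6");
err_fib6:
	destroy_res(fib4, "fib4");
	return -1;
}

int main(void)
{
	return vr_create() ? EXIT_FAILURE : EXIT_SUCCESS;
}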
@@@ -1040,11 -1024,9 +1040,11 @@@ mlxsw_sp_ipip_entry_alloc(struct mlxsw_ enum mlxsw_sp_ipip_type ipipt, struct net_device *ol_dev) { + const struct mlxsw_sp_ipip_ops *ipip_ops; struct mlxsw_sp_ipip_entry *ipip_entry; struct mlxsw_sp_ipip_entry *ret = NULL;
+ ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipipt]; ipip_entry = kzalloc(sizeof(*ipip_entry), GFP_KERNEL); if (!ipip_entry) return ERR_PTR(-ENOMEM); @@@ -1058,15 -1040,7 +1058,15 @@@
ipip_entry->ipipt = ipipt; ipip_entry->ol_dev = ol_dev; - ipip_entry->parms = mlxsw_sp_ipip_netdev_parms(ol_dev); + + switch (ipip_ops->ul_proto) { + case MLXSW_SP_L3_PROTO_IPV4: + ipip_entry->parms4 = mlxsw_sp_ipip_netdev_parms4(ol_dev); + break; + case MLXSW_SP_L3_PROTO_IPV6: + WARN_ON(1); + break; + }
return ipip_entry;
@@@ -1406,6 -1380,55 +1406,55 @@@ mlxsw_sp_ipip_entry_ol_up_event(struct decap_fib_entry); }
+ static int + mlxsw_sp_rif_ipip_lb_op(struct mlxsw_sp_rif_ipip_lb *lb_rif, + struct mlxsw_sp_vr *ul_vr, bool enable) + { + struct mlxsw_sp_rif_ipip_lb_config lb_cf = lb_rif->lb_config; + struct mlxsw_sp_rif *rif = &lb_rif->common; + struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp; + char ritr_pl[MLXSW_REG_RITR_LEN]; + u32 saddr4; + + switch (lb_cf.ul_protocol) { + case MLXSW_SP_L3_PROTO_IPV4: + saddr4 = be32_to_cpu(lb_cf.saddr.addr4); + mlxsw_reg_ritr_pack(ritr_pl, enable, MLXSW_REG_RITR_LOOPBACK_IF, + rif->rif_index, rif->vr_id, rif->dev->mtu); + mlxsw_reg_ritr_loopback_ipip4_pack(ritr_pl, lb_cf.lb_ipipt, + MLXSW_REG_RITR_LOOPBACK_IPIP_OPTIONS_GRE_KEY_PRESET, + ul_vr->id, saddr4, lb_cf.okey); + break; + + case MLXSW_SP_L3_PROTO_IPV6: + return -EAFNOSUPPORT; + } + + return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl); + } + + static int mlxsw_sp_netdevice_ipip_ol_update_mtu(struct mlxsw_sp *mlxsw_sp, + struct net_device *ol_dev) + { + struct mlxsw_sp_ipip_entry *ipip_entry; + struct mlxsw_sp_rif_ipip_lb *lb_rif; + struct mlxsw_sp_vr *ul_vr; + int err = 0; + + ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev); + if (ipip_entry) { + lb_rif = ipip_entry->ol_lb; + ul_vr = &mlxsw_sp->router->vrs[lb_rif->ul_vr_id]; + err = mlxsw_sp_rif_ipip_lb_op(lb_rif, ul_vr, true); + if (err) + goto out; + lb_rif->common.mtu = ol_dev->mtu; + } + + out: + return err; + } + static void mlxsw_sp_netdevice_ipip_ol_up_event(struct mlxsw_sp *mlxsw_sp, struct net_device *ol_dev) { @@@ -1686,6 -1709,8 +1735,8 @@@ int mlxsw_sp_netdevice_ipip_ol_event(st extack = info->extack; return mlxsw_sp_netdevice_ipip_ol_change_event(mlxsw_sp, ol_dev, extack); + case NETDEV_CHANGEMTU: + return mlxsw_sp_netdevice_ipip_ol_update_mtu(mlxsw_sp, ol_dev); } return 0; } @@@ -2346,8 -2371,6 +2397,8 @@@ static void mlxsw_sp_router_neigh_event read_unlock_bh(&n->lock);
rtnl_lock(); + mlxsw_sp_span_respin(mlxsw_sp); + entry_connected = nud_state & NUD_VALID && !dead; neigh_entry = mlxsw_sp_neigh_entry_lookup(mlxsw_sp, n); if (!entry_connected && !neigh_entry) @@@ -2445,8 -2468,7 +2496,8 @@@ static int mlxsw_sp_router_netevent_eve mlxsw_core_schedule_work(&net_work->work); mlxsw_sp_port_dev_put(mlxsw_sp_port); break; - case NETEVENT_MULTIPATH_HASH_UPDATE: + case NETEVENT_IPV4_MPATH_HASH_UPDATE: + case NETEVENT_IPV6_MPATH_HASH_UPDATE: net = ptr;
if (!net_eq(net, &init_net)) @@@ -5393,20 -5415,10 +5444,20 @@@ static int __mlxsw_sp_router_set_abort_ return 0; }
+static struct mlxsw_sp_mr_table * +mlxsw_sp_router_fibmr_family_to_table(struct mlxsw_sp_vr *vr, int family) +{ + if (family == RTNL_FAMILY_IPMR) + return vr->mr_table[MLXSW_SP_L3_PROTO_IPV4]; + else + return vr->mr_table[MLXSW_SP_L3_PROTO_IPV6]; +} + static int mlxsw_sp_router_fibmr_add(struct mlxsw_sp *mlxsw_sp, struct mfc_entry_notifier_info *men_info, bool replace) { + struct mlxsw_sp_mr_table *mrt; struct mlxsw_sp_vr *vr;
if (mlxsw_sp->router->aborted) @@@ -5416,14 -5428,12 +5467,14 @@@ if (IS_ERR(vr)) return PTR_ERR(vr);
- return mlxsw_sp_mr_route4_add(vr->mr4_table, men_info->mfc, replace); + mrt = mlxsw_sp_router_fibmr_family_to_table(vr, men_info->info.family); + return mlxsw_sp_mr_route_add(mrt, men_info->mfc, replace); }
static void mlxsw_sp_router_fibmr_del(struct mlxsw_sp *mlxsw_sp, struct mfc_entry_notifier_info *men_info) { + struct mlxsw_sp_mr_table *mrt; struct mlxsw_sp_vr *vr;
if (mlxsw_sp->router->aborted) @@@ -5433,8 -5443,7 +5484,8 @@@ if (WARN_ON(!vr)) return;
- mlxsw_sp_mr_route4_del(vr->mr4_table, men_info->mfc); + mrt = mlxsw_sp_router_fibmr_family_to_table(vr, men_info->info.family); + mlxsw_sp_mr_route_del(mrt, men_info->mfc); mlxsw_sp_vr_put(mlxsw_sp, vr); }
@@@ -5442,7 -5451,6 +5493,7 @@@ static in mlxsw_sp_router_fibmr_vif_add(struct mlxsw_sp *mlxsw_sp, struct vif_entry_notifier_info *ven_info) { + struct mlxsw_sp_mr_table *mrt; struct mlxsw_sp_rif *rif; struct mlxsw_sp_vr *vr;
@@@ -5453,9 -5461,8 +5504,9 @@@ if (IS_ERR(vr)) return PTR_ERR(vr);
+ mrt = mlxsw_sp_router_fibmr_family_to_table(vr, ven_info->info.family); rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, ven_info->dev); - return mlxsw_sp_mr_vif_add(vr->mr4_table, ven_info->dev, + return mlxsw_sp_mr_vif_add(mrt, ven_info->dev, ven_info->vif_index, ven_info->vif_flags, rif); } @@@ -5464,7 -5471,6 +5515,7 @@@ static voi mlxsw_sp_router_fibmr_vif_del(struct mlxsw_sp *mlxsw_sp, struct vif_entry_notifier_info *ven_info) { + struct mlxsw_sp_mr_table *mrt; struct mlxsw_sp_vr *vr;
if (mlxsw_sp->router->aborted) @@@ -5474,8 -5480,7 +5525,8 @@@ if (WARN_ON(!vr)) return;
- mlxsw_sp_mr_vif_del(vr->mr4_table, ven_info->vif_index); + mrt = mlxsw_sp_router_fibmr_family_to_table(vr, ven_info->info.family); + mlxsw_sp_mr_vif_del(mrt, ven_info->vif_index); mlxsw_sp_vr_put(mlxsw_sp, vr); }
@@@ -5567,7 -5572,7 +5618,7 @@@ static void mlxsw_sp_vr_fib_flush(struc
static void mlxsw_sp_router_fib_flush(struct mlxsw_sp *mlxsw_sp) { - int i; + int i, j;
for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) { struct mlxsw_sp_vr *vr = &mlxsw_sp->router->vrs[i]; @@@ -5575,8 -5580,7 +5626,8 @@@ if (!mlxsw_sp_vr_is_used(vr)) continue;
- mlxsw_sp_mr_table_flush(vr->mr4_table); + for (j = 0; j < MLXSW_SP_L3_PROTO_MAX; j++) + mlxsw_sp_mr_table_flush(vr->mr_table[j]); mlxsw_sp_vr_fib_flush(mlxsw_sp, vr, MLXSW_SP_L3_PROTO_IPV4);
/* If virtual router was only used for IPv4, then it's no @@@ -5626,8 -5630,6 +5677,8 @@@ static void mlxsw_sp_router_fib4_event_
/* Protect internal structures from changes */ rtnl_lock(); + mlxsw_sp_span_respin(mlxsw_sp); + switch (fib_work->event) { case FIB_EVENT_ENTRY_REPLACE: /* fall through */ case FIB_EVENT_ENTRY_APPEND: /* fall through */ @@@ -5670,8 -5672,6 +5721,8 @@@ static void mlxsw_sp_router_fib6_event_ int err;
rtnl_lock(); + mlxsw_sp_span_respin(mlxsw_sp); + switch (fib_work->event) { case FIB_EVENT_ENTRY_REPLACE: /* fall through */ case FIB_EVENT_ENTRY_ADD: @@@ -5715,11 -5715,11 +5766,11 @@@ static void mlxsw_sp_router_fibmr_event replace); if (err) mlxsw_sp_router_fib_abort(mlxsw_sp); - ipmr_cache_put(fib_work->men_info.mfc); + mr_cache_put(fib_work->men_info.mfc); break; case FIB_EVENT_ENTRY_DEL: mlxsw_sp_router_fibmr_del(mlxsw_sp, &fib_work->men_info); - ipmr_cache_put(fib_work->men_info.mfc); + mr_cache_put(fib_work->men_info.mfc); break; case FIB_EVENT_VIF_ADD: err = mlxsw_sp_router_fibmr_vif_add(mlxsw_sp, @@@ -5799,7 -5799,7 +5850,7 @@@ mlxsw_sp_router_fibmr_event(struct mlxs case FIB_EVENT_ENTRY_ADD: /* fall through */ case FIB_EVENT_ENTRY_DEL: memcpy(&fib_work->men_info, info, sizeof(fib_work->men_info)); - ipmr_cache_hold(fib_work->men_info.mfc); + mr_cache_hold(fib_work->men_info.mfc); break; case FIB_EVENT_VIF_ADD: /* fall through */ case FIB_EVENT_VIF_DEL: @@@ -5841,14 -5841,10 +5892,14 @@@ static int mlxsw_sp_router_fib_rule_eve if (!ipmr_rule_default(rule) && !rule->l3mdev) err = -1; break; + case RTNL_FAMILY_IP6MR: + if (!ip6mr_rule_default(rule) && !rule->l3mdev) + err = -1; + break; }
if (err < 0) - NL_SET_ERR_MSG(extack, "spectrum: FIB rules not supported. Aborting offload"); + NL_SET_ERR_MSG_MOD(extack, "FIB rules not supported. Aborting offload");
return err; } @@@ -5864,8 -5860,7 +5915,8 @@@ static int mlxsw_sp_router_fib_event(st
if (!net_eq(info->net, &init_net) || (info->family != AF_INET && info->family != AF_INET6 && - info->family != RTNL_FAMILY_IPMR)) + info->family != RTNL_FAMILY_IPMR && + info->family != RTNL_FAMILY_IP6MR)) return NOTIFY_DONE;
router = container_of(nb, struct mlxsw_sp_router, fib_nb); @@@ -5895,7 -5890,6 +5946,7 @@@ INIT_WORK(&fib_work->work, mlxsw_sp_router_fib6_event_work); mlxsw_sp_router_fib6_event(fib_work, info); break; + case RTNL_FAMILY_IP6MR: case RTNL_FAMILY_IPMR: INIT_WORK(&fib_work->work, mlxsw_sp_router_fibmr_event_work); mlxsw_sp_router_fibmr_event(fib_work, info); @@@ -6077,7 -6071,7 +6128,7 @@@ mlxsw_sp_rif_create(struct mlxsw_sp *ml struct mlxsw_sp_rif *rif; struct mlxsw_sp_vr *vr; u16 rif_index; - int err; + int i, err;
type = mlxsw_sp_dev_rif_type(mlxsw_sp, params->dev); ops = mlxsw_sp->router->rif_ops_arr[type]; @@@ -6089,7 -6083,7 +6140,7 @@@
err = mlxsw_sp_rif_index_alloc(mlxsw_sp, &rif_index); if (err) { - NL_SET_ERR_MSG(extack, "spectrum: Exceeded number of supported router interfaces"); + NL_SET_ERR_MSG_MOD(extack, "Exceeded number of supported router interfaces"); goto err_rif_index_alloc; }
@@@ -6117,11 -6111,9 +6168,11 @@@ if (err) goto err_configure;
- err = mlxsw_sp_mr_rif_add(vr->mr4_table, rif); - if (err) - goto err_mr_rif_add; + for (i = 0; i < MLXSW_SP_L3_PROTO_MAX; i++) { + err = mlxsw_sp_mr_rif_add(vr->mr_table[i], rif); + if (err) + goto err_mr_rif_add; + }
mlxsw_sp_rif_counters_alloc(rif); mlxsw_sp->router->rifs[rif_index] = rif; @@@ -6129,8 -6121,6 +6180,8 @@@ return rif;
err_mr_rif_add: + for (i--; i >= 0; i--) + mlxsw_sp_mr_rif_del(vr->mr_table[i], rif); ops->deconfigure(rif); err_configure: if (fid) @@@ -6150,15 -6140,13 +6201,15 @@@ void mlxsw_sp_rif_destroy(struct mlxsw_ struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp; struct mlxsw_sp_fid *fid = rif->fid; struct mlxsw_sp_vr *vr; + int i;
mlxsw_sp_router_rif_gone_sync(mlxsw_sp, rif); vr = &mlxsw_sp->router->vrs[rif->vr_id];
mlxsw_sp->router->rifs[rif->rif_index] = NULL; mlxsw_sp_rif_counters_free(rif); - mlxsw_sp_mr_rif_del(vr->mr4_table, rif); + for (i = 0; i < MLXSW_SP_L3_PROTO_MAX; i++) + mlxsw_sp_mr_rif_del(vr->mr_table[i], rif); ops->deconfigure(rif); if (fid) /* Loopback RIFs are not associated with a FID. */ @@@ -6565,16 -6553,13 +6616,16 @@@ int mlxsw_sp_netdevice_router_port_even
if (rif->mtu != dev->mtu) { struct mlxsw_sp_vr *vr; + int i;
/* The RIF is relevant only to its mr_table instance, as unlike * unicast routing, in multicast routing a RIF cannot be shared * between several multicast routing tables. */ vr = &mlxsw_sp->router->vrs[rif->vr_id]; - mlxsw_sp_mr_rif_mtu_update(vr->mr4_table, rif, dev->mtu); + for (i = 0; i < MLXSW_SP_L3_PROTO_MAX; i++) + mlxsw_sp_mr_rif_mtu_update(vr->mr_table[i], + rif, dev->mtu); }
ether_addr_copy(rif->addr, dev->dev_addr); @@@ -6910,33 -6895,6 +6961,6 @@@ mlxsw_sp_rif_ipip_lb_setup(struct mlxsw }
static int - mlxsw_sp_rif_ipip_lb_op(struct mlxsw_sp_rif_ipip_lb *lb_rif, - struct mlxsw_sp_vr *ul_vr, bool enable) - { - struct mlxsw_sp_rif_ipip_lb_config lb_cf = lb_rif->lb_config; - struct mlxsw_sp_rif *rif = &lb_rif->common; - struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp; - char ritr_pl[MLXSW_REG_RITR_LEN]; - u32 saddr4; - - switch (lb_cf.ul_protocol) { - case MLXSW_SP_L3_PROTO_IPV4: - saddr4 = be32_to_cpu(lb_cf.saddr.addr4); - mlxsw_reg_ritr_pack(ritr_pl, enable, MLXSW_REG_RITR_LOOPBACK_IF, - rif->rif_index, rif->vr_id, rif->dev->mtu); - mlxsw_reg_ritr_loopback_ipip4_pack(ritr_pl, lb_cf.lb_ipipt, - MLXSW_REG_RITR_LOOPBACK_IPIP_OPTIONS_GRE_KEY_PRESET, - ul_vr->id, saddr4, lb_cf.okey); - break; - - case MLXSW_SP_L3_PROTO_IPV6: - return -EAFNOSUPPORT; - } - - return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl); - } - - static int mlxsw_sp_rif_ipip_lb_configure(struct mlxsw_sp_rif *rif) { struct mlxsw_sp_rif_ipip_lb *lb_rif = mlxsw_sp_rif_ipip_lb_rif(rif); @@@ -7079,25 -7037,13 +7103,25 @@@ static void mlxsw_sp_mp4_hash_init(cha
static void mlxsw_sp_mp6_hash_init(char *recr2_pl) { + bool only_l3 = !ip6_multipath_hash_policy(&init_net); + mlxsw_sp_mp_hash_header_set(recr2_pl, MLXSW_REG_RECR2_IPV6_EN_NOT_TCP_NOT_UDP); mlxsw_sp_mp_hash_header_set(recr2_pl, MLXSW_REG_RECR2_IPV6_EN_TCP_UDP); mlxsw_reg_recr2_ipv6_sip_enable(recr2_pl); mlxsw_reg_recr2_ipv6_dip_enable(recr2_pl); - mlxsw_sp_mp_hash_field_set(recr2_pl, MLXSW_REG_RECR2_IPV6_FLOW_LABEL); mlxsw_sp_mp_hash_field_set(recr2_pl, MLXSW_REG_RECR2_IPV6_NEXT_HEADER); + if (only_l3) { + mlxsw_sp_mp_hash_field_set(recr2_pl, + MLXSW_REG_RECR2_IPV6_FLOW_LABEL); + } else { + mlxsw_sp_mp_hash_header_set(recr2_pl, + MLXSW_REG_RECR2_TCP_UDP_EN_IPV6); + mlxsw_sp_mp_hash_field_set(recr2_pl, + MLXSW_REG_RECR2_TCP_UDP_SPORT); + mlxsw_sp_mp_hash_field_set(recr2_pl, + MLXSW_REG_RECR2_TCP_UDP_DPORT); + } }
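mlxsw_sp_mp6_hash_init() now honours the IPv6 multipath hash policy: layer-3-only hashing keeps the flow label in the mix, while the layer-4 policy swaps it for the TCP/UDP ports. A toy illustration of what ends up in the ECMP hash tuple under each policy; the struct and field values are made up and are not the RECR2 register layout.

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

struct flow {
	uint32_t saddr_hash, daddr_hash;   /* pre-hashed addresses */
	uint32_t flow_label;
	uint8_t  next_header;
	uint16_t sport, dport;
};

static uint32_t ecmp_hash(const struct flow *f, bool only_l3)
{
	uint32_t h = f->saddr_hash ^ f->daddr_hash ^ f->next_header;

	if (only_l3)
		h ^= f->flow_label;                        /* L3: flow label */
	else
		h ^= (uint32_t)f->sport << 16 | f->dport;  /* L4: ports      */
	return h;
}

int main(void)
{
	struct flow f = {
		.saddr_hash = 0xa1b2c3d4, .daddr_hash = 0x01020304,
		.flow_label = 0x5ea11, .next_header = 6,
		.sport = 49152, .dport = 443,
	};

	printf("L3-only hash: %08x\n", ecmp_hash(&f, true));
	printf("L3+L4  hash:  %08x\n", ecmp_hash(&f, false));
	return 0;
}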
static int mlxsw_sp_mp_hash_init(struct mlxsw_sp *mlxsw_sp) diff --combined drivers/net/ethernet/netronome/nfp/bpf/jit.c index 4b631e26f199,ecd7c33baf3c..29b4e5f8c102 --- a/drivers/net/ethernet/netronome/nfp/bpf/jit.c +++ b/drivers/net/ethernet/netronome/nfp/bpf/jit.c @@@ -74,7 -74,9 +74,9 @@@ nfp_meta_has_prev(struct nfp_prog *nfp_
static void nfp_prog_push(struct nfp_prog *nfp_prog, u64 insn) { - if (nfp_prog->__prog_alloc_len == nfp_prog->prog_len) { + if (nfp_prog->__prog_alloc_len / sizeof(u64) == nfp_prog->prog_len) { + pr_warn("instruction limit reached (%u NFP instructions)\n", + nfp_prog->prog_len); nfp_prog->error = -ENOSPC; return; } @@@ -103,18 -105,23 +105,18 @@@ nfp_prog_confirm_current_offset(struct /* --- Emitters --- */ static void __emit_cmd(struct nfp_prog *nfp_prog, enum cmd_tgt_map op, - u8 mode, u8 xfer, u8 areg, u8 breg, u8 size, bool sync, bool indir) + u8 mode, u8 xfer, u8 areg, u8 breg, u8 size, enum cmd_ctx_swap ctx, + bool indir) { - enum cmd_ctx_swap ctx; u64 insn;
- if (sync) - ctx = CMD_CTX_SWAP; - else - ctx = CMD_CTX_NO_SWAP; - insn = FIELD_PREP(OP_CMD_A_SRC, areg) | FIELD_PREP(OP_CMD_CTX, ctx) | FIELD_PREP(OP_CMD_B_SRC, breg) | FIELD_PREP(OP_CMD_TOKEN, cmd_tgt_act[op].token) | FIELD_PREP(OP_CMD_XFER, xfer) | FIELD_PREP(OP_CMD_CNT, size) | - FIELD_PREP(OP_CMD_SIG, sync) | + FIELD_PREP(OP_CMD_SIG, ctx != CMD_CTX_NO_SWAP) | FIELD_PREP(OP_CMD_TGT_CMD, cmd_tgt_act[op].tgt_cmd) | FIELD_PREP(OP_CMD_INDIR, indir) | FIELD_PREP(OP_CMD_MODE, mode); @@@ -124,7 -131,7 +126,7 @@@
static void emit_cmd_any(struct nfp_prog *nfp_prog, enum cmd_tgt_map op, u8 mode, u8 xfer, - swreg lreg, swreg rreg, u8 size, bool sync, bool indir) + swreg lreg, swreg rreg, u8 size, enum cmd_ctx_swap ctx, bool indir) { struct nfp_insn_re_regs reg; int err; @@@ -145,22 -152,22 +147,22 @@@ return; }
- __emit_cmd(nfp_prog, op, mode, xfer, reg.areg, reg.breg, size, sync, + __emit_cmd(nfp_prog, op, mode, xfer, reg.areg, reg.breg, size, ctx, indir); }
static void emit_cmd(struct nfp_prog *nfp_prog, enum cmd_tgt_map op, u8 mode, u8 xfer, - swreg lreg, swreg rreg, u8 size, bool sync) + swreg lreg, swreg rreg, u8 size, enum cmd_ctx_swap ctx) { - emit_cmd_any(nfp_prog, op, mode, xfer, lreg, rreg, size, sync, false); + emit_cmd_any(nfp_prog, op, mode, xfer, lreg, rreg, size, ctx, false); }
static void emit_cmd_indir(struct nfp_prog *nfp_prog, enum cmd_tgt_map op, u8 mode, u8 xfer, - swreg lreg, swreg rreg, u8 size, bool sync) + swreg lreg, swreg rreg, u8 size, enum cmd_ctx_swap ctx) { - emit_cmd_any(nfp_prog, op, mode, xfer, lreg, rreg, size, sync, true); + emit_cmd_any(nfp_prog, op, mode, xfer, lreg, rreg, size, ctx, true); }
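The hunks above change the command emitters to take an explicit enum cmd_ctx_swap instead of a bool sync, with the signal bit now derived from the chosen mode (FIELD_PREP(OP_CMD_SIG, ctx != CMD_CTX_NO_SWAP)). A boolean could only express "swap" or "don't swap"; the enum also covers the deferred-swap modes that mem_xadd() requests later in this diff. A rough sketch of the mapping, with sketch names rather than the driver's enum values:

	#include <stdbool.h>

	/* Sketch only; the driver's enum cmd_ctx_swap carries the real values. */
	enum ctx_swap_sketch {
		SKETCH_CTX_SWAP,        /* wait for the command signal, swap contexts */
		SKETCH_CTX_SWAP_DEFER1, /* swap after one deferred instruction */
		SKETCH_CTX_SWAP_DEFER2, /* swap after two deferred instructions */
		SKETCH_CTX_NO_SWAP,     /* fire and forget */
	};

	/* What old callers implicitly requested with the bool argument. */
	static enum ctx_swap_sketch old_sync_to_ctx(bool sync)
	{
		return sync ? SKETCH_CTX_SWAP : SKETCH_CTX_NO_SWAP;
	}

	/* The signal bit follows from the mode: only NO_SWAP skips it. */
	static bool ctx_to_sig_bit(enum ctx_swap_sketch ctx)
	{
		return ctx != SKETCH_CTX_NO_SWAP;
	}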
static void @@@ -405,7 -412,7 +407,7 @@@ __emit_lcsr(struct nfp_prog *nfp_prog, FIELD_PREP(OP_LCSR_A_SRC, areg) | FIELD_PREP(OP_LCSR_B_SRC, breg) | FIELD_PREP(OP_LCSR_WRITE, wr) | - FIELD_PREP(OP_LCSR_ADDR, addr) | + FIELD_PREP(OP_LCSR_ADDR, addr / 4) | FIELD_PREP(OP_LCSR_SRC_LMEXTN, src_lmextn) | FIELD_PREP(OP_LCSR_DST_LMEXTN, dst_lmextn);
@@@ -433,16 -440,10 +435,16 @@@ static void emit_csr_wr(struct nfp_pro return; }
- __emit_lcsr(nfp_prog, reg.areg, reg.breg, true, addr / 4, + __emit_lcsr(nfp_prog, reg.areg, reg.breg, true, addr, false, reg.src_lmextn); }
+/* CSR value is read in following immed[gpr, 0] */ +static void __emit_csr_rd(struct nfp_prog *nfp_prog, u16 addr) +{ + __emit_lcsr(nfp_prog, 0, 0, false, addr, false, false); +} + static void emit_nop(struct nfp_prog *nfp_prog) { __emit_immed(nfp_prog, UR_REG_IMM, UR_REG_IMM, 0, 0, 0, 0, 0, 0, 0); @@@ -554,19 -555,6 +556,19 @@@ wrp_reg_subpart(struct nfp_prog *nfp_pr emit_ld_field_any(nfp_prog, dst, mask, src, sc, offset * 8, true); }
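With the hunks above, __emit_lcsr() now receives the CSR address in bytes and performs the divide-by-four itself when filling OP_LCSR_ADDR, so emit_csr_wr() no longer divides at the call site and the new __emit_csr_rd() helper can pass byte addresses directly. A tiny standalone illustration of the encoding (the alignment assumption and the sample value are mine, not the driver's):

	#include <assert.h>
	#include <stdint.h>

	static uint16_t lcsr_addr_field(uint16_t csr_byte_addr)
	{
		assert(csr_byte_addr % 4 == 0);	/* assumes word-aligned CSRs */
		return csr_byte_addr / 4;	/* value placed into OP_LCSR_ADDR */
	}

	int main(void)
	{
		assert(lcsr_addr_field(0x40) == 0x10);	/* illustrative address */
		return 0;
	}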
+/* wrp_reg_or_subpart() - load @field_len bytes from low end of @src, OR the + * result into @dst at byte @offset; the other bits of @dst are unchanged. + */ +static void +wrp_reg_or_subpart(struct nfp_prog *nfp_prog, swreg dst, swreg src, + u8 field_len, u8 offset) +{ + enum shf_sc sc = offset ? SHF_SC_L_SHF : SHF_SC_NONE; + u8 mask = ((1 << field_len) - 1) << offset; + + emit_ld_field(nfp_prog, dst, mask, src, sc, 32 - offset * 8); +} + static void addr40_offset(struct nfp_prog *nfp_prog, u8 src_gpr, swreg offset, swreg *rega, swreg *regb) @@@ -611,7 -599,7 +613,7 @@@ static int nfp_cpp_memcpy(struct nfp_pr /* Memory read from source addr into transfer-in registers. */ emit_cmd_any(nfp_prog, CMD_TGT_READ32_SWAP, src_40bit_addr ? CMD_MODE_40b_BA : CMD_MODE_32b, 0, - src_base, off, xfer_num - 1, true, len > 32); + src_base, off, xfer_num - 1, CMD_CTX_SWAP, len > 32);
/* Move from transfer-in to transfer-out. */ for (i = 0; i < xfer_num; i++) @@@ -623,39 -611,39 +625,39 @@@ /* Use single direct_ref write8. */ emit_cmd(nfp_prog, CMD_TGT_WRITE8_SWAP, CMD_MODE_32b, 0, reg_a(meta->paired_st->dst_reg * 2), off, len - 1, - true); + CMD_CTX_SWAP); } else if (len <= 32 && IS_ALIGNED(len, 4)) { /* Use single direct_ref write32. */ emit_cmd(nfp_prog, CMD_TGT_WRITE32_SWAP, CMD_MODE_32b, 0, reg_a(meta->paired_st->dst_reg * 2), off, xfer_num - 1, - true); + CMD_CTX_SWAP); } else if (len <= 32) { /* Use single indirect_ref write8. */ wrp_immed(nfp_prog, reg_none(), CMD_OVE_LEN | FIELD_PREP(CMD_OV_LEN, len - 1)); emit_cmd_indir(nfp_prog, CMD_TGT_WRITE8_SWAP, CMD_MODE_32b, 0, reg_a(meta->paired_st->dst_reg * 2), off, - len - 1, true); + len - 1, CMD_CTX_SWAP); } else if (IS_ALIGNED(len, 4)) { /* Use single indirect_ref write32. */ wrp_immed(nfp_prog, reg_none(), CMD_OVE_LEN | FIELD_PREP(CMD_OV_LEN, xfer_num - 1)); emit_cmd_indir(nfp_prog, CMD_TGT_WRITE32_SWAP, CMD_MODE_32b, 0, reg_a(meta->paired_st->dst_reg * 2), off, - xfer_num - 1, true); + xfer_num - 1, CMD_CTX_SWAP); } else if (len <= 40) { /* Use one direct_ref write32 to write the first 32-bytes, then * another direct_ref write8 to write the remaining bytes. */ emit_cmd(nfp_prog, CMD_TGT_WRITE32_SWAP, CMD_MODE_32b, 0, reg_a(meta->paired_st->dst_reg * 2), off, 7, - true); + CMD_CTX_SWAP);
off = re_load_imm_any(nfp_prog, meta->paired_st->off + 32, imm_b(nfp_prog)); emit_cmd(nfp_prog, CMD_TGT_WRITE8_SWAP, CMD_MODE_32b, 8, reg_a(meta->paired_st->dst_reg * 2), off, len - 33, - true); + CMD_CTX_SWAP); } else { /* Use one indirect_ref write32 to write 4-bytes aligned length, * then another direct_ref write8 to write the remaining bytes. @@@ -666,12 -654,12 +668,12 @@@ CMD_OVE_LEN | FIELD_PREP(CMD_OV_LEN, xfer_num - 2)); emit_cmd_indir(nfp_prog, CMD_TGT_WRITE32_SWAP, CMD_MODE_32b, 0, reg_a(meta->paired_st->dst_reg * 2), off, - xfer_num - 2, true); + xfer_num - 2, CMD_CTX_SWAP); new_off = meta->paired_st->off + (xfer_num - 1) * 4; off = re_load_imm_any(nfp_prog, new_off, imm_b(nfp_prog)); emit_cmd(nfp_prog, CMD_TGT_WRITE8_SWAP, CMD_MODE_32b, xfer_num - 1, reg_a(meta->paired_st->dst_reg * 2), off, - (len & 0x3) - 1, true); + (len & 0x3) - 1, CMD_CTX_SWAP); }
/* TODO: The following extra load is to make sure data flow be identical @@@ -732,7 -720,7 +734,7 @@@ data_ld(struct nfp_prog *nfp_prog, swre shift = size < 4 ? 4 - size : 0;
emit_cmd(nfp_prog, CMD_TGT_READ8, CMD_MODE_32b, 0, - pptr_reg(nfp_prog), offset, sz - 1, true); + pptr_reg(nfp_prog), offset, sz - 1, CMD_CTX_SWAP);
i = 0; if (shift) @@@ -762,7 -750,7 +764,7 @@@ data_ld_host_order(struct nfp_prog *nfp mask = size < 4 ? GENMASK(size - 1, 0) : 0;
emit_cmd(nfp_prog, CMD_TGT_READ32_SWAP, mode, 0, - lreg, rreg, sz / 4 - 1, true); + lreg, rreg, sz / 4 - 1, CMD_CTX_SWAP);
i = 0; if (mask) @@@ -842,7 -830,7 +844,7 @@@ data_stx_host_order(struct nfp_prog *nf wrp_mov(nfp_prog, reg_xfer(i), reg_a(src_gpr + i));
emit_cmd(nfp_prog, CMD_TGT_WRITE8_SWAP, CMD_MODE_32b, 0, - reg_a(dst_gpr), offset, size - 1, true); + reg_a(dst_gpr), offset, size - 1, CMD_CTX_SWAP);
return 0; } @@@ -856,7 -844,7 +858,7 @@@ data_st_host_order(struct nfp_prog *nfp wrp_immed(nfp_prog, reg_xfer(1), imm >> 32);
emit_cmd(nfp_prog, CMD_TGT_WRITE8_SWAP, CMD_MODE_32b, 0, - reg_a(dst_gpr), offset, size - 1, true); + reg_a(dst_gpr), offset, size - 1, CMD_CTX_SWAP);
return 0; } @@@ -1353,7 -1341,7 +1355,7 @@@ static int adjust_head(struct nfp_prog }
static int -map_lookup_stack(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) +map_call_stack_common(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) { struct bpf_offloaded_map *offmap; struct nfp_bpf_map *nfp_map; @@@ -1367,21 -1355,19 +1369,21 @@@
/* We only have to reload LM0 if the key is not at start of stack */ lm_off = nfp_prog->stack_depth; - lm_off += meta->arg2.var_off.value + meta->arg2.off; - load_lm_ptr = meta->arg2_var_off || lm_off; + lm_off += meta->arg2.reg.var_off.value + meta->arg2.reg.off; + load_lm_ptr = meta->arg2.var_off || lm_off;
/* Set LM0 to start of key */ if (load_lm_ptr) emit_csr_wr(nfp_prog, reg_b(2 * 2), NFP_CSR_ACT_LM_ADDR0); + if (meta->func_id == BPF_FUNC_map_update_elem) + emit_csr_wr(nfp_prog, reg_b(3 * 2), NFP_CSR_ACT_LM_ADDR2);
/* Load map ID into a register, it should actually fit as an immediate * but in case it doesn't deal with it here, not in the delay slots. */ tid = ur_load_imm_any(nfp_prog, nfp_map->tid, imm_a(nfp_prog));
- emit_br_relo(nfp_prog, BR_UNC, BR_OFF_RELO + BPF_FUNC_map_lookup_elem, + emit_br_relo(nfp_prog, BR_UNC, BR_OFF_RELO + meta->func_id, 2, RELO_BR_HELPER); ret_tgt = nfp_prog_current_offset(nfp_prog) + 2;
@@@ -1404,18 -1390,6 +1406,18 @@@ return 0; }
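map_lookup_stack() becomes map_call_stack_common() above: the branch target is now BR_OFF_RELO + meta->func_id, and for updates LM2 is pointed at the value on the stack, so the same code path serves lookup, update and delete. A hypothetical XDP program of the kind this enables (not taken from the patch; bpf_helpers.h and struct bpf_map_def are assumed to come from the usual samples/selftests headers, and whether it actually offloads depends on the firmware advertising the map_update/map_delete helper addresses relocated further down in this diff):

	#include <linux/bpf.h>
	#include "bpf_helpers.h"	/* SEC(), struct bpf_map_def (assumed) */

	struct bpf_map_def SEC("maps") flows = {
		.type		= BPF_MAP_TYPE_HASH,
		.key_size	= sizeof(__u32),
		.value_size	= sizeof(__u64),
		.max_entries	= 256,
	};

	SEC("xdp")
	int prog(struct xdp_md *ctx)
	{
		__u32 key = 0;
		__u64 val = 1;

		if (bpf_map_update_elem(&flows, &key, &val, BPF_ANY))
			return XDP_ABORTED;
		bpf_map_delete_elem(&flows, &key);

		return XDP_PASS;
	}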
+static int +nfp_get_prandom_u32(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) +{ + __emit_csr_rd(nfp_prog, NFP_CSR_PSEUDO_RND_NUM); + /* CSR value is read in following immed[gpr, 0] */ + emit_immed(nfp_prog, reg_both(0), 0, + IMMED_WIDTH_ALL, false, IMMED_SHIFT_0B); + emit_immed(nfp_prog, reg_both(1), 0, + IMMED_WIDTH_ALL, false, IMMED_SHIFT_0B); + return 0; +} + /* --- Callbacks --- */ static int mov_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) { @@@ -1866,128 -1840,6 +1868,128 @@@ mem_ldx_emem(struct nfp_prog *nfp_prog tmp_reg, meta->insn.dst_reg * 2, size); }
+static void +mem_ldx_data_init_pktcache(struct nfp_prog *nfp_prog, + struct nfp_insn_meta *meta) +{ + s16 range_start = meta->pkt_cache.range_start; + s16 range_end = meta->pkt_cache.range_end; + swreg src_base, off; + u8 xfer_num, len; + bool indir; + + off = re_load_imm_any(nfp_prog, range_start, imm_b(nfp_prog)); + src_base = reg_a(meta->insn.src_reg * 2); + len = range_end - range_start; + xfer_num = round_up(len, REG_WIDTH) / REG_WIDTH; + + indir = len > 8 * REG_WIDTH; + /* Setup PREV_ALU for indirect mode. */ + if (indir) + wrp_immed(nfp_prog, reg_none(), + CMD_OVE_LEN | FIELD_PREP(CMD_OV_LEN, xfer_num - 1)); + + /* Cache memory into transfer-in registers. */ + emit_cmd_any(nfp_prog, CMD_TGT_READ32_SWAP, CMD_MODE_32b, 0, src_base, + off, xfer_num - 1, CMD_CTX_SWAP, indir); +} + +static int +mem_ldx_data_from_pktcache_unaligned(struct nfp_prog *nfp_prog, + struct nfp_insn_meta *meta, + unsigned int size) +{ + s16 range_start = meta->pkt_cache.range_start; + s16 insn_off = meta->insn.off - range_start; + swreg dst_lo, dst_hi, src_lo, src_mid; + u8 dst_gpr = meta->insn.dst_reg * 2; + u8 len_lo = size, len_mid = 0; + u8 idx = insn_off / REG_WIDTH; + u8 off = insn_off % REG_WIDTH; + + dst_hi = reg_both(dst_gpr + 1); + dst_lo = reg_both(dst_gpr); + src_lo = reg_xfer(idx); + + /* The read length could involve as many as three registers. */ + if (size > REG_WIDTH - off) { + /* Calculate the part in the second register. */ + len_lo = REG_WIDTH - off; + len_mid = size - len_lo; + + /* Calculate the part in the third register. */ + if (size > 2 * REG_WIDTH - off) + len_mid = REG_WIDTH; + } + + wrp_reg_subpart(nfp_prog, dst_lo, src_lo, len_lo, off); + + if (!len_mid) { + wrp_immed(nfp_prog, dst_hi, 0); + return 0; + } + + src_mid = reg_xfer(idx + 1); + + if (size <= REG_WIDTH) { + wrp_reg_or_subpart(nfp_prog, dst_lo, src_mid, len_mid, len_lo); + wrp_immed(nfp_prog, dst_hi, 0); + } else { + swreg src_hi = reg_xfer(idx + 2); + + wrp_reg_or_subpart(nfp_prog, dst_lo, src_mid, + REG_WIDTH - len_lo, len_lo); + wrp_reg_subpart(nfp_prog, dst_hi, src_mid, len_lo, + REG_WIDTH - len_lo); + wrp_reg_or_subpart(nfp_prog, dst_hi, src_hi, REG_WIDTH - len_lo, + len_lo); + } + + return 0; +} + +static int +mem_ldx_data_from_pktcache_aligned(struct nfp_prog *nfp_prog, + struct nfp_insn_meta *meta, + unsigned int size) +{ + swreg dst_lo, dst_hi, src_lo; + u8 dst_gpr, idx; + + idx = (meta->insn.off - meta->pkt_cache.range_start) / REG_WIDTH; + dst_gpr = meta->insn.dst_reg * 2; + dst_hi = reg_both(dst_gpr + 1); + dst_lo = reg_both(dst_gpr); + src_lo = reg_xfer(idx); + + if (size < REG_WIDTH) { + wrp_reg_subpart(nfp_prog, dst_lo, src_lo, size, 0); + wrp_immed(nfp_prog, dst_hi, 0); + } else if (size == REG_WIDTH) { + wrp_mov(nfp_prog, dst_lo, src_lo); + wrp_immed(nfp_prog, dst_hi, 0); + } else { + swreg src_hi = reg_xfer(idx + 1); + + wrp_mov(nfp_prog, dst_lo, src_lo); + wrp_mov(nfp_prog, dst_hi, src_hi); + } + + return 0; +} + +static int +mem_ldx_data_from_pktcache(struct nfp_prog *nfp_prog, + struct nfp_insn_meta *meta, unsigned int size) +{ + u8 off = meta->insn.off - meta->pkt_cache.range_start; + + if (IS_ALIGNED(off, REG_WIDTH)) + return mem_ldx_data_from_pktcache_aligned(nfp_prog, meta, size); + + return mem_ldx_data_from_pktcache_unaligned(nfp_prog, meta, size); +} + static int mem_ldx(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, unsigned int size) @@@ -2002,16 -1854,8 +2004,16 @@@ return mem_ldx_skb(nfp_prog, meta, size); }
- if (meta->ptr.type == PTR_TO_PACKET) - return mem_ldx_data(nfp_prog, meta, size); + if (meta->ptr.type == PTR_TO_PACKET) { + if (meta->pkt_cache.range_end) { + if (meta->pkt_cache.do_init) + mem_ldx_data_init_pktcache(nfp_prog, meta); + + return mem_ldx_data_from_pktcache(nfp_prog, meta, size); + } else { + return mem_ldx_data(nfp_prog, meta, size); + } + }
if (meta->ptr.type == PTR_TO_STACK) return mem_ldx_stack(nfp_prog, meta, size, @@@ -2140,111 -1984,6 +2142,111 @@@ static int mem_stx8(struct nfp_prog *nf return mem_stx(nfp_prog, meta, 8); }
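The packet-cache path added above (mem_ldx_data_init_pktcache() plus the aligned/unaligned readers) pulls a range of packet bytes into 4-byte transfer registers once, then serves subsequent loads from those registers instead of issuing one memory command per load. A plain-C model of what a cached read returns, with names of my own; it only illustrates the byte arithmetic, not the ld_field/shift sequences the JIT actually emits:

	#include <stdint.h>
	#include <string.h>

	#define REG_WIDTH_SKETCH 4	/* matches REG_WIDTH in the JIT */

	/* 'xfer' models the transfer registers filled by the init step; 'off' is
	 * the byte offset of the load inside the cached range, 'size' is 1/2/4/8.
	 * An unaligned 8-byte read may straddle three registers - the
	 * three-source case in mem_ldx_data_from_pktcache_unaligned(). */
	static uint64_t pktcache_read(const uint32_t *xfer, unsigned int off,
				      unsigned int size)
	{
		unsigned int idx = off / REG_WIDTH_SKETCH;	/* first register */
		unsigned int sub = off % REG_WIDTH_SKETCH;	/* offset inside it */
		uint64_t val = 0;

		memcpy(&val, (const uint8_t *)&xfer[idx] + sub, size);
		return val;	/* host byte order */
	}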
+static int +mem_xadd(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, bool is64) +{ + u8 dst_gpr = meta->insn.dst_reg * 2; + u8 src_gpr = meta->insn.src_reg * 2; + unsigned int full_add, out; + swreg addra, addrb, off; + + off = ur_load_imm_any(nfp_prog, meta->insn.off, imm_b(nfp_prog)); + + /* We can fit 16 bits into command immediate, if we know the immediate + * is guaranteed to either always or never fit into 16 bit we only + * generate code to handle that particular case, otherwise generate + * code for both. + */ + out = nfp_prog_current_offset(nfp_prog); + full_add = nfp_prog_current_offset(nfp_prog); + + if (meta->insn.off) { + out += 2; + full_add += 2; + } + if (meta->xadd_maybe_16bit) { + out += 3; + full_add += 3; + } + if (meta->xadd_over_16bit) + out += 2 + is64; + if (meta->xadd_maybe_16bit && meta->xadd_over_16bit) { + out += 5; + full_add += 5; + } + + /* Generate the branch for choosing add_imm vs add */ + if (meta->xadd_maybe_16bit && meta->xadd_over_16bit) { + swreg max_imm = imm_a(nfp_prog); + + wrp_immed(nfp_prog, max_imm, 0xffff); + emit_alu(nfp_prog, reg_none(), + max_imm, ALU_OP_SUB, reg_b(src_gpr)); + emit_alu(nfp_prog, reg_none(), + reg_imm(0), ALU_OP_SUB_C, reg_b(src_gpr + 1)); + emit_br(nfp_prog, BR_BLO, full_add, meta->insn.off ? 2 : 0); + /* defer for add */ + } + + /* If insn has an offset add to the address */ + if (!meta->insn.off) { + addra = reg_a(dst_gpr); + addrb = reg_b(dst_gpr + 1); + } else { + emit_alu(nfp_prog, imma_a(nfp_prog), + reg_a(dst_gpr), ALU_OP_ADD, off); + emit_alu(nfp_prog, imma_b(nfp_prog), + reg_a(dst_gpr + 1), ALU_OP_ADD_C, reg_imm(0)); + addra = imma_a(nfp_prog); + addrb = imma_b(nfp_prog); + } + + /* Generate the add_imm if 16 bits are possible */ + if (meta->xadd_maybe_16bit) { + swreg prev_alu = imm_a(nfp_prog); + + wrp_immed(nfp_prog, prev_alu, + FIELD_PREP(CMD_OVE_DATA, 2) | + CMD_OVE_LEN | + FIELD_PREP(CMD_OV_LEN, 0x8 | is64 << 2)); + wrp_reg_or_subpart(nfp_prog, prev_alu, reg_b(src_gpr), 2, 2); + emit_cmd_indir(nfp_prog, CMD_TGT_ADD_IMM, CMD_MODE_40b_BA, 0, + addra, addrb, 0, CMD_CTX_NO_SWAP); + + if (meta->xadd_over_16bit) + emit_br(nfp_prog, BR_UNC, out, 0); + } + + if (!nfp_prog_confirm_current_offset(nfp_prog, full_add)) + return -EINVAL; + + /* Generate the add if 16 bits are not guaranteed */ + if (meta->xadd_over_16bit) { + emit_cmd(nfp_prog, CMD_TGT_ADD, CMD_MODE_40b_BA, 0, + addra, addrb, is64 << 2, + is64 ? 
CMD_CTX_SWAP_DEFER2 : CMD_CTX_SWAP_DEFER1); + + wrp_mov(nfp_prog, reg_xfer(0), reg_a(src_gpr)); + if (is64) + wrp_mov(nfp_prog, reg_xfer(1), reg_a(src_gpr + 1)); + } + + if (!nfp_prog_confirm_current_offset(nfp_prog, out)) + return -EINVAL; + + return 0; +} + +static int mem_xadd4(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) +{ + return mem_xadd(nfp_prog, meta, false); +} + +static int mem_xadd8(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) +{ + return mem_xadd(nfp_prog, meta, true); +} + static int jump(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) { emit_br(nfp_prog, BR_UNC, meta->insn.off, 0); @@@ -2446,11 -2185,7 +2448,11 @@@ static int call(struct nfp_prog *nfp_pr case BPF_FUNC_xdp_adjust_head: return adjust_head(nfp_prog, meta); case BPF_FUNC_map_lookup_elem: - return map_lookup_stack(nfp_prog, meta); + case BPF_FUNC_map_update_elem: + case BPF_FUNC_map_delete_elem: + return map_call_stack_common(nfp_prog, meta); + case BPF_FUNC_get_prandom_u32: + return nfp_get_prandom_u32(nfp_prog, meta); default: WARN_ONCE(1, "verifier allowed unsupported function\n"); return -EOPNOTSUPP; @@@ -2510,8 -2245,6 +2512,8 @@@ static const instr_cb_t instr_cb[256] [BPF_STX | BPF_MEM | BPF_H] = mem_stx2, [BPF_STX | BPF_MEM | BPF_W] = mem_stx4, [BPF_STX | BPF_MEM | BPF_DW] = mem_stx8, + [BPF_STX | BPF_XADD | BPF_W] = mem_xadd4, + [BPF_STX | BPF_XADD | BPF_DW] = mem_xadd8, [BPF_ST | BPF_MEM | BPF_B] = mem_st1, [BPF_ST | BPF_MEM | BPF_H] = mem_st2, [BPF_ST | BPF_MEM | BPF_W] = mem_st4, @@@ -2732,6 -2465,8 +2734,8 @@@ static int nfp_translate(struct nfp_pro err = cb(nfp_prog, meta); if (err) return err; + if (nfp_prog->error) + return nfp_prog->error;
nfp_prog->n_translated++; } @@@ -3090,120 -2825,6 +3094,120 @@@ static void nfp_bpf_opt_ldst_gather(str } }
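The mem_xadd4()/mem_xadd8() handlers added above translate BPF_STX | BPF_XADD, emitting either the 16-bit add_imm path, the full add path, or both with a run-time branch, depending on what the verifier learned about the source operand. A hypothetical program of the kind that now JITs (again assuming the usual bpf_helpers.h; whether a given map type can itself be offloaded is a separate question from this hunk):

	#include <linux/bpf.h>
	#include "bpf_helpers.h"	/* SEC(), struct bpf_map_def (assumed) */

	struct bpf_map_def SEC("maps") rx_cnt = {
		.type		= BPF_MAP_TYPE_ARRAY,
		.key_size	= sizeof(__u32),
		.value_size	= sizeof(__u64),
		.max_entries	= 1,
	};

	SEC("xdp")
	int count(struct xdp_md *ctx)
	{
		__u32 key = 0;
		__u64 *cnt;

		cnt = bpf_map_lookup_elem(&rx_cnt, &key);
		if (cnt)
			__sync_fetch_and_add(cnt, 1);	/* emitted as BPF_XADD */

		return XDP_PASS;
	}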
+static void nfp_bpf_opt_pkt_cache(struct nfp_prog *nfp_prog) +{ + struct nfp_insn_meta *meta, *range_node = NULL; + s16 range_start = 0, range_end = 0; + bool cache_avail = false; + struct bpf_insn *insn; + s32 range_ptr_off = 0; + u32 range_ptr_id = 0; + + list_for_each_entry(meta, &nfp_prog->insns, l) { + if (meta->flags & FLAG_INSN_IS_JUMP_DST) + cache_avail = false; + + if (meta->skip) + continue; + + insn = &meta->insn; + + if (is_mbpf_store_pkt(meta) || + insn->code == (BPF_JMP | BPF_CALL) || + is_mbpf_classic_store_pkt(meta) || + is_mbpf_classic_load(meta)) { + cache_avail = false; + continue; + } + + if (!is_mbpf_load(meta)) + continue; + + if (meta->ptr.type != PTR_TO_PACKET || meta->ldst_gather_len) { + cache_avail = false; + continue; + } + + if (!cache_avail) { + cache_avail = true; + if (range_node) + goto end_current_then_start_new; + goto start_new; + } + + /* Check ID to make sure two reads share the same + * variable offset against PTR_TO_PACKET, and check OFF + * to make sure they also share the same constant + * offset. + * + * OFFs don't really need to be the same, because they + * are the constant offsets against PTR_TO_PACKET, so + * for different OFFs, we could canonicalize them to + * offsets against original packet pointer. We don't + * support this. + */ + if (meta->ptr.id == range_ptr_id && + meta->ptr.off == range_ptr_off) { + s16 new_start = range_start; + s16 end, off = insn->off; + s16 new_end = range_end; + bool changed = false; + + if (off < range_start) { + new_start = off; + changed = true; + } + + end = off + BPF_LDST_BYTES(insn); + if (end > range_end) { + new_end = end; + changed = true; + } + + if (!changed) + continue; + + if (new_end - new_start <= 64) { + /* Install new range. */ + range_start = new_start; + range_end = new_end; + continue; + } + } + +end_current_then_start_new: + range_node->pkt_cache.range_start = range_start; + range_node->pkt_cache.range_end = range_end; +start_new: + range_node = meta; + range_node->pkt_cache.do_init = true; + range_ptr_id = range_node->ptr.id; + range_ptr_off = range_node->ptr.off; + range_start = insn->off; + range_end = insn->off + BPF_LDST_BYTES(insn); + } + + if (range_node) { + range_node->pkt_cache.range_start = range_start; + range_node->pkt_cache.range_end = range_end; + } + + list_for_each_entry(meta, &nfp_prog->insns, l) { + if (meta->skip) + continue; + + if (is_mbpf_load_pkt(meta) && !meta->ldst_gather_len) { + if (meta->pkt_cache.do_init) { + range_start = meta->pkt_cache.range_start; + range_end = meta->pkt_cache.range_end; + } else { + meta->pkt_cache.range_start = range_start; + meta->pkt_cache.range_end = range_end; + } + } + } +} + static int nfp_bpf_optimize(struct nfp_prog *nfp_prog) { nfp_bpf_opt_reg_init(nfp_prog); @@@ -3211,7 -2832,6 +3215,7 @@@ nfp_bpf_opt_ld_mask(nfp_prog); nfp_bpf_opt_ld_shift(nfp_prog); nfp_bpf_opt_ldst_gather(nfp_prog); + nfp_bpf_opt_pkt_cache(nfp_prog);
return 0; } @@@ -3336,12 -2956,6 +3340,12 @@@ void *nfp_bpf_relo_for_vnic(struct nfp_ case BPF_FUNC_map_lookup_elem: val = nfp_prog->bpf->helpers.map_lookup; break; + case BPF_FUNC_map_update_elem: + val = nfp_prog->bpf->helpers.map_update; + break; + case BPF_FUNC_map_delete_elem: + val = nfp_prog->bpf->helpers.map_delete; + break; default: pr_err("relocation of unknown helper %d\n", val); diff --combined drivers/net/ethernet/realtek/r8169.c index 630409e0337f,b4779acb6b5c..604ae78381ae --- a/drivers/net/ethernet/realtek/r8169.c +++ b/drivers/net/ethernet/realtek/r8169.c @@@ -99,12 -99,12 +99,12 @@@ static const int multicast_filter_limi #define RTL8169_PHY_TIMEOUT (10*HZ)
/* write/read MMIO register */ -#define RTL_W8(reg, val8) writeb ((val8), ioaddr + (reg)) -#define RTL_W16(reg, val16) writew ((val16), ioaddr + (reg)) -#define RTL_W32(reg, val32) writel ((val32), ioaddr + (reg)) -#define RTL_R8(reg) readb (ioaddr + (reg)) -#define RTL_R16(reg) readw (ioaddr + (reg)) -#define RTL_R32(reg) readl (ioaddr + (reg)) +#define RTL_W8(tp, reg, val8) writeb((val8), tp->mmio_addr + (reg)) +#define RTL_W16(tp, reg, val16) writew((val16), tp->mmio_addr + (reg)) +#define RTL_W32(tp, reg, val32) writel((val32), tp->mmio_addr + (reg)) +#define RTL_R8(tp, reg) readb(tp->mmio_addr + (reg)) +#define RTL_R16(tp, reg) readw(tp->mmio_addr + (reg)) +#define RTL_R32(tp, reg) readl(tp->mmio_addr + (reg))
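The rewritten accessors above take the driver private struct instead of relying on a local ioaddr variable being in scope, which is what lets the hunks below delete the "void __iomem *ioaddr = tp->mmio_addr;" line from dozens of helpers. The before/after shape, using rtl_get_events() from later in this diff as the pattern (the _old/_new suffixes are mine, shown side by side only for illustration):

	/* before: the macro silently used a local 'ioaddr' */
	static u16 rtl_get_events_old(struct rtl8169_private *tp)
	{
		void __iomem *ioaddr = tp->mmio_addr;

		return RTL_R16(IntrStatus);
	}

	/* after: the macro takes 'tp' and reads tp->mmio_addr itself */
	static u16 rtl_get_events_new(struct rtl8169_private *tp)
	{
		return RTL_R16(tp, IntrStatus);
	}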
enum mac_version { RTL_GIGA_MAC_VER_01 = 0, @@@ -735,6 -735,12 +735,6 @@@ struct ring_info u8 __pad[sizeof(void *) - sizeof(u32)]; };
-enum features { - RTL_FEATURE_WOL = (1 << 0), - RTL_FEATURE_MSI = (1 << 1), - RTL_FEATURE_GMII = (1 << 2), -}; - struct rtl8169_counters { __le64 tx_packets; __le64 rx_packets; @@@ -823,7 -829,7 +823,7 @@@ struct rtl8169_private void (*phy_reset_enable)(struct rtl8169_private *tp); void (*hw_start)(struct net_device *); unsigned int (*phy_reset_pending)(struct rtl8169_private *tp); - unsigned int (*link_ok)(void __iomem *); + unsigned int (*link_ok)(struct rtl8169_private *tp); int (*do_ioctl)(struct rtl8169_private *tp, struct mii_ioctl_data *data, int cmd); bool (*tso_csum)(struct rtl8169_private *, struct sk_buff *, u32 *);
@@@ -887,11 -893,6 +887,11 @@@ MODULE_FIRMWARE(FIRMWARE_8168H_2) MODULE_FIRMWARE(FIRMWARE_8107E_1); MODULE_FIRMWARE(FIRMWARE_8107E_2);
+static inline struct device *tp_to_dev(struct rtl8169_private *tp) +{ + return &tp->pci_dev->dev; +} + static void rtl_lock_work(struct rtl8169_private *tp) { mutex_lock(&tp->wk.mutex); @@@ -902,9 -903,9 +902,9 @@@ static void rtl_unlock_work(struct rtl8 mutex_unlock(&tp->wk.mutex); }
-static void rtl_tx_performance_tweak(struct pci_dev *pdev, u16 force) +static void rtl_tx_performance_tweak(struct rtl8169_private *tp, u16 force) { - pcie_capability_clear_and_set_word(pdev, PCI_EXP_DEVCTL, + pcie_capability_clear_and_set_word(tp->pci_dev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_READRQ, force); }
@@@ -983,46 -984,56 +983,46 @@@ static bool rtl_ocp_reg_failure(struct
DECLARE_RTL_COND(rtl_ocp_gphy_cond) { - void __iomem *ioaddr = tp->mmio_addr; - - return RTL_R32(GPHY_OCP) & OCPAR_FLAG; + return RTL_R32(tp, GPHY_OCP) & OCPAR_FLAG; }
static void r8168_phy_ocp_write(struct rtl8169_private *tp, u32 reg, u32 data) { - void __iomem *ioaddr = tp->mmio_addr; - if (rtl_ocp_reg_failure(tp, reg)) return;
- RTL_W32(GPHY_OCP, OCPAR_FLAG | (reg << 15) | data); + RTL_W32(tp, GPHY_OCP, OCPAR_FLAG | (reg << 15) | data);
rtl_udelay_loop_wait_low(tp, &rtl_ocp_gphy_cond, 25, 10); }
static u16 r8168_phy_ocp_read(struct rtl8169_private *tp, u32 reg) { - void __iomem *ioaddr = tp->mmio_addr; - if (rtl_ocp_reg_failure(tp, reg)) return 0;
- RTL_W32(GPHY_OCP, reg << 15); + RTL_W32(tp, GPHY_OCP, reg << 15);
return rtl_udelay_loop_wait_high(tp, &rtl_ocp_gphy_cond, 25, 10) ? - (RTL_R32(GPHY_OCP) & 0xffff) : ~0; + (RTL_R32(tp, GPHY_OCP) & 0xffff) : ~0; }
static void r8168_mac_ocp_write(struct rtl8169_private *tp, u32 reg, u32 data) { - void __iomem *ioaddr = tp->mmio_addr; - if (rtl_ocp_reg_failure(tp, reg)) return;
- RTL_W32(OCPDR, OCPAR_FLAG | (reg << 15) | data); + RTL_W32(tp, OCPDR, OCPAR_FLAG | (reg << 15) | data); }
static u16 r8168_mac_ocp_read(struct rtl8169_private *tp, u32 reg) { - void __iomem *ioaddr = tp->mmio_addr; - if (rtl_ocp_reg_failure(tp, reg)) return 0;
- RTL_W32(OCPDR, reg << 15); + RTL_W32(tp, OCPDR, reg << 15);
- return RTL_R32(OCPDR); + return RTL_R32(tp, OCPDR); }
#define OCP_STD_PHY_BASE 0xa400 @@@ -1065,12 -1076,16 +1065,12 @@@ static int mac_mcu_read(struct rtl8169_
DECLARE_RTL_COND(rtl_phyar_cond) { - void __iomem *ioaddr = tp->mmio_addr; - - return RTL_R32(PHYAR) & 0x80000000; + return RTL_R32(tp, PHYAR) & 0x80000000; }
static void r8169_mdio_write(struct rtl8169_private *tp, int reg, int value) { - void __iomem *ioaddr = tp->mmio_addr; - - RTL_W32(PHYAR, 0x80000000 | (reg & 0x1f) << 16 | (value & 0xffff)); + RTL_W32(tp, PHYAR, 0x80000000 | (reg & 0x1f) << 16 | (value & 0xffff));
rtl_udelay_loop_wait_low(tp, &rtl_phyar_cond, 25, 20); /* @@@ -1082,12 -1097,13 +1082,12 @@@
static int r8169_mdio_read(struct rtl8169_private *tp, int reg) { - void __iomem *ioaddr = tp->mmio_addr; int value;
- RTL_W32(PHYAR, 0x0 | (reg & 0x1f) << 16); + RTL_W32(tp, PHYAR, 0x0 | (reg & 0x1f) << 16);
value = rtl_udelay_loop_wait_high(tp, &rtl_phyar_cond, 25, 20) ? - RTL_R32(PHYAR) & 0xffff : ~0; + RTL_R32(tp, PHYAR) & 0xffff : ~0;
/* * According to hardware specs a 20us delay is required after read @@@ -1100,14 -1116,18 +1100,14 @@@
DECLARE_RTL_COND(rtl_ocpar_cond) { - void __iomem *ioaddr = tp->mmio_addr; - - return RTL_R32(OCPAR) & OCPAR_FLAG; + return RTL_R32(tp, OCPAR) & OCPAR_FLAG; }
static void r8168dp_1_mdio_access(struct rtl8169_private *tp, int reg, u32 data) { - void __iomem *ioaddr = tp->mmio_addr; - - RTL_W32(OCPDR, data | ((reg & OCPDR_REG_MASK) << OCPDR_GPHY_REG_SHIFT)); - RTL_W32(OCPAR, OCPAR_GPHY_WRITE_CMD); - RTL_W32(EPHY_RXER_NUM, 0); + RTL_W32(tp, OCPDR, data | ((reg & OCPDR_REG_MASK) << OCPDR_GPHY_REG_SHIFT)); + RTL_W32(tp, OCPAR, OCPAR_GPHY_WRITE_CMD); + RTL_W32(tp, EPHY_RXER_NUM, 0);
rtl_udelay_loop_wait_low(tp, &rtl_ocpar_cond, 1000, 100); } @@@ -1120,46 -1140,51 +1120,46 @@@ static void r8168dp_1_mdio_write(struc
static int r8168dp_1_mdio_read(struct rtl8169_private *tp, int reg) { - void __iomem *ioaddr = tp->mmio_addr; - r8168dp_1_mdio_access(tp, reg, OCPDR_READ_CMD);
mdelay(1); - RTL_W32(OCPAR, OCPAR_GPHY_READ_CMD); - RTL_W32(EPHY_RXER_NUM, 0); + RTL_W32(tp, OCPAR, OCPAR_GPHY_READ_CMD); + RTL_W32(tp, EPHY_RXER_NUM, 0);
return rtl_udelay_loop_wait_high(tp, &rtl_ocpar_cond, 1000, 100) ? - RTL_R32(OCPDR) & OCPDR_DATA_MASK : ~0; + RTL_R32(tp, OCPDR) & OCPDR_DATA_MASK : ~0; }
#define R8168DP_1_MDIO_ACCESS_BIT 0x00020000
-static void r8168dp_2_mdio_start(void __iomem *ioaddr) +static void r8168dp_2_mdio_start(struct rtl8169_private *tp) { - RTL_W32(0xd0, RTL_R32(0xd0) & ~R8168DP_1_MDIO_ACCESS_BIT); + RTL_W32(tp, 0xd0, RTL_R32(tp, 0xd0) & ~R8168DP_1_MDIO_ACCESS_BIT); }
-static void r8168dp_2_mdio_stop(void __iomem *ioaddr) +static void r8168dp_2_mdio_stop(struct rtl8169_private *tp) { - RTL_W32(0xd0, RTL_R32(0xd0) | R8168DP_1_MDIO_ACCESS_BIT); + RTL_W32(tp, 0xd0, RTL_R32(tp, 0xd0) | R8168DP_1_MDIO_ACCESS_BIT); }
static void r8168dp_2_mdio_write(struct rtl8169_private *tp, int reg, int value) { - void __iomem *ioaddr = tp->mmio_addr; - - r8168dp_2_mdio_start(ioaddr); + r8168dp_2_mdio_start(tp);
r8169_mdio_write(tp, reg, value);
- r8168dp_2_mdio_stop(ioaddr); + r8168dp_2_mdio_stop(tp); }
static int r8168dp_2_mdio_read(struct rtl8169_private *tp, int reg) { - void __iomem *ioaddr = tp->mmio_addr; int value;
- r8168dp_2_mdio_start(ioaddr); + r8168dp_2_mdio_start(tp);
value = r8169_mdio_read(tp, reg);
- r8168dp_2_mdio_stop(ioaddr); + r8168dp_2_mdio_stop(tp);
return value; } @@@ -1204,12 -1229,16 +1204,12 @@@ static int rtl_mdio_read(struct net_dev
DECLARE_RTL_COND(rtl_ephyar_cond) { - void __iomem *ioaddr = tp->mmio_addr; - - return RTL_R32(EPHYAR) & EPHYAR_FLAG; + return RTL_R32(tp, EPHYAR) & EPHYAR_FLAG; }
static void rtl_ephy_write(struct rtl8169_private *tp, int reg_addr, int value) { - void __iomem *ioaddr = tp->mmio_addr; - - RTL_W32(EPHYAR, EPHYAR_WRITE_CMD | (value & EPHYAR_DATA_MASK) | + RTL_W32(tp, EPHYAR, EPHYAR_WRITE_CMD | (value & EPHYAR_DATA_MASK) | (reg_addr & EPHYAR_REG_MASK) << EPHYAR_REG_SHIFT);
rtl_udelay_loop_wait_low(tp, &rtl_ephyar_cond, 10, 100); @@@ -1219,33 -1248,41 +1219,33 @@@
static u16 rtl_ephy_read(struct rtl8169_private *tp, int reg_addr) { - void __iomem *ioaddr = tp->mmio_addr; - - RTL_W32(EPHYAR, (reg_addr & EPHYAR_REG_MASK) << EPHYAR_REG_SHIFT); + RTL_W32(tp, EPHYAR, (reg_addr & EPHYAR_REG_MASK) << EPHYAR_REG_SHIFT);
return rtl_udelay_loop_wait_high(tp, &rtl_ephyar_cond, 10, 100) ? - RTL_R32(EPHYAR) & EPHYAR_DATA_MASK : ~0; + RTL_R32(tp, EPHYAR) & EPHYAR_DATA_MASK : ~0; }
DECLARE_RTL_COND(rtl_eriar_cond) { - void __iomem *ioaddr = tp->mmio_addr; - - return RTL_R32(ERIAR) & ERIAR_FLAG; + return RTL_R32(tp, ERIAR) & ERIAR_FLAG; }
static void rtl_eri_write(struct rtl8169_private *tp, int addr, u32 mask, u32 val, int type) { - void __iomem *ioaddr = tp->mmio_addr; - BUG_ON((addr & 3) || (mask == 0)); - RTL_W32(ERIDR, val); - RTL_W32(ERIAR, ERIAR_WRITE_CMD | type | mask | addr); + RTL_W32(tp, ERIDR, val); + RTL_W32(tp, ERIAR, ERIAR_WRITE_CMD | type | mask | addr);
rtl_udelay_loop_wait_low(tp, &rtl_eriar_cond, 100, 100); }
static u32 rtl_eri_read(struct rtl8169_private *tp, int addr, int type) { - void __iomem *ioaddr = tp->mmio_addr; - - RTL_W32(ERIAR, ERIAR_READ_CMD | type | ERIAR_MASK_1111 | addr); + RTL_W32(tp, ERIAR, ERIAR_READ_CMD | type | ERIAR_MASK_1111 | addr);
return rtl_udelay_loop_wait_high(tp, &rtl_eriar_cond, 100, 100) ? - RTL_R32(ERIDR) : ~0; + RTL_R32(tp, ERIDR) : ~0; }
static void rtl_w0w1_eri(struct rtl8169_private *tp, int addr, u32 mask, u32 p, @@@ -1259,9 -1296,11 +1259,9 @@@
static u32 r8168dp_ocp_read(struct rtl8169_private *tp, u8 mask, u16 reg) { - void __iomem *ioaddr = tp->mmio_addr; - - RTL_W32(OCPAR, ((u32)mask & 0x0f) << 12 | (reg & 0x0fff)); + RTL_W32(tp, OCPAR, ((u32)mask & 0x0f) << 12 | (reg & 0x0fff)); return rtl_udelay_loop_wait_high(tp, &rtl_ocpar_cond, 100, 20) ? - RTL_R32(OCPDR) : ~0; + RTL_R32(tp, OCPDR) : ~0; }
static u32 r8168ep_ocp_read(struct rtl8169_private *tp, u8 mask, u16 reg) @@@ -1289,8 -1328,10 +1289,8 @@@ static u32 ocp_read(struct rtl8169_priv static void r8168dp_ocp_write(struct rtl8169_private *tp, u8 mask, u16 reg, u32 data) { - void __iomem *ioaddr = tp->mmio_addr; - - RTL_W32(OCPDR, data); - RTL_W32(OCPAR, OCPAR_FLAG | ((u32)mask & 0x0f) << 12 | (reg & 0x0fff)); + RTL_W32(tp, OCPDR, data); + RTL_W32(tp, OCPAR, OCPAR_FLAG | ((u32)mask & 0x0f) << 12 | (reg & 0x0fff)); rtl_udelay_loop_wait_low(tp, &rtl_ocpar_cond, 100, 20); }
@@@ -1352,15 -1393,19 +1352,15 @@@ DECLARE_RTL_COND(rtl_ep_ocp_read_cond
DECLARE_RTL_COND(rtl_ocp_tx_cond) { - void __iomem *ioaddr = tp->mmio_addr; - - return RTL_R8(IBISR0) & 0x20; + return RTL_R8(tp, IBISR0) & 0x20; }
static void rtl8168ep_stop_cmac(struct rtl8169_private *tp) { - void __iomem *ioaddr = tp->mmio_addr; - - RTL_W8(IBCR2, RTL_R8(IBCR2) & ~0x01); + RTL_W8(tp, IBCR2, RTL_R8(tp, IBCR2) & ~0x01); rtl_msleep_loop_wait_high(tp, &rtl_ocp_tx_cond, 50, 2000); - RTL_W8(IBISR0, RTL_R8(IBISR0) | 0x20); - RTL_W8(IBCR0, RTL_R8(IBCR0) & ~0x01); + RTL_W8(tp, IBISR0, RTL_R8(tp, IBISR0) | 0x20); + RTL_W8(tp, IBCR0, RTL_R8(tp, IBCR0) & ~0x01); }
static void rtl8168dp_driver_start(struct rtl8169_private *tp) @@@ -1428,19 -1473,19 +1428,19 @@@ static void rtl8168_driver_stop(struct } }
-static int r8168dp_check_dash(struct rtl8169_private *tp) +static bool r8168dp_check_dash(struct rtl8169_private *tp) { u16 reg = rtl8168_get_ocp_reg(tp);
- return (ocp_read(tp, 0x0f, reg) & 0x00008000) ? 1 : 0; + return !!(ocp_read(tp, 0x0f, reg) & 0x00008000); }
-static int r8168ep_check_dash(struct rtl8169_private *tp) +static bool r8168ep_check_dash(struct rtl8169_private *tp) { - return (ocp_read(tp, 0x0f, 0x128) & 0x00000001) ? 1 : 0; + return !!(ocp_read(tp, 0x0f, 0x128) & 0x00000001); }
-static int r8168_check_dash(struct rtl8169_private *tp) +static bool r8168_check_dash(struct rtl8169_private *tp) { switch (tp->mac_version) { case RTL_GIGA_MAC_VER_27: @@@ -1452,7 -1497,7 +1452,7 @@@ case RTL_GIGA_MAC_VER_51: return r8168ep_check_dash(tp); default: - return 0; + return false; } }
@@@ -1473,37 -1518,49 +1473,37 @@@ static void rtl_write_exgmac_batch(stru
DECLARE_RTL_COND(rtl_efusear_cond) { - void __iomem *ioaddr = tp->mmio_addr; - - return RTL_R32(EFUSEAR) & EFUSEAR_FLAG; + return RTL_R32(tp, EFUSEAR) & EFUSEAR_FLAG; }
static u8 rtl8168d_efuse_read(struct rtl8169_private *tp, int reg_addr) { - void __iomem *ioaddr = tp->mmio_addr; - - RTL_W32(EFUSEAR, (reg_addr & EFUSEAR_REG_MASK) << EFUSEAR_REG_SHIFT); + RTL_W32(tp, EFUSEAR, (reg_addr & EFUSEAR_REG_MASK) << EFUSEAR_REG_SHIFT);
return rtl_udelay_loop_wait_high(tp, &rtl_efusear_cond, 100, 300) ? - RTL_R32(EFUSEAR) & EFUSEAR_DATA_MASK : ~0; + RTL_R32(tp, EFUSEAR) & EFUSEAR_DATA_MASK : ~0; }
static u16 rtl_get_events(struct rtl8169_private *tp) { - void __iomem *ioaddr = tp->mmio_addr; - - return RTL_R16(IntrStatus); + return RTL_R16(tp, IntrStatus); }
static void rtl_ack_events(struct rtl8169_private *tp, u16 bits) { - void __iomem *ioaddr = tp->mmio_addr; - - RTL_W16(IntrStatus, bits); + RTL_W16(tp, IntrStatus, bits); mmiowb(); }
static void rtl_irq_disable(struct rtl8169_private *tp) { - void __iomem *ioaddr = tp->mmio_addr; - - RTL_W16(IntrMask, 0); + RTL_W16(tp, IntrMask, 0); mmiowb(); }
static void rtl_irq_enable(struct rtl8169_private *tp, u16 bits) { - void __iomem *ioaddr = tp->mmio_addr; - - RTL_W16(IntrMask, bits); + RTL_W16(tp, IntrMask, bits); }
#define RTL_EVENT_NAPI_RX (RxOK | RxErr) @@@ -1517,14 -1574,18 +1517,14 @@@ static void rtl_irq_enable_all(struct r
static void rtl8169_irq_mask_and_ack(struct rtl8169_private *tp) { - void __iomem *ioaddr = tp->mmio_addr; - rtl_irq_disable(tp); rtl_ack_events(tp, RTL_EVENT_NAPI | tp->event_slow); - RTL_R8(ChipCmd); + RTL_R8(tp, ChipCmd); }
static unsigned int rtl8169_tbi_reset_pending(struct rtl8169_private *tp) { - void __iomem *ioaddr = tp->mmio_addr; - - return RTL_R32(TBICSR) & TBIReset; + return RTL_R32(tp, TBICSR) & TBIReset; }
static unsigned int rtl8169_xmii_reset_pending(struct rtl8169_private *tp) @@@ -1532,19 -1593,21 +1532,19 @@@ return rtl_readphy(tp, MII_BMCR) & BMCR_RESET; }
-static unsigned int rtl8169_tbi_link_ok(void __iomem *ioaddr) +static unsigned int rtl8169_tbi_link_ok(struct rtl8169_private *tp) { - return RTL_R32(TBICSR) & TBILinkOk; + return RTL_R32(tp, TBICSR) & TBILinkOk; }
-static unsigned int rtl8169_xmii_link_ok(void __iomem *ioaddr) +static unsigned int rtl8169_xmii_link_ok(struct rtl8169_private *tp) { - return RTL_R8(PHYstatus) & LinkStatus; + return RTL_R8(tp, PHYstatus) & LinkStatus; }
static void rtl8169_tbi_reset_enable(struct rtl8169_private *tp) { - void __iomem *ioaddr = tp->mmio_addr; - - RTL_W32(TBICSR, RTL_R32(TBICSR) | TBIReset); + RTL_W32(tp, TBICSR, RTL_R32(tp, TBICSR) | TBIReset); }
static void rtl8169_xmii_reset_enable(struct rtl8169_private *tp) @@@ -1557,6 -1620,7 +1557,6 @@@
static void rtl_link_chg_patch(struct rtl8169_private *tp) { - void __iomem *ioaddr = tp->mmio_addr; struct net_device *dev = tp->dev;
if (!netif_running(dev)) @@@ -1564,12 -1628,12 +1564,12 @@@
if (tp->mac_version == RTL_GIGA_MAC_VER_34 || tp->mac_version == RTL_GIGA_MAC_VER_38) { - if (RTL_R8(PHYstatus) & _1000bpsF) { + if (RTL_R8(tp, PHYstatus) & _1000bpsF) { rtl_eri_write(tp, 0x1bc, ERIAR_MASK_1111, 0x00000011, ERIAR_EXGMAC); rtl_eri_write(tp, 0x1dc, ERIAR_MASK_1111, 0x00000005, ERIAR_EXGMAC); - } else if (RTL_R8(PHYstatus) & _100bps) { + } else if (RTL_R8(tp, PHYstatus) & _100bps) { rtl_eri_write(tp, 0x1bc, ERIAR_MASK_1111, 0x0000001f, ERIAR_EXGMAC); rtl_eri_write(tp, 0x1dc, ERIAR_MASK_1111, 0x00000005, @@@ -1587,7 -1651,7 +1587,7 @@@ ERIAR_EXGMAC); } else if (tp->mac_version == RTL_GIGA_MAC_VER_35 || tp->mac_version == RTL_GIGA_MAC_VER_36) { - if (RTL_R8(PHYstatus) & _1000bpsF) { + if (RTL_R8(tp, PHYstatus) & _1000bpsF) { rtl_eri_write(tp, 0x1bc, ERIAR_MASK_1111, 0x00000011, ERIAR_EXGMAC); rtl_eri_write(tp, 0x1dc, ERIAR_MASK_1111, 0x00000005, @@@ -1599,7 -1663,7 +1599,7 @@@ ERIAR_EXGMAC); } } else if (tp->mac_version == RTL_GIGA_MAC_VER_37) { - if (RTL_R8(PHYstatus) & _10bps) { + if (RTL_R8(tp, PHYstatus) & _10bps) { rtl_eri_write(tp, 0x1d0, ERIAR_MASK_0011, 0x4d02, ERIAR_EXGMAC); rtl_eri_write(tp, 0x1dc, ERIAR_MASK_0011, 0x0060, @@@ -1612,21 -1676,20 +1612,21 @@@ }
static void rtl8169_check_link_status(struct net_device *dev, - struct rtl8169_private *tp, - void __iomem *ioaddr) + struct rtl8169_private *tp) { - if (tp->link_ok(ioaddr)) { + struct device *d = tp_to_dev(tp); + + if (tp->link_ok(tp)) { rtl_link_chg_patch(tp); /* This is to cancel a scheduled suspend if there's one. */ - pm_request_resume(&tp->pci_dev->dev); + pm_request_resume(d); netif_carrier_on(dev); if (net_ratelimit()) netif_info(tp, ifup, dev, "link up\n"); } else { netif_carrier_off(dev); netif_info(tp, ifdown, dev, "link down\n"); - pm_runtime_idle(&tp->pci_dev->dev); + pm_runtime_idle(d); } }
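rtl8169_check_link_status() above now takes only dev and tp and leans on runtime PM: a link-up event resumes the device (cancelling any scheduled suspend, as the existing comment notes), while link-down merely asks the PM core to consider idling. A sketch of that pairing in isolation (the helper name is mine; it assumes <linux/pm_runtime.h> is included):

	static void link_event_pm_hint(struct rtl8169_private *tp, bool link_up)
	{
		struct device *d = tp_to_dev(tp);

		if (link_up)
			pm_request_resume(d);	/* cancels a pending suspend */
		else
			pm_runtime_idle(d);	/* may lead to runtime suspend */
	}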
@@@ -1634,14 -1697,15 +1634,14 @@@
static u32 __rtl8169_get_wol(struct rtl8169_private *tp) { - void __iomem *ioaddr = tp->mmio_addr; u8 options; u32 wolopts = 0;
- options = RTL_R8(Config1); + options = RTL_R8(tp, Config1); if (!(options & PMEnable)) return 0;
- options = RTL_R8(Config3); + options = RTL_R8(tp, Config3); if (options & LinkUp) wolopts |= WAKE_PHY; switch (tp->mac_version) { @@@ -1671,7 -1735,7 +1671,7 @@@ break; }
- options = RTL_R8(Config5); + options = RTL_R8(tp, Config5); if (options & UWF) wolopts |= WAKE_UCAST; if (options & BWF) @@@ -1685,7 -1749,7 +1685,7 @@@ static void rtl8169_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) { struct rtl8169_private *tp = netdev_priv(dev); - struct device *d = &tp->pci_dev->dev; + struct device *d = tp_to_dev(tp);
pm_runtime_get_noresume(d);
@@@ -1704,6 -1768,7 +1704,6 @@@
static void __rtl8169_set_wol(struct rtl8169_private *tp, u32 wolopts) { - void __iomem *ioaddr = tp->mmio_addr; unsigned int i, tmp; static const struct { u32 opt; @@@ -1719,7 -1784,7 +1719,7 @@@ }; u8 options;
- RTL_W8(Cfg9346, Cfg9346_Unlock); + RTL_W8(tp, Cfg9346, Cfg9346_Unlock);
switch (tp->mac_version) { case RTL_GIGA_MAC_VER_34: @@@ -1761,39 -1826,43 +1761,39 @@@ }
for (i = 0; i < tmp; i++) { - options = RTL_R8(cfg[i].reg) & ~cfg[i].mask; + options = RTL_R8(tp, cfg[i].reg) & ~cfg[i].mask; if (wolopts & cfg[i].opt) options |= cfg[i].mask; - RTL_W8(cfg[i].reg, options); + RTL_W8(tp, cfg[i].reg, options); }
switch (tp->mac_version) { case RTL_GIGA_MAC_VER_01 ... RTL_GIGA_MAC_VER_17: - options = RTL_R8(Config1) & ~PMEnable; + options = RTL_R8(tp, Config1) & ~PMEnable; if (wolopts) options |= PMEnable; - RTL_W8(Config1, options); + RTL_W8(tp, Config1, options); break; default: - options = RTL_R8(Config2) & ~PME_SIGNAL; + options = RTL_R8(tp, Config2) & ~PME_SIGNAL; if (wolopts) options |= PME_SIGNAL; - RTL_W8(Config2, options); + RTL_W8(tp, Config2, options); break; }
- RTL_W8(Cfg9346, Cfg9346_Lock); + RTL_W8(tp, Cfg9346, Cfg9346_Lock); }
static int rtl8169_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) { struct rtl8169_private *tp = netdev_priv(dev); - struct device *d = &tp->pci_dev->dev; + struct device *d = tp_to_dev(tp);
pm_runtime_get_noresume(d);
rtl_lock_work(tp);
- if (wol->wolopts) - tp->features |= RTL_FEATURE_WOL; - else - tp->features &= ~RTL_FEATURE_WOL; if (pm_runtime_active(d)) __rtl8169_set_wol(tp, wol->wolopts); else @@@ -1801,7 -1870,7 +1801,7 @@@
rtl_unlock_work(tp);
- device_set_wakeup_enable(&tp->pci_dev->dev, wol->wolopts); + device_set_wakeup_enable(d, wol->wolopts);
pm_runtime_put_noidle(d);
@@@ -1837,15 -1906,16 +1837,15 @@@ static int rtl8169_set_speed_tbi(struc u8 autoneg, u16 speed, u8 duplex, u32 ignored) { struct rtl8169_private *tp = netdev_priv(dev); - void __iomem *ioaddr = tp->mmio_addr; int ret = 0; u32 reg;
- reg = RTL_R32(TBICSR); + reg = RTL_R32(tp, TBICSR); if ((autoneg == AUTONEG_DISABLE) && (speed == SPEED_1000) && (duplex == DUPLEX_FULL)) { - RTL_W32(TBICSR, reg & ~(TBINwEnable | TBINwRestart)); + RTL_W32(tp, TBICSR, reg & ~(TBINwEnable | TBINwRestart)); } else if (autoneg == AUTONEG_ENABLE) - RTL_W32(TBICSR, reg | TBINwEnable | TBINwRestart); + RTL_W32(tp, TBICSR, reg | TBINwEnable | TBINwRestart); else { netif_warn(tp, link, dev, "incorrect speed setting refused in TBI mode\n"); @@@ -1970,15 -2040,16 +1970,15 @@@ static void __rtl8169_set_features(stru netdev_features_t features) { struct rtl8169_private *tp = netdev_priv(dev); - void __iomem *ioaddr = tp->mmio_addr; u32 rx_config;
- rx_config = RTL_R32(RxConfig); + rx_config = RTL_R32(tp, RxConfig); if (features & NETIF_F_RXALL) rx_config |= (AcceptErr | AcceptRunt); else rx_config &= ~(AcceptErr | AcceptRunt);
- RTL_W32(RxConfig, rx_config); + RTL_W32(tp, RxConfig, rx_config);
if (features & NETIF_F_RXCSUM) tp->cp_cmd |= RxChkSum; @@@ -1990,10 -2061,10 +1990,10 @@@ else tp->cp_cmd &= ~RxVlan;
- tp->cp_cmd |= RTL_R16(CPlusCmd) & ~(RxVlan | RxChkSum); + tp->cp_cmd |= RTL_R16(tp, CPlusCmd) & ~(RxVlan | RxChkSum);
- RTL_W16(CPlusCmd, tp->cp_cmd); - RTL_R16(CPlusCmd); + RTL_W16(tp, CPlusCmd, tp->cp_cmd); + RTL_R16(tp, CPlusCmd); }
static int rtl8169_set_features(struct net_device *dev, @@@ -2030,6 -2101,7 +2030,6 @@@ static int rtl8169_get_link_ksettings_t struct ethtool_link_ksettings *cmd) { struct rtl8169_private *tp = netdev_priv(dev); - void __iomem *ioaddr = tp->mmio_addr; u32 status; u32 supported, advertising;
@@@ -2037,7 -2109,7 +2037,7 @@@ SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg | SUPPORTED_FIBRE; cmd->base.port = PORT_FIBRE;
- status = RTL_R32(TBICSR); + status = RTL_R32(tp, TBICSR); advertising = (status & TBINwEnable) ? ADVERTISED_Autoneg : 0; cmd->base.autoneg = !!(status & TBINwEnable);
@@@ -2152,20 -2224,23 +2152,20 @@@ static int rtl8169_get_sset_count(struc
DECLARE_RTL_COND(rtl_counters_cond) { - void __iomem *ioaddr = tp->mmio_addr; - - return RTL_R32(CounterAddrLow) & (CounterReset | CounterDump); + return RTL_R32(tp, CounterAddrLow) & (CounterReset | CounterDump); }
static bool rtl8169_do_counters(struct net_device *dev, u32 counter_cmd) { struct rtl8169_private *tp = netdev_priv(dev); - void __iomem *ioaddr = tp->mmio_addr; dma_addr_t paddr = tp->counters_phys_addr; u32 cmd;
- RTL_W32(CounterAddrHigh, (u64)paddr >> 32); - RTL_R32(CounterAddrHigh); + RTL_W32(tp, CounterAddrHigh, (u64)paddr >> 32); + RTL_R32(tp, CounterAddrHigh); cmd = (u64)paddr & DMA_BIT_MASK(32); - RTL_W32(CounterAddrLow, cmd); - RTL_W32(CounterAddrLow, cmd | counter_cmd); + RTL_W32(tp, CounterAddrLow, cmd); + RTL_W32(tp, CounterAddrLow, cmd | counter_cmd);
return rtl_udelay_loop_wait_low(tp, &rtl_counters_cond, 10, 1000); } @@@ -2187,12 -2262,13 +2187,12 @@@ static bool rtl8169_reset_counters(stru static bool rtl8169_update_counters(struct net_device *dev) { struct rtl8169_private *tp = netdev_priv(dev); - void __iomem *ioaddr = tp->mmio_addr;
/* * Some chips are unable to dump tally counters when the receiver * is disabled. */ - if ((RTL_R8(ChipCmd) & CmdRxEnb) == 0) + if ((RTL_R8(tp, ChipCmd) & CmdRxEnb) == 0) return true;
return rtl8169_do_counters(dev, CounterDump); @@@ -2241,7 -2317,7 +2241,7 @@@ static void rtl8169_get_ethtool_stats(s struct ethtool_stats *stats, u64 *data) { struct rtl8169_private *tp = netdev_priv(dev); - struct device *d = &tp->pci_dev->dev; + struct device *d = tp_to_dev(tp); struct rtl8169_counters *counters = tp->counters;
ASSERT_RTNL(); @@@ -2372,6 -2448,7 +2372,6 @@@ static const struct rtl_coalesce_info * static int rtl_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec) { struct rtl8169_private *tp = netdev_priv(dev); - void __iomem *ioaddr = tp->mmio_addr; const struct rtl_coalesce_info *ci; const struct rtl_coalesce_scale *scale; struct { @@@ -2391,10 -2468,10 +2391,10 @@@ if (IS_ERR(ci)) return PTR_ERR(ci);
- scale = &ci->scalev[RTL_R16(CPlusCmd) & 3]; + scale = &ci->scalev[RTL_R16(tp, CPlusCmd) & 3];
/* read IntrMitigate and adjust according to scale */ - for (w = RTL_R16(IntrMitigate); w; w >>= RTL_COALESCE_SHIFT, p++) { + for (w = RTL_R16(tp, IntrMitigate); w; w >>= RTL_COALESCE_SHIFT, p++) { *p->max_frames = (w & RTL_COALESCE_MASK) << 2; w >>= RTL_COALESCE_SHIFT; *p->usecs = w & RTL_COALESCE_MASK; @@@ -2441,6 -2518,7 +2441,6 @@@ static const struct rtl_coalesce_scale static int rtl_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec) { struct rtl8169_private *tp = netdev_priv(dev); - void __iomem *ioaddr = tp->mmio_addr; const struct rtl_coalesce_scale *scale; struct { u32 frames; @@@ -2488,11 -2566,11 +2488,11 @@@
rtl_lock_work(tp);
- RTL_W16(IntrMitigate, swab16(w)); + RTL_W16(tp, IntrMitigate, swab16(w));
tp->cp_cmd = (tp->cp_cmd & ~3) | cp01; - RTL_W16(CPlusCmd, tp->cp_cmd); - RTL_R16(CPlusCmd); + RTL_W16(tp, CPlusCmd, tp->cp_cmd); + RTL_R16(tp, CPlusCmd);
rtl_unlock_work(tp);
@@@ -2522,16 -2600,17 +2522,16 @@@ static const struct ethtool_ops rtl8169 static void rtl8169_get_mac_version(struct rtl8169_private *tp, struct net_device *dev, u8 default_version) { - void __iomem *ioaddr = tp->mmio_addr; /* * The driver currently handles the 8168Bf and the 8168Be identically * but they can be identified more specifically through the test below * if needed: * - * (RTL_R32(TxConfig) & 0x700000) == 0x500000 ? 8168Bf : 8168Be + * (RTL_R32(tp, TxConfig) & 0x700000) == 0x500000 ? 8168Bf : 8168Be * * Same thing for the 8101Eb and the 8101Ec: * - * (RTL_R32(TxConfig) & 0x700000) == 0x200000 ? 8101Eb : 8101Ec + * (RTL_R32(tp, TxConfig) & 0x700000) == 0x200000 ? 8101Eb : 8101Ec */ static const struct rtl_mac_info { u32 mask; @@@ -2629,7 -2708,7 +2629,7 @@@ const struct rtl_mac_info *p = mac_info; u32 reg;
- reg = RTL_R32(TxConfig); + reg = RTL_R32(tp, TxConfig); while ((reg & p->mask) != p->val) p++; tp->mac_version = p->mac_version; @@@ -3726,6 -3805,8 +3726,6 @@@ static void rtl8168e_2_hw_phy_config(st rtl_writephy(tp, 0x1f, 0x0005); rtl_w0w1_phy(tp, 0x01, 0x0100, 0x0000); rtl_writephy(tp, 0x1f, 0x0000); - /* soft-reset phy */ - rtl_writephy(tp, MII_BMCR, BMCR_RESET | BMCR_ANENABLE | BMCR_ANRESTART);
/* Broken BIOS workaround: feed GigaMAC registers with MAC address. */ rtl_rar_exgmac_set(tp, tp->dev->dev_addr); @@@ -4510,6 -4591,7 +4510,6 @@@ static void rtl_hw_phy_config(struct ne static void rtl_phy_work(struct rtl8169_private *tp) { struct timer_list *timer = &tp->timer; - void __iomem *ioaddr = tp->mmio_addr; unsigned long timeout = RTL8169_PHY_TIMEOUT;
assert(tp->mac_version > RTL_GIGA_MAC_VER_01); @@@ -4523,7 -4605,7 +4523,7 @@@ goto out_mod_timer; }
- if (tp->link_ok(ioaddr)) + if (tp->link_ok(tp)) return;
netif_dbg(tp, link, tp->dev, "PHY reset until link up\n"); @@@ -4561,17 -4643,21 +4561,17 @@@ static void rtl8169_phy_reset(struct ne
static bool rtl_tbi_enabled(struct rtl8169_private *tp) { - void __iomem *ioaddr = tp->mmio_addr; - return (tp->mac_version == RTL_GIGA_MAC_VER_01) && - (RTL_R8(PHYstatus) & TBI_Enable); + (RTL_R8(tp, PHYstatus) & TBI_Enable); }
static void rtl8169_init_phy(struct net_device *dev, struct rtl8169_private *tp) { - void __iomem *ioaddr = tp->mmio_addr; - rtl_hw_phy_config(dev);
if (tp->mac_version <= RTL_GIGA_MAC_VER_06) { dprintk("Set MAC Reg C+CR Offset 0x82h = 0x01h\n"); - RTL_W8(0x82, 0x01); + RTL_W8(tp, 0x82, 0x01); }
pci_write_config_byte(tp->pci_dev, PCI_LATENCY_TIMER, 0x40); @@@ -4581,7 -4667,7 +4581,7 @@@
if (tp->mac_version == RTL_GIGA_MAC_VER_02) { dprintk("Set MAC Reg C+CR Offset 0x82h = 0x01h\n"); - RTL_W8(0x82, 0x01); + RTL_W8(tp, 0x82, 0x01); dprintk("Set PHY Reg 0x0bh = 0x00h\n"); rtl_writephy(tp, 0x0b, 0x0000); //w 0x0b 15 0 0 } @@@ -4601,20 -4687,22 +4601,20 @@@
static void rtl_rar_set(struct rtl8169_private *tp, u8 *addr) { - void __iomem *ioaddr = tp->mmio_addr; - rtl_lock_work(tp);
- RTL_W8(Cfg9346, Cfg9346_Unlock); + RTL_W8(tp, Cfg9346, Cfg9346_Unlock);
- RTL_W32(MAC4, addr[4] | addr[5] << 8); - RTL_R32(MAC4); + RTL_W32(tp, MAC4, addr[4] | addr[5] << 8); + RTL_R32(tp, MAC4);
- RTL_W32(MAC0, addr[0] | addr[1] << 8 | addr[2] << 16 | addr[3] << 24); - RTL_R32(MAC0); + RTL_W32(tp, MAC0, addr[0] | addr[1] << 8 | addr[2] << 16 | addr[3] << 24); + RTL_R32(tp, MAC0);
if (tp->mac_version == RTL_GIGA_MAC_VER_34) rtl_rar_exgmac_set(tp, addr);
- RTL_W8(Cfg9346, Cfg9346_Lock); + RTL_W8(tp, Cfg9346, Cfg9346_Lock);
rtl_unlock_work(tp); } @@@ -4622,12 -4710,13 +4622,12 @@@ static int rtl_set_mac_address(struct net_device *dev, void *p) { struct rtl8169_private *tp = netdev_priv(dev); - struct device *d = &tp->pci_dev->dev; - struct sockaddr *addr = p; - - if (!is_valid_ether_addr(addr->sa_data)) - return -EADDRNOTAVAIL; + struct device *d = tp_to_dev(tp); + int ret;
- memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); + ret = eth_mac_addr(dev, p); + if (ret) + return ret;
pm_runtime_get_noresume(d);
@@@ -4733,6 -4822,8 +4733,6 @@@ static void rtl_speed_down(struct rtl81
static void rtl_wol_suspend_quirk(struct rtl8169_private *tp) { - void __iomem *ioaddr = tp->mmio_addr; - switch (tp->mac_version) { case RTL_GIGA_MAC_VER_25: case RTL_GIGA_MAC_VER_26: @@@ -4756,7 -4847,7 +4756,7 @@@ case RTL_GIGA_MAC_VER_49: case RTL_GIGA_MAC_VER_50: case RTL_GIGA_MAC_VER_51: - RTL_W32(RxConfig, RTL_R32(RxConfig) | + RTL_W32(tp, RxConfig, RTL_R32(tp, RxConfig) | AcceptBroadcast | AcceptMulticast | AcceptMyPhys); break; default: @@@ -4789,6 -4880,8 +4789,6 @@@ static void r810x_phy_power_up(struct r
static void r810x_pll_power_down(struct rtl8169_private *tp) { - void __iomem *ioaddr = tp->mmio_addr; - if (rtl_wol_pll_power_down(tp)) return;
@@@ -4803,13 -4896,15 +4803,13 @@@ case RTL_GIGA_MAC_VER_16: break; default: - RTL_W8(PMCH, RTL_R8(PMCH) & ~0x80); + RTL_W8(tp, PMCH, RTL_R8(tp, PMCH) & ~0x80); break; } }
static void r810x_pll_power_up(struct rtl8169_private *tp) { - void __iomem *ioaddr = tp->mmio_addr; - r810x_phy_power_up(tp);
switch (tp->mac_version) { @@@ -4822,10 -4917,10 +4822,10 @@@ break; case RTL_GIGA_MAC_VER_47: case RTL_GIGA_MAC_VER_48: - RTL_W8(PMCH, RTL_R8(PMCH) | 0xc0); + RTL_W8(tp, PMCH, RTL_R8(tp, PMCH) | 0xc0); break; default: - RTL_W8(PMCH, RTL_R8(PMCH) | 0x80); + RTL_W8(tp, PMCH, RTL_R8(tp, PMCH) | 0x80); break; } } @@@ -4892,12 -4987,21 +4892,12 @@@ static void r8168_phy_power_down(struc
static void r8168_pll_power_down(struct rtl8169_private *tp) { - void __iomem *ioaddr = tp->mmio_addr; - - if ((tp->mac_version == RTL_GIGA_MAC_VER_27 || - tp->mac_version == RTL_GIGA_MAC_VER_28 || - tp->mac_version == RTL_GIGA_MAC_VER_31 || - tp->mac_version == RTL_GIGA_MAC_VER_49 || - tp->mac_version == RTL_GIGA_MAC_VER_50 || - tp->mac_version == RTL_GIGA_MAC_VER_51) && - r8168_check_dash(tp)) { + if (r8168_check_dash(tp)) return; - }
if ((tp->mac_version == RTL_GIGA_MAC_VER_23 || tp->mac_version == RTL_GIGA_MAC_VER_24) && - (RTL_R16(CPlusCmd) & ASF)) { + (RTL_R16(tp, CPlusCmd) & ASF)) { return; }
@@@ -4923,20 -5027,22 +4923,20 @@@ case RTL_GIGA_MAC_VER_46: case RTL_GIGA_MAC_VER_50: case RTL_GIGA_MAC_VER_51: - RTL_W8(PMCH, RTL_R8(PMCH) & ~0x80); + RTL_W8(tp, PMCH, RTL_R8(tp, PMCH) & ~0x80); break; case RTL_GIGA_MAC_VER_40: case RTL_GIGA_MAC_VER_41: case RTL_GIGA_MAC_VER_49: rtl_w0w1_eri(tp, 0x1a8, ERIAR_MASK_1111, 0x00000000, 0xfc000000, ERIAR_EXGMAC); - RTL_W8(PMCH, RTL_R8(PMCH) & ~0x80); + RTL_W8(tp, PMCH, RTL_R8(tp, PMCH) & ~0x80); break; } }
static void r8168_pll_power_up(struct rtl8169_private *tp) { - void __iomem *ioaddr = tp->mmio_addr; - switch (tp->mac_version) { case RTL_GIGA_MAC_VER_25: case RTL_GIGA_MAC_VER_26: @@@ -4945,19 -5051,19 +4945,19 @@@ case RTL_GIGA_MAC_VER_31: case RTL_GIGA_MAC_VER_32: case RTL_GIGA_MAC_VER_33: - RTL_W8(PMCH, RTL_R8(PMCH) | 0x80); + RTL_W8(tp, PMCH, RTL_R8(tp, PMCH) | 0x80); break; case RTL_GIGA_MAC_VER_44: case RTL_GIGA_MAC_VER_45: case RTL_GIGA_MAC_VER_46: case RTL_GIGA_MAC_VER_50: case RTL_GIGA_MAC_VER_51: - RTL_W8(PMCH, RTL_R8(PMCH) | 0xc0); + RTL_W8(tp, PMCH, RTL_R8(tp, PMCH) | 0xc0); break; case RTL_GIGA_MAC_VER_40: case RTL_GIGA_MAC_VER_41: case RTL_GIGA_MAC_VER_49: - RTL_W8(PMCH, RTL_R8(PMCH) | 0xc0); + RTL_W8(tp, PMCH, RTL_R8(tp, PMCH) | 0xc0); rtl_w0w1_eri(tp, 0x1a8, ERIAR_MASK_1111, 0xfc000000, 0x00000000, ERIAR_EXGMAC); break; @@@ -5047,6 -5153,8 +5047,6 @@@ static void rtl_init_pll_power_ops(stru
static void rtl_init_rxcfg(struct rtl8169_private *tp) { - void __iomem *ioaddr = tp->mmio_addr; - switch (tp->mac_version) { case RTL_GIGA_MAC_VER_01: case RTL_GIGA_MAC_VER_02: @@@ -5062,7 -5170,7 +5062,7 @@@ case RTL_GIGA_MAC_VER_15: case RTL_GIGA_MAC_VER_16: case RTL_GIGA_MAC_VER_17: - RTL_W32(RxConfig, RX_FIFO_THRESH | RX_DMA_BURST); + RTL_W32(tp, RxConfig, RX_FIFO_THRESH | RX_DMA_BURST); break; case RTL_GIGA_MAC_VER_18: case RTL_GIGA_MAC_VER_19: @@@ -5073,7 -5181,7 +5073,7 @@@ case RTL_GIGA_MAC_VER_24: case RTL_GIGA_MAC_VER_34: case RTL_GIGA_MAC_VER_35: - RTL_W32(RxConfig, RX128_INT_EN | RX_MULTI_EN | RX_DMA_BURST); + RTL_W32(tp, RxConfig, RX128_INT_EN | RX_MULTI_EN | RX_DMA_BURST); break; case RTL_GIGA_MAC_VER_40: case RTL_GIGA_MAC_VER_41: @@@ -5087,10 -5195,10 +5087,10 @@@ case RTL_GIGA_MAC_VER_49: case RTL_GIGA_MAC_VER_50: case RTL_GIGA_MAC_VER_51: - RTL_W32(RxConfig, RX128_INT_EN | RX_MULTI_EN | RX_DMA_BURST | RX_EARLY_OFF); + RTL_W32(tp, RxConfig, RX128_INT_EN | RX_MULTI_EN | RX_DMA_BURST | RX_EARLY_OFF); break; default: - RTL_W32(RxConfig, RX128_INT_EN | RX_DMA_BURST); + RTL_W32(tp, RxConfig, RX128_INT_EN | RX_DMA_BURST); break; } } @@@ -5102,82 -5210,102 +5102,82 @@@ static void rtl8169_init_ring_indexes(s
static void rtl_hw_jumbo_enable(struct rtl8169_private *tp) { - void __iomem *ioaddr = tp->mmio_addr; - - RTL_W8(Cfg9346, Cfg9346_Unlock); + RTL_W8(tp, Cfg9346, Cfg9346_Unlock); rtl_generic_op(tp, tp->jumbo_ops.enable); - RTL_W8(Cfg9346, Cfg9346_Lock); + RTL_W8(tp, Cfg9346, Cfg9346_Lock); }
static void rtl_hw_jumbo_disable(struct rtl8169_private *tp) { - void __iomem *ioaddr = tp->mmio_addr; - - RTL_W8(Cfg9346, Cfg9346_Unlock); + RTL_W8(tp, Cfg9346, Cfg9346_Unlock); rtl_generic_op(tp, tp->jumbo_ops.disable); - RTL_W8(Cfg9346, Cfg9346_Lock); + RTL_W8(tp, Cfg9346, Cfg9346_Lock); }
static void r8168c_hw_jumbo_enable(struct rtl8169_private *tp) { - void __iomem *ioaddr = tp->mmio_addr; - - RTL_W8(Config3, RTL_R8(Config3) | Jumbo_En0); - RTL_W8(Config4, RTL_R8(Config4) | Jumbo_En1); - rtl_tx_performance_tweak(tp->pci_dev, PCI_EXP_DEVCTL_READRQ_512B); + RTL_W8(tp, Config3, RTL_R8(tp, Config3) | Jumbo_En0); + RTL_W8(tp, Config4, RTL_R8(tp, Config4) | Jumbo_En1); + rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_512B); }
static void r8168c_hw_jumbo_disable(struct rtl8169_private *tp) { - void __iomem *ioaddr = tp->mmio_addr; - - RTL_W8(Config3, RTL_R8(Config3) & ~Jumbo_En0); - RTL_W8(Config4, RTL_R8(Config4) & ~Jumbo_En1); - rtl_tx_performance_tweak(tp->pci_dev, 0x5 << MAX_READ_REQUEST_SHIFT); + RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Jumbo_En0); + RTL_W8(tp, Config4, RTL_R8(tp, Config4) & ~Jumbo_En1); + rtl_tx_performance_tweak(tp, 0x5 << MAX_READ_REQUEST_SHIFT); }
static void r8168dp_hw_jumbo_enable(struct rtl8169_private *tp) { - void __iomem *ioaddr = tp->mmio_addr; - - RTL_W8(Config3, RTL_R8(Config3) | Jumbo_En0); + RTL_W8(tp, Config3, RTL_R8(tp, Config3) | Jumbo_En0); }
static void r8168dp_hw_jumbo_disable(struct rtl8169_private *tp) { - void __iomem *ioaddr = tp->mmio_addr; - - RTL_W8(Config3, RTL_R8(Config3) & ~Jumbo_En0); + RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Jumbo_En0); }
static void r8168e_hw_jumbo_enable(struct rtl8169_private *tp) { - void __iomem *ioaddr = tp->mmio_addr; - - RTL_W8(MaxTxPacketSize, 0x3f); - RTL_W8(Config3, RTL_R8(Config3) | Jumbo_En0); - RTL_W8(Config4, RTL_R8(Config4) | 0x01); - rtl_tx_performance_tweak(tp->pci_dev, PCI_EXP_DEVCTL_READRQ_512B); + RTL_W8(tp, MaxTxPacketSize, 0x3f); + RTL_W8(tp, Config3, RTL_R8(tp, Config3) | Jumbo_En0); + RTL_W8(tp, Config4, RTL_R8(tp, Config4) | 0x01); + rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_512B); }
static void r8168e_hw_jumbo_disable(struct rtl8169_private *tp) { - void __iomem *ioaddr = tp->mmio_addr; - - RTL_W8(MaxTxPacketSize, 0x0c); - RTL_W8(Config3, RTL_R8(Config3) & ~Jumbo_En0); - RTL_W8(Config4, RTL_R8(Config4) & ~0x01); - rtl_tx_performance_tweak(tp->pci_dev, 0x5 << MAX_READ_REQUEST_SHIFT); + RTL_W8(tp, MaxTxPacketSize, 0x0c); + RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Jumbo_En0); + RTL_W8(tp, Config4, RTL_R8(tp, Config4) & ~0x01); + rtl_tx_performance_tweak(tp, 0x5 << MAX_READ_REQUEST_SHIFT); }
static void r8168b_0_hw_jumbo_enable(struct rtl8169_private *tp) { - rtl_tx_performance_tweak(tp->pci_dev, + rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_512B | PCI_EXP_DEVCTL_NOSNOOP_EN); }
static void r8168b_0_hw_jumbo_disable(struct rtl8169_private *tp) { - rtl_tx_performance_tweak(tp->pci_dev, + rtl_tx_performance_tweak(tp, (0x5 << MAX_READ_REQUEST_SHIFT) | PCI_EXP_DEVCTL_NOSNOOP_EN); }
static void r8168b_1_hw_jumbo_enable(struct rtl8169_private *tp) { - void __iomem *ioaddr = tp->mmio_addr; - r8168b_0_hw_jumbo_enable(tp);
- RTL_W8(Config4, RTL_R8(Config4) | (1 << 0)); + RTL_W8(tp, Config4, RTL_R8(tp, Config4) | (1 << 0)); }
static void r8168b_1_hw_jumbo_disable(struct rtl8169_private *tp) { - void __iomem *ioaddr = tp->mmio_addr; - r8168b_0_hw_jumbo_disable(tp);
- RTL_W8(Config4, RTL_R8(Config4) & ~(1 << 0)); + RTL_W8(tp, Config4, RTL_R8(tp, Config4) & ~(1 << 0)); }
static void rtl_init_jumbo_ops(struct rtl8169_private *tp) @@@ -5244,12 -5372,16 +5244,12 @@@
DECLARE_RTL_COND(rtl_chipcmd_cond) { - void __iomem *ioaddr = tp->mmio_addr; - - return RTL_R8(ChipCmd) & CmdReset; + return RTL_R8(tp, ChipCmd) & CmdReset; }
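DECLARE_RTL_COND() is used for every polling condition converted in this patch (rtl_chipcmd_cond, rtl_npq_cond, rtl_txcfg_empty_cond, rtl_csiar_cond and so on). The macro itself sits outside the diff; a rough sketch of the pattern it presumably expands to, so the cond/wait pairing below is easier to follow (the exact struct layout is an assumption):

/* Assumed expansion: a named condition object plus its check callback. */
#define DECLARE_RTL_COND(name)				\
static bool name ## _check(struct rtl8169_private *);	\
							\
static const struct rtl_cond name = {			\
	.check	= name ## _check,			\
	.msg	= #name,				\
};							\
							\
static bool name ## _check(struct rtl8169_private *tp)

rtl_udelay_loop_wait_low()/_high() then presumably poll name.check(tp) until it reports the requested state or the delay/iteration budget runs out.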
static void rtl_hw_reset(struct rtl8169_private *tp) { - void __iomem *ioaddr = tp->mmio_addr; - - RTL_W8(ChipCmd, CmdReset); + RTL_W8(tp, ChipCmd, CmdReset);
rtl_udelay_loop_wait_low(tp, &rtl_chipcmd_cond, 100, 100); } @@@ -5268,7 -5400,7 +5268,7 @@@ static void rtl_request_uncached_firmwa if (!rtl_fw) goto err_warn;
- rc = request_firmware(&rtl_fw->fw, name, &tp->pci_dev->dev); + rc = request_firmware(&rtl_fw->fw, name, tp_to_dev(tp)); if (rc < 0) goto err_free;
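tp_to_dev() replaces the open-coded &tp->pci_dev->dev dereference here and in the DMA mapping and unmap paths further down. Its definition is not in this diff, but the substitutions make the intent clear; a likely definition (an assumption, since only its uses appear here):

static inline struct device *tp_to_dev(struct rtl8169_private *tp)
{
	return &tp->pci_dev->dev;
}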
@@@ -5300,21 -5432,29 +5300,21 @@@ static void rtl_request_firmware(struc
static void rtl_rx_close(struct rtl8169_private *tp) { - void __iomem *ioaddr = tp->mmio_addr; - - RTL_W32(RxConfig, RTL_R32(RxConfig) & ~RX_CONFIG_ACCEPT_MASK); + RTL_W32(tp, RxConfig, RTL_R32(tp, RxConfig) & ~RX_CONFIG_ACCEPT_MASK); }
DECLARE_RTL_COND(rtl_npq_cond) { - void __iomem *ioaddr = tp->mmio_addr; - - return RTL_R8(TxPoll) & NPQ; + return RTL_R8(tp, TxPoll) & NPQ; }
DECLARE_RTL_COND(rtl_txcfg_empty_cond) { - void __iomem *ioaddr = tp->mmio_addr; - - return RTL_R32(TxConfig) & TXCFG_EMPTY; + return RTL_R32(tp, TxConfig) & TXCFG_EMPTY; }
static void rtl8169_hw_reset(struct rtl8169_private *tp) { - void __iomem *ioaddr = tp->mmio_addr; - /* Disable interrupts */ rtl8169_irq_mask_and_ack(tp);
@@@ -5341,10 -5481,10 +5341,10 @@@ tp->mac_version == RTL_GIGA_MAC_VER_49 || tp->mac_version == RTL_GIGA_MAC_VER_50 || tp->mac_version == RTL_GIGA_MAC_VER_51) { - RTL_W8(ChipCmd, RTL_R8(ChipCmd) | StopReq); + RTL_W8(tp, ChipCmd, RTL_R8(tp, ChipCmd) | StopReq); rtl_udelay_loop_wait_high(tp, &rtl_txcfg_empty_cond, 100, 666); } else { - RTL_W8(ChipCmd, RTL_R8(ChipCmd) | StopReq); + RTL_W8(tp, ChipCmd, RTL_R8(tp, ChipCmd) | StopReq); udelay(100); }
@@@ -5353,8 -5493,10 +5353,8 @@@
static void rtl_set_rx_tx_config_registers(struct rtl8169_private *tp) { - void __iomem *ioaddr = tp->mmio_addr; - /* Set DMA burst size and Interframe Gap Time */ - RTL_W32(TxConfig, (TX_DMA_BURST << TxDMAShift) | + RTL_W32(tp, TxConfig, (TX_DMA_BURST << TxDMAShift) | (InterFrameGap << TxInterFrameGapShift)); }
@@@ -5367,35 -5509,36 +5367,35 @@@ static void rtl_hw_start(struct net_dev rtl_irq_enable_all(tp); }
-static void rtl_set_rx_tx_desc_registers(struct rtl8169_private *tp, - void __iomem *ioaddr) +static void rtl_set_rx_tx_desc_registers(struct rtl8169_private *tp) { /* * Magic spell: some iop3xx ARM board needs the TxDescAddrHigh * register to be written before TxDescAddrLow to work. * Switching from MMIO to I/O access fixes the issue as well. */ - RTL_W32(TxDescStartAddrHigh, ((u64) tp->TxPhyAddr) >> 32); - RTL_W32(TxDescStartAddrLow, ((u64) tp->TxPhyAddr) & DMA_BIT_MASK(32)); - RTL_W32(RxDescAddrHigh, ((u64) tp->RxPhyAddr) >> 32); - RTL_W32(RxDescAddrLow, ((u64) tp->RxPhyAddr) & DMA_BIT_MASK(32)); + RTL_W32(tp, TxDescStartAddrHigh, ((u64) tp->TxPhyAddr) >> 32); + RTL_W32(tp, TxDescStartAddrLow, ((u64) tp->TxPhyAddr) & DMA_BIT_MASK(32)); + RTL_W32(tp, RxDescAddrHigh, ((u64) tp->RxPhyAddr) >> 32); + RTL_W32(tp, RxDescAddrLow, ((u64) tp->RxPhyAddr) & DMA_BIT_MASK(32)); }
-static u16 rtl_rw_cpluscmd(void __iomem *ioaddr) +static u16 rtl_rw_cpluscmd(struct rtl8169_private *tp) { u16 cmd;
- cmd = RTL_R16(CPlusCmd); - RTL_W16(CPlusCmd, cmd); + cmd = RTL_R16(tp, CPlusCmd); + RTL_W16(tp, CPlusCmd, cmd); return cmd; }
-static void rtl_set_rx_max_size(void __iomem *ioaddr, unsigned int rx_buf_sz) +static void rtl_set_rx_max_size(struct rtl8169_private *tp, unsigned int rx_buf_sz) { /* Low hurts. Let's disable the filtering. */ - RTL_W16(RxMaxSize, rx_buf_sz + 1); + RTL_W16(tp, RxMaxSize, rx_buf_sz + 1); }
-static void rtl8169_set_magic_reg(void __iomem *ioaddr, unsigned mac_version) +static void rtl8169_set_magic_reg(struct rtl8169_private *tp, unsigned mac_version) { static const struct rtl_cfg2_info { u32 mac_version; @@@ -5411,10 -5554,10 +5411,10 @@@ unsigned int i; u32 clk;
- clk = RTL_R8(Config2) & PCI_Clock_66MHz; + clk = RTL_R8(tp, Config2) & PCI_Clock_66MHz; for (i = 0; i < ARRAY_SIZE(cfg2_info); i++, p++) { if ((p->mac_version == mac_version) && (p->clk == clk)) { - RTL_W32(0x7c, p->val); + RTL_W32(tp, 0x7c, p->val); break; } } @@@ -5423,6 -5566,7 +5423,6 @@@ static void rtl_set_rx_mode(struct net_device *dev) { struct rtl8169_private *tp = netdev_priv(dev); - void __iomem *ioaddr = tp->mmio_addr; u32 mc_filter[2]; /* Multicast hash filter */ int rx_mode; u32 tmp = 0; @@@ -5454,7 -5598,7 +5454,7 @@@ if (dev->features & NETIF_F_RXALL) rx_mode |= (AcceptErr | AcceptRunt);
- tmp = (RTL_R32(RxConfig) & ~RX_CONFIG_ACCEPT_MASK) | rx_mode; + tmp = (RTL_R32(tp, RxConfig) & ~RX_CONFIG_ACCEPT_MASK) | rx_mode;
if (tp->mac_version > RTL_GIGA_MAC_VER_06) { u32 data = mc_filter[0]; @@@ -5466,34 -5610,35 +5466,34 @@@ if (tp->mac_version == RTL_GIGA_MAC_VER_35) mc_filter[1] = mc_filter[0] = 0xffffffff;
- RTL_W32(MAR0 + 4, mc_filter[1]); - RTL_W32(MAR0 + 0, mc_filter[0]); + RTL_W32(tp, MAR0 + 4, mc_filter[1]); + RTL_W32(tp, MAR0 + 0, mc_filter[0]);
- RTL_W32(RxConfig, tmp); + RTL_W32(tp, RxConfig, tmp); }
static void rtl_hw_start_8169(struct net_device *dev) { struct rtl8169_private *tp = netdev_priv(dev); - void __iomem *ioaddr = tp->mmio_addr; struct pci_dev *pdev = tp->pci_dev;
if (tp->mac_version == RTL_GIGA_MAC_VER_05) { - RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) | PCIMulRW); + RTL_W16(tp, CPlusCmd, RTL_R16(tp, CPlusCmd) | PCIMulRW); pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE, 0x08); }
- RTL_W8(Cfg9346, Cfg9346_Unlock); + RTL_W8(tp, Cfg9346, Cfg9346_Unlock); if (tp->mac_version == RTL_GIGA_MAC_VER_01 || tp->mac_version == RTL_GIGA_MAC_VER_02 || tp->mac_version == RTL_GIGA_MAC_VER_03 || tp->mac_version == RTL_GIGA_MAC_VER_04) - RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb); + RTL_W8(tp, ChipCmd, CmdTxEnb | CmdRxEnb);
rtl_init_rxcfg(tp);
- RTL_W8(EarlyTxThres, NoEarlyTx); + RTL_W8(tp, EarlyTxThres, NoEarlyTx);
- rtl_set_rx_max_size(ioaddr, rx_buf_sz); + rtl_set_rx_max_size(tp, rx_buf_sz);
if (tp->mac_version == RTL_GIGA_MAC_VER_01 || tp->mac_version == RTL_GIGA_MAC_VER_02 || @@@ -5501,7 -5646,7 +5501,7 @@@ tp->mac_version == RTL_GIGA_MAC_VER_04) rtl_set_rx_tx_config_registers(tp);
- tp->cp_cmd |= rtl_rw_cpluscmd(ioaddr) | PCIMulRW; + tp->cp_cmd |= rtl_rw_cpluscmd(tp) | PCIMulRW;
if (tp->mac_version == RTL_GIGA_MAC_VER_02 || tp->mac_version == RTL_GIGA_MAC_VER_03) { @@@ -5510,37 -5655,37 +5510,37 @@@ tp->cp_cmd |= (1 << 14); }
- RTL_W16(CPlusCmd, tp->cp_cmd); + RTL_W16(tp, CPlusCmd, tp->cp_cmd);
- rtl8169_set_magic_reg(ioaddr, tp->mac_version); + rtl8169_set_magic_reg(tp, tp->mac_version);
/* * Undocumented corner. Supposedly: * (TxTimer << 12) | (TxPackets << 8) | (RxTimer << 4) | RxPackets */ - RTL_W16(IntrMitigate, 0x0000); + RTL_W16(tp, IntrMitigate, 0x0000);
- rtl_set_rx_tx_desc_registers(tp, ioaddr); + rtl_set_rx_tx_desc_registers(tp);
if (tp->mac_version != RTL_GIGA_MAC_VER_01 && tp->mac_version != RTL_GIGA_MAC_VER_02 && tp->mac_version != RTL_GIGA_MAC_VER_03 && tp->mac_version != RTL_GIGA_MAC_VER_04) { - RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb); + RTL_W8(tp, ChipCmd, CmdTxEnb | CmdRxEnb); rtl_set_rx_tx_config_registers(tp); }
- RTL_W8(Cfg9346, Cfg9346_Lock); + RTL_W8(tp, Cfg9346, Cfg9346_Lock);
/* Initially a 10 us delay. Turned it into a PCI commit. - FR */ - RTL_R8(IntrMask); + RTL_R8(tp, IntrMask);
- RTL_W32(RxMissed, 0); + RTL_W32(tp, RxMissed, 0);
rtl_set_rx_mode(dev);
/* no early-rx interrupts */ - RTL_W16(MultiIntr, RTL_R16(MultiIntr) & 0xf000); + RTL_W16(tp, MultiIntr, RTL_R16(tp, MultiIntr) & 0xf000); }
static void rtl_csi_write(struct rtl8169_private *tp, int addr, int value) @@@ -5574,13 -5719,17 +5574,13 @@@ static void rtl_csi_access_enable_2(str
DECLARE_RTL_COND(rtl_csiar_cond) { - void __iomem *ioaddr = tp->mmio_addr; - - return RTL_R32(CSIAR) & CSIAR_FLAG; + return RTL_R32(tp, CSIAR) & CSIAR_FLAG; }
static void r8169_csi_write(struct rtl8169_private *tp, int addr, int value) { - void __iomem *ioaddr = tp->mmio_addr; - - RTL_W32(CSIDR, value); - RTL_W32(CSIAR, CSIAR_WRITE_CMD | (addr & CSIAR_ADDR_MASK) | + RTL_W32(tp, CSIDR, value); + RTL_W32(tp, CSIAR, CSIAR_WRITE_CMD | (addr & CSIAR_ADDR_MASK) | CSIAR_BYTE_ENABLE << CSIAR_BYTE_ENABLE_SHIFT);
rtl_udelay_loop_wait_low(tp, &rtl_csiar_cond, 10, 100); @@@ -5588,17 -5737,21 +5588,17 @@@
static u32 r8169_csi_read(struct rtl8169_private *tp, int addr) { - void __iomem *ioaddr = tp->mmio_addr; - - RTL_W32(CSIAR, (addr & CSIAR_ADDR_MASK) | + RTL_W32(tp, CSIAR, (addr & CSIAR_ADDR_MASK) | CSIAR_BYTE_ENABLE << CSIAR_BYTE_ENABLE_SHIFT);
return rtl_udelay_loop_wait_high(tp, &rtl_csiar_cond, 10, 100) ? - RTL_R32(CSIDR) : ~0; + RTL_R32(tp, CSIDR) : ~0; }
static void r8402_csi_write(struct rtl8169_private *tp, int addr, int value) { - void __iomem *ioaddr = tp->mmio_addr; - - RTL_W32(CSIDR, value); - RTL_W32(CSIAR, CSIAR_WRITE_CMD | (addr & CSIAR_ADDR_MASK) | + RTL_W32(tp, CSIDR, value); + RTL_W32(tp, CSIAR, CSIAR_WRITE_CMD | (addr & CSIAR_ADDR_MASK) | CSIAR_BYTE_ENABLE << CSIAR_BYTE_ENABLE_SHIFT | CSIAR_FUNC_NIC);
@@@ -5607,17 -5760,21 +5607,17 @@@
static u32 r8402_csi_read(struct rtl8169_private *tp, int addr) { - void __iomem *ioaddr = tp->mmio_addr; - - RTL_W32(CSIAR, (addr & CSIAR_ADDR_MASK) | CSIAR_FUNC_NIC | + RTL_W32(tp, CSIAR, (addr & CSIAR_ADDR_MASK) | CSIAR_FUNC_NIC | CSIAR_BYTE_ENABLE << CSIAR_BYTE_ENABLE_SHIFT);
return rtl_udelay_loop_wait_high(tp, &rtl_csiar_cond, 10, 100) ? - RTL_R32(CSIDR) : ~0; + RTL_R32(tp, CSIDR) : ~0; }
static void r8411_csi_write(struct rtl8169_private *tp, int addr, int value) { - void __iomem *ioaddr = tp->mmio_addr; - - RTL_W32(CSIDR, value); - RTL_W32(CSIAR, CSIAR_WRITE_CMD | (addr & CSIAR_ADDR_MASK) | + RTL_W32(tp, CSIDR, value); + RTL_W32(tp, CSIAR, CSIAR_WRITE_CMD | (addr & CSIAR_ADDR_MASK) | CSIAR_BYTE_ENABLE << CSIAR_BYTE_ENABLE_SHIFT | CSIAR_FUNC_NIC2);
@@@ -5626,11 -5783,13 +5626,11 @@@
static u32 r8411_csi_read(struct rtl8169_private *tp, int addr) { - void __iomem *ioaddr = tp->mmio_addr; - - RTL_W32(CSIAR, (addr & CSIAR_ADDR_MASK) | CSIAR_FUNC_NIC2 | + RTL_W32(tp, CSIAR, (addr & CSIAR_ADDR_MASK) | CSIAR_FUNC_NIC2 | CSIAR_BYTE_ENABLE << CSIAR_BYTE_ENABLE_SHIFT);
return rtl_udelay_loop_wait_high(tp, &rtl_csiar_cond, 10, 100) ? - RTL_R32(CSIDR) : ~0; + RTL_R32(tp, CSIDR) : ~0; }
static void rtl_init_csi_ops(struct rtl8169_private *tp) @@@ -5692,30 -5851,31 +5692,30 @@@ static void rtl_ephy_init(struct rtl816 } }
-static void rtl_disable_clock_request(struct pci_dev *pdev) +static void rtl_disable_clock_request(struct rtl8169_private *tp) { - pcie_capability_clear_word(pdev, PCI_EXP_LNKCTL, + pcie_capability_clear_word(tp->pci_dev, PCI_EXP_LNKCTL, PCI_EXP_LNKCTL_CLKREQ_EN); }
-static void rtl_enable_clock_request(struct pci_dev *pdev) +static void rtl_enable_clock_request(struct rtl8169_private *tp) { - pcie_capability_set_word(pdev, PCI_EXP_LNKCTL, + pcie_capability_set_word(tp->pci_dev, PCI_EXP_LNKCTL, PCI_EXP_LNKCTL_CLKREQ_EN); }
static void rtl_pcie_state_l2l3_enable(struct rtl8169_private *tp, bool enable) { - void __iomem *ioaddr = tp->mmio_addr; u8 data;
- data = RTL_R8(Config3); + data = RTL_R8(tp, Config3);
if (enable) data |= Rdy_to_L23; else data &= ~Rdy_to_L23;
- RTL_W8(Config3, data); + RTL_W8(tp, Config3, data); }
#define R8168_CPCMD_QUIRK_MASK (\ @@@ -5731,37 -5891,45 +5731,37 @@@
static void rtl_hw_start_8168bb(struct rtl8169_private *tp) { - void __iomem *ioaddr = tp->mmio_addr; - struct pci_dev *pdev = tp->pci_dev; + RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Beacon_en);
- RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en); - - RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK); + RTL_W16(tp, CPlusCmd, RTL_R16(tp, CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK);
if (tp->dev->mtu <= ETH_DATA_LEN) { - rtl_tx_performance_tweak(pdev, (0x5 << MAX_READ_REQUEST_SHIFT) | + rtl_tx_performance_tweak(tp, (0x5 << MAX_READ_REQUEST_SHIFT) | PCI_EXP_DEVCTL_NOSNOOP_EN); } }
static void rtl_hw_start_8168bef(struct rtl8169_private *tp) { - void __iomem *ioaddr = tp->mmio_addr; - rtl_hw_start_8168bb(tp);
- RTL_W8(MaxTxPacketSize, TxPacketMax); + RTL_W8(tp, MaxTxPacketSize, TxPacketMax);
- RTL_W8(Config4, RTL_R8(Config4) & ~(1 << 0)); + RTL_W8(tp, Config4, RTL_R8(tp, Config4) & ~(1 << 0)); }
static void __rtl_hw_start_8168cp(struct rtl8169_private *tp) { - void __iomem *ioaddr = tp->mmio_addr; - struct pci_dev *pdev = tp->pci_dev; + RTL_W8(tp, Config1, RTL_R8(tp, Config1) | Speed_down);
- RTL_W8(Config1, RTL_R8(Config1) | Speed_down); - - RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en); + RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Beacon_en);
if (tp->dev->mtu <= ETH_DATA_LEN) - rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT); + rtl_tx_performance_tweak(tp, 0x5 << MAX_READ_REQUEST_SHIFT);
- rtl_disable_clock_request(pdev); + rtl_disable_clock_request(tp);
- RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK); + RTL_W16(tp, CPlusCmd, RTL_R16(tp, CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK); }
static void rtl_hw_start_8168cp_1(struct rtl8169_private *tp) @@@ -5783,35 -5951,42 +5783,35 @@@
static void rtl_hw_start_8168cp_2(struct rtl8169_private *tp) { - void __iomem *ioaddr = tp->mmio_addr; - struct pci_dev *pdev = tp->pci_dev; - rtl_csi_access_enable_2(tp);
- RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en); + RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Beacon_en);
if (tp->dev->mtu <= ETH_DATA_LEN) - rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT); + rtl_tx_performance_tweak(tp, 0x5 << MAX_READ_REQUEST_SHIFT);
- RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK); + RTL_W16(tp, CPlusCmd, RTL_R16(tp, CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK); }
static void rtl_hw_start_8168cp_3(struct rtl8169_private *tp) { - void __iomem *ioaddr = tp->mmio_addr; - struct pci_dev *pdev = tp->pci_dev; - rtl_csi_access_enable_2(tp);
- RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en); + RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Beacon_en);
/* Magic. */ - RTL_W8(DBG_REG, 0x20); + RTL_W8(tp, DBG_REG, 0x20);
- RTL_W8(MaxTxPacketSize, TxPacketMax); + RTL_W8(tp, MaxTxPacketSize, TxPacketMax);
if (tp->dev->mtu <= ETH_DATA_LEN) - rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT); + rtl_tx_performance_tweak(tp, 0x5 << MAX_READ_REQUEST_SHIFT);
- RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK); + RTL_W16(tp, CPlusCmd, RTL_R16(tp, CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK); }
static void rtl_hw_start_8168c_1(struct rtl8169_private *tp) { - void __iomem *ioaddr = tp->mmio_addr; static const struct ephy_info e_info_8168c_1[] = { { 0x02, 0x0800, 0x1000 }, { 0x03, 0, 0x0002 }, @@@ -5820,7 -5995,7 +5820,7 @@@
rtl_csi_access_enable_2(tp);
- RTL_W8(DBG_REG, 0x06 | FIX_NAK_1 | FIX_NAK_2); + RTL_W8(tp, DBG_REG, 0x06 | FIX_NAK_1 | FIX_NAK_2);
rtl_ephy_init(tp, e_info_8168c_1, ARRAY_SIZE(e_info_8168c_1));
@@@ -5855,32 -6030,40 +5855,32 @@@ static void rtl_hw_start_8168c_4(struc
static void rtl_hw_start_8168d(struct rtl8169_private *tp) { - void __iomem *ioaddr = tp->mmio_addr; - struct pci_dev *pdev = tp->pci_dev; - rtl_csi_access_enable_2(tp);
- rtl_disable_clock_request(pdev); + rtl_disable_clock_request(tp);
- RTL_W8(MaxTxPacketSize, TxPacketMax); + RTL_W8(tp, MaxTxPacketSize, TxPacketMax);
if (tp->dev->mtu <= ETH_DATA_LEN) - rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT); + rtl_tx_performance_tweak(tp, 0x5 << MAX_READ_REQUEST_SHIFT);
- RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK); + RTL_W16(tp, CPlusCmd, RTL_R16(tp, CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK); }
static void rtl_hw_start_8168dp(struct rtl8169_private *tp) { - void __iomem *ioaddr = tp->mmio_addr; - struct pci_dev *pdev = tp->pci_dev; - rtl_csi_access_enable_1(tp);
if (tp->dev->mtu <= ETH_DATA_LEN) - rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT); + rtl_tx_performance_tweak(tp, 0x5 << MAX_READ_REQUEST_SHIFT);
- RTL_W8(MaxTxPacketSize, TxPacketMax); + RTL_W8(tp, MaxTxPacketSize, TxPacketMax);
- rtl_disable_clock_request(pdev); + rtl_disable_clock_request(tp); }
static void rtl_hw_start_8168d_4(struct rtl8169_private *tp) { - void __iomem *ioaddr = tp->mmio_addr; - struct pci_dev *pdev = tp->pci_dev; static const struct ephy_info e_info_8168d_4[] = { { 0x0b, 0x0000, 0x0048 }, { 0x19, 0x0020, 0x0050 }, @@@ -5889,17 -6072,19 +5889,17 @@@
rtl_csi_access_enable_1(tp);
- rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT); + rtl_tx_performance_tweak(tp, 0x5 << MAX_READ_REQUEST_SHIFT);
- RTL_W8(MaxTxPacketSize, TxPacketMax); + RTL_W8(tp, MaxTxPacketSize, TxPacketMax);
rtl_ephy_init(tp, e_info_8168d_4, ARRAY_SIZE(e_info_8168d_4));
- rtl_enable_clock_request(pdev); + rtl_enable_clock_request(tp); }
static void rtl_hw_start_8168e_1(struct rtl8169_private *tp) { - void __iomem *ioaddr = tp->mmio_addr; - struct pci_dev *pdev = tp->pci_dev; static const struct ephy_info e_info_8168e_1[] = { { 0x00, 0x0200, 0x0100 }, { 0x00, 0x0000, 0x0004 }, @@@ -5921,21 -6106,23 +5921,21 @@@ rtl_ephy_init(tp, e_info_8168e_1, ARRAY_SIZE(e_info_8168e_1));
if (tp->dev->mtu <= ETH_DATA_LEN) - rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT); + rtl_tx_performance_tweak(tp, 0x5 << MAX_READ_REQUEST_SHIFT);
- RTL_W8(MaxTxPacketSize, TxPacketMax); + RTL_W8(tp, MaxTxPacketSize, TxPacketMax);
- rtl_disable_clock_request(pdev); + rtl_disable_clock_request(tp);
/* Reset tx FIFO pointer */ - RTL_W32(MISC, RTL_R32(MISC) | TXPLA_RST); - RTL_W32(MISC, RTL_R32(MISC) & ~TXPLA_RST); + RTL_W32(tp, MISC, RTL_R32(tp, MISC) | TXPLA_RST); + RTL_W32(tp, MISC, RTL_R32(tp, MISC) & ~TXPLA_RST);
- RTL_W8(Config5, RTL_R8(Config5) & ~Spi_en); + RTL_W8(tp, Config5, RTL_R8(tp, Config5) & ~Spi_en); }
static void rtl_hw_start_8168e_2(struct rtl8169_private *tp) { - void __iomem *ioaddr = tp->mmio_addr; - struct pci_dev *pdev = tp->pci_dev; static const struct ephy_info e_info_8168e_2[] = { { 0x09, 0x0000, 0x0080 }, { 0x19, 0x0000, 0x0224 } @@@ -5946,7 -6133,7 +5946,7 @@@ rtl_ephy_init(tp, e_info_8168e_2, ARRAY_SIZE(e_info_8168e_2));
if (tp->dev->mtu <= ETH_DATA_LEN) - rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT); + rtl_tx_performance_tweak(tp, 0x5 << MAX_READ_REQUEST_SHIFT);
rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC); rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC); @@@ -5957,26 -6144,29 +5957,26 @@@ rtl_w0w1_eri(tp, 0x1b0, ERIAR_MASK_0001, 0x10, 0x00, ERIAR_EXGMAC); rtl_w0w1_eri(tp, 0x0d4, ERIAR_MASK_0011, 0x0c00, 0xff00, ERIAR_EXGMAC);
- RTL_W8(MaxTxPacketSize, EarlySize); + RTL_W8(tp, MaxTxPacketSize, EarlySize);
- rtl_disable_clock_request(pdev); + rtl_disable_clock_request(tp);
- RTL_W32(TxConfig, RTL_R32(TxConfig) | TXCFG_AUTO_FIFO); - RTL_W8(MCU, RTL_R8(MCU) & ~NOW_IS_OOB); + RTL_W32(tp, TxConfig, RTL_R32(tp, TxConfig) | TXCFG_AUTO_FIFO); + RTL_W8(tp, MCU, RTL_R8(tp, MCU) & ~NOW_IS_OOB);
/* Adjust EEE LED frequency */ - RTL_W8(EEE_LED, RTL_R8(EEE_LED) & ~0x07); + RTL_W8(tp, EEE_LED, RTL_R8(tp, EEE_LED) & ~0x07);
- RTL_W8(DLLPR, RTL_R8(DLLPR) | PFM_EN); - RTL_W32(MISC, RTL_R32(MISC) | PWM_EN); - RTL_W8(Config5, RTL_R8(Config5) & ~Spi_en); + RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) | PFM_EN); + RTL_W32(tp, MISC, RTL_R32(tp, MISC) | PWM_EN); + RTL_W8(tp, Config5, RTL_R8(tp, Config5) & ~Spi_en); }
static void rtl_hw_start_8168f(struct rtl8169_private *tp) { - void __iomem *ioaddr = tp->mmio_addr; - struct pci_dev *pdev = tp->pci_dev; - rtl_csi_access_enable_2(tp);
- rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT); + rtl_tx_performance_tweak(tp, 0x5 << MAX_READ_REQUEST_SHIFT);
rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC); rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC); @@@ -5989,19 -6179,20 +5989,19 @@@ rtl_eri_write(tp, 0xcc, ERIAR_MASK_1111, 0x00000050, ERIAR_EXGMAC); rtl_eri_write(tp, 0xd0, ERIAR_MASK_1111, 0x00000060, ERIAR_EXGMAC);
- RTL_W8(MaxTxPacketSize, EarlySize); + RTL_W8(tp, MaxTxPacketSize, EarlySize);
- rtl_disable_clock_request(pdev); + rtl_disable_clock_request(tp);
- RTL_W32(TxConfig, RTL_R32(TxConfig) | TXCFG_AUTO_FIFO); - RTL_W8(MCU, RTL_R8(MCU) & ~NOW_IS_OOB); - RTL_W8(DLLPR, RTL_R8(DLLPR) | PFM_EN); - RTL_W32(MISC, RTL_R32(MISC) | PWM_EN); - RTL_W8(Config5, RTL_R8(Config5) & ~Spi_en); + RTL_W32(tp, TxConfig, RTL_R32(tp, TxConfig) | TXCFG_AUTO_FIFO); + RTL_W8(tp, MCU, RTL_R8(tp, MCU) & ~NOW_IS_OOB); + RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) | PFM_EN); + RTL_W32(tp, MISC, RTL_R32(tp, MISC) | PWM_EN); + RTL_W8(tp, Config5, RTL_R8(tp, Config5) & ~Spi_en); }
static void rtl_hw_start_8168f_1(struct rtl8169_private *tp) { - void __iomem *ioaddr = tp->mmio_addr; static const struct ephy_info e_info_8168f_1[] = { { 0x06, 0x00c0, 0x0020 }, { 0x08, 0x0001, 0x0002 }, @@@ -6016,7 -6207,7 +6016,7 @@@ rtl_w0w1_eri(tp, 0x0d4, ERIAR_MASK_0011, 0x0c00, 0xff00, ERIAR_EXGMAC);
/* Adjust EEE LED frequency */ - RTL_W8(EEE_LED, RTL_R8(EEE_LED) & ~0x07); + RTL_W8(tp, EEE_LED, RTL_R8(tp, EEE_LED) & ~0x07); }
static void rtl_hw_start_8411(struct rtl8169_private *tp) @@@ -6038,7 -6229,10 +6038,7 @@@
static void rtl_hw_start_8168g(struct rtl8169_private *tp) { - void __iomem *ioaddr = tp->mmio_addr; - struct pci_dev *pdev = tp->pci_dev; - - RTL_W32(TxConfig, RTL_R32(TxConfig) | TXCFG_AUTO_FIFO); + RTL_W32(tp, TxConfig, RTL_R32(tp, TxConfig) | TXCFG_AUTO_FIFO);
rtl_eri_write(tp, 0xc8, ERIAR_MASK_0101, 0x080002, ERIAR_EXGMAC); rtl_eri_write(tp, 0xcc, ERIAR_MASK_0001, 0x38, ERIAR_EXGMAC); @@@ -6047,20 -6241,20 +6047,20 @@@
rtl_csi_access_enable_1(tp);
- rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT); + rtl_tx_performance_tweak(tp, 0x5 << MAX_READ_REQUEST_SHIFT);
rtl_w0w1_eri(tp, 0xdc, ERIAR_MASK_0001, 0x00, 0x01, ERIAR_EXGMAC); rtl_w0w1_eri(tp, 0xdc, ERIAR_MASK_0001, 0x01, 0x00, ERIAR_EXGMAC); rtl_eri_write(tp, 0x2f8, ERIAR_MASK_0011, 0x1d8f, ERIAR_EXGMAC);
- RTL_W32(MISC, RTL_R32(MISC) & ~RXDV_GATED_EN); - RTL_W8(MaxTxPacketSize, EarlySize); + RTL_W32(tp, MISC, RTL_R32(tp, MISC) & ~RXDV_GATED_EN); + RTL_W8(tp, MaxTxPacketSize, EarlySize);
rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC); rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
/* Adjust EEE LED frequency */ - RTL_W8(EEE_LED, RTL_R8(EEE_LED) & ~0x07); + RTL_W8(tp, EEE_LED, RTL_R8(tp, EEE_LED) & ~0x07);
rtl_w0w1_eri(tp, 0x2fc, ERIAR_MASK_0001, 0x01, 0x06, ERIAR_EXGMAC); rtl_w0w1_eri(tp, 0x1b0, ERIAR_MASK_0011, 0x0000, 0x1000, ERIAR_EXGMAC); @@@ -6070,6 -6264,7 +6070,6 @@@
static void rtl_hw_start_8168g_1(struct rtl8169_private *tp) { - void __iomem *ioaddr = tp->mmio_addr; static const struct ephy_info e_info_8168g_1[] = { { 0x00, 0x0000, 0x0008 }, { 0x0c, 0x37d0, 0x0820 }, @@@ -6080,13 -6275,14 +6080,13 @@@ rtl_hw_start_8168g(tp);
/* disable aspm and clock request before access ephy */ - RTL_W8(Config2, RTL_R8(Config2) & ~ClkReqEn); - RTL_W8(Config5, RTL_R8(Config5) & ~ASPM_en); + RTL_W8(tp, Config2, RTL_R8(tp, Config2) & ~ClkReqEn); + RTL_W8(tp, Config5, RTL_R8(tp, Config5) & ~ASPM_en); rtl_ephy_init(tp, e_info_8168g_1, ARRAY_SIZE(e_info_8168g_1)); }
static void rtl_hw_start_8168g_2(struct rtl8169_private *tp) { - void __iomem *ioaddr = tp->mmio_addr; static const struct ephy_info e_info_8168g_2[] = { { 0x00, 0x0000, 0x0008 }, { 0x0c, 0x3df0, 0x0200 }, @@@ -6097,13 -6293,14 +6097,13 @@@ rtl_hw_start_8168g(tp);
/* disable aspm and clock request before access ephy */ - RTL_W8(Config2, RTL_R8(Config2) & ~ClkReqEn); - RTL_W8(Config5, RTL_R8(Config5) & ~ASPM_en); + RTL_W8(tp, Config2, RTL_R8(tp, Config2) & ~ClkReqEn); + RTL_W8(tp, Config5, RTL_R8(tp, Config5) & ~ASPM_en); rtl_ephy_init(tp, e_info_8168g_2, ARRAY_SIZE(e_info_8168g_2)); }
static void rtl_hw_start_8411_2(struct rtl8169_private *tp) { - void __iomem *ioaddr = tp->mmio_addr; static const struct ephy_info e_info_8411_2[] = { { 0x00, 0x0000, 0x0008 }, { 0x0c, 0x3df0, 0x0200 }, @@@ -6115,13 -6312,15 +6115,13 @@@ rtl_hw_start_8168g(tp);
/* disable aspm and clock request before access ephy */ - RTL_W8(Config2, RTL_R8(Config2) & ~ClkReqEn); - RTL_W8(Config5, RTL_R8(Config5) & ~ASPM_en); + RTL_W8(tp, Config2, RTL_R8(tp, Config2) & ~ClkReqEn); + RTL_W8(tp, Config5, RTL_R8(tp, Config5) & ~ASPM_en); rtl_ephy_init(tp, e_info_8411_2, ARRAY_SIZE(e_info_8411_2)); }
static void rtl_hw_start_8168h_1(struct rtl8169_private *tp) { - void __iomem *ioaddr = tp->mmio_addr; - struct pci_dev *pdev = tp->pci_dev; int rg_saw_cnt; u32 data; static const struct ephy_info e_info_8168h_1[] = { @@@ -6134,11 -6333,11 +6134,11 @@@ };
/* disable aspm and clock request before access ephy */ - RTL_W8(Config2, RTL_R8(Config2) & ~ClkReqEn); - RTL_W8(Config5, RTL_R8(Config5) & ~ASPM_en); + RTL_W8(tp, Config2, RTL_R8(tp, Config2) & ~ClkReqEn); + RTL_W8(tp, Config5, RTL_R8(tp, Config5) & ~ASPM_en); rtl_ephy_init(tp, e_info_8168h_1, ARRAY_SIZE(e_info_8168h_1));
- RTL_W32(TxConfig, RTL_R32(TxConfig) | TXCFG_AUTO_FIFO); + RTL_W32(tp, TxConfig, RTL_R32(tp, TxConfig) | TXCFG_AUTO_FIFO);
rtl_eri_write(tp, 0xc8, ERIAR_MASK_0101, 0x00080002, ERIAR_EXGMAC); rtl_eri_write(tp, 0xcc, ERIAR_MASK_0001, 0x38, ERIAR_EXGMAC); @@@ -6147,7 -6346,7 +6147,7 @@@
rtl_csi_access_enable_1(tp);
- rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT); + rtl_tx_performance_tweak(tp, 0x5 << MAX_READ_REQUEST_SHIFT);
rtl_w0w1_eri(tp, 0xdc, ERIAR_MASK_0001, 0x00, 0x01, ERIAR_EXGMAC); rtl_w0w1_eri(tp, 0xdc, ERIAR_MASK_0001, 0x01, 0x00, ERIAR_EXGMAC); @@@ -6158,19 -6357,19 +6158,19 @@@
rtl_eri_write(tp, 0x5f0, ERIAR_MASK_0011, 0x4f87, ERIAR_EXGMAC);
- RTL_W32(MISC, RTL_R32(MISC) & ~RXDV_GATED_EN); - RTL_W8(MaxTxPacketSize, EarlySize); + RTL_W32(tp, MISC, RTL_R32(tp, MISC) & ~RXDV_GATED_EN); + RTL_W8(tp, MaxTxPacketSize, EarlySize);
rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC); rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
/* Adjust EEE LED frequency */ - RTL_W8(EEE_LED, RTL_R8(EEE_LED) & ~0x07); + RTL_W8(tp, EEE_LED, RTL_R8(tp, EEE_LED) & ~0x07);
- RTL_W8(DLLPR, RTL_R8(DLLPR) & ~PFM_EN); - RTL_W8(MISC_1, RTL_R8(MISC_1) & ~PFM_D3COLD_EN); + RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) & ~PFM_EN); + RTL_W8(tp, MISC_1, RTL_R8(tp, MISC_1) & ~PFM_D3COLD_EN);
- RTL_W8(DLLPR, RTL_R8(DLLPR) & ~TX_10M_PS_EN); + RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) & ~TX_10M_PS_EN);
rtl_w0w1_eri(tp, 0x1b0, ERIAR_MASK_0011, 0x0000, 0x1000, ERIAR_EXGMAC);
@@@ -6218,9 -6417,12 +6218,9 @@@
static void rtl_hw_start_8168ep(struct rtl8169_private *tp) { - void __iomem *ioaddr = tp->mmio_addr; - struct pci_dev *pdev = tp->pci_dev; - rtl8168ep_stop_cmac(tp);
- RTL_W32(TxConfig, RTL_R32(TxConfig) | TXCFG_AUTO_FIFO); + RTL_W32(tp, TxConfig, RTL_R32(tp, TxConfig) | TXCFG_AUTO_FIFO);
rtl_eri_write(tp, 0xc8, ERIAR_MASK_0101, 0x00080002, ERIAR_EXGMAC); rtl_eri_write(tp, 0xcc, ERIAR_MASK_0001, 0x2f, ERIAR_EXGMAC); @@@ -6229,7 -6431,7 +6229,7 @@@
rtl_csi_access_enable_1(tp);
- rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT); + rtl_tx_performance_tweak(tp, 0x5 << MAX_READ_REQUEST_SHIFT);
rtl_w0w1_eri(tp, 0xdc, ERIAR_MASK_0001, 0x00, 0x01, ERIAR_EXGMAC); rtl_w0w1_eri(tp, 0xdc, ERIAR_MASK_0001, 0x01, 0x00, ERIAR_EXGMAC); @@@ -6238,24 -6440,25 +6238,24 @@@
rtl_eri_write(tp, 0x5f0, ERIAR_MASK_0011, 0x4f87, ERIAR_EXGMAC);
- RTL_W32(MISC, RTL_R32(MISC) & ~RXDV_GATED_EN); - RTL_W8(MaxTxPacketSize, EarlySize); + RTL_W32(tp, MISC, RTL_R32(tp, MISC) & ~RXDV_GATED_EN); + RTL_W8(tp, MaxTxPacketSize, EarlySize);
rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC); rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
/* Adjust EEE LED frequency */ - RTL_W8(EEE_LED, RTL_R8(EEE_LED) & ~0x07); + RTL_W8(tp, EEE_LED, RTL_R8(tp, EEE_LED) & ~0x07);
rtl_w0w1_eri(tp, 0x2fc, ERIAR_MASK_0001, 0x01, 0x06, ERIAR_EXGMAC);
- RTL_W8(DLLPR, RTL_R8(DLLPR) & ~TX_10M_PS_EN); + RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) & ~TX_10M_PS_EN);
rtl_pcie_state_l2l3_enable(tp, false); }
static void rtl_hw_start_8168ep_1(struct rtl8169_private *tp) { - void __iomem *ioaddr = tp->mmio_addr; static const struct ephy_info e_info_8168ep_1[] = { { 0x00, 0xffff, 0x10ab }, { 0x06, 0xffff, 0xf030 }, @@@ -6265,8 -6468,8 +6265,8 @@@ };
/* disable aspm and clock request before access ephy */ - RTL_W8(Config2, RTL_R8(Config2) & ~ClkReqEn); - RTL_W8(Config5, RTL_R8(Config5) & ~ASPM_en); + RTL_W8(tp, Config2, RTL_R8(tp, Config2) & ~ClkReqEn); + RTL_W8(tp, Config5, RTL_R8(tp, Config5) & ~ASPM_en); rtl_ephy_init(tp, e_info_8168ep_1, ARRAY_SIZE(e_info_8168ep_1));
rtl_hw_start_8168ep(tp); @@@ -6274,6 -6477,7 +6274,6 @@@
static void rtl_hw_start_8168ep_2(struct rtl8169_private *tp) { - void __iomem *ioaddr = tp->mmio_addr; static const struct ephy_info e_info_8168ep_2[] = { { 0x00, 0xffff, 0x10a3 }, { 0x19, 0xffff, 0xfc00 }, @@@ -6281,18 -6485,19 +6281,18 @@@ };
/* disable aspm and clock request before access ephy */ - RTL_W8(Config2, RTL_R8(Config2) & ~ClkReqEn); - RTL_W8(Config5, RTL_R8(Config5) & ~ASPM_en); + RTL_W8(tp, Config2, RTL_R8(tp, Config2) & ~ClkReqEn); + RTL_W8(tp, Config5, RTL_R8(tp, Config5) & ~ASPM_en); rtl_ephy_init(tp, e_info_8168ep_2, ARRAY_SIZE(e_info_8168ep_2));
rtl_hw_start_8168ep(tp);
- RTL_W8(DLLPR, RTL_R8(DLLPR) & ~PFM_EN); - RTL_W8(MISC_1, RTL_R8(MISC_1) & ~PFM_D3COLD_EN); + RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) & ~PFM_EN); + RTL_W8(tp, MISC_1, RTL_R8(tp, MISC_1) & ~PFM_D3COLD_EN); }
static void rtl_hw_start_8168ep_3(struct rtl8169_private *tp) { - void __iomem *ioaddr = tp->mmio_addr; u32 data; static const struct ephy_info e_info_8168ep_3[] = { { 0x00, 0xffff, 0x10a3 }, @@@ -6302,14 -6507,14 +6302,14 @@@ };
/* disable aspm and clock request before access ephy */ - RTL_W8(Config2, RTL_R8(Config2) & ~ClkReqEn); - RTL_W8(Config5, RTL_R8(Config5) & ~ASPM_en); + RTL_W8(tp, Config2, RTL_R8(tp, Config2) & ~ClkReqEn); + RTL_W8(tp, Config5, RTL_R8(tp, Config5) & ~ASPM_en); rtl_ephy_init(tp, e_info_8168ep_3, ARRAY_SIZE(e_info_8168ep_3));
rtl_hw_start_8168ep(tp);
- RTL_W8(DLLPR, RTL_R8(DLLPR) & ~PFM_EN); - RTL_W8(MISC_1, RTL_R8(MISC_1) & ~PFM_D3COLD_EN); + RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) & ~PFM_EN); + RTL_W8(tp, MISC_1, RTL_R8(tp, MISC_1) & ~PFM_D3COLD_EN);
data = r8168_mac_ocp_read(tp, 0xd3e2); data &= 0xf000; @@@ -6328,18 -6533,19 +6328,18 @@@ static void rtl_hw_start_8168(struct net_device *dev) { struct rtl8169_private *tp = netdev_priv(dev); - void __iomem *ioaddr = tp->mmio_addr;
- RTL_W8(Cfg9346, Cfg9346_Unlock); + RTL_W8(tp, Cfg9346, Cfg9346_Unlock);
- RTL_W8(MaxTxPacketSize, TxPacketMax); + RTL_W8(tp, MaxTxPacketSize, TxPacketMax);
- rtl_set_rx_max_size(ioaddr, rx_buf_sz); + rtl_set_rx_max_size(tp, rx_buf_sz);
- tp->cp_cmd |= RTL_R16(CPlusCmd) | PktCntrDisable | INTT_1; + tp->cp_cmd |= RTL_R16(tp, CPlusCmd) | PktCntrDisable | INTT_1;
- RTL_W16(CPlusCmd, tp->cp_cmd); + RTL_W16(tp, CPlusCmd, tp->cp_cmd);
- RTL_W16(IntrMitigate, 0x5151); + RTL_W16(tp, IntrMitigate, 0x5151);
/* Work around for RxFIFO overflow. */ if (tp->mac_version == RTL_GIGA_MAC_VER_11) { @@@ -6347,11 -6553,11 +6347,11 @@@ tp->event_slow &= ~RxOverflow; }
- rtl_set_rx_tx_desc_registers(tp, ioaddr); + rtl_set_rx_tx_desc_registers(tp);
rtl_set_rx_tx_config_registers(tp);
- RTL_R8(IntrMask); + RTL_R8(tp, IntrMask);
switch (tp->mac_version) { case RTL_GIGA_MAC_VER_11: @@@ -6457,13 -6663,13 +6457,13 @@@ break; }
- RTL_W8(Cfg9346, Cfg9346_Lock); + RTL_W8(tp, Cfg9346, Cfg9346_Lock);
- RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb); + RTL_W8(tp, ChipCmd, CmdTxEnb | CmdRxEnb);
rtl_set_rx_mode(dev);
- RTL_W16(MultiIntr, RTL_R16(MultiIntr) & 0xf000); + RTL_W16(tp, MultiIntr, RTL_R16(tp, MultiIntr) & 0xf000); }
#define R810X_CPCMD_QUIRK_MASK (\ @@@ -6479,6 -6685,8 +6479,6 @@@
static void rtl_hw_start_8102e_1(struct rtl8169_private *tp) { - void __iomem *ioaddr = tp->mmio_addr; - struct pci_dev *pdev = tp->pci_dev; static const struct ephy_info e_info_8102e_1[] = { { 0x01, 0, 0x6e65 }, { 0x02, 0, 0x091f }, @@@ -6493,29 -6701,32 +6493,29 @@@
rtl_csi_access_enable_2(tp);
- RTL_W8(DBG_REG, FIX_NAK_1); + RTL_W8(tp, DBG_REG, FIX_NAK_1);
- rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT); + rtl_tx_performance_tweak(tp, 0x5 << MAX_READ_REQUEST_SHIFT);
- RTL_W8(Config1, + RTL_W8(tp, Config1, LEDS1 | LEDS0 | Speed_down | MEMMAP | IOMAP | VPD | PMEnable); - RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en); + RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Beacon_en);
- cfg1 = RTL_R8(Config1); + cfg1 = RTL_R8(tp, Config1); if ((cfg1 & LEDS0) && (cfg1 & LEDS1)) - RTL_W8(Config1, cfg1 & ~LEDS0); + RTL_W8(tp, Config1, cfg1 & ~LEDS0);
rtl_ephy_init(tp, e_info_8102e_1, ARRAY_SIZE(e_info_8102e_1)); }
static void rtl_hw_start_8102e_2(struct rtl8169_private *tp) { - void __iomem *ioaddr = tp->mmio_addr; - struct pci_dev *pdev = tp->pci_dev; - rtl_csi_access_enable_2(tp);
- rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT); + rtl_tx_performance_tweak(tp, 0x5 << MAX_READ_REQUEST_SHIFT);
- RTL_W8(Config1, MEMMAP | IOMAP | VPD | PMEnable); - RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en); + RTL_W8(tp, Config1, MEMMAP | IOMAP | VPD | PMEnable); + RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Beacon_en); }
static void rtl_hw_start_8102e_3(struct rtl8169_private *tp) @@@ -6527,6 -6738,7 +6527,6 @@@
static void rtl_hw_start_8105e_1(struct rtl8169_private *tp) { - void __iomem *ioaddr = tp->mmio_addr; static const struct ephy_info e_info_8105e_1[] = { { 0x07, 0, 0x4000 }, { 0x19, 0, 0x0200 }, @@@ -6539,13 -6751,13 +6539,13 @@@ };
/* Force LAN exit from ASPM if Rx/Tx are not idle */ - RTL_W32(FuncEvent, RTL_R32(FuncEvent) | 0x002800); + RTL_W32(tp, FuncEvent, RTL_R32(tp, FuncEvent) | 0x002800);
/* Disable Early Tally Counter */ - RTL_W32(FuncEvent, RTL_R32(FuncEvent) & ~0x010000); + RTL_W32(tp, FuncEvent, RTL_R32(tp, FuncEvent) & ~0x010000);
- RTL_W8(MCU, RTL_R8(MCU) | EN_NDP | EN_OOB_RESET); - RTL_W8(DLLPR, RTL_R8(DLLPR) | PFM_EN); + RTL_W8(tp, MCU, RTL_R8(tp, MCU) | EN_NDP | EN_OOB_RESET); + RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) | PFM_EN);
rtl_ephy_init(tp, e_info_8105e_1, ARRAY_SIZE(e_info_8105e_1));
@@@ -6560,6 -6772,7 +6560,6 @@@ static void rtl_hw_start_8105e_2(struc
static void rtl_hw_start_8402(struct rtl8169_private *tp) { - void __iomem *ioaddr = tp->mmio_addr; static const struct ephy_info e_info_8402[] = { { 0x19, 0xffff, 0xff64 }, { 0x1e, 0, 0x4000 } @@@ -6568,14 -6781,14 +6568,14 @@@ rtl_csi_access_enable_2(tp);
/* Force LAN exit from ASPM if Rx/Tx are not idle */ - RTL_W32(FuncEvent, RTL_R32(FuncEvent) | 0x002800); + RTL_W32(tp, FuncEvent, RTL_R32(tp, FuncEvent) | 0x002800);
- RTL_W32(TxConfig, RTL_R32(TxConfig) | TXCFG_AUTO_FIFO); - RTL_W8(MCU, RTL_R8(MCU) & ~NOW_IS_OOB); + RTL_W32(tp, TxConfig, RTL_R32(tp, TxConfig) | TXCFG_AUTO_FIFO); + RTL_W8(tp, MCU, RTL_R8(tp, MCU) & ~NOW_IS_OOB);
rtl_ephy_init(tp, e_info_8402, ARRAY_SIZE(e_info_8402));
- rtl_tx_performance_tweak(tp->pci_dev, 0x5 << MAX_READ_REQUEST_SHIFT); + rtl_tx_performance_tweak(tp, 0x5 << MAX_READ_REQUEST_SHIFT);
rtl_eri_write(tp, 0xc8, ERIAR_MASK_1111, 0x00000002, ERIAR_EXGMAC); rtl_eri_write(tp, 0xe8, ERIAR_MASK_1111, 0x00000006, ERIAR_EXGMAC); @@@ -6590,12 -6803,14 +6590,12 @@@
static void rtl_hw_start_8106(struct rtl8169_private *tp) { - void __iomem *ioaddr = tp->mmio_addr; - /* Force LAN exit from ASPM if Rx/Tx are not idle */ - RTL_W32(FuncEvent, RTL_R32(FuncEvent) | 0x002800); + RTL_W32(tp, FuncEvent, RTL_R32(tp, FuncEvent) | 0x002800);
- RTL_W32(MISC, (RTL_R32(MISC) | DISABLE_LAN_EN) & ~EARLY_TALLY_EN); - RTL_W8(MCU, RTL_R8(MCU) | EN_NDP | EN_OOB_RESET); - RTL_W8(DLLPR, RTL_R8(DLLPR) & ~PFM_EN); + RTL_W32(tp, MISC, (RTL_R32(tp, MISC) | DISABLE_LAN_EN) & ~EARLY_TALLY_EN); + RTL_W8(tp, MCU, RTL_R8(tp, MCU) | EN_NDP | EN_OOB_RESET); + RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) & ~PFM_EN);
rtl_pcie_state_l2l3_enable(tp, false); } @@@ -6603,6 -6818,7 +6603,6 @@@ static void rtl_hw_start_8101(struct net_device *dev) { struct rtl8169_private *tp = netdev_priv(dev); - void __iomem *ioaddr = tp->mmio_addr; struct pci_dev *pdev = tp->pci_dev;
if (tp->mac_version >= RTL_GIGA_MAC_VER_30) @@@ -6613,16 -6829,16 +6613,16 @@@ pcie_capability_set_word(pdev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_NOSNOOP_EN);
- RTL_W8(Cfg9346, Cfg9346_Unlock); + RTL_W8(tp, Cfg9346, Cfg9346_Unlock);
- RTL_W8(MaxTxPacketSize, TxPacketMax); + RTL_W8(tp, MaxTxPacketSize, TxPacketMax);
- rtl_set_rx_max_size(ioaddr, rx_buf_sz); + rtl_set_rx_max_size(tp, rx_buf_sz);
tp->cp_cmd &= ~R810X_CPCMD_QUIRK_MASK; - RTL_W16(CPlusCmd, tp->cp_cmd); + RTL_W16(tp, CPlusCmd, tp->cp_cmd);
- rtl_set_rx_tx_desc_registers(tp, ioaddr); + rtl_set_rx_tx_desc_registers(tp);
rtl_set_rx_tx_config_registers(tp);
@@@ -6662,17 -6878,17 +6662,17 @@@ break; }
- RTL_W8(Cfg9346, Cfg9346_Lock); + RTL_W8(tp, Cfg9346, Cfg9346_Lock);
- RTL_W16(IntrMitigate, 0x0000); + RTL_W16(tp, IntrMitigate, 0x0000);
- RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb); + RTL_W8(tp, ChipCmd, CmdTxEnb | CmdRxEnb);
rtl_set_rx_mode(dev);
- RTL_R8(IntrMask); + RTL_R8(tp, IntrMask);
- RTL_W16(MultiIntr, RTL_R16(MultiIntr) & 0xf000); + RTL_W16(tp, MultiIntr, RTL_R16(tp, MultiIntr) & 0xf000); }
static int rtl8169_change_mtu(struct net_device *dev, int new_mtu) @@@ -6699,7 -6915,7 +6699,7 @@@ static inline void rtl8169_make_unusabl static void rtl8169_free_rx_databuff(struct rtl8169_private *tp, void **data_buff, struct RxDesc *desc) { - dma_unmap_single(&tp->pci_dev->dev, le64_to_cpu(desc->addr), rx_buf_sz, + dma_unmap_single(tp_to_dev(tp), le64_to_cpu(desc->addr), rx_buf_sz, DMA_FROM_DEVICE);
kfree(*data_buff); @@@ -6734,7 -6950,7 +6734,7 @@@ static struct sk_buff *rtl8169_alloc_rx { void *data; dma_addr_t mapping; - struct device *d = &tp->pci_dev->dev; + struct device *d = tp_to_dev(tp); struct net_device *dev = tp->dev; int node = dev->dev.parent ? dev_to_node(dev->dev.parent) : -1;
@@@ -6846,7 -7062,7 +6846,7 @@@ static void rtl8169_tx_clear_range(stru if (len) { struct sk_buff *skb = tx_skb->skb;
- rtl8169_unmap_tx_skb(&tp->pci_dev->dev, tx_skb, + rtl8169_unmap_tx_skb(tp_to_dev(tp), tx_skb, tp->TxDescArray + entry); if (skb) { dev_consume_skb_any(skb); @@@ -6882,7 -7098,7 +6882,7 @@@ static void rtl_reset_work(struct rtl81 napi_enable(&tp->napi); rtl_hw_start(dev); netif_wake_queue(dev); - rtl8169_check_link_status(dev, tp, tp->mmio_addr); + rtl8169_check_link_status(dev, tp); }
static void rtl8169_tx_timeout(struct net_device *dev) @@@ -6898,7 -7114,7 +6898,7 @@@ static int rtl8169_xmit_frags(struct rt struct skb_shared_info *info = skb_shinfo(skb); unsigned int cur_frag, entry; struct TxDesc *uninitialized_var(txd); - struct device *d = &tp->pci_dev->dev; + struct device *d = tp_to_dev(tp);
entry = tp->cur_tx; for (cur_frag = 0; cur_frag < info->nr_frags; cur_frag++) { @@@ -7130,7 -7346,8 +7130,7 @@@ static netdev_tx_t rtl8169_start_xmit(s struct rtl8169_private *tp = netdev_priv(dev); unsigned int entry = tp->cur_tx % NUM_TX_DESC; struct TxDesc *txd = tp->TxDescArray + entry; - void __iomem *ioaddr = tp->mmio_addr; - struct device *d = &tp->pci_dev->dev; + struct device *d = tp_to_dev(tp); dma_addr_t mapping; u32 status, len; u32 opts[2]; @@@ -7189,7 -7406,7 +7189,7 @@@
tp->cur_tx += frags + 1;
- RTL_W8(TxPoll, NPQ); + RTL_W8(tp, TxPoll, NPQ);
mmiowb();
@@@ -7260,9 -7477,11 +7260,9 @@@ static void rtl8169_pcierr_interrupt(st
/* The infamous DAC f*ckup only happens at boot time */ if ((tp->cp_cmd & PCIDAC) && !tp->cur_rx) { - void __iomem *ioaddr = tp->mmio_addr; - netif_info(tp, intr, dev, "disabling PCI DAC\n"); tp->cp_cmd &= ~PCIDAC; - RTL_W16(CPlusCmd, tp->cp_cmd); + RTL_W16(tp, CPlusCmd, tp->cp_cmd); dev->features &= ~NETIF_F_HIGHDMA; }
@@@ -7294,7 -7513,7 +7294,7 @@@ static void rtl_tx(struct net_device *d */ dma_rmb();
- rtl8169_unmap_tx_skb(&tp->pci_dev->dev, tx_skb, + rtl8169_unmap_tx_skb(tp_to_dev(tp), tx_skb, tp->TxDescArray + entry); if (status & LastFrag) { u64_stats_update_begin(&tp->tx_stats.syncp); @@@ -7328,8 -7547,11 +7328,8 @@@ * of start_xmit activity is detected (if it is not detected, * it is slow enough). -- FR */ - if (tp->cur_tx != dirty_tx) { - void __iomem *ioaddr = tp->mmio_addr; - - RTL_W8(TxPoll, NPQ); - } + if (tp->cur_tx != dirty_tx) + RTL_W8(tp, TxPoll, NPQ); } }
@@@ -7355,7 -7577,7 +7355,7 @@@ static struct sk_buff *rtl8169_try_rx_c dma_addr_t addr) { struct sk_buff *skb; - struct device *d = &tp->pci_dev->dev; + struct device *d = tp_to_dev(tp);
data = rtl8169_align(data); dma_sync_single_for_cpu(d, addr, pkt_size, DMA_FROM_DEVICE); @@@ -7510,7 -7732,7 +7510,7 @@@ static void rtl_slow_event_work(struct rtl8169_pcierr_interrupt(dev);
if (status & LinkChg) - rtl8169_check_link_status(dev, tp, tp->mmio_addr); + rtl8169_check_link_status(dev, tp);
rtl_irq_enable_all(tp); } @@@ -7582,20 -7804,21 +7582,20 @@@ static int rtl8169_poll(struct napi_str return work_done; }
-static void rtl8169_rx_missed(struct net_device *dev, void __iomem *ioaddr) +static void rtl8169_rx_missed(struct net_device *dev) { struct rtl8169_private *tp = netdev_priv(dev);
if (tp->mac_version > RTL_GIGA_MAC_VER_06) return;
- dev->stats.rx_missed_errors += (RTL_R32(RxMissed) & 0xffffff); - RTL_W32(RxMissed, 0); + dev->stats.rx_missed_errors += RTL_R32(tp, RxMissed) & 0xffffff; + RTL_W32(tp, RxMissed, 0); }
static void rtl8169_down(struct net_device *dev) { struct rtl8169_private *tp = netdev_priv(dev); - void __iomem *ioaddr = tp->mmio_addr;
del_timer_sync(&tp->timer);
@@@ -7608,7 -7831,7 +7608,7 @@@ * as netif_running is not true (rtl8169_interrupt, rtl8169_reset_task) * and napi is disabled (rtl8169_poll). */ - rtl8169_rx_missed(dev, ioaddr); + rtl8169_rx_missed(dev);
/* Give a racing hard_start_xmit a few cycles to complete. */ synchronize_sched(); @@@ -7638,7 -7861,7 +7638,7 @@@ static int rtl8169_close(struct net_dev
cancel_work_sync(&tp->wk.work);
- free_irq(pdev->irq, dev); + pci_free_irq(pdev, 0, dev);
dma_free_coherent(&pdev->dev, R8169_RX_RING_BYTES, tp->RxDescArray, tp->RxPhyAddr); @@@ -7657,13 -7880,14 +7657,13 @@@ static void rtl8169_netpoll(struct net_ { struct rtl8169_private *tp = netdev_priv(dev);
- rtl8169_interrupt(tp->pci_dev->irq, dev); + rtl8169_interrupt(pci_irq_vector(tp->pci_dev, 0), dev); } #endif
static int rtl_open(struct net_device *dev) { struct rtl8169_private *tp = netdev_priv(dev); - void __iomem *ioaddr = tp->mmio_addr; struct pci_dev *pdev = tp->pci_dev; int retval = -ENOMEM;
@@@ -7693,8 -7917,9 +7693,8 @@@
rtl_request_firmware(tp);
- retval = request_irq(pdev->irq, rtl8169_interrupt, - (tp->features & RTL_FEATURE_MSI) ? 0 : IRQF_SHARED, - dev->name, dev); + retval = pci_request_irq(pdev, 0, rtl8169_interrupt, NULL, dev, + dev->name); if (retval < 0) goto err_release_fw_2;
@@@ -7722,7 -7947,7 +7722,7 @@@ tp->saved_wolopts = 0; pm_runtime_put_sync(&pdev->dev);
- rtl8169_check_link_status(dev, tp, ioaddr); + rtl8169_check_link_status(dev, tp); out: return retval;
@@@ -7746,6 -7971,7 +7746,6 @@@ static voi rtl8169_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats) { struct rtl8169_private *tp = netdev_priv(dev); - void __iomem *ioaddr = tp->mmio_addr; struct pci_dev *pdev = tp->pci_dev; struct rtl8169_counters *counters = tp->counters; unsigned int start; @@@ -7753,7 -7979,7 +7753,7 @@@ pm_runtime_get_noresume(&pdev->dev);
if (netif_running(dev) && pm_runtime_active(&pdev->dev)) - rtl8169_rx_missed(dev, ioaddr); + rtl8169_rx_missed(dev);
do { start = u64_stats_fetch_begin_irq(&tp->rx_stats.syncp); @@@ -7876,7 -8102,7 +7876,7 @@@ static int rtl8169_runtime_suspend(stru rtl8169_net_suspend(dev);
/* Update counters before going runtime suspend */ - rtl8169_rx_missed(dev, tp->mmio_addr); + rtl8169_rx_missed(dev); rtl8169_update_counters(dev);
return 0; @@@ -7937,6 -8163,8 +7937,6 @@@ static const struct dev_pm_ops rtl8169_
static void rtl_wol_shutdown_quirk(struct rtl8169_private *tp) { - void __iomem *ioaddr = tp->mmio_addr; - /* WoL fails with 8168b when the receiver is disabled. */ switch (tp->mac_version) { case RTL_GIGA_MAC_VER_11: @@@ -7944,9 -8172,9 +7944,9 @@@ case RTL_GIGA_MAC_VER_17: pci_clear_master(tp->pci_dev);
- RTL_W8(ChipCmd, CmdRxEnb); + RTL_W8(tp, ChipCmd, CmdRxEnb); /* PCI commit */ - RTL_R8(ChipCmd); + RTL_R8(tp, ChipCmd); break; default: break; @@@ -7981,8 -8209,15 +7981,8 @@@ static void rtl_remove_one(struct pci_d struct net_device *dev = pci_get_drvdata(pdev); struct rtl8169_private *tp = netdev_priv(dev);
- if ((tp->mac_version == RTL_GIGA_MAC_VER_27 || - tp->mac_version == RTL_GIGA_MAC_VER_28 || - tp->mac_version == RTL_GIGA_MAC_VER_31 || - tp->mac_version == RTL_GIGA_MAC_VER_49 || - tp->mac_version == RTL_GIGA_MAC_VER_50 || - tp->mac_version == RTL_GIGA_MAC_VER_51) && - r8168_check_dash(tp)) { + if (r8168_check_dash(tp)) rtl8168_driver_stop(tp); - }
netif_napi_del(&tp->napi);
@@@ -8021,7 -8256,7 +8021,7 @@@ static const struct rtl_cfg_info unsigned int region; unsigned int align; u16 event_slow; - unsigned features; + unsigned int has_gmii:1; const struct rtl_coalesce_info *coalesce_info; u8 default_ver; } rtl_cfg_infos [] = { @@@ -8030,7 -8265,7 +8030,7 @@@ .region = 1, .align = 0, .event_slow = SYSErr | LinkChg | RxOverflow | RxFIFOOver, - .features = RTL_FEATURE_GMII, + .has_gmii = 1, .coalesce_info = rtl_coalesce_info_8169, .default_ver = RTL_GIGA_MAC_VER_01, }, @@@ -8039,7 -8274,7 +8039,7 @@@ .region = 2, .align = 8, .event_slow = SYSErr | LinkChg | RxOverflow, - .features = RTL_FEATURE_GMII | RTL_FEATURE_MSI, + .has_gmii = 1, .coalesce_info = rtl_coalesce_info_8168_8136, .default_ver = RTL_GIGA_MAC_VER_11, }, @@@ -8049,44 -8284,56 +8049,44 @@@ .align = 8, .event_slow = SYSErr | LinkChg | RxOverflow | RxFIFOOver | PCSTimeout, - .features = RTL_FEATURE_MSI, .coalesce_info = rtl_coalesce_info_8168_8136, .default_ver = RTL_GIGA_MAC_VER_13, } };
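For readability, the rtl_cfg_info members touched by this hunk, reassembled from the flattened context above (members the hunk does not show are omitted):

struct rtl_cfg_info {
	/* ... members not visible in this hunk ... */
	unsigned int region;
	unsigned int align;
	u16 event_slow;
	unsigned int has_gmii:1;	/* was: unsigned features; with RTL_FEATURE_* flags */
	const struct rtl_coalesce_info *coalesce_info;
	u8 default_ver;
};

The MSI feature flag disappears entirely because interrupt mode is now decided at probe time by rtl_alloc_irq() below rather than per-config-table entry.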
-/* Cfg9346_Unlock assumed. */ -static unsigned rtl_try_msi(struct rtl8169_private *tp, - const struct rtl_cfg_info *cfg) +static int rtl_alloc_irq(struct rtl8169_private *tp) { - void __iomem *ioaddr = tp->mmio_addr; - unsigned msi = 0; - u8 cfg2; + unsigned int flags;
- cfg2 = RTL_R8(Config2) & ~MSIEnable; - if (cfg->features & RTL_FEATURE_MSI) { - if (pci_enable_msi(tp->pci_dev)) { - netif_info(tp, hw, tp->dev, "no MSI. Back to INTx.\n"); - } else { - cfg2 |= MSIEnable; - msi = RTL_FEATURE_MSI; - } + if (tp->mac_version <= RTL_GIGA_MAC_VER_06) { + RTL_W8(tp, Cfg9346, Cfg9346_Unlock); + RTL_W8(tp, Config2, RTL_R8(tp, Config2) & ~MSIEnable); + RTL_W8(tp, Cfg9346, Cfg9346_Lock); + flags = PCI_IRQ_LEGACY; + } else { + flags = PCI_IRQ_ALL_TYPES; } - if (tp->mac_version <= RTL_GIGA_MAC_VER_06) - RTL_W8(Config2, cfg2); - return msi; + + return pci_alloc_irq_vectors(tp->pci_dev, 1, 1, flags); }
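rtl_alloc_irq() above is the counterpart of the pci_request_irq()/pci_free_irq()/pci_irq_vector() calls earlier in the patch: one vector is allocated at probe time (legacy-only on the old chips that need MSIEnable cleared), then requested by name at open time, and pci_free_irq(pdev, 0, dev) in rtl8169_close() releases the handler again. A condensed, hypothetical illustration of that life cycle using the same PCI IRQ API (the wrapper function below is made up for illustration and is not part of the patch):

#include <linux/interrupt.h>
#include <linux/pci.h>

/* Hypothetical sketch of the vector life cycle adopted by this patch. */
static int example_irq_setup(struct pci_dev *pdev, irq_handler_t handler,
			     void *dev_id, const char *name)
{
	int rc = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_ALL_TYPES);

	if (rc < 0)
		return rc;

	/* Vector 0 is the only one allocated above. */
	rc = pci_request_irq(pdev, 0, handler, NULL, dev_id, "%s", name);
	if (rc < 0)
		pci_free_irq_vectors(pdev);

	return rc;
}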
DECLARE_RTL_COND(rtl_link_list_ready_cond) { - void __iomem *ioaddr = tp->mmio_addr; - - return RTL_R8(MCU) & LINK_LIST_RDY; + return RTL_R8(tp, MCU) & LINK_LIST_RDY; }
DECLARE_RTL_COND(rtl_rxtx_empty_cond) { - void __iomem *ioaddr = tp->mmio_addr; - - return (RTL_R8(MCU) & RXTX_EMPTY) == RXTX_EMPTY; + return (RTL_R8(tp, MCU) & RXTX_EMPTY) == RXTX_EMPTY; }
static void rtl_hw_init_8168g(struct rtl8169_private *tp) { - void __iomem *ioaddr = tp->mmio_addr; u32 data;
tp->ocp_base = OCP_STD_PHY_BASE;
- RTL_W32(MISC, RTL_R32(MISC) | RXDV_GATED_EN); + RTL_W32(tp, MISC, RTL_R32(tp, MISC) | RXDV_GATED_EN);
if (!rtl_udelay_loop_wait_high(tp, &rtl_txcfg_empty_cond, 100, 42)) return; @@@ -8094,9 -8341,9 +8094,9 @@@ if (!rtl_udelay_loop_wait_high(tp, &rtl_rxtx_empty_cond, 100, 42)) return;
- RTL_W8(ChipCmd, RTL_R8(ChipCmd) & ~(CmdTxEnb | CmdRxEnb)); + RTL_W8(tp, ChipCmd, RTL_R8(tp, ChipCmd) & ~(CmdTxEnb | CmdRxEnb)); msleep(1); - RTL_W8(MCU, RTL_R8(MCU) & ~NOW_IS_OOB); + RTL_W8(tp, MCU, RTL_R8(tp, MCU) & ~NOW_IS_OOB);
data = r8168_mac_ocp_read(tp, 0xe8de); data &= ~(1 << 14); @@@ -8150,6 -8397,7 +8150,6 @@@ static int rtl_init_one(struct pci_dev struct rtl8169_private *tp; struct mii_if_info *mii; struct net_device *dev; - void __iomem *ioaddr; int chipset, i; int rc;
@@@ -8175,7 -8423,7 +8175,7 @@@ mii->mdio_write = rtl_mdio_write; mii->phy_id_mask = 0x1f; mii->reg_num_mask = 0x1f; - mii->supports_gmii = !!(cfg->features & RTL_FEATURE_GMII); + mii->supports_gmii = cfg->has_gmii;
/* disable ASPM completely as that cause random device stop working * problems as well as full system hangs for some PCIe devices users */ @@@ -8207,13 -8455,20 +8207,13 @@@ return -ENODEV; }
- rc = pci_request_regions(pdev, MODULENAME); + rc = pcim_iomap_regions(pdev, BIT(region), MODULENAME); if (rc < 0) { - netif_err(tp, probe, dev, "could not request regions\n"); + netif_err(tp, probe, dev, "cannot remap MMIO, aborting\n"); return rc; }
- /* ioremap MMIO region */ - ioaddr = devm_ioremap(&pdev->dev, pci_resource_start(pdev, region), - R8169_REGS_SIZE); - if (!ioaddr) { - netif_err(tp, probe, dev, "cannot remap MMIO, aborting\n"); - return -EIO; - } - tp->mmio_addr = ioaddr; + tp->mmio_addr = pcim_iomap_table(pdev)[region];
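The probe path above replaces the pci_request_regions() plus devm_ioremap() pair with the managed pcim_* helpers, which request and map the BAR in one step and release it automatically on driver detach. A minimal, hypothetical probe fragment showing the same pattern in isolation (not the driver's actual code):

#include <linux/pci.h>

static int example_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	void __iomem *base;
	int rc;

	rc = pcim_enable_device(pdev);		/* auto-disabled on detach */
	if (rc)
		return rc;

	rc = pcim_iomap_regions(pdev, BIT(0), "example");	/* request + map BAR 0 */
	if (rc)
		return rc;

	base = pcim_iomap_table(pdev)[0];	/* valid for the device's bound lifetime */
	return base ? 0 : -EIO;
}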
if (!pci_is_pcie(pdev)) netif_info(tp, probe, dev, "not PCI Express\n"); @@@ -8263,14 -8518,41 +8263,14 @@@ chipset = tp->mac_version; tp->txd_version = rtl_chip_infos[chipset].txd_version;
- RTL_W8(Cfg9346, Cfg9346_Unlock); - RTL_W8(Config1, RTL_R8(Config1) | PMEnable); - RTL_W8(Config5, RTL_R8(Config5) & (BWF | MWF | UWF | LanWake | PMEStatus)); - switch (tp->mac_version) { - case RTL_GIGA_MAC_VER_34: - case RTL_GIGA_MAC_VER_35: - case RTL_GIGA_MAC_VER_36: - case RTL_GIGA_MAC_VER_37: - case RTL_GIGA_MAC_VER_38: - case RTL_GIGA_MAC_VER_40: - case RTL_GIGA_MAC_VER_41: - case RTL_GIGA_MAC_VER_42: - case RTL_GIGA_MAC_VER_43: - case RTL_GIGA_MAC_VER_44: - case RTL_GIGA_MAC_VER_45: - case RTL_GIGA_MAC_VER_46: - case RTL_GIGA_MAC_VER_47: - case RTL_GIGA_MAC_VER_48: - case RTL_GIGA_MAC_VER_49: - case RTL_GIGA_MAC_VER_50: - case RTL_GIGA_MAC_VER_51: - if (rtl_eri_read(tp, 0xdc, ERIAR_EXGMAC) & MagicPacket_v2) - tp->features |= RTL_FEATURE_WOL; - if ((RTL_R8(Config3) & LinkUp) != 0) - tp->features |= RTL_FEATURE_WOL; - break; - default: - if ((RTL_R8(Config3) & (LinkUp | MagicPacket)) != 0) - tp->features |= RTL_FEATURE_WOL; - break; + rc = rtl_alloc_irq(tp); + if (rc < 0) { + netif_err(tp, probe, dev, "Can't allocate interrupt\n"); + return rc; } - if ((RTL_R8(Config5) & (UWF | BWF | MWF)) != 0) - tp->features |= RTL_FEATURE_WOL; - tp->features |= rtl_try_msi(tp, cfg); - RTL_W8(Cfg9346, Cfg9346_Lock); + + /* override BIOS settings, use userspace tools to enable WOL */ + __rtl8169_set_wol(tp, 0);
if (rtl_tbi_enabled(tp)) { tp->set_speed = rtl8169_set_speed_tbi; @@@ -8318,7 -8600,7 +8318,7 @@@ rtl_rar_set(tp, (u8 *)mac_addr); } for (i = 0; i < ETH_ALEN; i++) - dev->dev_addr[i] = RTL_R8(MAC0 + i); + dev->dev_addr[i] = RTL_R8(tp, MAC0 + i);
dev->ethtool_ops = &rtl8169_ethtool_ops; dev->watchdog_timeo = RTL8169_TX_TIMEOUT; @@@ -8378,16 -8660,15 +8378,16 @@@ if (!tp->counters) return -ENOMEM;
+ pci_set_drvdata(pdev, dev); + rc = register_netdev(dev); if (rc < 0) return rc;
- pci_set_drvdata(pdev, dev); - netif_info(tp, probe, dev, "%s at 0x%p, %pM, XID %08x IRQ %d\n", - rtl_chip_infos[chipset].name, ioaddr, dev->dev_addr, - (u32)(RTL_R32(TxConfig) & 0x9cf0f8ff), pdev->irq); + rtl_chip_infos[chipset].name, tp->mmio_addr, dev->dev_addr, + (u32)(RTL_R32(tp, TxConfig) & 0x9cf0f8ff), + pci_irq_vector(pdev, 0)); if (rtl_chip_infos[chipset].jumbo_max != JUMBO_1K) { netif_info(tp, probe, dev, "jumbo features [frames: %d bytes, " "tx checksumming: %s]\n", @@@ -8395,8 -8676,15 +8395,8 @@@ rtl_chip_infos[chipset].jumbo_tx_csum ? "ok" : "ko"); }
- if ((tp->mac_version == RTL_GIGA_MAC_VER_27 || - tp->mac_version == RTL_GIGA_MAC_VER_28 || - tp->mac_version == RTL_GIGA_MAC_VER_31 || - tp->mac_version == RTL_GIGA_MAC_VER_49 || - tp->mac_version == RTL_GIGA_MAC_VER_50 || - tp->mac_version == RTL_GIGA_MAC_VER_51) && - r8168_check_dash(tp)) { + if (r8168_check_dash(tp)) rtl8168_driver_start(tp); - }
netif_carrier_off(dev);
diff --combined drivers/net/hyperv/rndis_filter.c index e2b68d9328a7,465c42e30508..6b127be781d9 --- a/drivers/net/hyperv/rndis_filter.c +++ b/drivers/net/hyperv/rndis_filter.c @@@ -31,7 -31,6 +31,7 @@@ #include <linux/rtnetlink.h>
#include "hyperv_net.h" +#include "netvsc_trace.h"
static void rndis_set_multicast(struct work_struct *w);
@@@ -242,8 -241,6 +242,8 @@@ static int rndis_filter_send_request(st pb[0].len; }
+ trace_rndis_send(dev->ndev, 0, &req->request_msg); + rcu_read_lock_bh(); ret = netvsc_send(dev->ndev, packet, NULL, pb, NULL); rcu_read_unlock_bh(); @@@ -365,15 -362,14 +365,15 @@@ static inline void *rndis_get_ppi(struc
static int rndis_filter_receive_data(struct net_device *ndev, struct netvsc_device *nvdev, - struct rndis_message *msg, struct vmbus_channel *channel, - void *data, u32 data_buflen) + struct rndis_message *msg, + u32 data_buflen) { struct rndis_packet *rndis_pkt = &msg->msg.pkt; const struct ndis_tcp_ip_checksum_info *csum_info; const struct ndis_pkt_8021q_info *vlan; u32 data_offset; + void *data;
/* Remove the rndis header and pass it back up the stack */ data_offset = RNDIS_HEADER_SIZE + rndis_pkt->data_offset; @@@ -394,15 -390,14 +394,15 @@@
vlan = rndis_get_ppi(rndis_pkt, IEEE_8021Q_INFO);
+ csum_info = rndis_get_ppi(rndis_pkt, TCPIP_CHKSUM_PKTINFO); + + data = (void *)msg + data_offset; + /* * Remove the rndis trailer padding from rndis packet message * rndis_pkt->data_len tell us the real data length, we only copy * the data packet to the stack, without the rndis trailer padding */ - data = (void *)((unsigned long)data + data_offset); - csum_info = rndis_get_ppi(rndis_pkt, TCPIP_CHKSUM_PKTINFO); - return netvsc_recv_callback(ndev, nvdev, channel, data, rndis_pkt->data_len, csum_info, vlan); @@@ -421,8 -416,8 +421,8 @@@ int rndis_filter_receive(struct net_dev
switch (rndis_msg->ndis_msg_type) { case RNDIS_MSG_PACKET: - return rndis_filter_receive_data(ndev, net_dev, rndis_msg, - channel, data, buflen); + return rndis_filter_receive_data(ndev, net_dev, channel, + rndis_msg, buflen); case RNDIS_MSG_INIT_C: case RNDIS_MSG_QUERY_C: case RNDIS_MSG_SET_C: @@@ -439,10 -434,10 +439,10 @@@ "unhandled rndis message (type %u len %u)\n", rndis_msg->ndis_msg_type, rndis_msg->msg_len); - break; + return NVSP_STAT_FAIL; }
- return 0; + return NVSP_STAT_SUCCESS; }
static int rndis_filter_query_device(struct rndis_device *dev, @@@ -863,7 -858,7 +863,7 @@@ static void rndis_set_multicast(struct if (flags & IFF_PROMISC) { filter = NDIS_PACKET_TYPE_PROMISCUOUS; } else { - if (flags & IFF_ALLMULTI) + if (!netdev_mc_empty(rdev->ndev) || (flags & IFF_ALLMULTI)) filter |= NDIS_PACKET_TYPE_ALL_MULTICAST; if (flags & IFF_BROADCAST) filter |= NDIS_PACKET_TYPE_BROADCAST; @@@ -947,11 -942,12 +947,11 @@@ static bool netvsc_device_idle(const st return true; }
-static void rndis_filter_halt_device(struct rndis_device *dev) +static void rndis_filter_halt_device(struct netvsc_device *nvdev, + struct rndis_device *dev) { struct rndis_request *request; struct rndis_halt_request *halt; - struct net_device_context *net_device_ctx = netdev_priv(dev->ndev); - struct netvsc_device *nvdev = rtnl_dereference(net_device_ctx->nvdev);
/* Attempt to do a rndis device halt */ request = get_rndis_request(dev, RNDIS_MSG_HALT, @@@ -1090,8 -1086,6 +1090,8 @@@ void rndis_set_subchannel(struct work_s init_packet->msg.v5_msg.subchn_req.op = NVSP_SUBCHANNEL_ALLOCATE; init_packet->msg.v5_msg.subchn_req.num_subchannels = nvdev->num_chn - 1; + trace_nvsp_send(ndev, init_packet); + ret = vmbus_sendpacket(hv_dev->channel, init_packet, sizeof(struct nvsp_message), (unsigned long)init_packet, @@@ -1356,7 -1350,7 +1356,7 @@@ void rndis_filter_device_remove(struct struct rndis_device *rndis_dev = net_dev->extension;
/* Halt and release the rndis device */ - rndis_filter_halt_device(rndis_dev); + rndis_filter_halt_device(net_dev, rndis_dev);
net_dev->extension = NULL;
diff --combined drivers/net/team/team.c index 222093e878a8,befed2d22bf4..a6c6ce19eeee --- a/drivers/net/team/team.c +++ b/drivers/net/team/team.c @@@ -1105,15 -1105,14 +1105,15 @@@ static void team_port_disable_netpoll(s } #endif
-static int team_upper_dev_link(struct team *team, struct team_port *port) +static int team_upper_dev_link(struct team *team, struct team_port *port, + struct netlink_ext_ack *extack) { struct netdev_lag_upper_info lag_upper_info; int err;
lag_upper_info.tx_type = team->mode->lag_tx_type; err = netdev_master_upper_dev_link(port->dev, team->dev, NULL, - &lag_upper_info, NULL); + &lag_upper_info, extack); if (err) return err; port->dev->priv_flags |= IFF_TEAM_PORT; @@@ -1130,8 -1129,7 +1130,8 @@@ static void __team_port_change_port_add static int team_dev_type_check_change(struct net_device *dev, struct net_device *port_dev);
-static int team_port_add(struct team *team, struct net_device *port_dev) +static int team_port_add(struct team *team, struct net_device *port_dev, + struct netlink_ext_ack *extack) { struct net_device *dev = team->dev; struct team_port *port; @@@ -1139,14 -1137,12 +1139,14 @@@ int err;
if (port_dev->flags & IFF_LOOPBACK) { + NL_SET_ERR_MSG(extack, "Loopback device can't be added as a team port"); netdev_err(dev, "Device %s is loopback device. Loopback devices can't be added as a team port\n", portname); return -EINVAL; }
if (team_port_exists(port_dev)) { + NL_SET_ERR_MSG(extack, "Device is already a port of a team device"); netdev_err(dev, "Device %s is already a port " "of a team device\n", portname); return -EBUSY; @@@ -1154,7 -1150,6 +1154,7 @@@
if (port_dev->features & NETIF_F_VLAN_CHALLENGED && vlan_uses_dev(dev)) { + NL_SET_ERR_MSG(extack, "Device is VLAN challenged and team device has VLAN set up"); netdev_err(dev, "Device %s is VLAN challenged and team device has VLAN set up\n", portname); return -EPERM; @@@ -1165,7 -1160,6 +1165,7 @@@ return err;
if (port_dev->flags & IFF_UP) { + NL_SET_ERR_MSG(extack, "Device is up. Set it down before adding it as a team port"); netdev_err(dev, "Device %s is up. Set it down before adding it as a team port\n", portname); return -EBUSY; @@@ -1203,11 -1197,6 +1203,6 @@@ goto err_dev_open; }
- netif_addr_lock_bh(dev); - dev_uc_sync_multiple(port_dev, dev); - dev_mc_sync_multiple(port_dev, dev); - netif_addr_unlock_bh(dev); - err = vlan_vids_add_by_dev(port_dev, dev); if (err) { netdev_err(dev, "Failed to add vlan ids to device %s\n", @@@ -1233,7 -1222,7 +1228,7 @@@ goto err_handler_register; }
- err = team_upper_dev_link(team, port); + err = team_upper_dev_link(team, port, extack); if (err) { netdev_err(dev, "Device %s failed to set upper link\n", portname); @@@ -1247,6 -1236,11 +1242,11 @@@ goto err_option_port_add; }
+ netif_addr_lock_bh(dev); + dev_uc_sync_multiple(port_dev, dev); + dev_mc_sync_multiple(port_dev, dev); + netif_addr_unlock_bh(dev); + port->index = -1; list_add_tail_rcu(&port->list, &team->port_list); team_port_enable(team, port); @@@ -1271,8 -1265,6 +1271,6 @@@ err_enable_netpoll vlan_vids_del_by_dev(port_dev, dev);
err_vids_add: - dev_uc_unsync(port_dev, dev); - dev_mc_unsync(port_dev, dev); dev_close(port_dev);
err_dev_open: @@@ -1927,7 -1919,7 +1925,7 @@@ static int team_add_slave(struct net_de int err;
mutex_lock(&team->lock); - err = team_port_add(team, port_dev); + err = team_port_add(team, port_dev, extack); mutex_unlock(&team->lock);
if (!err) diff --combined drivers/net/vrf.c index 102582459bef,ac40924fe437..0a2b180d138a --- a/drivers/net/vrf.c +++ b/drivers/net/vrf.c @@@ -578,12 -578,13 +578,13 @@@ static int vrf_finish_output(struct ne if (!IS_ERR(neigh)) { sock_confirm_neigh(skb, neigh); ret = neigh_output(neigh, skb); + rcu_read_unlock_bh(); + return ret; }
rcu_read_unlock_bh(); err: - if (unlikely(ret < 0)) - vrf_tx_error(skb->dev, skb); + vrf_tx_error(skb->dev, skb); return ret; }
@@@ -736,6 -737,7 +737,6 @@@ static int vrf_rtable_create(struct net return -ENOMEM;
rth->dst.output = vrf_output; - rth->rt_table_id = vrf->tb_id;
rcu_assign_pointer(vrf->rth, rth);
@@@ -941,7 -943,6 +942,7 @@@ static struct rt6_info *vrf_ip6_route_l const struct net_device *dev, struct flowi6 *fl6, int ifindex, + const struct sk_buff *skb, int flags) { struct net_vrf *vrf = netdev_priv(dev); @@@ -960,7 -961,7 +961,7 @@@ if (!table) return NULL;
- return ip6_pol_route(net, table, ifindex, fl6, flags); + return ip6_pol_route(net, table, ifindex, fl6, skb, flags); }
static void vrf_ip6_input_dst(struct sk_buff *skb, struct net_device *vrf_dev, @@@ -978,7 -979,7 +979,7 @@@ struct net *net = dev_net(vrf_dev); struct rt6_info *rt6;
- rt6 = vrf_ip6_route_lookup(net, vrf_dev, &fl6, ifindex, + rt6 = vrf_ip6_route_lookup(net, vrf_dev, &fl6, ifindex, skb, RT6_LOOKUP_F_HAS_SADDR | RT6_LOOKUP_F_IFACE); if (unlikely(!rt6)) return; @@@ -1111,7 -1112,7 +1112,7 @@@ static struct dst_entry *vrf_link_scope if (!ipv6_addr_any(&fl6->saddr)) flags |= RT6_LOOKUP_F_HAS_SADDR;
- rt = vrf_ip6_route_lookup(net, dev, fl6, fl6->flowi6_oif, flags); + rt = vrf_ip6_route_lookup(net, dev, fl6, fl6->flowi6_oif, NULL, flags); if (rt) dst = &rt->dst;
@@@ -1146,7 -1147,6 +1147,7 @@@ static inline size_t vrf_fib_rule_nl_si sz = NLMSG_ALIGN(sizeof(struct fib_rule_hdr)); sz += nla_total_size(sizeof(u8)); /* FRA_L3MDEV */ sz += nla_total_size(sizeof(u32)); /* FRA_PRIORITY */ + sz += nla_total_size(sizeof(u8)); /* FRA_PROTOCOL */
return sz; } @@@ -1177,9 -1177,6 +1178,9 @@@ static int vrf_fib_rule(const struct ne frh->family = family; frh->action = FR_ACT_TO_TBL;
+ if (nla_put_u8(skb, FRA_PROTOCOL, RTPROT_KERNEL)) + goto nla_put_failure; + if (nla_put_u8(skb, FRA_L3MDEV, 1)) goto nla_put_failure;
diff --combined drivers/scsi/iscsi_tcp.c index 0ad00dbf912d,dd66c11399a2..6d886b13dbe9 --- a/drivers/scsi/iscsi_tcp.c +++ b/drivers/scsi/iscsi_tcp.c @@@ -37,6 -37,7 +37,7 @@@ #include <linux/kfifo.h> #include <linux/scatterlist.h> #include <linux/module.h> + #include <linux/backing-dev.h> #include <net/tcp.h> #include <scsi/scsi_cmnd.h> #include <scsi/scsi_device.h> @@@ -732,7 -733,7 +733,7 @@@ static int iscsi_sw_tcp_conn_get_param( struct iscsi_tcp_conn *tcp_conn = conn->dd_data; struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data; struct sockaddr_in6 addr; - int rc, len; + int rc;
switch(param) { case ISCSI_PARAM_CONN_PORT: @@@ -745,12 -746,12 +746,12 @@@ } if (param == ISCSI_PARAM_LOCAL_PORT) rc = kernel_getsockname(tcp_sw_conn->sock, - (struct sockaddr *)&addr, &len); + (struct sockaddr *)&addr); else rc = kernel_getpeername(tcp_sw_conn->sock, - (struct sockaddr *)&addr, &len); + (struct sockaddr *)&addr); spin_unlock_bh(&conn->session->frwd_lock); - if (rc) + if (rc < 0) return rc;
return iscsi_conn_get_addr_param((struct sockaddr_storage *) @@@ -771,7 -772,7 +772,7 @@@ static int iscsi_sw_tcp_host_get_param( struct iscsi_tcp_conn *tcp_conn; struct iscsi_sw_tcp_conn *tcp_sw_conn; struct sockaddr_in6 addr; - int rc, len; + int rc;
switch (param) { case ISCSI_HOST_PARAM_IPADDRESS: @@@ -793,9 -794,9 +794,9 @@@ }
rc = kernel_getsockname(tcp_sw_conn->sock, - (struct sockaddr *)&addr, &len); + (struct sockaddr *)&addr); spin_unlock_bh(&session->frwd_lock); - if (rc) + if (rc < 0) return rc;
return iscsi_conn_get_addr_param((struct sockaddr_storage *) @@@ -954,6 -955,13 +955,13 @@@ static int iscsi_sw_tcp_slave_alloc(str
static int iscsi_sw_tcp_slave_configure(struct scsi_device *sdev) { + struct iscsi_sw_tcp_host *tcp_sw_host = iscsi_host_priv(sdev->host); + struct iscsi_session *session = tcp_sw_host->session; + struct iscsi_conn *conn = session->leadconn; + + if (conn->datadgst_en) + sdev->request_queue->backing_dev_info->capabilities + |= BDI_CAP_STABLE_WRITES; blk_queue_bounce_limit(sdev->request_queue, BLK_BOUNCE_ANY); blk_queue_dma_alignment(sdev->request_queue, 0); return 0; diff --combined drivers/vhost/net.c index a31d9b240af8,12bcfbac2cc9..edc6fec9ad84 --- a/drivers/vhost/net.c +++ b/drivers/vhost/net.c @@@ -630,7 -630,7 +630,7 @@@ static int vhost_net_rx_peek_head_len(s
if (!len && vq->busyloop_timeout) { /* Both tx vq and rx socket were polled here */ - mutex_lock(&vq->mutex); + mutex_lock_nested(&vq->mutex, 1); vhost_disable_notify(&net->dev, vq);
preempt_disable(); @@@ -763,7 -763,7 +763,7 @@@ static void handle_rx(struct vhost_net struct iov_iter fixup; __virtio16 num_buffers;
- mutex_lock(&vq->mutex); + mutex_lock_nested(&vq->mutex, 0); sock = vq->private_data; if (!sock) goto out; @@@ -1040,7 -1040,7 +1040,7 @@@ static struct socket *get_raw_socket(in struct sockaddr_ll sa; char buf[MAX_ADDR_LEN]; } uaddr; - int uaddr_len = sizeof uaddr, r; + int r; struct socket *sock = sockfd_lookup(fd, &r);
if (!sock) @@@ -1052,8 -1052,9 +1052,8 @@@ goto err; }
- r = sock->ops->getname(sock, (struct sockaddr *)&uaddr.sa, - &uaddr_len, 0); - if (r) + r = sock->ops->getname(sock, (struct sockaddr *)&uaddr.sa, 0); + if (r < 0) goto err;
if (uaddr.sa.sll_family != AF_PACKET) { diff --combined include/linux/if_vlan.h index 24d1976c1e61,7d30892da064..d11f41d5269f --- a/include/linux/if_vlan.h +++ b/include/linux/if_vlan.h @@@ -83,30 -83,6 +83,30 @@@ static inline bool is_vlan_dev(const st #define skb_vlan_tag_get_id(__skb) ((__skb)->vlan_tci & VLAN_VID_MASK) #define skb_vlan_tag_get_prio(__skb) ((__skb)->vlan_tci & VLAN_PRIO_MASK)
+static inline int vlan_get_rx_ctag_filter_info(struct net_device *dev) +{ + ASSERT_RTNL(); + return notifier_to_errno(call_netdevice_notifiers(NETDEV_CVLAN_FILTER_PUSH_INFO, dev)); +} + +static inline void vlan_drop_rx_ctag_filter_info(struct net_device *dev) +{ + ASSERT_RTNL(); + call_netdevice_notifiers(NETDEV_CVLAN_FILTER_DROP_INFO, dev); +} + +static inline int vlan_get_rx_stag_filter_info(struct net_device *dev) +{ + ASSERT_RTNL(); + return notifier_to_errno(call_netdevice_notifiers(NETDEV_SVLAN_FILTER_PUSH_INFO, dev)); +} + +static inline void vlan_drop_rx_stag_filter_info(struct net_device *dev) +{ + ASSERT_RTNL(); + call_netdevice_notifiers(NETDEV_SVLAN_FILTER_DROP_INFO, dev); +} + /** * struct vlan_pcpu_stats - VLAN percpu rx/tx stats * @rx_packets: number of received packets @@@ -347,13 -323,24 +347,24 @@@ static inline int __vlan_insert_inner_t skb_push(skb, VLAN_HLEN);
/* Move the mac header sans proto to the beginning of the new header. */ - memmove(skb->data, skb->data + VLAN_HLEN, mac_len - ETH_TLEN); + if (likely(mac_len > ETH_TLEN)) + memmove(skb->data, skb->data + VLAN_HLEN, mac_len - ETH_TLEN); skb->mac_header -= VLAN_HLEN;
veth = (struct vlan_ethhdr *)(skb->data + mac_len - ETH_HLEN);
/* first, the ethernet type */ - veth->h_vlan_proto = vlan_proto; + if (likely(mac_len >= ETH_TLEN)) { + /* h_vlan_encapsulated_proto should already be populated, and + * skb->data has space for h_vlan_proto + */ + veth->h_vlan_proto = vlan_proto; + } else { + /* h_vlan_encapsulated_proto should not be populated, and + * skb->data has no space for h_vlan_proto + */ + veth->h_vlan_encapsulated_proto = skb->protocol; + }
/* now, the TCI */ veth->h_vlan_TCI = htons(vlan_tci); diff --combined include/net/netfilter/nf_tables.h index bd2a18d66189,30eb0652b025..cd368d1b8cb8 --- a/include/net/netfilter/nf_tables.h +++ b/include/net/netfilter/nf_tables.h @@@ -434,11 -434,11 +434,11 @@@ static inline struct nft_set *nft_set_c return (void *)priv - offsetof(struct nft_set, data); }
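The __vlan_insert_inner_tag() change in include/linux/if_vlan.h above guards both the memmove() and the h_vlan_proto store on mac_len, so an skb without (or with a truncated) MAC header no longer writes in front of skb->data. A short walk-through of the two cases, assuming the usual ETH_HLEN = 14, ETH_TLEN = 2 and VLAN_HLEN = 4:

        mac_len = 14 (full Ethernet header):
                skb_push() makes room for 4 bytes, memmove() shifts the 12
                address bytes down, veth lands at skb->data, and h_vlan_proto
                plus h_vlan_TCI are written as before.

        mac_len = 0 (no MAC header set on the skb):
                there are no address bytes to move and h_vlan_proto would sit
                at skb->data - 2, outside the pushed area, so only h_vlan_TCI
                and h_vlan_encapsulated_proto (taken from skb->protocol) are
                written into the 4 pushed bytes.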
-struct nft_set *nft_set_lookup(const struct net *net, - const struct nft_table *table, - const struct nlattr *nla_set_name, - const struct nlattr *nla_set_id, - u8 genmask); +struct nft_set *nft_set_lookup_global(const struct net *net, + const struct nft_table *table, + const struct nlattr *nla_set_name, + const struct nlattr *nla_set_id, + u8 genmask);
static inline unsigned long nft_set_gc_interval(const struct nft_set *set) { @@@ -868,7 -868,7 +868,7 @@@ struct nft_chain char *name; };
-enum nft_chain_type { +enum nft_chain_types { NFT_CHAIN_T_DEFAULT = 0, NFT_CHAIN_T_ROUTE, NFT_CHAIN_T_NAT, @@@ -876,7 -876,7 +876,7 @@@ };
/** - * struct nf_chain_type - nf_tables chain type info + * struct nft_chain_type - nf_tables chain type info * * @name: name of the type * @type: numeric identifier @@@ -884,22 -884,18 +884,22 @@@ * @owner: module owner * @hook_mask: mask of valid hooks * @hooks: array of hook functions + * @init: chain initialization function + * @free: chain release function */ -struct nf_chain_type { +struct nft_chain_type { const char *name; - enum nft_chain_type type; + enum nft_chain_types type; int family; struct module *owner; unsigned int hook_mask; nf_hookfn *hooks[NF_MAX_HOOKS]; + int (*init)(struct nft_ctx *ctx); + void (*free)(struct nft_ctx *ctx); };
int nft_chain_validate_dependency(const struct nft_chain *chain, - enum nft_chain_type type); + enum nft_chain_types type); int nft_chain_validate_hooks(const struct nft_chain *chain, unsigned int hook_flags);
@@@ -921,7 -917,7 +921,7 @@@ struct nft_stats */ struct nft_base_chain { struct nf_hook_ops ops; - const struct nf_chain_type *type; + const struct nft_chain_type *type; u8 policy; u8 flags; struct nft_stats __percpu *stats; @@@ -974,8 -970,8 +974,8 @@@ struct nft_table char *name; };
-int nft_register_chain_type(const struct nf_chain_type *); -void nft_unregister_chain_type(const struct nf_chain_type *); +void nft_register_chain_type(const struct nft_chain_type *); +void nft_unregister_chain_type(const struct nft_chain_type *);
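The nf_tables.h hunks above rename struct nf_chain_type to struct nft_chain_type, add per-chain-type init/free callbacks, and make nft_register_chain_type() return void. A minimal sketch of what a chain type definition could look like with the new fields; the name, hook and callbacks here are made up for illustration and only the field names come from the hunk above:

static int example_chain_init(struct nft_ctx *ctx)
{
        /* e.g. set up a resource shared by all chains of this type */
        return 0;
}

static void example_chain_free(struct nft_ctx *ctx)
{
        /* release whatever example_chain_init() set up */
}

static const struct nft_chain_type example_chain_type = {
        .name           = "example",
        .type           = NFT_CHAIN_T_DEFAULT,
        .family         = NFPROTO_IPV4,
        .owner          = THIS_MODULE,
        .hook_mask      = (1 << NF_INET_LOCAL_IN),
        /* .hooks[NF_INET_LOCAL_IN] would point at an nf_hookfn */
        .init           = example_chain_init,
        .free           = example_chain_free,
};

Since registration no longer returns an error, module init reduces to a single nft_register_chain_type(&example_chain_type) call with nothing to unwind.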
int nft_register_expr(struct nft_expr_type *); void nft_unregister_expr(struct nft_expr_type *); @@@ -1072,6 -1068,8 +1072,8 @@@ struct nft_object_ops int nft_register_obj(struct nft_object_type *obj_type); void nft_unregister_obj(struct nft_object_type *obj_type);
+ #define NFT_FLOWTABLE_DEVICE_MAX 8 + /** * struct nft_flowtable - nf_tables flow table * @@@ -1084,6 -1082,7 +1086,7 @@@ * @genmask: generation mask * @use: number of references to this flow table * @handle: unique object handle + * @dev_name: array of device names * @data: rhashtable and garbage collector * @ops: array of hooks */ @@@ -1097,6 -1096,7 +1100,7 @@@ struct nft_flowtable u32 genmask:2, use:30; u64 handle; + char *dev_name[NFT_FLOWTABLE_DEVICE_MAX]; /* runtime data below here */ struct nf_hook_ops *ops ____cacheline_aligned; struct nf_flowtable data; @@@ -1349,7 -1349,4 +1353,7 @@@ struct nft_trans_flowtable #define nft_trans_flowtable(trans) \ (((struct nft_trans_flowtable *)trans->data)->flowtable)
+int __init nft_chain_filter_init(void); +void __exit nft_chain_filter_fini(void); + #endif /* _NET_NF_TABLES_H */ diff --combined include/net/sch_generic.h index 493e311bbe93,8da32678ce18..5154c8300262 --- a/include/net/sch_generic.h +++ b/include/net/sch_generic.h @@@ -30,6 -30,7 +30,7 @@@ struct qdisc_rate_table enum qdisc_state_t { __QDISC_STATE_SCHED, __QDISC_STATE_DEACTIVATED, + __QDISC_STATE_RUNNING, };
struct qdisc_size_table { @@@ -540,7 -541,7 +541,7 @@@ static inline bool skb_skip_tc_classify return false; }
-/* Reset all TX qdiscs greater then index of a device. */ +/* Reset all TX qdiscs greater than index of a device. */ static inline void qdisc_reset_all_tx_gt(struct net_device *dev, unsigned int i) { struct Qdisc *qdisc; diff --combined net/batman-adv/gateway_client.c index c294f6fd43e0,808d2dd4a839..8b198ee798c9 --- a/net/batman-adv/gateway_client.c +++ b/net/batman-adv/gateway_client.c @@@ -1,5 -1,5 +1,5 @@@ // SPDX-License-Identifier: GPL-2.0 -/* Copyright (C) 2009-2017 B.A.T.M.A.N. contributors: +/* Copyright (C) 2009-2018 B.A.T.M.A.N. contributors: * * Marek Lindner * @@@ -746,7 -746,7 +746,7 @@@ bool batadv_gw_out_of_range(struct bata { struct batadv_neigh_node *neigh_curr = NULL; struct batadv_neigh_node *neigh_old = NULL; - struct batadv_orig_node *orig_dst_node; + struct batadv_orig_node *orig_dst_node = NULL; struct batadv_gw_node *gw_node = NULL; struct batadv_gw_node *curr_gw = NULL; struct batadv_neigh_ifinfo *curr_ifinfo, *old_ifinfo; @@@ -757,6 -757,9 +757,9 @@@
vid = batadv_get_vid(skb, 0);
+ if (is_multicast_ether_addr(ethhdr->h_dest)) + goto out; + orig_dst_node = batadv_transtable_search(bat_priv, ethhdr->h_source, ethhdr->h_dest, vid); if (!orig_dst_node) diff --combined net/batman-adv/multicast.c index de3a055f7dd8,ee56af5c43e0..a11d3d89f012 --- a/net/batman-adv/multicast.c +++ b/net/batman-adv/multicast.c @@@ -1,5 -1,5 +1,5 @@@ // SPDX-License-Identifier: GPL-2.0 -/* Copyright (C) 2014-2017 B.A.T.M.A.N. contributors: +/* Copyright (C) 2014-2018 B.A.T.M.A.N. contributors: * * Linus Lüssing * @@@ -40,7 -40,6 +40,7 @@@ #include <linux/list.h> #include <linux/lockdep.h> #include <linux/netdevice.h> +#include <linux/netlink.h> #include <linux/printk.h> #include <linux/rculist.h> #include <linux/rcupdate.h> @@@ -53,20 -52,14 +53,20 @@@ #include <linux/types.h> #include <linux/workqueue.h> #include <net/addrconf.h> +#include <net/genetlink.h> #include <net/if_inet6.h> #include <net/ip.h> #include <net/ipv6.h> +#include <net/netlink.h> +#include <net/sock.h> #include <uapi/linux/batadv_packet.h> +#include <uapi/linux/batman_adv.h>
#include "hard-interface.h" #include "hash.h" #include "log.h" +#include "netlink.h" +#include "soft-interface.h" #include "translation-table.h" #include "tvlv.h"
@@@ -109,36 -102,7 +109,36 @@@ static struct net_device *batadv_mcast_ }
/** + * batadv_mcast_addr_is_ipv4() - check if multicast MAC is IPv4 + * @addr: the MAC address to check + * + * Return: True, if MAC address is one reserved for IPv4 multicast, false + * otherwise. + */ +static bool batadv_mcast_addr_is_ipv4(const u8 *addr) +{ + static const u8 prefix[] = {0x01, 0x00, 0x5E}; + + return memcmp(prefix, addr, sizeof(prefix)) == 0; +} + +/** + * batadv_mcast_addr_is_ipv6() - check if multicast MAC is IPv6 + * @addr: the MAC address to check + * + * Return: True, if MAC address is one reserved for IPv6 multicast, false + * otherwise. + */ +static bool batadv_mcast_addr_is_ipv6(const u8 *addr) +{ + static const u8 prefix[] = {0x33, 0x33}; + + return memcmp(prefix, addr, sizeof(prefix)) == 0; +} + +/** * batadv_mcast_mla_softif_get() - get softif multicast listeners + * @bat_priv: the bat priv with all the soft interface information * @dev: the device to collect multicast addresses from * @mcast_list: a list to put found addresses into * @@@ -155,12 -119,9 +155,12 @@@ * Return: -ENOMEM on memory allocation error or the number of * items added to the mcast_list otherwise. */ -static int batadv_mcast_mla_softif_get(struct net_device *dev, +static int batadv_mcast_mla_softif_get(struct batadv_priv *bat_priv, + struct net_device *dev, struct hlist_head *mcast_list) { + bool all_ipv4 = bat_priv->mcast.flags & BATADV_MCAST_WANT_ALL_IPV4; + bool all_ipv6 = bat_priv->mcast.flags & BATADV_MCAST_WANT_ALL_IPV6; struct net_device *bridge = batadv_mcast_get_bridge(dev); struct netdev_hw_addr *mc_list_entry; struct batadv_hw_addr *new; @@@ -168,12 -129,6 +168,12 @@@
netif_addr_lock_bh(bridge ? bridge : dev); netdev_for_each_mc_addr(mc_list_entry, bridge ? bridge : dev) { + if (all_ipv4 && batadv_mcast_addr_is_ipv4(mc_list_entry->addr)) + continue; + + if (all_ipv6 && batadv_mcast_addr_is_ipv6(mc_list_entry->addr)) + continue; + new = kmalloc(sizeof(*new), GFP_ATOMIC); if (!new) { ret = -ENOMEM; @@@ -238,7 -193,6 +238,7 @@@ static void batadv_mcast_mla_br_addr_cp
/** * batadv_mcast_mla_bridge_get() - get bridged-in multicast listeners + * @bat_priv: the bat priv with all the soft interface information * @dev: a bridge slave whose bridge to collect multicast addresses from * @mcast_list: a list to put found addresses into * @@@ -250,13 -204,10 +250,13 @@@ * Return: -ENOMEM on memory allocation error or the number of * items added to the mcast_list otherwise. */ -static int batadv_mcast_mla_bridge_get(struct net_device *dev, +static int batadv_mcast_mla_bridge_get(struct batadv_priv *bat_priv, + struct net_device *dev, struct hlist_head *mcast_list) { struct list_head bridge_mcast_list = LIST_HEAD_INIT(bridge_mcast_list); + bool all_ipv4 = bat_priv->mcast.flags & BATADV_MCAST_WANT_ALL_IPV4; + bool all_ipv6 = bat_priv->mcast.flags & BATADV_MCAST_WANT_ALL_IPV6; struct br_ip_list *br_ip_entry, *tmp; struct batadv_hw_addr *new; u8 mcast_addr[ETH_ALEN]; @@@ -270,12 -221,6 +270,12 @@@ goto out;
list_for_each_entry(br_ip_entry, &bridge_mcast_list, list) { + if (all_ipv4 && br_ip_entry->addr.proto == htons(ETH_P_IP)) + continue; + + if (all_ipv6 && br_ip_entry->addr.proto == htons(ETH_P_IPV6)) + continue; + batadv_mcast_mla_br_addr_cpy(mcast_addr, &br_ip_entry->addr); if (batadv_mcast_mla_is_duplicate(mcast_addr, mcast_list)) continue; @@@ -623,11 -568,11 +623,11 @@@ static void __batadv_mcast_mla_update(s if (!batadv_mcast_mla_tvlv_update(bat_priv)) goto update;
- ret = batadv_mcast_mla_softif_get(soft_iface, &mcast_list); + ret = batadv_mcast_mla_softif_get(bat_priv, soft_iface, &mcast_list); if (ret < 0) goto out;
- ret = batadv_mcast_mla_bridge_get(soft_iface, &mcast_list); + ret = batadv_mcast_mla_bridge_get(bat_priv, soft_iface, &mcast_list); if (ret < 0) goto out;
@@@ -869,8 -814,8 +869,8 @@@ static struct batadv_orig_node batadv_mcast_forw_tt_node_get(struct batadv_priv *bat_priv, struct ethhdr *ethhdr) { - return batadv_transtable_search(bat_priv, ethhdr->h_source, - ethhdr->h_dest, BATADV_NO_FLAGS); + return batadv_transtable_search(bat_priv, NULL, ethhdr->h_dest, + BATADV_NO_FLAGS); }
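The batadv_mcast_addr_is_ipv4()/batadv_mcast_addr_is_ipv6() helpers added above key off the well-known multicast MAC prefixes: 01:00:5e carries IPv4 multicast (the low 23 bits of the group address go into the MAC) and 33:33 carries IPv6 multicast (the low 32 bits). A standalone sketch of the same prefix test:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static bool mac_is_ipv4_mcast(const uint8_t *addr)
{
        static const uint8_t prefix[] = { 0x01, 0x00, 0x5e };

        return memcmp(prefix, addr, sizeof(prefix)) == 0;
}

static bool mac_is_ipv6_mcast(const uint8_t *addr)
{
        static const uint8_t prefix[] = { 0x33, 0x33 };

        return memcmp(prefix, addr, sizeof(prefix)) == 0;
}

int main(void)
{
        /* 224.1.2.3 maps to 01:00:5e:01:02:03, ff02::1 maps to 33:33:00:00:00:01 */
        const uint8_t v4[] = { 0x01, 0x00, 0x5e, 0x01, 0x02, 0x03 };
        const uint8_t v6[] = { 0x33, 0x33, 0x00, 0x00, 0x00, 0x01 };

        printf("%d %d\n", mac_is_ipv4_mcast(v4), mac_is_ipv6_mcast(v6));
        return 0;
}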
/** @@@ -1341,236 -1286,6 +1341,236 @@@ int batadv_mcast_flags_seq_print_text(s #endif
/** + * batadv_mcast_mesh_info_put() - put multicast info into a netlink message + * @msg: buffer for the message + * @bat_priv: the bat priv with all the soft interface information + * + * Return: 0 or error code. + */ +int batadv_mcast_mesh_info_put(struct sk_buff *msg, + struct batadv_priv *bat_priv) +{ + u32 flags = bat_priv->mcast.flags; + u32 flags_priv = BATADV_NO_FLAGS; + + if (bat_priv->mcast.bridged) { + flags_priv |= BATADV_MCAST_FLAGS_BRIDGED; + + if (bat_priv->mcast.querier_ipv4.exists) + flags_priv |= BATADV_MCAST_FLAGS_QUERIER_IPV4_EXISTS; + if (bat_priv->mcast.querier_ipv6.exists) + flags_priv |= BATADV_MCAST_FLAGS_QUERIER_IPV6_EXISTS; + if (bat_priv->mcast.querier_ipv4.shadowing) + flags_priv |= BATADV_MCAST_FLAGS_QUERIER_IPV4_SHADOWING; + if (bat_priv->mcast.querier_ipv6.shadowing) + flags_priv |= BATADV_MCAST_FLAGS_QUERIER_IPV6_SHADOWING; + } + + if (nla_put_u32(msg, BATADV_ATTR_MCAST_FLAGS, flags) || + nla_put_u32(msg, BATADV_ATTR_MCAST_FLAGS_PRIV, flags_priv)) + return -EMSGSIZE; + + return 0; +} + +/** + * batadv_mcast_flags_dump_entry() - dump one entry of the multicast flags table + * to a netlink socket + * @msg: buffer for the message + * @portid: netlink port + * @seq: Sequence number of netlink message + * @orig_node: originator to dump the multicast flags of + * + * Return: 0 or error code. + */ +static int +batadv_mcast_flags_dump_entry(struct sk_buff *msg, u32 portid, u32 seq, + struct batadv_orig_node *orig_node) +{ + void *hdr; + + hdr = genlmsg_put(msg, portid, seq, &batadv_netlink_family, + NLM_F_MULTI, BATADV_CMD_GET_MCAST_FLAGS); + if (!hdr) + return -ENOBUFS; + + if (nla_put(msg, BATADV_ATTR_ORIG_ADDRESS, ETH_ALEN, + orig_node->orig)) { + genlmsg_cancel(msg, hdr); + return -EMSGSIZE; + } + + if (test_bit(BATADV_ORIG_CAPA_HAS_MCAST, + &orig_node->capabilities)) { + if (nla_put_u32(msg, BATADV_ATTR_MCAST_FLAGS, + orig_node->mcast_flags)) { + genlmsg_cancel(msg, hdr); + return -EMSGSIZE; + } + } + + genlmsg_end(msg, hdr); + return 0; +} + +/** + * batadv_mcast_flags_dump_bucket() - dump one bucket of the multicast flags + * table to a netlink socket + * @msg: buffer for the message + * @portid: netlink port + * @seq: Sequence number of netlink message + * @head: bucket to dump + * @idx_skip: How many entries to skip + * + * Return: 0 or error code. + */ +static int +batadv_mcast_flags_dump_bucket(struct sk_buff *msg, u32 portid, u32 seq, + struct hlist_head *head, long *idx_skip) +{ + struct batadv_orig_node *orig_node; + long idx = 0; + + rcu_read_lock(); + hlist_for_each_entry_rcu(orig_node, head, hash_entry) { + if (!test_bit(BATADV_ORIG_CAPA_HAS_MCAST, + &orig_node->capa_initialized)) + continue; + + if (idx < *idx_skip) + goto skip; + + if (batadv_mcast_flags_dump_entry(msg, portid, seq, + orig_node)) { + rcu_read_unlock(); + *idx_skip = idx; + + return -EMSGSIZE; + } + +skip: + idx++; + } + rcu_read_unlock(); + + return 0; +} + +/** + * __batadv_mcast_flags_dump() - dump multicast flags table to a netlink socket + * @msg: buffer for the message + * @portid: netlink port + * @seq: Sequence number of netlink message + * @bat_priv: the bat priv with all the soft interface information + * @bucket: current bucket to dump + * @idx: index in current bucket to the next entry to dump + * + * Return: 0 or error code. 
+ */ +static int +__batadv_mcast_flags_dump(struct sk_buff *msg, u32 portid, u32 seq, + struct batadv_priv *bat_priv, long *bucket, long *idx) +{ + struct batadv_hashtable *hash = bat_priv->orig_hash; + long bucket_tmp = *bucket; + struct hlist_head *head; + long idx_tmp = *idx; + + while (bucket_tmp < hash->size) { + head = &hash->table[bucket_tmp]; + + if (batadv_mcast_flags_dump_bucket(msg, portid, seq, head, + &idx_tmp)) + break; + + bucket_tmp++; + idx_tmp = 0; + } + + *bucket = bucket_tmp; + *idx = idx_tmp; + + return msg->len; +} + +/** + * batadv_mcast_netlink_get_primary() - get primary interface from netlink + * callback + * @cb: netlink callback structure + * @primary_if: the primary interface pointer to return the result in + * + * Return: 0 or error code. + */ +static int +batadv_mcast_netlink_get_primary(struct netlink_callback *cb, + struct batadv_hard_iface **primary_if) +{ + struct batadv_hard_iface *hard_iface = NULL; + struct net *net = sock_net(cb->skb->sk); + struct net_device *soft_iface; + struct batadv_priv *bat_priv; + int ifindex; + int ret = 0; + + ifindex = batadv_netlink_get_ifindex(cb->nlh, BATADV_ATTR_MESH_IFINDEX); + if (!ifindex) + return -EINVAL; + + soft_iface = dev_get_by_index(net, ifindex); + if (!soft_iface || !batadv_softif_is_valid(soft_iface)) { + ret = -ENODEV; + goto out; + } + + bat_priv = netdev_priv(soft_iface); + + hard_iface = batadv_primary_if_get_selected(bat_priv); + if (!hard_iface || hard_iface->if_status != BATADV_IF_ACTIVE) { + ret = -ENOENT; + goto out; + } + +out: + if (soft_iface) + dev_put(soft_iface); + + if (!ret && primary_if) + *primary_if = hard_iface; + else + batadv_hardif_put(hard_iface); + + return ret; +} + +/** + * batadv_mcast_flags_dump() - dump multicast flags table to a netlink socket + * @msg: buffer for the message + * @cb: callback structure containing arguments + * + * Return: message length. + */ +int batadv_mcast_flags_dump(struct sk_buff *msg, struct netlink_callback *cb) +{ + struct batadv_hard_iface *primary_if = NULL; + int portid = NETLINK_CB(cb->skb).portid; + struct batadv_priv *bat_priv; + long *bucket = &cb->args[0]; + long *idx = &cb->args[1]; + int ret; + + ret = batadv_mcast_netlink_get_primary(cb, &primary_if); + if (ret) + return ret; + + bat_priv = netdev_priv(primary_if->soft_iface); + ret = __batadv_mcast_flags_dump(msg, portid, cb->nlh->nlmsg_seq, + bat_priv, bucket, idx); + + batadv_hardif_put(primary_if); + return ret; +} + +/** * batadv_mcast_free() - free the multicast optimizations structures * @bat_priv: the bat priv with all the soft interface information */ diff --combined net/core/dev.c index 8edb58829124,ef0cc6ea5f8d..9b04a9fd1dfd --- a/net/core/dev.c +++ b/net/core/dev.c @@@ -1571,27 -1571,6 +1571,27 @@@ static void dev_disable_gro_hw(struct n netdev_WARN(dev, "failed to disable GRO_HW!\n"); }
+const char *netdev_cmd_to_name(enum netdev_cmd cmd) +{ +#define N(val) \ + case NETDEV_##val: \ + return "NETDEV_" __stringify(val); + switch (cmd) { + N(UP) N(DOWN) N(REBOOT) N(CHANGE) N(REGISTER) N(UNREGISTER) + N(CHANGEMTU) N(CHANGEADDR) N(GOING_DOWN) N(CHANGENAME) N(FEAT_CHANGE) + N(BONDING_FAILOVER) N(PRE_UP) N(PRE_TYPE_CHANGE) N(POST_TYPE_CHANGE) + N(POST_INIT) N(RELEASE) N(NOTIFY_PEERS) N(JOIN) N(CHANGEUPPER) + N(RESEND_IGMP) N(PRECHANGEMTU) N(CHANGEINFODATA) N(BONDING_INFO) + N(PRECHANGEUPPER) N(CHANGELOWERSTATE) N(UDP_TUNNEL_PUSH_INFO) + N(UDP_TUNNEL_DROP_INFO) N(CHANGE_TX_QUEUE_LEN) + N(CVLAN_FILTER_PUSH_INFO) N(CVLAN_FILTER_DROP_INFO) + N(SVLAN_FILTER_PUSH_INFO) N(SVLAN_FILTER_DROP_INFO) + }; +#undef N + return "UNKNOWN_NETDEV_EVENT"; +} +EXPORT_SYMBOL_GPL(netdev_cmd_to_name); + static int call_netdevice_notifier(struct notifier_block *nb, unsigned long val, struct net_device *dev) { @@@ -1625,8 -1604,6 +1625,8 @@@ int register_netdevice_notifier(struct struct net *net; int err;
+ /* Close race with setup_net() and cleanup_net() */ + down_write(&pernet_ops_rwsem); rtnl_lock(); err = raw_notifier_chain_register(&netdev_chain, nb); if (err) @@@ -1649,7 -1626,6 +1649,7 @@@
unlock: rtnl_unlock(); + up_write(&pernet_ops_rwsem); return err;
rollback: @@@ -1694,8 -1670,6 +1694,8 @@@ int unregister_netdevice_notifier(struc struct net *net; int err;
+ /* Close race with setup_net() and cleanup_net() */ + down_write(&pernet_ops_rwsem); rtnl_lock(); err = raw_notifier_chain_unregister(&netdev_chain, nb); if (err) @@@ -1713,7 -1687,6 +1713,7 @@@ } unlock: rtnl_unlock(); + up_write(&pernet_ops_rwsem); return err; } EXPORT_SYMBOL(unregister_netdevice_notifier); @@@ -2405,7 -2378,7 +2405,7 @@@ EXPORT_SYMBOL(netdev_set_num_tc)
/* * Routine to help set real_num_tx_queues. To avoid skbs mapped to queues - * greater then real_num_tx_queues stale skbs on the qdisc must be flushed. + * greater than real_num_tx_queues stale skbs on the qdisc must be flushed. */ int netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq) { @@@ -2762,7 -2735,7 +2762,7 @@@ __be16 skb_network_protocol(struct sk_b if (unlikely(!pskb_may_pull(skb, sizeof(struct ethhdr)))) return 0;
- eth = (struct ethhdr *)skb_mac_header(skb); + eth = (struct ethhdr *)skb->data; type = eth->h_proto; }
@@@ -4386,9 -4359,6 +4386,9 @@@ int netdev_rx_handler_register(struct n if (netdev_is_rx_handler_busy(dev)) return -EBUSY;
+ if (dev->priv_flags & IFF_NO_RX_HANDLER) + return -EINVAL; + /* Note: rx_handler_data must be set before rx_handler */ rcu_assign_pointer(dev->rx_handler_data, rx_handler_data); rcu_assign_pointer(dev->rx_handler, rx_handler); @@@ -7584,19 -7554,6 +7584,19 @@@ static netdev_features_t netdev_fix_fea } }
+ /* LRO/HW-GRO features cannot be combined with RX-FCS */ + if (features & NETIF_F_RXFCS) { + if (features & NETIF_F_LRO) { + netdev_dbg(dev, "Dropping LRO feature since RX-FCS is requested.\n"); + features &= ~NETIF_F_LRO; + } + + if (features & NETIF_F_GRO_HW) { + netdev_dbg(dev, "Dropping HW-GRO feature since RX-FCS is requested.\n"); + features &= ~NETIF_F_GRO_HW; + } + } + return features; }
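The netdev_fix_features() hunk above makes NETIF_F_RXFCS win over LRO and hardware GRO: when RX-FCS is requested, both offloads are masked out. A small standalone sketch of the same masking, with placeholder bit values rather than the kernel's NETIF_F_* bits:

#include <stdio.h>

#define F_RXFCS  0x1u   /* placeholder feature bits */
#define F_LRO    0x2u
#define F_GRO_HW 0x4u

static unsigned int fix_features(unsigned int features)
{
        if (features & F_RXFCS) {
                features &= ~F_LRO;     /* drop LRO when RX-FCS is requested */
                features &= ~F_GRO_HW;  /* drop HW GRO as well */
        }
        return features;
}

int main(void)
{
        printf("%#x\n", fix_features(F_RXFCS | F_LRO | F_GRO_HW)); /* prints 0x1 */
        return 0;
}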
@@@ -7668,24 -7625,6 +7668,24 @@@ sync_lower } }
+ if (diff & NETIF_F_HW_VLAN_CTAG_FILTER) { + if (features & NETIF_F_HW_VLAN_CTAG_FILTER) { + dev->features = features; + err |= vlan_get_rx_ctag_filter_info(dev); + } else { + vlan_drop_rx_ctag_filter_info(dev); + } + } + + if (diff & NETIF_F_HW_VLAN_STAG_FILTER) { + if (features & NETIF_F_HW_VLAN_STAG_FILTER) { + dev->features = features; + err |= vlan_get_rx_stag_filter_info(dev); + } else { + vlan_drop_rx_stag_filter_info(dev); + } + } + dev->features = features; }
@@@ -8071,8 -8010,7 +8071,8 @@@ int register_netdev(struct net_device * { int err;
- rtnl_lock(); + if (rtnl_lock_killable()) + return -EINTR; err = register_netdevice(dev); rtnl_unlock(); return err; @@@ -8122,6 -8060,7 +8122,6 @@@ static void netdev_wait_allrefs(struct rcu_barrier(); rtnl_lock();
- call_netdevice_notifiers(NETDEV_UNREGISTER_FINAL, dev); if (test_bit(__LINK_STATE_LINKWATCH_PENDING, &dev->state)) { /* We must not have linkwatch events @@@ -8193,6 -8132,10 +8193,6 @@@ void netdev_run_todo(void = list_first_entry(&list, struct net_device, todo_list); list_del(&dev->todo_list);
- rtnl_lock(); - call_netdevice_notifiers(NETDEV_UNREGISTER_FINAL, dev); - __rtnl_unlock(); - if (unlikely(dev->reg_state != NETREG_UNREGISTERING)) { pr_err("network todo '%s' but state %d\n", dev->name, dev->reg_state); @@@ -8210,9 -8153,8 +8210,9 @@@ BUG_ON(!list_empty(&dev->ptype_specific)); WARN_ON(rcu_access_pointer(dev->ip_ptr)); WARN_ON(rcu_access_pointer(dev->ip6_ptr)); +#if IS_ENABLED(CONFIG_DECNET) WARN_ON(dev->dn_ptr); - +#endif if (dev->priv_destructor) dev->priv_destructor(dev); if (dev->needs_free_netdev) @@@ -8634,6 -8576,7 +8634,6 @@@ int dev_change_net_namespace(struct net */ call_netdevice_notifiers(NETDEV_UNREGISTER, dev); rcu_barrier(); - call_netdevice_notifiers(NETDEV_UNREGISTER_FINAL, dev);
new_nsid = peernet2id_alloc(dev_net(dev), net); /* If there is an ifindex conflict assign a new one */ diff --combined net/core/skbuff.c index b5c75d4fcf37,857e4e6f751a..1bca1e0fc8f7 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c @@@ -77,8 -77,8 +77,8 @@@ #include <linux/capability.h> #include <linux/user_namespace.h>
-struct kmem_cache *skbuff_head_cache __read_mostly; -static struct kmem_cache *skbuff_fclone_cache __read_mostly; +struct kmem_cache *skbuff_head_cache __ro_after_init; +static struct kmem_cache *skbuff_fclone_cache __ro_after_init; int sysctl_max_skb_frags __read_mostly = MAX_SKB_FRAGS; EXPORT_SYMBOL(sysctl_max_skb_frags);
@@@ -890,7 -890,7 +890,7 @@@ struct sk_buff *skb_morph(struct sk_buf } EXPORT_SYMBOL_GPL(skb_morph);
-static int mm_account_pinned_pages(struct mmpin *mmp, size_t size) +int mm_account_pinned_pages(struct mmpin *mmp, size_t size) { unsigned long max_pg, num_pg, new_pg, old_pg; struct user_struct *user; @@@ -919,16 -919,14 +919,16 @@@
return 0; } +EXPORT_SYMBOL_GPL(mm_account_pinned_pages);
-static void mm_unaccount_pinned_pages(struct mmpin *mmp) +void mm_unaccount_pinned_pages(struct mmpin *mmp) { if (mmp->user) { atomic_long_sub(mmp->num_pg, &mmp->user->locked_vm); free_uid(mmp->user); } } +EXPORT_SYMBOL_GPL(mm_unaccount_pinned_pages);
struct ubuf_info *sock_zerocopy_alloc(struct sock *sk, size_t size) { @@@ -3460,19 -3458,6 +3460,19 @@@ void *skb_pull_rcsum(struct sk_buff *sk } EXPORT_SYMBOL_GPL(skb_pull_rcsum);
+static inline skb_frag_t skb_head_frag_to_page_desc(struct sk_buff *frag_skb) +{ + skb_frag_t head_frag; + struct page *page; + + page = virt_to_head_page(frag_skb->head); + head_frag.page.p = page; + head_frag.page_offset = frag_skb->data - + (unsigned char *)page_address(page); + head_frag.size = skb_headlen(frag_skb); + return head_frag; +} + /** * skb_segment - Perform protocol segmentation on skb. * @head_skb: buffer to segment @@@ -3677,19 -3662,15 +3677,19 @@@ normal
while (pos < offset + len) { if (i >= nfrags) { - BUG_ON(skb_headlen(list_skb)); - i = 0; nfrags = skb_shinfo(list_skb)->nr_frags; frag = skb_shinfo(list_skb)->frags; frag_skb = list_skb; + if (!skb_headlen(list_skb)) { + BUG_ON(!nfrags); + } else { + BUG_ON(!list_skb->head_frag);
- BUG_ON(!nfrags); - + /* to make room for head_frag. */ + i--; + frag--; + } if (skb_orphan_frags(frag_skb, GFP_ATOMIC) || skb_zerocopy_clone(nskb, frag_skb, GFP_ATOMIC)) @@@ -3706,7 -3687,7 +3706,7 @@@ goto err; }
- *nskb_frag = *frag; + *nskb_frag = (i < 0) ? skb_head_frag_to_page_desc(frag_skb) : *frag; __skb_frag_ref(nskb_frag); size = skb_frag_size(nskb_frag);
@@@ -5047,8 -5028,10 +5047,10 @@@ static struct sk_buff *skb_reorder_vlan }
mac_len = skb->data - skb_mac_header(skb); - memmove(skb_mac_header(skb) + VLAN_HLEN, skb_mac_header(skb), - mac_len - VLAN_HLEN - ETH_TLEN); + if (likely(mac_len > VLAN_HLEN + ETH_TLEN)) { + memmove(skb_mac_header(skb) + VLAN_HLEN, skb_mac_header(skb), + mac_len - VLAN_HLEN - ETH_TLEN); + } skb->mac_header += VLAN_HLEN; return skb; } diff --combined net/ipv4/ip_tunnel.c index 5fcb17cb426b,a7fd1c5a2a14..de6d94482fe7 --- a/net/ipv4/ip_tunnel.c +++ b/net/ipv4/ip_tunnel.c @@@ -290,6 -290,22 +290,6 @@@ failed return ERR_PTR(err); }
-static inline void init_tunnel_flow(struct flowi4 *fl4, - int proto, - __be32 daddr, __be32 saddr, - __be32 key, __u8 tos, int oif, - __u32 mark) -{ - memset(fl4, 0, sizeof(*fl4)); - fl4->flowi4_oif = oif; - fl4->daddr = daddr; - fl4->saddr = saddr; - fl4->flowi4_tos = tos; - fl4->flowi4_proto = proto; - fl4->fl4_gre_key = key; - fl4->flowi4_mark = mark; -} - static int ip_tunnel_bind_dev(struct net_device *dev) { struct net_device *tdev = NULL; @@@ -306,10 -322,10 +306,10 @@@ struct flowi4 fl4; struct rtable *rt;
- init_tunnel_flow(&fl4, iph->protocol, iph->daddr, - iph->saddr, tunnel->parms.o_key, - RT_TOS(iph->tos), tunnel->parms.link, - tunnel->fwmark); + ip_tunnel_init_flow(&fl4, iph->protocol, iph->daddr, + iph->saddr, tunnel->parms.o_key, + RT_TOS(iph->tos), tunnel->parms.link, + tunnel->fwmark); rt = ip_route_output_key(tunnel->net, &fl4);
if (!IS_ERR(rt)) { @@@ -346,12 -362,18 +346,17 @@@ static struct ip_tunnel *ip_tunnel_crea struct ip_tunnel *nt; struct net_device *dev; int t_hlen; + int mtu; + int err;
- BUG_ON(!itn->fb_tunnel_dev); - dev = __ip_tunnel_create(net, itn->fb_tunnel_dev->rtnl_link_ops, parms); + dev = __ip_tunnel_create(net, itn->rtnl_link_ops, parms); if (IS_ERR(dev)) return ERR_CAST(dev);
- dev->mtu = ip_tunnel_bind_dev(dev); + mtu = ip_tunnel_bind_dev(dev); + err = dev_set_mtu(dev, mtu); + if (err) + goto err_dev_set_mtu;
nt = netdev_priv(dev); t_hlen = nt->hlen + sizeof(struct iphdr); @@@ -359,6 -381,10 +364,10 @@@ dev->max_mtu = 0xFFF8 - dev->hard_header_len - t_hlen; ip_tunnel_add(itn, nt); return nt; + + err_dev_set_mtu: + unregister_netdevice(dev); + return ERR_PTR(err); }
int ip_tunnel_rcv(struct ip_tunnel *tunnel, struct sk_buff *skb, @@@ -564,8 -590,8 +573,8 @@@ void ip_md_tunnel_xmit(struct sk_buff * else if (skb->protocol == htons(ETH_P_IPV6)) tos = ipv6_get_dsfield((const struct ipv6hdr *)inner_iph); } - init_tunnel_flow(&fl4, proto, key->u.ipv4.dst, key->u.ipv4.src, 0, - RT_TOS(tos), tunnel->parms.link, tunnel->fwmark); + ip_tunnel_init_flow(&fl4, proto, key->u.ipv4.dst, key->u.ipv4.src, 0, + RT_TOS(tos), tunnel->parms.link, tunnel->fwmark); if (tunnel->encap.type != TUNNEL_ENCAP_NONE) goto tx_error; rt = ip_route_output_key(tunnel->net, &fl4); @@@ -693,9 -719,9 +702,9 @@@ void ip_tunnel_xmit(struct sk_buff *skb } }
- init_tunnel_flow(&fl4, protocol, dst, tnl_params->saddr, - tunnel->parms.o_key, RT_TOS(tos), tunnel->parms.link, - tunnel->fwmark); + ip_tunnel_init_flow(&fl4, protocol, dst, tnl_params->saddr, + tunnel->parms.o_key, RT_TOS(tos), tunnel->parms.link, + tunnel->fwmark);
if (ip_tunnel_encap(skb, tunnel, &protocol, &fl4) < 0) goto tx_error; @@@ -821,6 -847,7 +830,6 @@@ int ip_tunnel_ioctl(struct net_device * struct net *net = t->net; struct ip_tunnel_net *itn = net_generic(net, t->ip_tnl_net_id);
- BUG_ON(!itn->fb_tunnel_dev); switch (cmd) { case SIOCGETTUNNEL: if (dev == itn->fb_tunnel_dev) { @@@ -845,7 -872,7 +854,7 @@@ p->o_key = 0; }
- t = ip_tunnel_find(itn, p, itn->fb_tunnel_dev->type); + t = ip_tunnel_find(itn, p, itn->type);
if (cmd == SIOCADDTUNNEL) { if (!t) { @@@ -989,15 -1016,10 +998,15 @@@ int ip_tunnel_init_net(struct net *net struct ip_tunnel_parm parms; unsigned int i;
+ itn->rtnl_link_ops = ops; for (i = 0; i < IP_TNL_HASH_SIZE; i++) INIT_HLIST_HEAD(&itn->tunnels[i]);
- if (!ops) { + if (!ops || !net_has_fallback_tunnels(net)) { + struct ip_tunnel_net *it_init_net; + + it_init_net = net_generic(&init_net, ip_tnl_net_id); + itn->type = it_init_net->type; itn->fb_tunnel_dev = NULL; return 0; } @@@ -1015,7 -1037,6 +1024,7 @@@ itn->fb_tunnel_dev->features |= NETIF_F_NETNS_LOCAL; itn->fb_tunnel_dev->mtu = ip_tunnel_bind_dev(itn->fb_tunnel_dev); ip_tunnel_add(itn, netdev_priv(itn->fb_tunnel_dev)); + itn->type = itn->fb_tunnel_dev->type; } rtnl_unlock();
@@@ -1023,10 -1044,10 +1032,10 @@@ } EXPORT_SYMBOL_GPL(ip_tunnel_init_net);
-static void ip_tunnel_destroy(struct ip_tunnel_net *itn, struct list_head *head, +static void ip_tunnel_destroy(struct net *net, struct ip_tunnel_net *itn, + struct list_head *head, struct rtnl_link_ops *ops) { - struct net *net = dev_net(itn->fb_tunnel_dev); struct net_device *dev, *aux; int h;
@@@ -1058,7 -1079,7 +1067,7 @@@ void ip_tunnel_delete_nets(struct list_ rtnl_lock(); list_for_each_entry(net, net_list, exit_list) { itn = net_generic(net, id); - ip_tunnel_destroy(itn, &list, ops); + ip_tunnel_destroy(net, itn, &list, ops); } unregister_netdevice_many(&list); rtnl_unlock(); @@@ -1090,17 -1111,29 +1099,29 @@@ int ip_tunnel_newlink(struct net_devic nt->fwmark = fwmark; err = register_netdevice(dev); if (err) - goto out; + goto err_register_netdevice;
if (dev->type == ARPHRD_ETHER && !tb[IFLA_ADDRESS]) eth_hw_addr_random(dev);
mtu = ip_tunnel_bind_dev(dev); - if (!tb[IFLA_MTU]) - dev->mtu = mtu; + if (tb[IFLA_MTU]) { + unsigned int max = 0xfff8 - dev->hard_header_len - nt->hlen; + + mtu = clamp(dev->mtu, (unsigned int)ETH_MIN_MTU, + (unsigned int)(max - sizeof(struct iphdr))); + } + + err = dev_set_mtu(dev, mtu); + if (err) + goto err_dev_set_mtu;
ip_tunnel_add(itn, nt); - out: + return 0; + + err_dev_set_mtu: + unregister_netdevice(dev); + err_register_netdevice: return err; } EXPORT_SYMBOL_GPL(ip_tunnel_newlink); diff --combined net/ipv4/netfilter/Makefile index 62ede5e3a3de,9bd19cd18849..7523ddb2566b --- a/net/ipv4/netfilter/Makefile +++ b/net/ipv4/netfilter/Makefile @@@ -29,7 -29,7 +29,7 @@@ obj-$(CONFIG_NF_NAT_H323) += nf_nat_h32 obj-$(CONFIG_NF_NAT_PPTP) += nf_nat_pptp.o
nf_nat_snmp_basic-y := nf_nat_snmp_basic-asn1.o nf_nat_snmp_basic_main.o - nf_nat_snmp_basic-y : nf_nat_snmp_basic-asn1.h nf_nat_snmp_basic-asn1.c + $(obj)/nf_nat_snmp_basic_main.o: $(obj)/nf_nat_snmp_basic-asn1.h obj-$(CONFIG_NF_NAT_SNMP_BASIC) += nf_nat_snmp_basic.o clean-files := nf_nat_snmp_basic-asn1.c nf_nat_snmp_basic-asn1.h
@@@ -39,6 -39,7 +39,6 @@@ obj-$(CONFIG_NF_NAT_MASQUERADE_IPV4) + # NAT protocols (nf_nat) obj-$(CONFIG_NF_NAT_PROTO_GRE) += nf_nat_proto_gre.o
-obj-$(CONFIG_NF_TABLES_IPV4) += nf_tables_ipv4.o obj-$(CONFIG_NFT_CHAIN_ROUTE_IPV4) += nft_chain_route_ipv4.o obj-$(CONFIG_NFT_CHAIN_NAT_IPV4) += nft_chain_nat_ipv4.o obj-$(CONFIG_NFT_REJECT_IPV4) += nft_reject_ipv4.o @@@ -46,6 -47,7 +46,6 @@@ obj-$(CONFIG_NFT_FIB_IPV4) += nft_fib_i obj-$(CONFIG_NFT_MASQ_IPV4) += nft_masq_ipv4.o obj-$(CONFIG_NFT_REDIR_IPV4) += nft_redir_ipv4.o obj-$(CONFIG_NFT_DUP_IPV4) += nft_dup_ipv4.o -obj-$(CONFIG_NF_TABLES_ARP) += nf_tables_arp.o
# flow table support obj-$(CONFIG_NF_FLOW_TABLE_IPV4) += nf_flow_table_ipv4.o diff --combined net/ipv4/tcp_input.c index 451ef3012636,ff6cd98ce8d5..367def6ddeda --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@@ -1358,6 -1358,9 +1358,6 @@@ static struct sk_buff *tcp_shift_skb_da int len; int in_sack;
- if (!sk_can_gso(sk)) - goto fallback; - /* Normally R but no L won't result in plain S */ if (!dup_sack && (TCP_SKB_CB(skb)->sacked & (TCPCB_LOST|TCPCB_SACKED_RETRANS)) == TCPCB_SACKED_RETRANS) @@@ -5859,12 -5862,10 +5859,12 @@@ int tcp_rcv_state_process(struct sock * tp->rx_opt.saw_tstamp = 0; req = tp->fastopen_rsk; if (req) { + bool req_stolen; + WARN_ON_ONCE(sk->sk_state != TCP_SYN_RECV && sk->sk_state != TCP_FIN_WAIT1);
- if (!tcp_check_req(sk, skb, req, true)) + if (!tcp_check_req(sk, skb, req, true, &req_stolen)) goto discard; }
@@@ -6255,6 -6256,9 +6255,9 @@@ int tcp_conn_request(struct request_soc if (want_cookie && !tmp_opt.saw_tstamp) tcp_clear_options(&tmp_opt);
+ if (IS_ENABLED(CONFIG_SMC) && want_cookie) + tmp_opt.smc_ok = 0; + tmp_opt.tstamp_ok = tmp_opt.saw_tstamp; tcp_openreq_init(req, &tmp_opt, skb, sk); inet_rsk(req)->no_srccheck = inet_sk(sk)->transparent; diff --combined net/ipv6/ip6_output.c index 323d7a354ffb,5cb18c8ba9b2..e6eaa4dd9f60 --- a/net/ipv6/ip6_output.c +++ b/net/ipv6/ip6_output.c @@@ -71,7 -71,7 +71,7 @@@ static int ip6_finish_output2(struct ne struct inet6_dev *idev = ip6_dst_idev(skb_dst(skb));
if (!(dev->flags & IFF_LOOPBACK) && sk_mc_loop(sk) && - ((mroute6_socket(net, skb) && + ((mroute6_is_socket(net, skb) && !(IP6CB(skb)->flags & IP6SKB_FORWARDED)) || ipv6_chk_mcast_addr(dev, &ipv6_hdr(skb)->daddr, &ipv6_hdr(skb)->saddr))) { @@@ -1246,7 -1246,7 +1246,7 @@@ static int __ip6_append_data(struct soc const struct sockcm_cookie *sockc) { struct sk_buff *skb, *skb_prev = NULL; - unsigned int maxfraglen, fragheaderlen, mtu, orig_mtu; + unsigned int maxfraglen, fragheaderlen, mtu, orig_mtu, pmtu; int exthdrlen = 0; int dst_exthdrlen = 0; int hh_len; @@@ -1259,7 -1259,6 +1259,7 @@@ struct ipv6_txoptions *opt = v6_cork->opt; int csummode = CHECKSUM_NONE; unsigned int maxnonfragsize, headersize; + unsigned int wmem_alloc_delta = 0;
skb = skb_peek_tail(queue); if (!skb) { @@@ -1283,6 -1282,12 +1283,12 @@@ sizeof(struct frag_hdr) : 0) + rt->rt6i_nfheader_len;
+ /* as per RFC 7112 section 5, the entire IPv6 Header Chain must fit + * the first fragment + */ + if (headersize + transhdrlen > mtu) + goto emsgsize; + if (cork->length + length > mtu - headersize && ipc6->dontfrag && (sk->sk_protocol == IPPROTO_UDP || sk->sk_protocol == IPPROTO_RAW)) { @@@ -1298,9 -1303,8 +1304,8 @@@
if (cork->length + length > maxnonfragsize - headersize) { emsgsize: - ipv6_local_error(sk, EMSGSIZE, fl6, - mtu - headersize + - sizeof(struct ipv6hdr)); + pmtu = max_t(int, mtu - headersize + sizeof(struct ipv6hdr), 0); + ipv6_local_error(sk, EMSGSIZE, fl6, pmtu); return -EMSGSIZE; }
@@@ -1412,10 -1416,11 +1417,10 @@@ alloc_new_skb (flags & MSG_DONTWAIT), &err); } else { skb = NULL; - if (refcount_read(&sk->sk_wmem_alloc) <= + if (refcount_read(&sk->sk_wmem_alloc) + wmem_alloc_delta <= 2 * sk->sk_sndbuf) - skb = sock_wmalloc(sk, - alloclen + hh_len, 1, - sk->sk_allocation); + skb = alloc_skb(alloclen + hh_len, + sk->sk_allocation); if (unlikely(!skb)) err = -ENOBUFS; } @@@ -1474,11 -1479,6 +1479,11 @@@ /* * Put the packet on the pending queue */ + if (!skb->destructor) { + skb->destructor = sock_wfree; + skb->sk = sk; + wmem_alloc_delta += skb->truesize; + } __skb_queue_tail(queue, skb); continue; } @@@ -1525,13 -1525,12 +1530,13 @@@ skb->len += copy; skb->data_len += copy; skb->truesize += copy; - refcount_add(copy, &sk->sk_wmem_alloc); + wmem_alloc_delta += copy; } offset += copy; length -= copy; }
+ refcount_add(wmem_alloc_delta, &sk->sk_wmem_alloc); return 0;
error_efault: @@@ -1539,7 -1538,6 +1544,7 @@@ error: cork->length -= length; IP6_INC_STATS(sock_net(sk), rt->rt6i_idev, IPSTATS_MIB_OUTDISCARDS); + refcount_add(wmem_alloc_delta, &sk->sk_wmem_alloc); return err; }
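The __ip6_append_data() changes above stop charging sk_wmem_alloc one skb (or one copy) at a time; the cost is accumulated in wmem_alloc_delta and committed with a single refcount_add() on both the success and error paths. A minimal standalone sketch of that accumulate-then-commit pattern, with a plain atomic counter standing in for sk_wmem_alloc:

#include <stdatomic.h>
#include <stdio.h>

static atomic_uint wmem_alloc = 1;      /* stand-in for sk->sk_wmem_alloc */

static void append_data(const unsigned int *chunks, int n)
{
        unsigned int delta = 0;
        int i;

        for (i = 0; i < n; i++)
                delta += chunks[i];     /* charge locally per chunk */

        /* one shared-counter update instead of one per chunk */
        atomic_fetch_add(&wmem_alloc, delta);
}

int main(void)
{
        const unsigned int chunks[] = { 1500, 1500, 700 };

        append_data(chunks, 3);
        printf("wmem_alloc=%u\n", (unsigned)atomic_load(&wmem_alloc));
        return 0;
}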
diff --combined net/ipv6/ip6_vti.c index 60b771f49fb5,ce18cd20389d..6ebb2e8777f4 --- a/net/ipv6/ip6_vti.c +++ b/net/ipv6/ip6_vti.c @@@ -622,11 -622,12 +622,12 @@@ static int vti6_err(struct sk_buff *skb return 0; }
- static void vti6_link_config(struct ip6_tnl *t) + static void vti6_link_config(struct ip6_tnl *t, bool keep_mtu) { struct net_device *dev = t->dev; struct __ip6_tnl_parm *p = &t->parms; struct net_device *tdev = NULL; + int mtu;
memcpy(dev->dev_addr, &p->laddr, sizeof(struct in6_addr)); memcpy(dev->broadcast, &p->raddr, sizeof(struct in6_addr)); @@@ -640,12 -641,17 +641,17 @@@ else dev->flags &= ~IFF_POINTOPOINT;
+ if (keep_mtu && dev->mtu) { + dev->mtu = clamp(dev->mtu, dev->min_mtu, dev->max_mtu); + return; + } + if (p->flags & IP6_TNL_F_CAP_XMIT) { int strict = (ipv6_addr_type(&p->raddr) & (IPV6_ADDR_MULTICAST | IPV6_ADDR_LINKLOCAL)); struct rt6_info *rt = rt6_lookup(t->net, &p->raddr, &p->laddr, - p->link, strict); + p->link, NULL, strict);
if (rt) tdev = rt->dst.dev; @@@ -656,20 -662,25 +662,25 @@@ tdev = __dev_get_by_index(t->net, p->link);
if (tdev) - dev->mtu = max_t(int, tdev->mtu - dev->hard_header_len, - IPV6_MIN_MTU); + mtu = tdev->mtu - sizeof(struct ipv6hdr); + else + mtu = ETH_DATA_LEN - LL_MAX_HEADER - sizeof(struct ipv6hdr); + + dev->mtu = max_t(int, mtu, IPV6_MIN_MTU); }
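With the hunk above, vti6_link_config() derives the MTU from the underlying device minus the IPv6 header rather than from the link-layer header estimate, and an MTU configured from userspace (keep_mtu) is only clamped to the device limits. Worked numbers, using the usual sizeof(struct ipv6hdr) = 40, ETH_DATA_LEN = 1500 and IPV6_MIN_MTU = 1280:

        tdev found, tdev->mtu = 1500:   mtu = 1500 - 40 = 1460
        no tdev:                        mtu = 1500 - LL_MAX_HEADER - 40
        either way:                     dev->mtu = max(mtu, 1280)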
/** * vti6_tnl_change - update the tunnel parameters * @t: tunnel to be changed * @p: tunnel configuration parameters + * @keep_mtu: MTU was set from userspace, don't re-compute it * * Description: * vti6_tnl_change() updates the tunnel parameters **/ static int - vti6_tnl_change(struct ip6_tnl *t, const struct __ip6_tnl_parm *p) + vti6_tnl_change(struct ip6_tnl *t, const struct __ip6_tnl_parm *p, + bool keep_mtu) { t->parms.laddr = p->laddr; t->parms.raddr = p->raddr; @@@ -679,11 -690,12 +690,12 @@@ t->parms.proto = p->proto; t->parms.fwmark = p->fwmark; dst_cache_reset(&t->dst_cache); - vti6_link_config(t); + vti6_link_config(t, keep_mtu); return 0; }
- static int vti6_update(struct ip6_tnl *t, struct __ip6_tnl_parm *p) + static int vti6_update(struct ip6_tnl *t, struct __ip6_tnl_parm *p, + bool keep_mtu) { struct net *net = dev_net(t->dev); struct vti6_net *ip6n = net_generic(net, vti6_net_id); @@@ -691,7 -703,7 +703,7 @@@
vti6_tnl_unlink(ip6n, t); synchronize_net(); - err = vti6_tnl_change(t, p); + err = vti6_tnl_change(t, p, keep_mtu); vti6_tnl_link(ip6n, t); netdev_state_change(t->dev); return err; @@@ -804,7 -816,7 +816,7 @@@ vti6_ioctl(struct net_device *dev, stru } else t = netdev_priv(dev);
- err = vti6_update(t, &p1); + err = vti6_update(t, &p1, false); } if (t) { err = 0; @@@ -866,10 -878,8 +878,8 @@@ static void vti6_dev_setup(struct net_d dev->priv_destructor = vti6_dev_free;
dev->type = ARPHRD_TUNNEL6; - dev->hard_header_len = LL_MAX_HEADER + sizeof(struct ipv6hdr); - dev->mtu = ETH_DATA_LEN; dev->min_mtu = IPV6_MIN_MTU; - dev->max_mtu = IP_MAX_MTU; + dev->max_mtu = IP_MAX_MTU - sizeof(struct ipv6hdr); dev->flags |= IFF_NOARP; dev->addr_len = sizeof(struct in6_addr); netif_keep_dst(dev); @@@ -905,7 -915,7 +915,7 @@@ static int vti6_dev_init(struct net_dev
if (err) return err; - vti6_link_config(t); + vti6_link_config(t, true); return 0; }
@@@ -1010,7 -1020,7 +1020,7 @@@ static int vti6_changelink(struct net_d } else t = netdev_priv(dev);
- return vti6_update(t, &p); + return vti6_update(t, &p, tb && tb[IFLA_MTU]); }
static size_t vti6_get_size(const struct net_device *dev) diff --combined net/ipv6/route.c index e461ef1158b6,fc74352fac12..f239f91d2efb --- a/net/ipv6/route.c +++ b/net/ipv6/route.c @@@ -450,10 -450,8 +450,10 @@@ static bool rt6_check_expired(const str return false; }
-static struct rt6_info *rt6_multipath_select(struct rt6_info *match, +static struct rt6_info *rt6_multipath_select(const struct net *net, + struct rt6_info *match, struct flowi6 *fl6, int oif, + const struct sk_buff *skb, int strict) { struct rt6_info *sibling, *next_sibling; @@@ -462,7 -460,7 +462,7 @@@ * case it will always be non-zero. Otherwise now is the time to do it. */ if (!fl6->mp_hash) - fl6->mp_hash = rt6_multipath_hash(fl6, NULL); + fl6->mp_hash = rt6_multipath_hash(net, fl6, skb, NULL);
if (fl6->mp_hash <= atomic_read(&match->rt6i_nh_upper_bound)) return match; @@@ -916,13 -914,14 +916,16 @@@ static bool ip6_hold_safe(struct net *n
static struct rt6_info *ip6_pol_route_lookup(struct net *net, struct fib6_table *table, - struct flowi6 *fl6, int flags) + struct flowi6 *fl6, + const struct sk_buff *skb, + int flags) { struct rt6_info *rt, *rt_cache; struct fib6_node *fn;
+ if (fl6->flowi6_flags & FLOWI_FLAG_SKIP_NH_OIF) + flags &= ~RT6_LOOKUP_F_IFACE; + rcu_read_lock(); fn = fib6_lookup(&table->tb6_root, &fl6->daddr, &fl6->saddr); restart: @@@ -933,8 -932,8 +936,8 @@@ rt = rt6_device_match(net, rt, &fl6->saddr, fl6->flowi6_oif, flags); if (rt->rt6i_nsiblings && fl6->flowi6_oif == 0) - rt = rt6_multipath_select(rt, fl6, - fl6->flowi6_oif, flags); + rt = rt6_multipath_select(net, rt, fl6, fl6->flowi6_oif, + skb, flags); } if (rt == net->ipv6.ip6_null_entry) { fn = fib6_backtrack(fn, &fl6->saddr); @@@ -958,15 -957,14 +961,15 @@@ }
struct dst_entry *ip6_route_lookup(struct net *net, struct flowi6 *fl6, - int flags) + const struct sk_buff *skb, int flags) { - return fib6_rule_lookup(net, fl6, flags, ip6_pol_route_lookup); + return fib6_rule_lookup(net, fl6, skb, flags, ip6_pol_route_lookup); } EXPORT_SYMBOL_GPL(ip6_route_lookup);
struct rt6_info *rt6_lookup(struct net *net, const struct in6_addr *daddr, - const struct in6_addr *saddr, int oif, int strict) + const struct in6_addr *saddr, int oif, + const struct sk_buff *skb, int strict) { struct flowi6 fl6 = { .flowi6_oif = oif, @@@ -980,7 -978,7 +983,7 @@@ flags |= RT6_LOOKUP_F_HAS_SADDR; }
- dst = fib6_rule_lookup(net, &fl6, flags, ip6_pol_route_lookup); + dst = fib6_rule_lookup(net, &fl6, skb, flags, ip6_pol_route_lookup); if (dst->error == 0) return (struct rt6_info *) dst;
@@@ -1631,11 -1629,10 +1634,10 @@@ static void rt6_age_examine_exception(s struct neighbour *neigh; __u8 neigh_flags = 0;
- neigh = dst_neigh_lookup(&rt->dst, &rt->rt6i_gateway); - if (neigh) { + neigh = __ipv6_neigh_lookup_noref(rt->dst.dev, &rt->rt6i_gateway); + if (neigh) neigh_flags = neigh->flags; - neigh_release(neigh); - } + if (!(neigh_flags & NTF_ROUTER)) { RT6_TRACE("purging route %p via non-router but gateway\n", rt); @@@ -1659,7 -1656,8 +1661,8 @@@ void rt6_age_exceptions(struct rt6_inf if (!rcu_access_pointer(rt->rt6i_exception_bucket)) return;
- spin_lock_bh(&rt6_exception_lock); + rcu_read_lock_bh(); + spin_lock(&rt6_exception_lock); bucket = rcu_dereference_protected(rt->rt6i_exception_bucket, lockdep_is_held(&rt6_exception_lock));
@@@ -1673,12 -1671,12 +1676,13 @@@ bucket++; } } - spin_unlock_bh(&rt6_exception_lock); + spin_unlock(&rt6_exception_lock); + rcu_read_unlock_bh(); }
struct rt6_info *ip6_pol_route(struct net *net, struct fib6_table *table, - int oif, struct flowi6 *fl6, int flags) + int oif, struct flowi6 *fl6, + const struct sk_buff *skb, int flags) { struct fib6_node *fn, *saved_fn; struct rt6_info *rt, *rt_cache; @@@ -1700,7 -1698,7 +1704,7 @@@ redo_rt6_select: rt = rt6_select(net, fn, oif, strict); if (rt->rt6i_nsiblings) - rt = rt6_multipath_select(rt, fl6, oif, strict); + rt = rt6_multipath_select(net, rt, fl6, oif, skb, strict); if (rt == net->ipv6.ip6_null_entry) { fn = fib6_backtrack(fn, &fl6->saddr); if (fn) @@@ -1799,35 -1797,28 +1803,35 @@@ uncached_rt_out } EXPORT_SYMBOL_GPL(ip6_pol_route);
-static struct rt6_info *ip6_pol_route_input(struct net *net, struct fib6_table *table, - struct flowi6 *fl6, int flags) +static struct rt6_info *ip6_pol_route_input(struct net *net, + struct fib6_table *table, + struct flowi6 *fl6, + const struct sk_buff *skb, + int flags) { - return ip6_pol_route(net, table, fl6->flowi6_iif, fl6, flags); + return ip6_pol_route(net, table, fl6->flowi6_iif, fl6, skb, flags); }
struct dst_entry *ip6_route_input_lookup(struct net *net, struct net_device *dev, - struct flowi6 *fl6, int flags) + struct flowi6 *fl6, + const struct sk_buff *skb, + int flags) { if (rt6_need_strict(&fl6->daddr) && dev->type != ARPHRD_PIMREG) flags |= RT6_LOOKUP_F_IFACE;
- return fib6_rule_lookup(net, fl6, flags, ip6_pol_route_input); + return fib6_rule_lookup(net, fl6, skb, flags, ip6_pol_route_input); } EXPORT_SYMBOL_GPL(ip6_route_input_lookup);
static void ip6_multipath_l3_keys(const struct sk_buff *skb, - struct flow_keys *keys) + struct flow_keys *keys, + struct flow_keys *flkeys) { const struct ipv6hdr *outer_iph = ipv6_hdr(skb); const struct ipv6hdr *key_iph = outer_iph; + struct flow_keys *_flkeys = flkeys; const struct ipv6hdr *inner_iph; const struct icmp6hdr *icmph; struct ipv6hdr _inner_iph; @@@ -1849,76 -1840,26 +1853,76 @@@ goto out;
key_iph = inner_iph; + _flkeys = NULL; out: - memset(keys, 0, sizeof(*keys)); - keys->control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS; - keys->addrs.v6addrs.src = key_iph->saddr; - keys->addrs.v6addrs.dst = key_iph->daddr; - keys->tags.flow_label = ip6_flowinfo(key_iph); - keys->basic.ip_proto = key_iph->nexthdr; + if (_flkeys) { + keys->addrs.v6addrs.src = _flkeys->addrs.v6addrs.src; + keys->addrs.v6addrs.dst = _flkeys->addrs.v6addrs.dst; + keys->tags.flow_label = _flkeys->tags.flow_label; + keys->basic.ip_proto = _flkeys->basic.ip_proto; + } else { + keys->addrs.v6addrs.src = key_iph->saddr; + keys->addrs.v6addrs.dst = key_iph->daddr; + keys->tags.flow_label = ip6_flowinfo(key_iph); + keys->basic.ip_proto = key_iph->nexthdr; + } }
/* if skb is set it will be used and fl6 can be NULL */ -u32 rt6_multipath_hash(const struct flowi6 *fl6, const struct sk_buff *skb) +u32 rt6_multipath_hash(const struct net *net, const struct flowi6 *fl6, + const struct sk_buff *skb, struct flow_keys *flkeys) { struct flow_keys hash_keys; + u32 mhash;
- if (skb) { - ip6_multipath_l3_keys(skb, &hash_keys); - return flow_hash_from_keys(&hash_keys) >> 1; + switch (ip6_multipath_hash_policy(net)) { + case 0: + memset(&hash_keys, 0, sizeof(hash_keys)); + hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS; + if (skb) { + ip6_multipath_l3_keys(skb, &hash_keys, flkeys); + } else { + hash_keys.addrs.v6addrs.src = fl6->saddr; + hash_keys.addrs.v6addrs.dst = fl6->daddr; + hash_keys.tags.flow_label = (__force u32)fl6->flowlabel; + hash_keys.basic.ip_proto = fl6->flowi6_proto; + } + break; + case 1: + if (skb) { + unsigned int flag = FLOW_DISSECTOR_F_STOP_AT_ENCAP; + struct flow_keys keys; + + /* short-circuit if we already have L4 hash present */ + if (skb->l4_hash) + return skb_get_hash_raw(skb) >> 1; + + memset(&hash_keys, 0, sizeof(hash_keys)); + + if (!flkeys) { + skb_flow_dissect_flow_keys(skb, &keys, flag); + flkeys = &keys; + } + hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS; + hash_keys.addrs.v6addrs.src = flkeys->addrs.v6addrs.src; + hash_keys.addrs.v6addrs.dst = flkeys->addrs.v6addrs.dst; + hash_keys.ports.src = flkeys->ports.src; + hash_keys.ports.dst = flkeys->ports.dst; + hash_keys.basic.ip_proto = flkeys->basic.ip_proto; + } else { + memset(&hash_keys, 0, sizeof(hash_keys)); + hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS; + hash_keys.addrs.v6addrs.src = fl6->saddr; + hash_keys.addrs.v6addrs.dst = fl6->daddr; + hash_keys.ports.src = fl6->fl6_sport; + hash_keys.ports.dst = fl6->fl6_dport; + hash_keys.basic.ip_proto = fl6->flowi6_proto; + } + break; } + mhash = flow_hash_from_keys(&hash_keys);
- return get_hash_from_flowi6(fl6) >> 1; + return mhash >> 1; }
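The reworked rt6_multipath_hash() above selects between two policies: policy 0 hashes the IPv6 L3 fields (saddr, daddr, flow label, nexthdr), while policy 1 hashes the L4 five-tuple, reusing pre-dissected flow keys when the caller supplies them and short-circuiting on an existing skb L4 hash. A standalone sketch of the policy switch; the mixing function is illustrative, not flow_hash_from_keys(), and the addresses are reduced to 32-bit stand-ins:

#include <stdint.h>
#include <stdio.h>

struct flow6 {
        uint32_t saddr_hash, daddr_hash;        /* stand-ins for 128-bit addresses */
        uint32_t flowlabel;
        uint16_t sport, dport;
        uint8_t  proto;
};

static uint32_t mix(uint32_t h, uint32_t v)
{
        return h ^ (v + 0x9e3779b9 + (h << 6) + (h >> 2));
}

static uint32_t multipath_hash(const struct flow6 *fl, int policy)
{
        uint32_t h = 0;

        h = mix(h, fl->saddr_hash);
        h = mix(h, fl->daddr_hash);
        h = mix(h, fl->proto);
        if (policy == 0)
                h = mix(h, fl->flowlabel);      /* L3: flow label instead of ports */
        else
                h = mix(h, ((uint32_t)fl->sport << 16) | fl->dport);    /* L4 */

        return h >> 1;  /* mirror the final >> 1 done in the hunk above */
}

int main(void)
{
        struct flow6 fl = { 1, 2, 0xbeef, 1000, 53, 17 };

        printf("%u %u\n", (unsigned)multipath_hash(&fl, 0),
               (unsigned)multipath_hash(&fl, 1));
        return 0;
}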
void ip6_route_input(struct sk_buff *skb) @@@ -1935,29 -1876,20 +1939,29 @@@ .flowi6_mark = skb->mark, .flowi6_proto = iph->nexthdr, }; + struct flow_keys *flkeys = NULL, _flkeys;
tun_info = skb_tunnel_info(skb); if (tun_info && !(tun_info->mode & IP_TUNNEL_INFO_TX)) fl6.flowi6_tun_key.tun_id = tun_info->key.tun_id; + + if (fib6_rules_early_flow_dissect(net, skb, &fl6, &_flkeys)) + flkeys = &_flkeys; + if (unlikely(fl6.flowi6_proto == IPPROTO_ICMPV6)) - fl6.mp_hash = rt6_multipath_hash(&fl6, skb); + fl6.mp_hash = rt6_multipath_hash(net, &fl6, skb, flkeys); skb_dst_drop(skb); - skb_dst_set(skb, ip6_route_input_lookup(net, skb->dev, &fl6, flags)); + skb_dst_set(skb, + ip6_route_input_lookup(net, skb->dev, &fl6, skb, flags)); }
-static struct rt6_info *ip6_pol_route_output(struct net *net, struct fib6_table *table, - struct flowi6 *fl6, int flags) +static struct rt6_info *ip6_pol_route_output(struct net *net, + struct fib6_table *table, + struct flowi6 *fl6, + const struct sk_buff *skb, + int flags) { - return ip6_pol_route(net, table, fl6->flowi6_oif, fl6, flags); + return ip6_pol_route(net, table, fl6->flowi6_oif, fl6, skb, flags); }
struct dst_entry *ip6_route_output_flags(struct net *net, const struct sock *sk, @@@ -1985,7 -1917,7 +1989,7 @@@ else if (sk) flags |= rt6_srcprefs2flags(inet6_sk(sk)->srcprefs);
- return fib6_rule_lookup(net, fl6, flags, ip6_pol_route_output); + return fib6_rule_lookup(net, fl6, NULL, flags, ip6_pol_route_output); } EXPORT_SYMBOL_GPL(ip6_route_output_flags);
@@@ -2234,7 -2166,6 +2238,7 @@@ struct ip6rd_flowi static struct rt6_info *__ip6_route_redirect(struct net *net, struct fib6_table *table, struct flowi6 *fl6, + const struct sk_buff *skb, int flags) { struct ip6rd_flowi *rdfl = (struct ip6rd_flowi *)fl6; @@@ -2308,9 -2239,8 +2312,9 @@@ out };
static struct dst_entry *ip6_route_redirect(struct net *net, - const struct flowi6 *fl6, - const struct in6_addr *gateway) + const struct flowi6 *fl6, + const struct sk_buff *skb, + const struct in6_addr *gateway) { int flags = RT6_LOOKUP_F_HAS_SADDR; struct ip6rd_flowi rdfl; @@@ -2318,7 -2248,7 +2322,7 @@@ rdfl.fl6 = *fl6; rdfl.gateway = *gateway;
- return fib6_rule_lookup(net, &rdfl.fl6, + return fib6_rule_lookup(net, &rdfl.fl6, skb, flags, __ip6_route_redirect); }
@@@ -2338,7 -2268,7 +2342,7 @@@ void ip6_redirect(struct sk_buff *skb, fl6.flowlabel = ip6_flowinfo(iph); fl6.flowi6_uid = uid;
- dst = ip6_route_redirect(net, &fl6, &ipv6_hdr(skb)->saddr); + dst = ip6_route_redirect(net, &fl6, skb, &ipv6_hdr(skb)->saddr); rt6_do_redirect(dst, NULL, skb); dst_release(dst); } @@@ -2360,7 -2290,7 +2364,7 @@@ void ip6_redirect_no_header(struct sk_b fl6.saddr = iph->daddr; fl6.flowi6_uid = sock_net_uid(net, NULL);
- dst = ip6_route_redirect(net, &fl6, &iph->saddr); + dst = ip6_route_redirect(net, &fl6, skb, &iph->saddr); rt6_do_redirect(dst, NULL, skb); dst_release(dst); } @@@ -2562,7 -2492,7 +2566,7 @@@ static struct rt6_info *ip6_nh_lookup_t flags |= RT6_LOOKUP_F_HAS_SADDR;
flags |= RT6_LOOKUP_F_IGNORE_LINKSTATE; - rt = ip6_pol_route(net, table, cfg->fc_ifindex, &fl6, flags); + rt = ip6_pol_route(net, table, cfg->fc_ifindex, &fl6, NULL, flags);
/* if table lookup failed, fall back to full lookup */ if (rt == net->ipv6.ip6_null_entry) { @@@ -2575,7 -2505,7 +2579,7 @@@
static int ip6_route_check_nh_onlink(struct net *net, struct fib6_config *cfg, - struct net_device *dev, + const struct net_device *dev, struct netlink_ext_ack *extack) { u32 tbid = l3mdev_fib_table(dev) ? : RT_TABLE_MAIN; @@@ -2625,7 -2555,7 +2629,7 @@@ static int ip6_route_check_nh(struct ne }
if (!grt) - grt = rt6_lookup(net, gw_addr, NULL, cfg->fc_ifindex, 1); + grt = rt6_lookup(net, gw_addr, NULL, cfg->fc_ifindex, NULL, 1);
if (!grt) goto out; @@@ -2651,79 -2581,6 +2655,79 @@@ out return err; }
+static int ip6_validate_gw(struct net *net, struct fib6_config *cfg, + struct net_device **_dev, struct inet6_dev **idev, + struct netlink_ext_ack *extack) +{ + const struct in6_addr *gw_addr = &cfg->fc_gateway; + int gwa_type = ipv6_addr_type(gw_addr); + bool skip_dev = gwa_type & IPV6_ADDR_LINKLOCAL ? false : true; + const struct net_device *dev = *_dev; + bool need_addr_check = !dev; + int err = -EINVAL; + + /* if gw_addr is local we will fail to detect this in case + * address is still TENTATIVE (DAD in progress). rt6_lookup() + * will return already-added prefix route via interface that + * prefix route was assigned to, which might be non-loopback. + */ + if (dev && + ipv6_chk_addr_and_flags(net, gw_addr, dev, skip_dev, 0, 0)) { + NL_SET_ERR_MSG(extack, "Gateway can not be a local address"); + goto out; + } + + if (gwa_type != (IPV6_ADDR_LINKLOCAL | IPV6_ADDR_UNICAST)) { + /* IPv6 strictly inhibits using not link-local + * addresses as nexthop address. + * Otherwise, router will not able to send redirects. + * It is very good, but in some (rare!) circumstances + * (SIT, PtP, NBMA NOARP links) it is handy to allow + * some exceptions. --ANK + * We allow IPv4-mapped nexthops to support RFC4798-type + * addressing + */ + if (!(gwa_type & (IPV6_ADDR_UNICAST | IPV6_ADDR_MAPPED))) { + NL_SET_ERR_MSG(extack, "Invalid gateway address"); + goto out; + } + + if (cfg->fc_flags & RTNH_F_ONLINK) + err = ip6_route_check_nh_onlink(net, cfg, dev, extack); + else + err = ip6_route_check_nh(net, cfg, _dev, idev); + + if (err) + goto out; + } + + /* reload in case device was changed */ + dev = *_dev; + + err = -EINVAL; + if (!dev) { + NL_SET_ERR_MSG(extack, "Egress device not specified"); + goto out; + } else if (dev->flags & IFF_LOOPBACK) { + NL_SET_ERR_MSG(extack, + "Egress device can not be loopback device for this route"); + goto out; + } + + /* if we did not check gw_addr above, do so now that the + * egress device has been resolved. + */ + if (need_addr_check && + ipv6_chk_addr_and_flags(net, gw_addr, dev, skip_dev, 0, 0)) { + NL_SET_ERR_MSG(extack, "Gateway can not be a local address"); + goto out; + } + + err = 0; +out: + return err; +} + static struct rt6_info *ip6_route_info_create(struct fib6_config *cfg, struct netlink_ext_ack *extack) { @@@ -2843,7 -2700,14 +2847,7 @@@ if (err) goto out; rt->dst.lwtstate = lwtstate_get(lwtstate); - if (lwtunnel_output_redirect(rt->dst.lwtstate)) { - rt->dst.lwtstate->orig_output = rt->dst.output; - rt->dst.output = lwtunnel_output; - } - if (lwtunnel_input_redirect(rt->dst.lwtstate)) { - rt->dst.lwtstate->orig_input = rt->dst.input; - rt->dst.input = lwtunnel_input; - } + lwtunnel_set_redirect(&rt->dst); }
ipv6_addr_prefix(&rt->rt6i_dst.addr, &cfg->fc_dst, cfg->fc_dst_len); @@@ -2906,23 -2770,67 +2910,23 @@@ }
if (cfg->fc_flags & RTF_GATEWAY) { - const struct in6_addr *gw_addr; - int gwa_type; - - gw_addr = &cfg->fc_gateway; - gwa_type = ipv6_addr_type(gw_addr); - - /* if gw_addr is local we will fail to detect this in case - * address is still TENTATIVE (DAD in progress). rt6_lookup() - * will return already-added prefix route via interface that - * prefix route was assigned to, which might be non-loopback. - */ - err = -EINVAL; - if (ipv6_chk_addr_and_flags(net, gw_addr, - gwa_type & IPV6_ADDR_LINKLOCAL ? - dev : NULL, 0, 0)) { - NL_SET_ERR_MSG(extack, "Invalid gateway address"); + err = ip6_validate_gw(net, cfg, &dev, &idev, extack); + if (err) goto out; - } - rt->rt6i_gateway = *gw_addr; - - if (gwa_type != (IPV6_ADDR_LINKLOCAL|IPV6_ADDR_UNICAST)) { - /* IPv6 strictly inhibits using not link-local - addresses as nexthop address. - Otherwise, router will not able to send redirects. - It is very good, but in some (rare!) circumstances - (SIT, PtP, NBMA NOARP links) it is handy to allow - some exceptions. --ANK - We allow IPv4-mapped nexthops to support RFC4798-type - addressing - */ - if (!(gwa_type & (IPV6_ADDR_UNICAST | - IPV6_ADDR_MAPPED))) { - NL_SET_ERR_MSG(extack, - "Invalid gateway address"); - goto out; - }
- if (cfg->fc_flags & RTNH_F_ONLINK) { - err = ip6_route_check_nh_onlink(net, cfg, dev, - extack); - } else { - err = ip6_route_check_nh(net, cfg, &dev, &idev); - } - if (err) - goto out; - } - err = -EINVAL; - if (!dev) { - NL_SET_ERR_MSG(extack, "Egress device not specified"); - goto out; - } else if (dev->flags & IFF_LOOPBACK) { - NL_SET_ERR_MSG(extack, - "Egress device can not be loopback device for this route"); - goto out; - } + rt->rt6i_gateway = cfg->fc_gateway; }
err = -ENODEV; if (!dev) goto out;
+ if (idev->cnf.disable_ipv6) { + NL_SET_ERR_MSG(extack, "IPv6 is disabled on nexthop device"); + err = -EACCES; + goto out; + } + if (!(dev->flags & IFF_UP)) { NL_SET_ERR_MSG(extack, "Nexthop device is not up"); err = -ENETDOWN; @@@ -4708,7 -4616,7 +4712,7 @@@ static int inet6_rtm_getroute(struct sk if (!ipv6_addr_any(&fl6.saddr)) flags |= RT6_LOOKUP_F_HAS_SADDR;
- dst = ip6_route_input_lookup(net, dev, &fl6, flags); + dst = ip6_route_input_lookup(net, dev, &fl6, NULL, flags);
rcu_read_unlock(); } else { @@@ -5073,7 -4981,7 +5077,7 @@@ static int __net_init ip6_route_net_ini { #ifdef CONFIG_PROC_FS proc_create("ipv6_route", 0, net->proc_net, &ipv6_route_proc_fops); - proc_create("rt6_stats", S_IRUGO, net->proc_net, &rt6_stats_seq_fops); + proc_create("rt6_stats", 0444, net->proc_net, &rt6_stats_seq_fops); #endif return 0; } diff --combined net/netfilter/nf_tables_api.c index 6e93782bbe4f,530e12ae52d7..9134cc429ad4 --- a/net/netfilter/nf_tables_api.c +++ b/net/netfilter/nf_tables_api.c @@@ -74,15 -74,77 +74,77 @@@ static void nft_trans_destroy(struct nf kfree(trans); }
+ /* removal requests are queued in the commit_list, but not acted upon + * until after all new rules are in place. + * + * Therefore, nf_register_net_hook(net, &nat_hook) runs before pending + * nf_unregister_net_hook(). + * + * nf_register_net_hook thus fails if a nat hook is already in place + * even if the conflicting hook is about to be removed. + * + * If collision is detected, search commit_log for DELCHAIN matching + * the new nat hooknum; if we find one collision is temporary: + * + * Either transaction is aborted (new/colliding hook is removed), or + * transaction is committed (old hook is removed). + */ + static bool nf_tables_allow_nat_conflict(const struct net *net, + const struct nf_hook_ops *ops) + { + const struct nft_trans *trans; + bool ret = false; + + if (!ops->nat_hook) + return false; + + list_for_each_entry(trans, &net->nft.commit_list, list) { + const struct nf_hook_ops *pending_ops; + const struct nft_chain *pending; + + if (trans->msg_type != NFT_MSG_NEWCHAIN && + trans->msg_type != NFT_MSG_DELCHAIN) + continue; + + pending = trans->ctx.chain; + if (!nft_is_base_chain(pending)) + continue; + + pending_ops = &nft_base_chain(pending)->ops; + if (pending_ops->nat_hook && + pending_ops->pf == ops->pf && + pending_ops->hooknum == ops->hooknum) { + /* other hook registration already pending? */ + if (trans->msg_type == NFT_MSG_NEWCHAIN) + return false; + + ret = true; + } + } + + return ret; + } + static int nf_tables_register_hook(struct net *net, const struct nft_table *table, struct nft_chain *chain) { + struct nf_hook_ops *ops; + int ret; + if (table->flags & NFT_TABLE_F_DORMANT || !nft_is_base_chain(chain)) return 0;
- return nf_register_net_hook(net, &nft_base_chain(chain)->ops); + ops = &nft_base_chain(chain)->ops; + ret = nf_register_net_hook(net, ops); + if (ret == -EBUSY && nf_tables_allow_nat_conflict(net, ops)) { + ops->nat_hook = false; + ret = nf_register_net_hook(net, ops); + ops->nat_hook = true; + } + + return ret; }
static void nf_tables_unregister_hook(struct net *net, @@@ -384,9 -446,9 +446,9 @@@ static inline u64 nf_tables_alloc_handl return ++table->hgenerator; }
-static const struct nf_chain_type *chain_type[NFPROTO_NUMPROTO][NFT_CHAIN_T_MAX]; +static const struct nft_chain_type *chain_type[NFPROTO_NUMPROTO][NFT_CHAIN_T_MAX];
-static const struct nf_chain_type * +static const struct nft_chain_type * __nf_tables_chain_type_lookup(const struct nlattr *nla, u8 family) { int i; @@@ -399,10 -461,10 +461,10 @@@ return NULL; }
-static const struct nf_chain_type * +static const struct nft_chain_type * nf_tables_chain_type_lookup(const struct nlattr *nla, u8 family, bool autoload) { - const struct nf_chain_type *type; + const struct nft_chain_type *type;
type = __nf_tables_chain_type_lookup(nla, family); if (type != NULL) @@@ -859,22 -921,26 +921,22 @@@ static void nf_tables_table_destroy(str kfree(ctx->table); }
-int nft_register_chain_type(const struct nf_chain_type *ctype) +void nft_register_chain_type(const struct nft_chain_type *ctype) { - int err = 0; - if (WARN_ON(ctype->family >= NFPROTO_NUMPROTO)) - return -EINVAL; + return;
nfnl_lock(NFNL_SUBSYS_NFTABLES); - if (chain_type[ctype->family][ctype->type] != NULL) { - err = -EBUSY; - goto out; + if (WARN_ON(chain_type[ctype->family][ctype->type] != NULL)) { + nfnl_unlock(NFNL_SUBSYS_NFTABLES); + return; } chain_type[ctype->family][ctype->type] = ctype; -out: nfnl_unlock(NFNL_SUBSYS_NFTABLES); - return err; } EXPORT_SYMBOL_GPL(nft_register_chain_type);
-void nft_unregister_chain_type(const struct nf_chain_type *ctype) +void nft_unregister_chain_type(const struct nft_chain_type *ctype) { nfnl_lock(NFNL_SUBSYS_NFTABLES); chain_type[ctype->family][ctype->type] = NULL; @@@ -1211,23 -1277,17 +1273,21 @@@ static void nft_chain_stats_replace(str rcu_assign_pointer(chain->stats, newstats); }
-static void nf_tables_chain_destroy(struct nft_chain *chain) +static void nf_tables_chain_destroy(struct nft_ctx *ctx) { + struct nft_chain *chain = ctx->chain; + BUG_ON(chain->use > 0);
if (nft_is_base_chain(chain)) { struct nft_base_chain *basechain = nft_base_chain(chain);
+ if (basechain->type->free) + basechain->type->free(ctx); module_put(basechain->type->owner); free_percpu(basechain->stats); if (basechain->stats) static_branch_dec(&nft_counters_enabled); - if (basechain->ops.dev != NULL) - dev_put(basechain->ops.dev); kfree(chain->name); kfree(basechain); } else { @@@ -1239,7 -1299,7 +1299,7 @@@ struct nft_chain_hook { u32 num; s32 priority; - const struct nf_chain_type *type; + const struct nft_chain_type *type; struct net_device *dev; };
@@@ -1249,7 -1309,7 +1309,7 @@@ static int nft_chain_parse_hook(struct bool create) { struct nlattr *ha[NFTA_HOOK_MAX + 1]; - const struct nf_chain_type *type; + const struct nft_chain_type *type; struct net_device *dev; int err;
@@@ -1294,7 -1354,7 +1354,7 @@@ }
nla_strlcpy(ifname, ha[NFTA_HOOK_DEV], IFNAMSIZ); - dev = dev_get_by_name(net, ifname); + dev = __dev_get_by_name(net, ifname); if (!dev) { module_put(type->owner); return -ENOENT; @@@ -1311,8 -1371,6 +1371,6 @@@ static void nft_chain_release_hook(struct nft_chain_hook *hook) { module_put(hook->type->owner); - if (hook->dev != NULL) - dev_put(hook->dev); }
static int nf_tables_addchain(struct nft_ctx *ctx, u8 family, u8 genmask, @@@ -1358,9 -1416,6 +1416,9 @@@ }
basechain->type = hook.type; + if (basechain->type->init) + basechain->type->init(ctx); + chain = &basechain->chain;
ops = &basechain->ops; @@@ -1381,8 -1436,6 +1439,8 @@@ if (chain == NULL) return -ENOMEM; } + ctx->chain = chain; + INIT_LIST_HEAD(&chain->rules); chain->handle = nf_tables_alloc_handle(table); chain->table = table; @@@ -1396,6 -1449,7 +1454,6 @@@ if (err < 0) goto err1;
- ctx->chain = chain; err = nft_trans_chain_add(ctx, NFT_MSG_NEWCHAIN); if (err < 0) goto err2; @@@ -1407,7 -1461,7 +1465,7 @@@ err2: nf_tables_unregister_hook(net, table, chain); err1: - nf_tables_chain_destroy(chain); + nf_tables_chain_destroy(ctx);
return err; } @@@ -1915,6 -1969,7 +1973,7 @@@ static const struct nla_policy nft_rule [NFTA_RULE_POSITION] = { .type = NLA_U64 }, [NFTA_RULE_USERDATA] = { .type = NLA_BINARY, .len = NFT_USERDATA_MAXLEN }, + [NFTA_RULE_ID] = { .type = NLA_U32 }, };
static int nf_tables_fill_rule_info(struct sk_buff *skb, struct net *net, @@@ -2450,6 -2505,9 +2509,9 @@@ EXPORT_SYMBOL_GPL(nft_unregister_set)
static bool nft_set_ops_candidate(const struct nft_set_ops *ops, u32 flags) { + if ((flags & NFT_SET_EVAL) && !ops->update) + return false; + return (flags & ops->features) == (flags & NFT_SET_FEATURES); }
@@@ -2514,7 -2572,7 +2576,7 @@@ nft_select_set_ops(const struct nft_ct if (est.space == best.space && est.lookup < best.lookup) break; - } else if (est.size < best.size) { + } else if (est.size < best.size || !bops) { break; } continue; @@@ -2633,11 -2691,11 +2695,11 @@@ static struct nft_set *nf_tables_set_lo return ERR_PTR(-ENOENT); }
-struct nft_set *nft_set_lookup(const struct net *net, - const struct nft_table *table, - const struct nlattr *nla_set_name, - const struct nlattr *nla_set_id, - u8 genmask) +struct nft_set *nft_set_lookup_global(const struct net *net, + const struct nft_table *table, + const struct nlattr *nla_set_name, + const struct nlattr *nla_set_id, + u8 genmask) { struct nft_set *set;
@@@ -2650,7 -2708,7 +2712,7 @@@ } return set; } -EXPORT_SYMBOL_GPL(nft_set_lookup); +EXPORT_SYMBOL_GPL(nft_set_lookup_global);
static int nf_tables_set_alloc_name(struct nft_ctx *ctx, struct nft_set *set, const char *name) @@@ -3319,6 -3377,8 +3381,8 @@@ static const struct nla_policy nft_set_ [NFTA_SET_ELEM_TIMEOUT] = { .type = NLA_U64 }, [NFTA_SET_ELEM_USERDATA] = { .type = NLA_BINARY, .len = NFT_USERDATA_MAXLEN }, + [NFTA_SET_ELEM_EXPR] = { .type = NLA_NESTED }, + [NFTA_SET_ELEM_OBJREF] = { .type = NLA_STRING }, };
static const struct nla_policy nft_set_elem_list_policy[NFTA_SET_ELEM_LIST_MAX + 1] = { @@@ -4032,10 -4092,17 +4096,10 @@@ static int nf_tables_newsetelem(struct if (err < 0) return err;
- set = nf_tables_set_lookup(ctx.table, nla[NFTA_SET_ELEM_LIST_SET], - genmask); - if (IS_ERR(set)) { - if (nla[NFTA_SET_ELEM_LIST_SET_ID]) { - set = nf_tables_set_lookup_byid(net, - nla[NFTA_SET_ELEM_LIST_SET_ID], - genmask); - } - if (IS_ERR(set)) - return PTR_ERR(set); - } + set = nft_set_lookup_global(net, ctx.table, nla[NFTA_SET_ELEM_LIST_SET], + nla[NFTA_SET_ELEM_LIST_SET_ID], genmask); + if (IS_ERR(set)) + return PTR_ERR(set);
if (!list_empty(&set->bindings) && set->flags & NFT_SET_CONSTANT) return -EBUSY; @@@ -4325,9 -4392,9 +4389,9 @@@ struct nft_object *nf_tables_obj_lookup } EXPORT_SYMBOL_GPL(nf_tables_obj_lookup);
-struct nft_object *nf_tables_obj_lookup_byhandle(const struct nft_table *table, - const struct nlattr *nla, - u32 objtype, u8 genmask) +static struct nft_object *nf_tables_obj_lookup_byhandle(const struct nft_table *table, + const struct nlattr *nla, + u32 objtype, u8 genmask) { struct nft_object *obj;
@@@ -4354,20 -4421,16 +4418,20 @@@ static struct nft_object *nft_obj_init( const struct nft_object_type *type, const struct nlattr *attr) { - struct nlattr *tb[type->maxattr + 1]; + struct nlattr **tb; const struct nft_object_ops *ops; struct nft_object *obj; - int err; + int err = -ENOMEM; + + tb = kmalloc_array(type->maxattr + 1, sizeof(*tb), GFP_KERNEL); + if (!tb) + goto err1;
if (attr) { err = nla_parse_nested(tb, type->maxattr, attr, type->policy, NULL); if (err < 0) - goto err1; + goto err2; } else { memset(tb, 0, sizeof(tb[0]) * (type->maxattr + 1)); } @@@ -4376,7 -4439,7 +4440,7 @@@ ops = type->select_ops(ctx, (const struct nlattr * const *)tb); if (IS_ERR(ops)) { err = PTR_ERR(ops); - goto err1; + goto err2; } } else { ops = type->ops; @@@ -4384,21 -4447,18 +4448,21 @@@
err = -ENOMEM; obj = kzalloc(sizeof(*obj) + ops->size, GFP_KERNEL); - if (obj == NULL) - goto err1; + if (!obj) + goto err2;
err = ops->init(ctx, (const struct nlattr * const *)tb, obj); if (err < 0) - goto err2; + goto err3;
obj->ops = ops;
+ kfree(tb); return obj; -err2: +err3: kfree(obj); +err2: + kfree(tb); err1: return ERR_PTR(err); } @@@ -4854,7 -4914,7 +4918,7 @@@ struct nft_flowtable *nf_tables_flowtab } EXPORT_SYMBOL_GPL(nf_tables_flowtable_lookup);
-struct nft_flowtable * +static struct nft_flowtable * nf_tables_flowtable_lookup_byhandle(const struct nft_table *table, const struct nlattr *nla, u8 genmask) { @@@ -4868,8 -4928,6 +4932,6 @@@ return ERR_PTR(-ENOENT); }
- #define NFT_FLOWTABLE_DEVICE_MAX 8 - static int nf_tables_parse_devices(const struct nft_ctx *ctx, const struct nlattr *attr, struct net_device *dev_array[], int *len) @@@ -4886,7 -4944,7 +4948,7 @@@ }
nla_strlcpy(ifname, tmp, IFNAMSIZ); - dev = dev_get_by_name(ctx->net, ifname); + dev = __dev_get_by_name(ctx->net, ifname); if (!dev) { err = -ENOENT; goto err1; @@@ -4942,13 -5000,11 +5004,11 @@@ static int nf_tables_flowtable_parse_ho err = nf_tables_parse_devices(ctx, tb[NFTA_FLOWTABLE_HOOK_DEVS], dev_array, &n); if (err < 0) - goto err1; + return err;
ops = kzalloc(sizeof(struct nf_hook_ops) * n, GFP_KERNEL); - if (!ops) { - err = -ENOMEM; - goto err1; - } + if (!ops) + return -ENOMEM;
flowtable->hooknum = hooknum; flowtable->priority = priority; @@@ -4962,13 -5018,10 +5022,10 @@@ flowtable->ops[i].priv = &flowtable->data.rhashtable; flowtable->ops[i].hook = flowtable->data.type->hook; flowtable->ops[i].dev = dev_array[i]; + flowtable->dev_name[i] = kstrdup(dev_array[i]->name, + GFP_KERNEL); }
- err = 0; - err1: - for (i = 0; i < n; i++) - dev_put(dev_array[i]); - return err; }
@@@ -5139,8 -5192,10 +5196,10 @@@ static int nf_tables_newflowtable(struc err5: i = flowtable->ops_len; err4: - for (k = i - 1; k >= 0; k--) + for (k = i - 1; k >= 0; k--) { + kfree(flowtable->dev_name[k]); nf_unregister_net_hook(net, &flowtable->ops[k]); + }
kfree(flowtable->ops); err3: @@@ -5230,9 -5285,9 +5289,9 @@@ static int nf_tables_fill_flowtable_inf goto nla_put_failure;
for (i = 0; i < flowtable->ops_len; i++) { - if (flowtable->ops[i].dev && + if (flowtable->dev_name[i][0] && nla_put_string(skb, NFTA_DEVICE_NAME, - flowtable->ops[i].dev->name)) + flowtable->dev_name[i])) goto nla_put_failure; } nla_nest_end(skb, nest_devs); @@@ -5474,6 -5529,7 +5533,7 @@@ static void nft_flowtable_event(unsigne continue;
nf_unregister_net_hook(dev_net(dev), &flowtable->ops[i]); + flowtable->dev_name[i][0] = '\0'; flowtable->ops[i].dev = NULL; break; } @@@ -5701,7 -5757,7 +5761,7 @@@ static void nf_tables_commit_release(st nf_tables_table_destroy(&trans->ctx); break; case NFT_MSG_DELCHAIN: - nf_tables_chain_destroy(trans->ctx.chain); + nf_tables_chain_destroy(&trans->ctx); break; case NFT_MSG_DELRULE: nf_tables_rule_destroy(&trans->ctx, nft_trans_rule(trans)); @@@ -5872,7 -5928,7 +5932,7 @@@ static void nf_tables_abort_release(str nf_tables_table_destroy(&trans->ctx); break; case NFT_MSG_NEWCHAIN: - nf_tables_chain_destroy(trans->ctx.chain); + nf_tables_chain_destroy(&trans->ctx); break; case NFT_MSG_NEWRULE: nf_tables_rule_destroy(&trans->ctx, nft_trans_rule(trans)); @@@ -6019,7 -6075,7 +6079,7 @@@ static const struct nfnetlink_subsyste };
int nft_chain_validate_dependency(const struct nft_chain *chain, - enum nft_chain_type type) + enum nft_chain_types type) { const struct nft_base_chain *basechain;
@@@ -6522,7 -6578,7 +6582,7 @@@ int __nft_release_basechain(struct nft_ } list_del(&ctx->chain->list); ctx->table->use--; - nf_tables_chain_destroy(ctx->chain); + nf_tables_chain_destroy(ctx);
return 0; } @@@ -6538,7 -6594,6 +6598,7 @@@ static void __nft_release_tables(struc struct nft_set *set, *ns; struct nft_ctx ctx = { .net = net, + .family = NFPROTO_NETDEV, };
list_for_each_entry_safe(table, nt, &net->nft.tables, list) { @@@ -6575,10 -6630,9 +6635,10 @@@ nft_obj_destroy(obj); } list_for_each_entry_safe(chain, nc, &table->chains, list) { + ctx.chain = chain; list_del(&chain->list); table->use--; - nf_tables_chain_destroy(chain); + nf_tables_chain_destroy(&ctx); } list_del(&table->list); nf_tables_table_destroy(&ctx); @@@ -6609,8 -6663,6 +6669,8 @@@ static int __init nf_tables_module_init { int err;
+ nft_chain_filter_init(); + info = kmalloc(sizeof(struct nft_expr_info) * NFT_RULE_MAXEXPRS, GFP_KERNEL); if (info == NULL) { @@@ -6645,7 -6697,6 +6705,7 @@@ static void __exit nf_tables_module_exi rcu_barrier(); nf_tables_core_module_exit(); kfree(info); + nft_chain_filter_fini(); }
module_init(nf_tables_module_init); diff --combined net/netlink/af_netlink.c index f1b02d87e336,70c455341243..fa556fdef57d --- a/net/netlink/af_netlink.c +++ b/net/netlink/af_netlink.c @@@ -1085,6 -1085,9 +1085,9 @@@ static int netlink_connect(struct socke if (addr->sa_family != AF_NETLINK) return -EINVAL;
+ if (alen < sizeof(struct sockaddr_nl)) + return -EINVAL; + if ((nladdr->nl_groups || nladdr->nl_pid) && !netlink_allowed(sock, NL_CFG_F_NONROOT_SEND)) return -EPERM; @@@ -1105,7 -1108,7 +1108,7 @@@ }
static int netlink_getname(struct socket *sock, struct sockaddr *addr, - int *addr_len, int peer) + int peer) { struct sock *sk = sock->sk; struct netlink_sock *nlk = nlk_sk(sk); @@@ -1113,6 -1116,7 +1116,6 @@@
nladdr->nl_family = AF_NETLINK; nladdr->nl_pad = 0; - *addr_len = sizeof(*nladdr);
if (peer) { nladdr->nl_pid = nlk->dst_portid; @@@ -1123,7 -1127,7 +1126,7 @@@ nladdr->nl_groups = nlk->groups ? nlk->groups[0] : 0; netlink_unlock_table(); } - return 0; + return sizeof(*nladdr); }
static int netlink_ioctl(struct socket *sock, unsigned int cmd, diff --combined net/sched/act_api.c index 0d78b58e1898,efc6bfb9a4e0..72251241665a --- a/net/sched/act_api.c +++ b/net/sched/act_api.c @@@ -109,42 -109,6 +109,42 @@@ int __tcf_idr_release(struct tc_action } EXPORT_SYMBOL(__tcf_idr_release);
+static size_t tcf_action_shared_attrs_size(const struct tc_action *act) +{ + u32 cookie_len = 0; + + if (act->act_cookie) + cookie_len = nla_total_size(act->act_cookie->len); + + return nla_total_size(0) /* action number nested */ + + nla_total_size(IFNAMSIZ) /* TCA_ACT_KIND */ + + cookie_len /* TCA_ACT_COOKIE */ + + nla_total_size(0) /* TCA_ACT_STATS nested */ + /* TCA_STATS_BASIC */ + + nla_total_size_64bit(sizeof(struct gnet_stats_basic)) + /* TCA_STATS_QUEUE */ + + nla_total_size_64bit(sizeof(struct gnet_stats_queue)) + + nla_total_size(0) /* TCA_OPTIONS nested */ + + nla_total_size(sizeof(struct tcf_t)); /* TCA_GACT_TM */ +} + +static size_t tcf_action_full_attrs_size(size_t sz) +{ + return NLMSG_HDRLEN /* struct nlmsghdr */ + + sizeof(struct tcamsg) + + nla_total_size(0) /* TCA_ACT_TAB nested */ + + sz; +} + +static size_t tcf_action_fill_size(const struct tc_action *act) +{ + size_t sz = tcf_action_shared_attrs_size(act); + + if (act->ops->get_fill_size) + return act->ops->get_fill_size(act) + sz; + return sz; +} + static int tcf_dump_walker(struct tcf_idrinfo *idrinfo, struct sk_buff *skb, struct netlink_callback *cb) { @@@ -171,8 -135,10 +171,10 @@@ continue;
nest = nla_nest_start(skb, n_i); - if (!nest) + if (!nest) { + index--; goto nla_put_failure; + } err = tcf_action_dump_1(skb, p, 0, 0); if (err < 0) { index--; @@@ -238,8 -204,7 +240,8 @@@ nla_put_failure
int tcf_generic_walker(struct tc_action_net *tn, struct sk_buff *skb, struct netlink_callback *cb, int type, - const struct tc_action_ops *ops) + const struct tc_action_ops *ops, + struct netlink_ext_ack *extack) { struct tcf_idrinfo *idrinfo = tn->idrinfo;
@@@ -248,8 -213,7 +250,8 @@@ } else if (type == RTM_GETACTION) { return tcf_dump_walker(idrinfo, skb, cb); } else { - WARN(1, "tcf_generic_walker: unknown action %d\n", type); + WARN(1, "tcf_generic_walker: unknown command %d\n", type); + NL_SET_ERR_MSG(extack, "tcf_generic_walker: unknown command"); return -EINVAL; } } @@@ -296,6 -260,14 +298,6 @@@ bool tcf_idr_check(struct tc_action_ne } EXPORT_SYMBOL(tcf_idr_check);
-void tcf_idr_cleanup(struct tc_action *a, struct nlattr *est) -{ - if (est) - gen_kill_estimator(&a->tcfa_rate_est); - free_tcf(a); -} -EXPORT_SYMBOL(tcf_idr_cleanup); - int tcf_idr_create(struct tc_action_net *tn, u32 index, struct nlattr *est, struct tc_action **a, const struct tc_action_ops *ops, int bind, bool cpustats) @@@ -635,8 -607,7 +637,8 @@@ static struct tc_cookie *nla_memdup_coo
struct tc_action *tcf_action_init_1(struct net *net, struct tcf_proto *tp, struct nlattr *nla, struct nlattr *est, - char *name, int ovr, int bind) + char *name, int ovr, int bind, + struct netlink_ext_ack *extack) { struct tc_action *a; struct tc_action_ops *a_o; @@@ -647,40 -618,31 +649,40 @@@ int err;
if (name == NULL) { - err = nla_parse_nested(tb, TCA_ACT_MAX, nla, NULL, NULL); + err = nla_parse_nested(tb, TCA_ACT_MAX, nla, NULL, extack); if (err < 0) goto err_out; err = -EINVAL; kind = tb[TCA_ACT_KIND]; - if (kind == NULL) + if (!kind) { + NL_SET_ERR_MSG(extack, "TC action kind must be specified"); goto err_out; - if (nla_strlcpy(act_name, kind, IFNAMSIZ) >= IFNAMSIZ) + } + if (nla_strlcpy(act_name, kind, IFNAMSIZ) >= IFNAMSIZ) { + NL_SET_ERR_MSG(extack, "TC action name too long"); goto err_out; + } if (tb[TCA_ACT_COOKIE]) { int cklen = nla_len(tb[TCA_ACT_COOKIE]);
- if (cklen > TC_COOKIE_MAX_SIZE) + if (cklen > TC_COOKIE_MAX_SIZE) { + NL_SET_ERR_MSG(extack, "TC cookie size above the maximum"); goto err_out; + }
cookie = nla_memdup_cookie(tb); if (!cookie) { + NL_SET_ERR_MSG(extack, "No memory to generate TC cookie"); err = -ENOMEM; goto err_out; } } } else { - err = -EINVAL; - if (strlcpy(act_name, name, IFNAMSIZ) >= IFNAMSIZ) + if (strlcpy(act_name, name, IFNAMSIZ) >= IFNAMSIZ) { + NL_SET_ERR_MSG(extack, "TC action name too long"); + err = -EINVAL; goto err_out; + } }
a_o = tc_lookup_action_n(act_name); @@@ -703,17 -665,15 +705,17 @@@ goto err_mod; } #endif + NL_SET_ERR_MSG(extack, "Failed to load TC action module"); err = -ENOENT; goto err_out; }
/* backward compatibility for policer */ if (name == NULL) - err = a_o->init(net, tb[TCA_ACT_OPTIONS], est, &a, ovr, bind); + err = a_o->init(net, tb[TCA_ACT_OPTIONS], est, &a, ovr, bind, + extack); else - err = a_o->init(net, nla, est, &a, ovr, bind); + err = a_o->init(net, nla, est, &a, ovr, bind, extack); if (err < 0) goto err_mod;
@@@ -739,7 -699,6 +741,7 @@@
list_add_tail(&a->list, &actions); tcf_action_destroy(&actions, bind); + NL_SET_ERR_MSG(extack, "Failed to init TC action chain"); return ERR_PTR(err); } } @@@ -769,35 -728,29 +771,35 @@@ static void cleanup_a(struct list_head
int tcf_action_init(struct net *net, struct tcf_proto *tp, struct nlattr *nla, struct nlattr *est, char *name, int ovr, int bind, - struct list_head *actions) + struct list_head *actions, size_t *attr_size, + struct netlink_ext_ack *extack) { struct nlattr *tb[TCA_ACT_MAX_PRIO + 1]; struct tc_action *act; + size_t sz = 0; int err; int i;
- err = nla_parse_nested(tb, TCA_ACT_MAX_PRIO, nla, NULL, NULL); + err = nla_parse_nested(tb, TCA_ACT_MAX_PRIO, nla, NULL, extack); if (err < 0) return err;
for (i = 1; i <= TCA_ACT_MAX_PRIO && tb[i]; i++) { - act = tcf_action_init_1(net, tp, tb[i], est, name, ovr, bind); + act = tcf_action_init_1(net, tp, tb[i], est, name, ovr, bind, + extack); if (IS_ERR(act)) { err = PTR_ERR(act); goto err; } act->order = i; + sz += tcf_action_fill_size(act); if (ovr) act->tcfa_refcnt++; list_add_tail(&act->list, actions); }
+ *attr_size = tcf_action_full_attrs_size(sz); + /* Remove the temp refcnt which was necessary to protect against * destroying an existing action which was being replaced */ @@@ -871,7 -824,7 +873,7 @@@ static int tca_get_fill(struct sk_buff t->tca__pad2 = 0;
nest = nla_nest_start(skb, TCA_ACT_TAB); - if (nest == NULL) + if (!nest) goto out_nlmsg_trim;
if (tcf_action_dump(skb, actions, bind, ref) < 0) @@@ -889,8 -842,7 +891,8 @@@ out_nlmsg_trim
static int tcf_get_notify(struct net *net, u32 portid, struct nlmsghdr *n, - struct list_head *actions, int event) + struct list_head *actions, int event, + struct netlink_ext_ack *extack) { struct sk_buff *skb;
@@@ -899,7 -851,6 +901,7 @@@ return -ENOBUFS; if (tca_get_fill(skb, actions, portid, n->nlmsg_seq, 0, event, 0, 0) <= 0) { + NL_SET_ERR_MSG(extack, "Failed to fill netlink attributes while adding TC action"); kfree_skb(skb); return -EINVAL; } @@@ -908,8 -859,7 +910,8 @@@ }
static struct tc_action *tcf_action_get_1(struct net *net, struct nlattr *nla, - struct nlmsghdr *n, u32 portid) + struct nlmsghdr *n, u32 portid, + struct netlink_ext_ack *extack) { struct nlattr *tb[TCA_ACT_MAX + 1]; const struct tc_action_ops *ops; @@@ -917,26 -867,22 +919,26 @@@ int index; int err;
- err = nla_parse_nested(tb, TCA_ACT_MAX, nla, NULL, NULL); + err = nla_parse_nested(tb, TCA_ACT_MAX, nla, NULL, extack); if (err < 0) goto err_out;
err = -EINVAL; if (tb[TCA_ACT_INDEX] == NULL || - nla_len(tb[TCA_ACT_INDEX]) < sizeof(index)) + nla_len(tb[TCA_ACT_INDEX]) < sizeof(index)) { + NL_SET_ERR_MSG(extack, "Invalid TC action index value"); goto err_out; + } index = nla_get_u32(tb[TCA_ACT_INDEX]);
err = -EINVAL; ops = tc_lookup_action(tb[TCA_ACT_KIND]); - if (!ops) /* could happen in batch of actions */ + if (!ops) { /* could happen in batch of actions */ + NL_SET_ERR_MSG(extack, "Specified TC action not found"); goto err_out; + } err = -ENOENT; - if (ops->lookup(net, &a, index) == 0) + if (ops->lookup(net, &a, index, extack) == 0) goto err_mod;
module_put(ops->owner); @@@ -949,8 -895,7 +951,8 @@@ err_out }
static int tca_action_flush(struct net *net, struct nlattr *nla, - struct nlmsghdr *n, u32 portid) + struct nlmsghdr *n, u32 portid, + struct netlink_ext_ack *extack) { struct sk_buff *skb; unsigned char *b; @@@ -964,45 -909,39 +966,45 @@@ int err = -ENOMEM;
skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL); - if (!skb) { - pr_debug("tca_action_flush: failed skb alloc\n"); + if (!skb) return err; - }
b = skb_tail_pointer(skb);
- err = nla_parse_nested(tb, TCA_ACT_MAX, nla, NULL, NULL); + err = nla_parse_nested(tb, TCA_ACT_MAX, nla, NULL, extack); if (err < 0) goto err_out;
err = -EINVAL; kind = tb[TCA_ACT_KIND]; ops = tc_lookup_action(kind); - if (!ops) /*some idjot trying to flush unknown action */ + if (!ops) { /*some idjot trying to flush unknown action */ + NL_SET_ERR_MSG(extack, "Cannot flush unknown TC action"); goto err_out; + }
nlh = nlmsg_put(skb, portid, n->nlmsg_seq, RTM_DELACTION, sizeof(*t), 0); - if (!nlh) + if (!nlh) { + NL_SET_ERR_MSG(extack, "Failed to create TC action flush notification"); goto out_module_put; + } t = nlmsg_data(nlh); t->tca_family = AF_UNSPEC; t->tca__pad1 = 0; t->tca__pad2 = 0;
nest = nla_nest_start(skb, TCA_ACT_TAB); - if (nest == NULL) + if (!nest) { + NL_SET_ERR_MSG(extack, "Failed to add new netlink message"); goto out_module_put; + }
- err = ops->walk(net, skb, &dcb, RTM_DELACTION, ops); - if (err <= 0) + err = ops->walk(net, skb, &dcb, RTM_DELACTION, ops, extack); + if (err <= 0) { + nla_nest_cancel(skb, nest); goto out_module_put; + }
nla_nest_end(skb, nest);
@@@ -1013,8 -952,6 +1015,8 @@@ n->nlmsg_flags & NLM_F_ECHO); if (err > 0) return 0; + if (err < 0) + NL_SET_ERR_MSG(extack, "Failed to send TC action flush notification");
return err;
@@@ -1027,19 -964,17 +1029,19 @@@ err_out
static int tcf_del_notify(struct net *net, struct nlmsghdr *n, struct list_head *actions, - u32 portid) + u32 portid, size_t attr_size, struct netlink_ext_ack *extack) { int ret; struct sk_buff *skb;
- skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL); + skb = alloc_skb(attr_size <= NLMSG_GOODSIZE ? NLMSG_GOODSIZE : attr_size, + GFP_KERNEL); if (!skb) return -ENOBUFS;
if (tca_get_fill(skb, actions, portid, n->nlmsg_seq, 0, RTM_DELACTION, 0, 1) <= 0) { + NL_SET_ERR_MSG(extack, "Failed to fill netlink TC action attributes"); kfree_skb(skb); return -EINVAL; } @@@ -1047,7 -982,6 +1049,7 @@@ /* now do the delete */ ret = tcf_action_destroy(actions, 0); if (ret < 0) { + NL_SET_ERR_MSG(extack, "Failed to delete TC action"); kfree_skb(skb); return ret; } @@@ -1061,43 -995,38 +1063,43 @@@
static int tca_action_gd(struct net *net, struct nlattr *nla, struct nlmsghdr *n, - u32 portid, int event) + u32 portid, int event, struct netlink_ext_ack *extack) { int i, ret; struct nlattr *tb[TCA_ACT_MAX_PRIO + 1]; struct tc_action *act; + size_t attr_size = 0; LIST_HEAD(actions);
- ret = nla_parse_nested(tb, TCA_ACT_MAX_PRIO, nla, NULL, NULL); + ret = nla_parse_nested(tb, TCA_ACT_MAX_PRIO, nla, NULL, extack); if (ret < 0) return ret;
if (event == RTM_DELACTION && n->nlmsg_flags & NLM_F_ROOT) { - if (tb[1] != NULL) - return tca_action_flush(net, tb[1], n, portid); - else - return -EINVAL; + if (tb[1]) + return tca_action_flush(net, tb[1], n, portid, extack); + + NL_SET_ERR_MSG(extack, "Invalid netlink attributes while flushing TC action"); + return -EINVAL; }
for (i = 1; i <= TCA_ACT_MAX_PRIO && tb[i]; i++) { - act = tcf_action_get_1(net, tb[i], n, portid); + act = tcf_action_get_1(net, tb[i], n, portid, extack); if (IS_ERR(act)) { ret = PTR_ERR(act); goto err; } act->order = i; + attr_size += tcf_action_fill_size(act); list_add_tail(&act->list, &actions); }
+ attr_size = tcf_action_full_attrs_size(attr_size); + if (event == RTM_GETACTION) - ret = tcf_get_notify(net, portid, n, &actions, event); + ret = tcf_get_notify(net, portid, n, &actions, event, extack); else { /* delete */ - ret = tcf_del_notify(net, n, &actions, portid); + ret = tcf_del_notify(net, n, &actions, portid, attr_size, extack); if (ret) goto err; return ret; @@@ -1110,19 -1039,17 +1112,19 @@@ err
static int tcf_add_notify(struct net *net, struct nlmsghdr *n, struct list_head *actions, - u32 portid) + u32 portid, size_t attr_size, struct netlink_ext_ack *extack) { struct sk_buff *skb; int err = 0;
- skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL); + skb = alloc_skb(attr_size <= NLMSG_GOODSIZE ? NLMSG_GOODSIZE : attr_size, + GFP_KERNEL); if (!skb) return -ENOBUFS;
if (tca_get_fill(skb, actions, portid, n->nlmsg_seq, n->nlmsg_flags, RTM_NEWACTION, 0, 0) <= 0) { + NL_SET_ERR_MSG(extack, "Failed to fill netlink attributes while adding TC action"); kfree_skb(skb); return -EINVAL; } @@@ -1135,19 -1062,16 +1137,19 @@@ }
static int tcf_action_add(struct net *net, struct nlattr *nla, - struct nlmsghdr *n, u32 portid, int ovr) + struct nlmsghdr *n, u32 portid, int ovr, + struct netlink_ext_ack *extack) { + size_t attr_size = 0; int ret = 0; LIST_HEAD(actions);
- ret = tcf_action_init(net, NULL, nla, NULL, NULL, ovr, 0, &actions); + ret = tcf_action_init(net, NULL, nla, NULL, NULL, ovr, 0, &actions, + &attr_size, extack); if (ret) return ret;
- return tcf_add_notify(net, n, &actions, portid); + return tcf_add_notify(net, n, &actions, portid, attr_size, extack); }
static u32 tcaa_root_flags_allowed = TCA_FLAG_LARGE_DUMP_ON; @@@ -1175,7 -1099,7 +1177,7 @@@ static int tc_ctl_action(struct sk_buf return ret;
if (tca[TCA_ACT_TAB] == NULL) { - pr_notice("tc_ctl_action: received NO action attribs\n"); + NL_SET_ERR_MSG(extack, "Netlink action attributes missing"); return -EINVAL; }
@@@ -1191,18 -1115,17 +1193,18 @@@ if (n->nlmsg_flags & NLM_F_REPLACE) ovr = 1; replay: - ret = tcf_action_add(net, tca[TCA_ACT_TAB], n, portid, ovr); + ret = tcf_action_add(net, tca[TCA_ACT_TAB], n, portid, ovr, + extack); if (ret == -EAGAIN) goto replay; break; case RTM_DELACTION: ret = tca_action_gd(net, tca[TCA_ACT_TAB], n, - portid, RTM_DELACTION); + portid, RTM_DELACTION, extack); break; case RTM_GETACTION: ret = tca_action_gd(net, tca[TCA_ACT_TAB], n, - portid, RTM_GETACTION); + portid, RTM_GETACTION, extack); break; default: BUG(); @@@ -1297,7 -1220,7 +1299,7 @@@ static int tc_dump_action(struct sk_buf if (nest == NULL) goto out_module_put;
- ret = a_o->walk(net, skb, cb, RTM_GETACTION, a_o); + ret = a_o->walk(net, skb, cb, RTM_GETACTION, a_o, NULL); if (ret < 0) goto out_module_put;
diff --combined net/smc/smc_clc.c index 64fbc3230e6c,15c213250606..3a988c22f627 --- a/net/smc/smc_clc.c +++ b/net/smc/smc_clc.c @@@ -5,17 -5,15 +5,17 @@@ * CLC (connection layer control) handshake over initial TCP socket to * prepare for RDMA traffic * - * Copyright IBM Corp. 2016 + * Copyright IBM Corp. 2016, 2018 * * Author(s): Ursula Braun ubraun@linux.vnet.ibm.com */
#include <linux/in.h> +#include <linux/inetdevice.h> #include <linux/if_ether.h> #include <linux/sched/signal.h>
+#include <net/addrconf.h> #include <net/sock.h> #include <net/tcp.h>
@@@ -24,9 -22,6 +24,9 @@@ #include "smc_clc.h" #include "smc_ib.h"
+/* eye catcher "SMCR" EBCDIC for CLC messages */ +static const char SMC_EYECATCHER[4] = {'\xe2', '\xd4', '\xc3', '\xd9'}; + /* check if received message has a correct header length and contains valid * heading and trailing eyecatchers */ @@@ -75,172 -70,6 +75,172 @@@ static bool smc_clc_msg_hdr_valid(struc return true; }
+/* find ipv4 addr on device and get the prefix len, fill CLC proposal msg */ +static int smc_clc_prfx_set4_rcu(struct dst_entry *dst, __be32 ipv4, + struct smc_clc_msg_proposal_prefix *prop) +{ + struct in_device *in_dev = __in_dev_get_rcu(dst->dev); + + if (!in_dev) + return -ENODEV; + for_ifa(in_dev) { + if (!inet_ifa_match(ipv4, ifa)) + continue; + prop->prefix_len = inet_mask_len(ifa->ifa_mask); + prop->outgoing_subnet = ifa->ifa_address & ifa->ifa_mask; + /* prop->ipv6_prefixes_cnt = 0; already done by memset before */ + return 0; + } endfor_ifa(in_dev); + return -ENOENT; +} + +/* fill CLC proposal msg with ipv6 prefixes from device */ +static int smc_clc_prfx_set6_rcu(struct dst_entry *dst, + struct smc_clc_msg_proposal_prefix *prop, + struct smc_clc_ipv6_prefix *ipv6_prfx) +{ +#if IS_ENABLED(CONFIG_IPV6) + struct inet6_dev *in6_dev = __in6_dev_get(dst->dev); + struct inet6_ifaddr *ifa; + int cnt = 0; + + if (!in6_dev) + return -ENODEV; + /* use a maximum of 8 IPv6 prefixes from device */ + list_for_each_entry(ifa, &in6_dev->addr_list, if_list) { + if (ipv6_addr_type(&ifa->addr) & IPV6_ADDR_LINKLOCAL) + continue; + ipv6_addr_prefix(&ipv6_prfx[cnt].prefix, + &ifa->addr, ifa->prefix_len); + ipv6_prfx[cnt].prefix_len = ifa->prefix_len; + cnt++; + if (cnt == SMC_CLC_MAX_V6_PREFIX) + break; + } + prop->ipv6_prefixes_cnt = cnt; + if (cnt) + return 0; +#endif + return -ENOENT; +} + +/* retrieve and set prefixes in CLC proposal msg */ +static int smc_clc_prfx_set(struct socket *clcsock, + struct smc_clc_msg_proposal_prefix *prop, + struct smc_clc_ipv6_prefix *ipv6_prfx) +{ + struct dst_entry *dst = sk_dst_get(clcsock->sk); + struct sockaddr_storage addrs; + struct sockaddr_in6 *addr6; + struct sockaddr_in *addr; + int rc = -ENOENT; + + memset(prop, 0, sizeof(*prop)); + if (!dst) { + rc = -ENOTCONN; + goto out; + } + if (!dst->dev) { + rc = -ENODEV; + goto out_rel; + } + /* get address to which the internal TCP socket is bound */ + kernel_getsockname(clcsock, (struct sockaddr *)&addrs); + /* analyze IP specific data of net_device belonging to TCP socket */ + addr6 = (struct sockaddr_in6 *)&addrs; + rcu_read_lock(); + if (addrs.ss_family == PF_INET) { + /* IPv4 */ + addr = (struct sockaddr_in *)&addrs; + rc = smc_clc_prfx_set4_rcu(dst, addr->sin_addr.s_addr, prop); + } else if (ipv6_addr_v4mapped(&addr6->sin6_addr)) { + /* mapped IPv4 address - peer is IPv4 only */ + rc = smc_clc_prfx_set4_rcu(dst, addr6->sin6_addr.s6_addr32[3], + prop); + } else { + /* IPv6 */ + rc = smc_clc_prfx_set6_rcu(dst, prop, ipv6_prfx); + } + rcu_read_unlock(); +out_rel: + dst_release(dst); +out: + return rc; +} + +/* match ipv4 addrs of dev against addr in CLC proposal */ +static int smc_clc_prfx_match4_rcu(struct net_device *dev, + struct smc_clc_msg_proposal_prefix *prop) +{ + struct in_device *in_dev = __in_dev_get_rcu(dev); + + if (!in_dev) + return -ENODEV; + for_ifa(in_dev) { + if (prop->prefix_len == inet_mask_len(ifa->ifa_mask) && + inet_ifa_match(prop->outgoing_subnet, ifa)) + return 0; + } endfor_ifa(in_dev); + + return -ENOENT; +} + +/* match ipv6 addrs of dev against addrs in CLC proposal */ +static int smc_clc_prfx_match6_rcu(struct net_device *dev, + struct smc_clc_msg_proposal_prefix *prop) +{ +#if IS_ENABLED(CONFIG_IPV6) + struct inet6_dev *in6_dev = __in6_dev_get(dev); + struct smc_clc_ipv6_prefix *ipv6_prfx; + struct inet6_ifaddr *ifa; + int i, max; + + if (!in6_dev) + return -ENODEV; + /* ipv6 prefix list starts behind smc_clc_msg_proposal_prefix */ + ipv6_prfx = (struct smc_clc_ipv6_prefix *)((u8 
*)prop + sizeof(*prop)); + max = min_t(u8, prop->ipv6_prefixes_cnt, SMC_CLC_MAX_V6_PREFIX); + list_for_each_entry(ifa, &in6_dev->addr_list, if_list) { + if (ipv6_addr_type(&ifa->addr) & IPV6_ADDR_LINKLOCAL) + continue; + for (i = 0; i < max; i++) { + if (ifa->prefix_len == ipv6_prfx[i].prefix_len && + ipv6_prefix_equal(&ifa->addr, &ipv6_prfx[i].prefix, + ifa->prefix_len)) + return 0; + } + } +#endif + return -ENOENT; +} + +/* check if proposed prefixes match one of our device prefixes */ +int smc_clc_prfx_match(struct socket *clcsock, + struct smc_clc_msg_proposal_prefix *prop) +{ + struct dst_entry *dst = sk_dst_get(clcsock->sk); + int rc; + + if (!dst) { + rc = -ENOTCONN; + goto out; + } + if (!dst->dev) { + rc = -ENODEV; + goto out_rel; + } + rcu_read_lock(); + if (!prop->ipv6_prefixes_cnt) + rc = smc_clc_prfx_match4_rcu(dst->dev, prop); + else + rc = smc_clc_prfx_match6_rcu(dst->dev, prop); + rcu_read_unlock(); +out_rel: + dst_release(dst); +out: + return rc; +} + /* Wait for data on the tcp-socket, analyze received data * Returns: * 0 if success and it was not a decline that we received. @@@ -304,7 -133,7 +304,7 @@@ int smc_clc_wait_msg(struct smc_sock *s
/* receive the complete CLC message */ memset(&msg, 0, sizeof(struct msghdr)); - iov_iter_kvec(&msg.msg_iter, READ | ITER_KVEC, &vec, 1, buflen); + iov_iter_kvec(&msg.msg_iter, READ | ITER_KVEC, &vec, 1, datlen); krflags = MSG_WAITALL; smc->clcsock->sk->sk_rcvtimeo = CLC_WAIT_TIME; len = sock_recvmsg(smc->clcsock, &msg, krflags); @@@ -360,24 -189,16 +360,24 @@@ int smc_clc_send_proposal(struct smc_so struct smc_ib_device *smcibdev, u8 ibport) { + struct smc_clc_ipv6_prefix ipv6_prfx[SMC_CLC_MAX_V6_PREFIX]; struct smc_clc_msg_proposal_prefix pclc_prfx; struct smc_clc_msg_proposal pclc; struct smc_clc_msg_trail trl; + int len, i, plen, rc; int reason_code = 0; - struct kvec vec[3]; + struct kvec vec[4]; struct msghdr msg; - int len, plen, rc; + + /* retrieve ip prefixes for CLC proposal msg */ + rc = smc_clc_prfx_set(smc->clcsock, &pclc_prfx, ipv6_prfx); + if (rc) + return SMC_CLC_DECL_CNFERR; /* configuration error */
/* send SMC Proposal CLC message */ - plen = sizeof(pclc) + sizeof(pclc_prfx) + sizeof(trl); + plen = sizeof(pclc) + sizeof(pclc_prfx) + + (pclc_prfx.ipv6_prefixes_cnt * sizeof(ipv6_prfx[0])) + + sizeof(trl); memset(&pclc, 0, sizeof(pclc)); memcpy(pclc.hdr.eyecatcher, SMC_EYECATCHER, sizeof(SMC_EYECATCHER)); pclc.hdr.type = SMC_CLC_PROPOSAL; @@@ -388,22 -209,23 +388,22 @@@ memcpy(&pclc.lcl.mac, &smcibdev->mac[ibport - 1], ETH_ALEN); pclc.iparea_offset = htons(0);
- memset(&pclc_prfx, 0, sizeof(pclc_prfx)); - /* determine subnet and mask from internal TCP socket */ - rc = smc_netinfo_by_tcpsk(smc->clcsock, &pclc_prfx.outgoing_subnet, - &pclc_prfx.prefix_len); - if (rc) - return SMC_CLC_DECL_CNFERR; /* configuration error */ - pclc_prfx.ipv6_prefixes_cnt = 0; memcpy(trl.eyecatcher, SMC_EYECATCHER, sizeof(SMC_EYECATCHER)); memset(&msg, 0, sizeof(msg)); - vec[0].iov_base = &pclc; - vec[0].iov_len = sizeof(pclc); - vec[1].iov_base = &pclc_prfx; - vec[1].iov_len = sizeof(pclc_prfx); - vec[2].iov_base = &trl; - vec[2].iov_len = sizeof(trl); + i = 0; + vec[i].iov_base = &pclc; + vec[i++].iov_len = sizeof(pclc); + vec[i].iov_base = &pclc_prfx; + vec[i++].iov_len = sizeof(pclc_prfx); + if (pclc_prfx.ipv6_prefixes_cnt > 0) { + vec[i].iov_base = &ipv6_prfx[0]; + vec[i++].iov_len = pclc_prfx.ipv6_prefixes_cnt * + sizeof(ipv6_prfx[0]); + } + vec[i].iov_base = &trl; + vec[i++].iov_len = sizeof(trl); /* due to the few bytes needed for clc-handshake this cannot block */ - len = kernel_sendmsg(smc->clcsock, &msg, vec, 3, plen); + len = kernel_sendmsg(smc->clcsock, &msg, vec, i, plen); if (len < sizeof(pclc)) { if (len >= 0) { reason_code = -ENETUNREACH; diff --combined net/xfrm/xfrm_input.c index 44fc54dc013c,81788105c164..352abca2605f --- a/net/xfrm/xfrm_input.c +++ b/net/xfrm/xfrm_input.c @@@ -9,7 -9,6 +9,7 @@@ */
#include <linux/bottom_half.h> +#include <linux/cache.h> #include <linux/interrupt.h> #include <linux/slab.h> #include <linux/module.h> @@@ -27,12 -26,18 +27,18 @@@ struct xfrm_trans_tasklet };
struct xfrm_trans_cb { + union { + struct inet_skb_parm h4; + #if IS_ENABLED(CONFIG_IPV6) + struct inet6_skb_parm h6; + #endif + } header; int (*finish)(struct net *net, struct sock *sk, struct sk_buff *skb); };
#define XFRM_TRANS_SKB_CB(__skb) ((struct xfrm_trans_cb *)&((__skb)->cb[0]))
-static struct kmem_cache *secpath_cachep __read_mostly; +static struct kmem_cache *secpath_cachep __ro_after_init;
static DEFINE_SPINLOCK(xfrm_input_afinfo_lock); static struct xfrm_input_afinfo const __rcu *xfrm_input_afinfo[AF_INET6 + 1];