The following commit has been merged in the master branch:

commit 97c78d0af55fff206947a5f2b85b690b5acf28ce
Merge: deecae7d96843fceebae06445b3f4bf8cceca31a 73367f05b25dbd064061aee780638564d15b01d1
Author: Jakub Kicinski <kuba@kernel.org>
Date:   Thu Aug 26 13:45:47 2021 -0700
Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net
drivers/net/wwan/mhi_wwan_mbim.c - drop the extra arg.
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
diff --combined MAINTAINERS index 06e39d3eba93,d7b4f32875a9..6abfd3e36c31 --- a/MAINTAINERS +++ b/MAINTAINERS @@@ -3197,7 -3197,7 +3197,7 @@@ S: Maintaine W: https://www.open-mesh.org/ Q: https://patchwork.open-mesh.org/project/batman/list/ B: https://www.open-mesh.org/projects/batman-adv/issues -C: irc://chat.freenode.net/batman +C: ircs://irc.hackint.org/batadv T: git https://git.open-mesh.org/linux-merge.git F: Documentation/networking/batman-adv.rst F: include/uapi/linux/batadv_packet.h @@@ -3866,6 -3866,16 +3866,16 @@@ L: bcm-kernel-feedback-list@broadcom.co S: Maintained F: drivers/mtd/nand/raw/brcmnand/
+ BROADCOM STB PCIE DRIVER + M: Jim Quinlan <jim2101024@gmail.com> + M: Nicolas Saenz Julienne <nsaenz@kernel.org> + M: Florian Fainelli <f.fainelli@gmail.com> + M: bcm-kernel-feedback-list@broadcom.com + L: linux-pci@vger.kernel.org + S: Maintained + F: Documentation/devicetree/bindings/pci/brcm,stb-pcie.yaml + F: drivers/pci/controller/pcie-brcmstb.c + BROADCOM SYSTEMPORT ETHERNET DRIVER M: Florian Fainelli <f.fainelli@gmail.com> L: bcm-kernel-feedback-list@broadcom.com @@@ -4498,7 -4508,7 +4508,7 @@@ L: clang-built-linux@googlegroups.co S: Supported W: https://clangbuiltlinux.github.io/ B: https://github.com/ClangBuiltLinux/linux/issues - C: irc://chat.freenode.net/clangbuiltlinux + C: irc://irc.libera.chat/clangbuiltlinux F: Documentation/kbuild/llvm.rst F: include/linux/compiler-clang.h F: scripts/clang-tools/ @@@ -5684,7 -5694,6 +5694,7 @@@ DPAA2 ETHERNET SWITCH DRIVE M: Ioana Ciornei <ioana.ciornei@nxp.com> L: netdev@vger.kernel.org S: Maintained +F: Documentation/networking/device_drivers/ethernet/freescale/dpaa2/switch-driver.rst F: drivers/net/ethernet/freescale/dpaa2/dpaa2-switch* F: drivers/net/ethernet/freescale/dpaa2/dpsw*
@@@ -6905,12 -6914,6 +6915,12 @@@ M: Mark Einon <mark.einon@gmail.com S: Odd Fixes F: drivers/net/ethernet/agere/
+ETAS ES58X CAN/USB DRIVER +M: Vincent Mailhol <mailhol.vincent@wanadoo.fr> +L: linux-can@vger.kernel.org +S: Maintained +F: drivers/net/can/usb/etas_es58x/ + ETHERNET BRIDGE M: Roopa Prabhu <roopa@nvidia.com> M: Nikolay Aleksandrov <nikolay@nvidia.com> @@@ -6952,7 -6955,7 +6962,7 @@@ F: include/uapi/linux/mdio. F: include/uapi/linux/mii.h
EXFAT FILE SYSTEM - M: Namjae Jeon <namjae.jeon@samsung.com> + M: Namjae Jeon <linkinjeon@kernel.org> M: Sungjong Seo <sj1557.seo@samsung.com> L: linux-fsdevel@vger.kernel.org S: Maintained @@@ -9756,6 -9759,11 +9766,6 @@@ M: David Sterba <dsterba@suse.com S: Odd Fixes F: drivers/tty/ipwireless/
-IPX NETWORK LAYER -L: netdev@vger.kernel.org -S: Obsolete -F: include/uapi/linux/ipx.h - IRQ DOMAINS (IRQ NUMBER MAPPING LIBRARY) M: Marc Zyngier <maz@kernel.org> S: Maintained @@@ -10390,7 -10398,6 +10400,7 @@@ F: net/core/skmsg. F: net/core/sock_map.c F: net/ipv4/tcp_bpf.c F: net/ipv4/udp_bpf.c +F: net/unix/unix_bpf.c
LANDLOCK SECURITY MODULE M: Mickaël Salaün <mic@digikod.net> @@@ -11033,18 -11040,6 +11043,18 @@@ F: drivers/mailbox/arm_mhuv2. F: include/linux/mailbox/arm_mhuv2_message.h F: Documentation/devicetree/bindings/mailbox/arm,mhuv2.yaml
+MANAGEMENT COMPONENT TRANSPORT PROTOCOL (MCTP) +M: Jeremy Kerr <jk@codeconstruct.com.au> +M: Matt Johnston <matt@codeconstruct.com.au> +L: netdev@vger.kernel.org +S: Maintained +F: Documentation/networking/mctp.rst +F: drivers/net/mctp/ +F: include/net/mctp.h +F: include/net/mctpdevice.h +F: include/net/netns/mctp.h +F: net/mctp/ + MAN-PAGES: MANUAL PAGES FOR LINUX -- Sections 2, 3, 4, 5, and 7 M: Michael Kerrisk <mtk.manpages@gmail.com> L: linux-man@vger.kernel.org @@@ -11342,12 -11337,6 +11352,12 @@@ W: https://linuxtv.or T: git git://linuxtv.org/media_tree.git F: drivers/media/radio/radio-maxiradio*
+MAXLINEAR ETHERNET PHY DRIVER +M: Xu Liang <lxu@maxlinear.com> +L: netdev@vger.kernel.org +S: Supported +F: drivers/net/phy/mxl-gpy.c + MCBA MICROCHIP CAN BUS ANALYZER TOOL DRIVER R: Yasushi SHOJI <yashi@spacecubics.com> L: linux-can@vger.kernel.org @@@ -13891,12 -13880,6 +13901,12 @@@ F: Documentation/devicetree F: arch/*/boot/dts/ F: include/dt-bindings/
+OPENCOMPUTE PTP CLOCK DRIVER +M: Jonathan Lemon <jonathan.lemon@gmail.com> +L: netdev@vger.kernel.org +S: Maintained +F: drivers/ptp/ptp_ocp.c + OPENCORES I2C BUS DRIVER M: Peter Korsgaard <peter@korsgaard.com> M: Andrew Lunn <andrew@lunn.ch> @@@ -14457,6 -14440,13 +14467,13 @@@ S: Maintaine F: Documentation/devicetree/bindings/pci/hisilicon-histb-pcie.txt F: drivers/pci/controller/dwc/pcie-histb.c
+ PCIE DRIVER FOR INTEL LGM GW SOC + M: Rahul Tanwar <rtanwar@maxlinear.com> + L: linux-pci@vger.kernel.org + S: Maintained + F: Documentation/devicetree/bindings/pci/intel-gw-pcie.yaml + F: drivers/pci/controller/dwc/pcie-intel-gw.c + PCIE DRIVER FOR MEDIATEK M: Ryder Lee <ryder.lee@mediatek.com> M: Jianjun Wang <jianjun.wang@mediatek.com> @@@ -14953,6 -14943,13 +14970,6 @@@ S: Maintaine F: include/linux/printk.h F: kernel/printk/
-PRISM54 WIRELESS DRIVER -M: Luis Chamberlain <mcgrof@kernel.org> -L: linux-wireless@vger.kernel.org -S: Obsolete -W: https://wireless.wiki.kernel.org/en/users/Drivers/p54 -F: drivers/net/wireless/intersil/prism54/ - PROC FILESYSTEM L: linux-kernel@vger.kernel.org L: linux-fsdevel@vger.kernel.org diff --combined drivers/infiniband/hw/mlx5/main.c index ae05e143401c,2507051f7b89..466f0a521940 --- a/drivers/infiniband/hw/mlx5/main.c +++ b/drivers/infiniband/hw/mlx5/main.c @@@ -126,7 -126,6 +126,7 @@@ static int get_port_state(struct ib_dev
static struct mlx5_roce *mlx5_get_rep_roce(struct mlx5_ib_dev *dev, struct net_device *ndev, + struct net_device *upper, u32 *port_num) { struct net_device *rep_ndev; @@@ -138,14 -137,6 +138,14 @@@ if (!port->rep) continue;
+ if (upper == ndev && port->rep->vport == MLX5_VPORT_UPLINK) { + *port_num = i + 1; + return &port->roce; + } + + if (upper && port->rep->vport == MLX5_VPORT_UPLINK) + continue; + read_lock(&port->roce.netdev_lock); rep_ndev = mlx5_ib_get_rep_netdev(port->rep->esw, port->rep->vport); @@@ -205,12 -196,11 +205,12 @@@ static int mlx5_netdev_event(struct not }
if (ibdev->is_rep) - roce = mlx5_get_rep_roce(ibdev, ndev, &port_num); + roce = mlx5_get_rep_roce(ibdev, ndev, upper, &port_num); if (!roce) return NOTIFY_DONE; - if ((upper == ndev || (!upper && ndev == roce->netdev)) - && ibdev->ib_active) { + if ((upper == ndev || + ((!upper || ibdev->is_rep) && ndev == roce->netdev)) && + ibdev->ib_active) { struct ib_event ibev = { }; enum ib_port_state port_state;
@@@ -3022,7 -3012,7 +3022,7 @@@ static int mlx5_eth_lag_init(struct mlx struct mlx5_flow_table *ft; int err;
- if (!ns || !mlx5_lag_is_roce(mdev)) + if (!ns || !mlx5_lag_is_active(mdev)) return 0;
err = mlx5_cmd_create_vport_lag(mdev); @@@ -3084,11 -3074,9 +3084,11 @@@ static int mlx5_enable_eth(struct mlx5_ { int err;
- err = mlx5_nic_vport_enable_roce(dev->mdev); - if (err) - return err; + if (!dev->is_rep && dev->profile != &raw_eth_profile) { + err = mlx5_nic_vport_enable_roce(dev->mdev); + if (err) + return err; + }
err = mlx5_eth_lag_init(dev); if (err) @@@ -3097,8 -3085,7 +3097,8 @@@ return 0;
err_disable_roce: - mlx5_nic_vport_disable_roce(dev->mdev); + if (!dev->is_rep && dev->profile != &raw_eth_profile) + mlx5_nic_vport_disable_roce(dev->mdev);
return err; } @@@ -3106,8 -3093,7 +3106,8 @@@ static void mlx5_disable_eth(struct mlx5_ib_dev *dev) { mlx5_eth_lag_cleanup(dev); - mlx5_nic_vport_disable_roce(dev->mdev); + if (!dev->is_rep && dev->profile != &raw_eth_profile) + mlx5_nic_vport_disable_roce(dev->mdev); }
static int mlx5_ib_rn_get_params(struct ib_device *device, u32 port_num, @@@ -3964,7 -3950,12 +3964,7 @@@ static int mlx5_ib_roce_init(struct mlx
/* Register only for native ports */ err = mlx5_add_netdev_notifier(dev, port_num); - if (err || dev->is_rep || !mlx5_is_roce_init_enabled(mdev)) - /* - * We don't enable ETH interface for - * 1. IB representors - * 2. User disabled ROCE through devlink interface - */ + if (err) return err;
err = mlx5_enable_eth(dev); @@@ -3989,7 -3980,8 +3989,7 @@@ static void mlx5_ib_roce_cleanup(struc ll = mlx5_port_type_cap_to_rdma_ll(port_type_cap);
if (ll == IB_LINK_LAYER_ETHERNET) { - if (!dev->is_rep) - mlx5_disable_eth(dev); + mlx5_disable_eth(dev);
port_num = mlx5_core_native_port_num(dev->mdev) - 1; mlx5_remove_netdev_notifier(dev, port_num); @@@ -4045,7 -4037,7 +4045,7 @@@ static int mlx5_ib_stage_ib_reg_init(st { const char *name;
- if (!mlx5_lag_is_roce(dev->mdev)) + if (!mlx5_lag_is_active(dev->mdev)) name = "mlx5_%d"; else name = "mlx5_bond_%d"; @@@ -4462,7 -4454,8 +4462,8 @@@ static void mlx5r_mp_remove(struct auxi mutex_lock(&mlx5_ib_multiport_mutex); if (mpi->ibdev) mlx5_ib_unbind_slave_port(mpi->ibdev, mpi); - list_del(&mpi->list); + else + list_del(&mpi->list); mutex_unlock(&mlx5_ib_multiport_mutex); kfree(mpi); } diff --combined drivers/net/can/usb/esd_usb2.c index 7370981e9b34,95ae740fc311..c6068a251fbe --- a/drivers/net/can/usb/esd_usb2.c +++ b/drivers/net/can/usb/esd_usb2.c @@@ -224,8 -224,8 +224,8 @@@ static void esd_usb2_rx_event(struct es if (id == ESD_EV_CAN_ERROR_EXT) { u8 state = msg->msg.rx.data[0]; u8 ecc = msg->msg.rx.data[1]; - u8 txerr = msg->msg.rx.data[2]; - u8 rxerr = msg->msg.rx.data[3]; + u8 rxerr = msg->msg.rx.data[2]; + u8 txerr = msg->msg.rx.data[3];
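The esd_usb2 hunk above swaps the byte offsets because the device reports the RX error counter in data[2] and the TX error counter in data[3], the opposite of what the driver assumed. A hedged sketch of why the order matters, using the common CAN error-frame convention (field placement assumed here, not copied from this driver):

	/* CAN drivers conventionally propagate the bus-error counters into
	 * bytes 6 and 7 of the generated error frame, so swapped reads
	 * misreport which side of the link is unhealthy.
	 */
	cf->data[6] = txerr;	/* TX error counter */
	cf->data[7] = rxerr;	/* RX error counter */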
skb = alloc_can_err_skb(priv->netdev, &cf); if (skb == NULL) { @@@ -476,7 -476,7 +476,7 @@@ static void esd_usb2_write_bulk_callbac netif_trans_update(netdev); }
-static ssize_t show_firmware(struct device *d, +static ssize_t firmware_show(struct device *d, struct device_attribute *attr, char *buf) { struct usb_interface *intf = to_usb_interface(d); @@@ -487,9 -487,9 +487,9 @@@ (dev->version >> 8) & 0xf, dev->version & 0xff); } -static DEVICE_ATTR(firmware, 0444, show_firmware, NULL); +static DEVICE_ATTR_RO(firmware);
-static ssize_t show_hardware(struct device *d, +static ssize_t hardware_show(struct device *d, struct device_attribute *attr, char *buf) { struct usb_interface *intf = to_usb_interface(d); @@@ -500,9 -500,9 +500,9 @@@ (dev->version >> 24) & 0xf, (dev->version >> 16) & 0xff); } -static DEVICE_ATTR(hardware, 0444, show_hardware, NULL); +static DEVICE_ATTR_RO(hardware);
-static ssize_t show_nets(struct device *d, +static ssize_t nets_show(struct device *d, struct device_attribute *attr, char *buf) { struct usb_interface *intf = to_usb_interface(d); @@@ -510,7 -510,7 +510,7 @@@
return sprintf(buf, "%d", dev->net_count); } -static DEVICE_ATTR(nets, 0444, show_nets, NULL); +static DEVICE_ATTR_RO(nets);
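The three sysfs hunks above follow the standard conversion to DEVICE_ATTR_RO(): the macro derives the attribute's mode (0444) and show callback from its name, which is why each show_foo() is renamed to foo_show(). A minimal sketch of the pattern, with a hypothetical attribute called "example":

	/* DEVICE_ATTR_RO(example) expands to dev_attr_example and wires up
	 * example_show() with 0444 permissions; no explicit mode or NULL
	 * store pointer is needed.
	 */
	static ssize_t example_show(struct device *d,
				    struct device_attribute *attr, char *buf)
	{
		return sprintf(buf, "%d\n", 42);	/* placeholder value */
	}
	static DEVICE_ATTR_RO(example);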
static int esd_usb2_send_msg(struct esd_usb2 *dev, struct esd_usb2_msg *msg) { diff --combined drivers/net/dsa/hirschmann/hellcreek.c index 3faff95fd49f,7062db6a083c..542cfc4ccb08 --- a/drivers/net/dsa/hirschmann/hellcreek.c +++ b/drivers/net/dsa/hirschmann/hellcreek.c @@@ -1345,7 -1345,6 +1345,7 @@@ static int hellcreek_setup(struct dsa_s * filtering setups are not supported. */ ds->vlan_filtering_is_global = true; + ds->needs_standalone_vlan_filtering = true;
/* Intercept _all_ PTP multicast traffic */ ret = hellcreek_setup_fdb(hellcreek); @@@ -1473,9 -1472,6 +1473,6 @@@ static void hellcreek_setup_gcl(struct u16 data; u8 gates;
- cur++; - next++; - if (i == schedule->num_entries) gates = initial->gate_mask ^ cur->gate_mask; @@@ -1504,6 -1500,9 +1501,9 @@@ (initial->gate_mask << TR_GCLCMD_INIT_GATE_STATES_SHIFT); hellcreek_write(hellcreek, data, TR_GCLCMD); + + cur++; + next++; } }
@@@ -1551,7 -1550,7 +1551,7 @@@ static bool hellcreek_schedule_startabl /* Calculate difference to admin base time */ base_time_ns = ktime_to_ns(hellcreek_port->current_schedule->base_time);
- return base_time_ns - current_ns < (s64)8 * NSEC_PER_SEC; + return base_time_ns - current_ns < (s64)4 * NSEC_PER_SEC; }
static void hellcreek_start_schedule(struct hellcreek *hellcreek, int port) diff --combined drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c index efa6c98d7459,710cb00ce3a3..0d9cda4ab303 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c @@@ -3872,7 -3872,7 +3872,7 @@@ static const struct net_device_ops cxgb .ndo_set_mac_address = cxgb_set_mac_addr, .ndo_set_features = cxgb_set_features, .ndo_validate_addr = eth_validate_addr, - .ndo_do_ioctl = cxgb_ioctl, + .ndo_eth_ioctl = cxgb_ioctl, .ndo_change_mtu = cxgb_change_mtu, #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_poll_controller = cxgb_netpoll, @@@ -4008,7 -4008,7 +4008,7 @@@ static void adap_free_hma_mem(struct ad
if (adapter->hma.flags & HMA_DMA_MAPPED_FLAG) { dma_unmap_sg(adapter->pdev_dev, adapter->hma.sgt->sgl, - adapter->hma.sgt->nents, PCI_DMA_BIDIRECTIONAL); + adapter->hma.sgt->nents, DMA_BIDIRECTIONAL); adapter->hma.flags &= ~HMA_DMA_MAPPED_FLAG; }
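This hunk and the pci_set_dma_mask() conversion further below are parts of the same migration from the deprecated PCI DMA wrappers to the generic DMA API. A minimal sketch of the modern idiom (the error path is illustrative only):

	/* dma_set_mask_and_coherent() sets the streaming and coherent masks
	 * in a single call and returns 0 on success, replacing the old
	 * pci_set_dma_mask()/pci_set_consistent_dma_mask() pair.
	 */
	if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) &&
	    dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)))
		return -EIO;	/* no usable DMA configuration */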
@@@ -5068,6 -5068,7 +5068,7 @@@ static int adap_init0(struct adapter *a ret = -ENOMEM; goto bye; } + bitmap_zero(adap->sge.blocked_fl, adap->sge.egr_sz); #endif
params[0] = FW_PARAM_PFVF(CLIP_START); @@@ -6162,7 -6163,8 +6163,7 @@@ static void print_port_info(const struc --bufp; sprintf(bufp, "BASE-%s", t4_get_port_type_description(pi->port_type));
- netdev_info(dev, "%s: Chelsio %s (%s) %s\n", - dev->name, adap->params.vpd.id, adap->name, buf); + netdev_info(dev, "Chelsio %s %s\n", adap->params.vpd.id, buf); }
/* @@@ -6686,10 -6688,16 +6687,10 @@@ static int init_one(struct pci_dev *pde return 0; }
- if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) { + if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) { highdma = true; - err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); - if (err) { - dev_err(&pdev->dev, "unable to obtain 64-bit DMA for " - "coherent allocations\n"); - goto out_free_adapter; - } } else { - err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); + err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)); if (err) { dev_err(&pdev->dev, "no usable DMA configuration\n"); goto out_free_adapter; @@@ -6781,13 -6789,11 +6782,11 @@@
setup_memwin(adapter); err = adap_init0(adapter, 0); - #ifdef CONFIG_DEBUG_FS - bitmap_zero(adapter->sge.blocked_fl, adapter->sge.egr_sz); - #endif - setup_memwin_rdma(adapter); if (err) goto out_unmap_bar;
+ setup_memwin_rdma(adapter); + /* configure SGE_STAT_CFG_A to read WC stats */ if (!is_t4(adapter->params.chip)) t4_write_reg(adapter, SGE_STAT_CFG_A, STATSOURCE_T5_V(7) | diff --combined drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c index 13042f1cac6f,eb748aa35952..444c46241afc --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c @@@ -169,19 -169,17 +169,19 @@@ static bool hclge_is_special_opcode(u1 /* these commands have several descriptors, * and use the first one to save opcode and return value */ - u16 spec_opcode[] = {HCLGE_OPC_STATS_64_BIT, - HCLGE_OPC_STATS_32_BIT, - HCLGE_OPC_STATS_MAC, - HCLGE_OPC_STATS_MAC_ALL, - HCLGE_OPC_QUERY_32_BIT_REG, - HCLGE_OPC_QUERY_64_BIT_REG, - HCLGE_QUERY_CLEAR_MPF_RAS_INT, - HCLGE_QUERY_CLEAR_PF_RAS_INT, - HCLGE_QUERY_CLEAR_ALL_MPF_MSIX_INT, - HCLGE_QUERY_CLEAR_ALL_PF_MSIX_INT, - HCLGE_QUERY_ALL_ERR_INFO}; + static const u16 spec_opcode[] = { + HCLGE_OPC_STATS_64_BIT, + HCLGE_OPC_STATS_32_BIT, + HCLGE_OPC_STATS_MAC, + HCLGE_OPC_STATS_MAC_ALL, + HCLGE_OPC_QUERY_32_BIT_REG, + HCLGE_OPC_QUERY_64_BIT_REG, + HCLGE_QUERY_CLEAR_MPF_RAS_INT, + HCLGE_QUERY_CLEAR_PF_RAS_INT, + HCLGE_QUERY_CLEAR_ALL_MPF_MSIX_INT, + HCLGE_QUERY_CLEAR_ALL_PF_MSIX_INT, + HCLGE_QUERY_ALL_ERR_INFO + }; int i;
for (i = 0; i < ARRAY_SIZE(spec_opcode); i++) { @@@ -575,9 -573,13 +575,13 @@@ static void hclge_cmd_uninit_regs(struc
void hclge_cmd_uninit(struct hclge_dev *hdev) { + set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state); + /* wait to ensure that the firmware completes the possible left + * over commands. + */ + msleep(HCLGE_CMDQ_CLEAR_WAIT_TIME); spin_lock_bh(&hdev->hw.cmq.csq.lock); spin_lock(&hdev->hw.cmq.crq.lock); - set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state); hclge_cmd_uninit_regs(&hdev->hw); spin_unlock(&hdev->hw.cmq.crq.lock); spin_unlock_bh(&hdev->hw.cmq.csq.lock); diff --combined drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h index 8e5be127909b,ac70d49e205d..53872c7b2940 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h @@@ -9,6 -9,7 +9,7 @@@ #include "hnae3.h"
#define HCLGE_CMDQ_TX_TIMEOUT 30000 + #define HCLGE_CMDQ_CLEAR_WAIT_TIME 200 #define HCLGE_DESC_DATA_LEN 6
struct hclge_dev; @@@ -270,6 -271,9 +271,9 @@@ enum hclge_opcode_type /* Led command */ HCLGE_OPC_LED_STATUS_CFG = 0xB000,
+ /* clear hardware resource command */ + HCLGE_OPC_CLEAR_HW_RESOURCE = 0x700B, + /* NCL config command */ HCLGE_OPC_QUERY_NCL_CONFIG = 0x7011,
@@@ -316,9 -320,6 +320,9 @@@ /* PHY command */ HCLGE_OPC_PHY_LINK_KSETTING = 0x7025, HCLGE_OPC_PHY_REG = 0x7026, + + /* Query link diagnosis info command */ + HCLGE_OPC_QUERY_LINK_DIAGNOSIS = 0x702A, };
#define HCLGE_TQP_REG_OFFSET 0x80000 diff --combined drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c index f6882090d38e,03ae122f1c9a..1b6bb0d71fcb --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c @@@ -23,7 -23,6 +23,7 @@@ #include "hclge_tm.h" #include "hclge_err.h" #include "hnae3.h" +#include "hclge_devlink.h"
#define HCLGE_NAME "hclge" #define HCLGE_STATS_READ(p, offset) (*(u64 *)((u8 *)(p) + (offset))) @@@ -1551,6 -1550,7 +1551,7 @@@ static int hclge_configure(struct hclge hdev->tm_info.hw_pfc_map = 0; hdev->wanted_umv_size = cfg.umv_space; hdev->tx_spare_buf_size = cfg.tx_spare_buf_size; + hdev->gro_en = true; if (cfg.vlan_fliter_cap == HCLGE_VLAN_FLTR_CAN_MDF) set_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, ae_dev->caps);
@@@ -1619,7 -1619,7 +1620,7 @@@ static int hclge_config_tso(struct hclg return hclge_cmd_send(&hdev->hw, &desc, 1); }
- static int hclge_config_gro(struct hclge_dev *hdev, bool en) + static int hclge_config_gro(struct hclge_dev *hdev) { struct hclge_cfg_gro_status_cmd *req; struct hclge_desc desc; @@@ -1631,7 -1631,7 +1632,7 @@@ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GRO_GENERIC_CONFIG, false); req = (struct hclge_cfg_gro_status_cmd *)desc.data;
- req->gro_en = en ? 1 : 0; + req->gro_en = hdev->gro_en ? 1 : 0;
ret = hclge_cmd_send(&hdev->hw, &desc, 1); if (ret) @@@ -1814,7 -1814,6 +1815,7 @@@ static int hclge_vport_setup(struct hcl nic->pdev = hdev->pdev; nic->ae_algo = &ae_algo; nic->numa_node_mask = hdev->numa_node_mask; + nic->kinfo.io_base = hdev->hw.io_base;
ret = hclge_knic_setup(vport, num_tqps, hdev->num_tx_desc, hdev->num_rx_desc); @@@ -2954,12 -2953,12 +2955,12 @@@ static void hclge_update_link_status(st }
if (state != hdev->hw.mac.link) { + hdev->hw.mac.link = state; client->ops->link_status_change(handle, state); hclge_config_mac_tnl_int(hdev, state); if (rclient && rclient->ops->link_status_change) rclient->ops->link_status_change(rhandle, state);
- hdev->hw.mac.link = state; hclge_push_link_status(hdev); }
@@@ -3790,12 -3789,6 +3791,12 @@@ static void hclge_do_reset(struct hclge }
switch (hdev->reset_type) { + case HNAE3_IMP_RESET: + dev_info(&pdev->dev, "IMP reset requested\n"); + val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG); + hnae3_set_bit(val, HCLGE_TRIGGER_IMP_RESET_B, 1); + hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, val); + break; case HNAE3_GLOBAL_RESET: dev_info(&pdev->dev, "global reset requested\n"); val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG); @@@ -10081,7 -10074,11 +10082,11 @@@ static int hclge_init_vlan_config(struc static void hclge_add_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id, bool writen_to_tbl) { - struct hclge_vport_vlan_cfg *vlan; + struct hclge_vport_vlan_cfg *vlan, *tmp; + + list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) + if (vlan->vlan_id == vlan_id) + return;
vlan = kzalloc(sizeof(*vlan), GFP_KERNEL); if (!vlan) @@@ -11451,6 -11448,28 +11456,28 @@@ static void hclge_clear_resetting_state } }
+ static int hclge_clear_hw_resource(struct hclge_dev *hdev) + { + struct hclge_desc desc; + int ret; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CLEAR_HW_RESOURCE, false); + + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + /* This new command is only supported by new firmware, it will + * fail with older firmware. Error value -EOPNOTSUPP can only be + * returned by older firmware running this command, to keep code + * backward compatible we will override this value and return + * success. + */ + if (ret && ret != -EOPNOTSUPP) { + dev_err(&hdev->pdev->dev, + "failed to clear hw resource, ret = %d\n", ret); + return ret; + } + return 0; + } + static void hclge_init_rxd_adv_layout(struct hclge_dev *hdev) { if (hnae3_ae_dev_rxd_adv_layout_supported(hdev->ae_dev)) @@@ -11490,20 -11509,20 +11517,24 @@@ static int hclge_init_ae_dev(struct hna if (ret) goto out;
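The comment in hclge_clear_hw_resource() above captures a common firmware-compatibility pattern: the command is unknown to older firmware, so the one errno that old firmware can produce is deliberately treated as success. The pattern, sketched in isolation:

	/* Commands added in newer firmware fail with -EOPNOTSUPP on older
	 * firmware; treat that as success and propagate anything else.
	 */
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret && ret != -EOPNOTSUPP)
		return ret;	/* genuine failure */
	return 0;		/* done, or firmware predates the command */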
+ ret = hclge_devlink_init(hdev); + if (ret) + goto err_pci_uninit; + /* Firmware command queue initialize */ ret = hclge_cmd_queue_init(hdev); if (ret) - goto err_pci_uninit; + goto err_devlink_uninit;
/* Firmware command initialize */ ret = hclge_cmd_init(hdev); if (ret) goto err_cmd_uninit;
+ ret = hclge_clear_hw_resource(hdev); + if (ret) + goto err_cmd_uninit; + ret = hclge_get_cap(hdev); if (ret) goto err_cmd_uninit; @@@ -11568,7 -11587,7 +11599,7 @@@ goto err_mdiobus_unreg; }
- ret = hclge_config_gro(hdev, true); + ret = hclge_config_gro(hdev); if (ret) goto err_mdiobus_unreg;
@@@ -11670,8 -11689,6 +11701,8 @@@ err_msi_uninit pci_free_irq_vectors(pdev); err_cmd_uninit: hclge_cmd_uninit(hdev); +err_devlink_uninit: + hclge_devlink_uninit(hdev); err_pci_uninit: pcim_iounmap(pdev, hdev->hw.io_base); pci_clear_master(pdev); @@@ -11951,7 -11968,7 +11982,7 @@@ static int hclge_reset_ae_dev(struct hn return ret; }
- ret = hclge_config_gro(hdev, true); + ret = hclge_config_gro(hdev); if (ret) return ret;
@@@ -12062,7 -12079,6 +12093,7 @@@ static void hclge_uninit_ae_dev(struct
hclge_cmd_uninit(hdev); hclge_misc_irq_uninit(hdev); + hclge_devlink_uninit(hdev); hclge_pci_uninit(hdev); mutex_destroy(&hdev->vport_lock); hclge_uninit_vport_vlan_table(hdev); @@@ -12686,8 -12702,15 +12717,15 @@@ static int hclge_gro_en(struct hnae3_ha { struct hclge_vport *vport = hclge_get_vport(handle); struct hclge_dev *hdev = vport->back; + bool gro_en_old = hdev->gro_en; + int ret;
- return hclge_config_gro(hdev, enable); + hdev->gro_en = enable; + ret = hclge_config_gro(hdev); + if (ret) + hdev->gro_en = gro_en_old; + + return ret; }
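The reason gro_en moved into struct hclge_dev is visible above: reset paths now call hclge_config_gro() without a parameter and reapply the user's last choice instead of forcing GRO on, while the ethtool path caches the old value and rolls back on failure. The same cache-and-restore shape, sketched generically:

	/* Store the requested state first (the config helper reads it),
	 * then restore the old value if the firmware rejects the change,
	 * so software state never disagrees with the hardware.
	 */
	bool old = hdev->gro_en;

	hdev->gro_en = enable;
	ret = hclge_config_gro(hdev);
	if (ret)
		hdev->gro_en = old;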
static void hclge_sync_promisc_mode(struct hclge_dev *hdev) @@@ -12844,29 -12867,6 +12882,29 @@@ static int hclge_get_module_eeprom(stru return 0; }
+static int hclge_get_link_diagnosis_info(struct hnae3_handle *handle, + u32 *status_code) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + struct hclge_desc desc; + int ret; + + if (hdev->ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2) + return -EOPNOTSUPP; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_LINK_DIAGNOSIS, true); + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, + "failed to query link diagnosis info, ret = %d\n", ret); + return ret; + } + + *status_code = le32_to_cpu(desc.data[0]); + return 0; +} + static const struct hnae3_ae_ops hclge_ops = { .init_ae_dev = hclge_init_ae_dev, .uninit_ae_dev = hclge_uninit_ae_dev, @@@ -12967,7 -12967,6 +13005,7 @@@ .set_tx_hwts_info = hclge_ptp_set_tx_info, .get_rx_hwts = hclge_ptp_get_rx_hwts, .get_ts_info = hclge_ptp_get_ts_info, + .get_link_diagnosis_info = hclge_get_link_diagnosis_info, };
static struct hnae3_ae_algo ae_algo = { diff --combined drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h index ada5c68f2851,e446b839a371..b6c1153945e5 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h @@@ -8,7 -8,6 +8,7 @@@ #include <linux/phy.h> #include <linux/if_vlan.h> #include <linux/kfifo.h> +#include <net/devlink.h>
#include "hclge_cmd.h" #include "hclge_ptp.h" @@@ -194,7 -193,6 +194,7 @@@ enum HLCGE_PORT_TYPE #define HCLGE_VECTOR0_IMP_CMDQ_ERR_B 4U #define HCLGE_VECTOR0_IMP_RD_POISON_B 5U #define HCLGE_VECTOR0_ALL_MSIX_ERR_B 6U +#define HCLGE_TRIGGER_IMP_RESET_B 7U
#define HCLGE_MAC_DEFAULT_FRAME \ (ETH_HLEN + ETH_FCS_LEN + 2 * VLAN_HLEN + ETH_DATA_LEN) @@@ -929,6 -927,7 +929,7 @@@ struct hclge_dev unsigned long fd_bmap[BITS_TO_LONGS(MAX_FD_FILTER_NUM)]; enum HCLGE_FD_ACTIVE_RULE_TYPE fd_active_type; u8 fd_en; + bool gro_en;
u16 wanted_umv_size; /* max available unicast mac vlan space */ @@@ -945,7 -944,6 +946,7 @@@ cpumask_t affinity_mask; struct irq_affinity_notify affinity_notify; struct hclge_ptp *ptp; + struct devlink *devlink; };
/* VPort level vlan tag configuration for TX direction */ diff --combined drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c index ff651739f16b,938654778979..60588b194fe7 --- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c @@@ -8,7 -8,6 +8,7 @@@ #include "hclgevf_main.h" #include "hclge_mbx.h" #include "hnae3.h" +#include "hclgevf_devlink.h"
#define HCLGEVF_NAME "hclgevf"
@@@ -507,10 -506,10 +507,10 @@@ void hclgevf_update_link_status(struct link_state = test_bit(HCLGEVF_STATE_DOWN, &hdev->state) ? 0 : link_state; if (link_state != hdev->hw.mac.link) { + hdev->hw.mac.link = link_state; client->ops->link_status_change(handle, !!link_state); if (rclient && rclient->ops->link_status_change) rclient->ops->link_status_change(rhandle, !!link_state); - hdev->hw.mac.link = link_state; }
clear_bit(HCLGEVF_STATE_LINK_UPDATING, &hdev->state); @@@ -539,7 -538,6 +539,7 @@@ static int hclgevf_set_handle_info(stru nic->pdev = hdev->pdev; nic->numa_node_mask = hdev->numa_node_mask; nic->flags |= HNAE3_SUPPORT_VF; + nic->kinfo.io_base = hdev->hw.io_base;
ret = hclgevf_knic_setup(hdev); if (ret) @@@ -2489,6 -2487,8 +2489,8 @@@ static int hclgevf_configure(struct hcl { int ret;
+ hdev->gro_en = true; + ret = hclgevf_get_basic_info(hdev); if (ret) return ret; @@@ -2551,7 -2551,7 +2553,7 @@@ static int hclgevf_init_roce_base_info( return 0; }
- static int hclgevf_config_gro(struct hclgevf_dev *hdev, bool en) + static int hclgevf_config_gro(struct hclgevf_dev *hdev) { struct hclgevf_cfg_gro_status_cmd *req; struct hclgevf_desc desc; @@@ -2564,7 -2564,7 +2566,7 @@@ false); req = (struct hclgevf_cfg_gro_status_cmd *)desc.data;
- req->gro_en = en ? 1 : 0; + req->gro_en = hdev->gro_en ? 1 : 0;
ret = hclgevf_cmd_send(&hdev->hw, &desc, 1); if (ret) @@@ -3310,7 -3310,7 +3312,7 @@@ static int hclgevf_reset_hdev(struct hc return ret; }
- ret = hclgevf_config_gro(hdev, true); + ret = hclgevf_config_gro(hdev); if (ret) return ret;
@@@ -3339,10 -3339,6 +3341,10 @@@ static int hclgevf_init_hdev(struct hcl if (ret) return ret;
+ ret = hclgevf_devlink_init(hdev); + if (ret) + goto err_devlink_init; + ret = hclgevf_cmd_queue_init(hdev); if (ret) goto err_cmd_queue_init; @@@ -3395,7 -3391,7 +3397,7 @@@ if (ret) goto err_config;
- ret = hclgevf_config_gro(hdev, true); + ret = hclgevf_config_gro(hdev); if (ret) goto err_config;
@@@ -3447,8 -3443,6 +3449,8 @@@ err_misc_irq_init err_cmd_init: hclgevf_cmd_uninit(hdev); err_cmd_queue_init: + hclgevf_devlink_uninit(hdev); +err_devlink_init: hclgevf_pci_uninit(hdev); clear_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state); return ret; @@@ -3470,7 -3464,6 +3472,7 @@@ static void hclgevf_uninit_hdev(struct }
hclgevf_cmd_uninit(hdev); + hclgevf_devlink_uninit(hdev); hclgevf_pci_uninit(hdev); hclgevf_uninit_mac_list(hdev); } @@@ -3647,8 -3640,15 +3649,15 @@@ void hclgevf_update_speed_duplex(struc static int hclgevf_gro_en(struct hnae3_handle *handle, bool enable) { struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); + bool gro_en_old = hdev->gro_en; + int ret;
- return hclgevf_config_gro(hdev, enable); + hdev->gro_en = enable; + ret = hclgevf_config_gro(hdev); + if (ret) + hdev->gro_en = gro_en_old; + + return ret; }
static void hclgevf_get_media_type(struct hnae3_handle *handle, u8 *media_type, diff --combined drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h index 6f222a3a0bf2,e8013be055f8..73e8bb5efc30 --- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h @@@ -6,7 -6,6 +6,7 @@@ #include <linux/fs.h> #include <linux/if_vlan.h> #include <linux/types.h> +#include <net/devlink.h> #include "hclge_mbx.h" #include "hclgevf_cmd.h" #include "hnae3.h" @@@ -311,6 -310,8 +311,8 @@@ struct hclgevf_dev u16 *vector_status; int *vector_irq;
+ bool gro_en; + unsigned long vlan_del_fail_bmap[BITS_TO_LONGS(VLAN_N_VID)];
struct hclgevf_mac_table_cfg mac_table; @@@ -331,8 -332,6 +333,8 @@@ u32 flag; unsigned long serv_processed_cnt; unsigned long last_serv_processed; + + struct devlink *devlink; };
static inline bool hclgevf_is_reset_pending(struct hclgevf_dev *hdev) diff --combined drivers/net/ethernet/intel/e1000e/ich8lan.c index 2f97c9f5611d,a80336c4319b..60c582a16821 --- a/drivers/net/ethernet/intel/e1000e/ich8lan.c +++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c @@@ -321,7 -321,6 +321,7 @@@ static s32 e1000_init_phy_workarounds_p case e1000_pch_tgp: case e1000_pch_adp: case e1000_pch_mtp: + case e1000_pch_lnp: if (e1000_phy_is_accessible_pchlan(hw)) break;
@@@ -467,7 -466,6 +467,7 @@@ static s32 e1000_init_phy_params_pchlan case e1000_pch_tgp: case e1000_pch_adp: case e1000_pch_mtp: + case e1000_pch_lnp: /* In case the PHY needs to be in mdio slow mode, * set slow mode and try to get the PHY id again. */ @@@ -713,7 -711,6 +713,7 @@@ static s32 e1000_init_mac_params_ich8la case e1000_pch_tgp: case e1000_pch_adp: case e1000_pch_mtp: + case e1000_pch_lnp: case e1000_pchlan: /* check management mode */ mac->ops.check_mng_mode = e1000_check_mng_mode_pchlan; @@@ -1009,6 -1006,8 +1009,8 @@@ static s32 e1000_platform_pm_pch_lpt(st { u32 reg = link << (E1000_LTRV_REQ_SHIFT + E1000_LTRV_NOSNOOP_SHIFT) | link << E1000_LTRV_REQ_SHIFT | E1000_LTRV_SEND; + u16 max_ltr_enc_d = 0; /* maximum LTR decoded by platform */ + u16 lat_enc_d = 0; /* latency decoded */ u16 lat_enc = 0; /* latency encoded */
if (link) { @@@ -1062,7 -1061,17 +1064,17 @@@ E1000_PCI_LTR_CAP_LPT + 2, &max_nosnoop); max_ltr_enc = max_t(u16, max_snoop, max_nosnoop);
- if (lat_enc > max_ltr_enc) + lat_enc_d = (lat_enc & E1000_LTRV_VALUE_MASK) * + (1U << (E1000_LTRV_SCALE_FACTOR * + ((lat_enc & E1000_LTRV_SCALE_MASK) + >> E1000_LTRV_SCALE_SHIFT))); + + max_ltr_enc_d = (max_ltr_enc & E1000_LTRV_VALUE_MASK) * + (1U << (E1000_LTRV_SCALE_FACTOR * + ((max_ltr_enc & E1000_LTRV_SCALE_MASK) + >> E1000_LTRV_SCALE_SHIFT))); + + if (lat_enc_d > max_ltr_enc_d) lat_enc = max_ltr_enc; }
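The fix above works because LTR values are an encoding, not a plain magnitude: a 10-bit value plus a 3-bit scale, decoding to value * 2^(5 * scale). Comparing raw encodings treats the scale bits as ordinary high-order bits, which is what the old code got wrong. A hedged sketch of the decode (the helper name is hypothetical; the masks are the ones added to ich8lan.h below):

	/* Decode an LTR encoding: latency = value * 2^(5 * scale), with
	 * value in bits 9:0 and scale in bits 12:10.
	 */
	static u64 e1000_ltr_decode(u16 enc)
	{
		u16 value = enc & E1000_LTRV_VALUE_MASK;
		u16 scale = (enc & E1000_LTRV_SCALE_MASK) >> E1000_LTRV_SCALE_SHIFT;

		return (u64)value << (E1000_LTRV_SCALE_FACTOR * scale);
	}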
@@@ -1269,11 -1278,9 +1281,11 @@@ static s32 e1000_disable_ulp_lpt_lp(str usleep_range(10000, 11000); } if (firmware_bug) - e_warn("ULP_CONFIG_DONE took %dmsec. This is a firmware bug\n", i * 10); + e_warn("ULP_CONFIG_DONE took %d msec. This is a firmware bug\n", + i * 10); else - e_dbg("ULP_CONFIG_DONE cleared after %dmsec\n", i * 10); + e_dbg("ULP_CONFIG_DONE cleared after %d msec\n", + i * 10);
if (force) { mac_reg = er32(H2ME); @@@ -1668,7 -1675,6 +1680,7 @@@ static s32 e1000_get_variants_ich8lan(s case e1000_pch_tgp: case e1000_pch_adp: case e1000_pch_mtp: + case e1000_pch_lnp: rc = e1000_init_phy_params_pchlan(hw); break; default: @@@ -2124,7 -2130,6 +2136,7 @@@ static s32 e1000_sw_lcd_config_ich8lan( case e1000_pch_tgp: case e1000_pch_adp: case e1000_pch_mtp: + case e1000_pch_lnp: sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG_ICH8M; break; default: @@@ -3169,7 -3174,6 +3181,7 @@@ static s32 e1000_valid_nvm_bank_detect_ case e1000_pch_tgp: case e1000_pch_adp: case e1000_pch_mtp: + case e1000_pch_lnp: bank1_offset = nvm->flash_bank_size; act_offset = E1000_ICH_NVM_SIG_WORD;
@@@ -4109,7 -4113,6 +4121,7 @@@ static s32 e1000_validate_nvm_checksum_ case e1000_pch_tgp: case e1000_pch_adp: case e1000_pch_mtp: + case e1000_pch_lnp: word = NVM_COMPAT; valid_csum_mask = NVM_COMPAT_VALID_CSUM; break; @@@ -4124,13 -4127,17 +4136,17 @@@ return ret_val;
if (!(data & valid_csum_mask)) { - data |= valid_csum_mask; - ret_val = e1000_write_nvm(hw, word, 1, &data); - if (ret_val) - return ret_val; - ret_val = e1000e_update_nvm_checksum(hw); - if (ret_val) - return ret_val; + e_dbg("NVM Checksum Invalid\n"); + + if (hw->mac.type < e1000_pch_cnp) { + data |= valid_csum_mask; + ret_val = e1000_write_nvm(hw, word, 1, &data); + if (ret_val) + return ret_val; + ret_val = e1000e_update_nvm_checksum(hw); + if (ret_val) + return ret_val; + } }
return e1000e_validate_nvm_checksum_generic(hw); diff --combined drivers/net/ethernet/intel/e1000e/ich8lan.h index 9b145f6248a8,e757896287eb..d6a092e5ee74 --- a/drivers/net/ethernet/intel/e1000e/ich8lan.h +++ b/drivers/net/ethernet/intel/e1000e/ich8lan.h @@@ -41,15 -41,12 +41,15 @@@ #define E1000_FWSM_WLOCK_MAC_MASK 0x0380 #define E1000_FWSM_WLOCK_MAC_SHIFT 7 #define E1000_FWSM_ULP_CFG_DONE 0x00000400 /* Low power cfg done */ +#define E1000_EXFWSM_DPG_EXIT_DONE 0x00000001
/* Shared Receive Address Registers */ #define E1000_SHRAL_PCH_LPT(_i) (0x05408 + ((_i) * 8)) #define E1000_SHRAH_PCH_LPT(_i) (0x0540C + ((_i) * 8))
#define E1000_H2ME 0x05B50 /* Host to ME */ +#define E1000_H2ME_START_DPG 0x00000001 /* indicate the ME of DPG */ +#define E1000_H2ME_EXIT_DPG 0x00000002 /* indicate the ME exit DPG */ #define E1000_H2ME_ULP 0x00000800 /* ULP Indication Bit */ #define E1000_H2ME_ENFORCE_SETTINGS 0x00001000 /* Enforce Settings */
@@@ -277,8 -274,11 +277,11 @@@
/* Latency Tolerance Reporting */ #define E1000_LTRV 0x000F8 + #define E1000_LTRV_VALUE_MASK 0x000003FF #define E1000_LTRV_SCALE_MAX 5 #define E1000_LTRV_SCALE_FACTOR 5 + #define E1000_LTRV_SCALE_SHIFT 10 + #define E1000_LTRV_SCALE_MASK 0x00001C00 #define E1000_LTRV_REQ_SHIFT 15 #define E1000_LTRV_NOSNOOP_SHIFT 16 #define E1000_LTRV_SEND (1 << 30) diff --combined drivers/net/ethernet/intel/ice/ice_devlink.c index 8c863d64930b,7fe6e8ea39f0..14afce82ef63 --- a/drivers/net/ethernet/intel/ice/ice_devlink.c +++ b/drivers/net/ethernet/intel/ice/ice_devlink.c @@@ -42,7 -42,9 +42,9 @@@ static int ice_info_pba(struct ice_pf *
status = ice_read_pba_string(hw, (u8 *)ctx->buf, sizeof(ctx->buf)); if (status) - return -EIO; + /* We failed to locate the PBA, so just skip this entry */ + dev_dbg(ice_pf_to_dev(pf), "Failed to read Product Board Assembly string, status %s\n", + ice_stat_str(status));
return 0; } @@@ -475,7 -477,7 +477,7 @@@ struct ice_pf *ice_allocate_pf(struct d { struct devlink *devlink;
- devlink = devlink_alloc(&ice_devlink_ops, sizeof(struct ice_pf)); + devlink = devlink_alloc(&ice_devlink_ops, sizeof(struct ice_pf), dev); if (!devlink) return NULL;
@@@ -502,7 -504,7 +504,7 @@@ int ice_devlink_register(struct ice_pf struct device *dev = ice_pf_to_dev(pf); int err;
- err = devlink_register(devlink, dev); + err = devlink_register(devlink); if (err) { dev_err(dev, "devlink registration failed: %d\n", err); return err; diff --combined drivers/net/ethernet/intel/igc/igc_main.c index db1c63e8802a,ed2d66bc2d6c..c6c075a637ea --- a/drivers/net/ethernet/intel/igc/igc_main.c +++ b/drivers/net/ethernet/intel/igc/igc_main.c @@@ -12,8 -12,6 +12,8 @@@ #include <net/pkt_sched.h> #include <linux/bpf_trace.h> #include <net/xdp_sock_drv.h> +#include <linux/pci.h> + #include <net/ipv6.h>
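Both ice hunks above track the devlink core rework in this window: the parent device is now supplied at allocation time, and devlink_register() takes only the devlink instance. The new shape, sketched with a hypothetical private struct:

	/* devlink API after the rework; "my_priv" and "my_devlink_ops" are
	 * illustrative names, not from this driver.
	 */
	devlink = devlink_alloc(&my_devlink_ops, sizeof(struct my_priv), dev);
	if (!devlink)
		return -ENOMEM;
	err = devlink_register(devlink);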
#include "igc.h" @@@ -151,6 -149,9 +151,9 @@@ static void igc_release_hw_control(stru struct igc_hw *hw = &adapter->hw; u32 ctrl_ext;
+ if (!pci_device_is_present(adapter->pdev)) + return; + /* Let firmware take over control of h/w */ ctrl_ext = rd32(IGC_CTRL_EXT); wr32(IGC_CTRL_EXT, @@@ -3077,320 -3078,11 +3080,320 @@@ static void igc_del_etype_filter(struc etype); }
+static int igc_flex_filter_select(struct igc_adapter *adapter, + struct igc_flex_filter *input, + u32 *fhft) +{ + struct igc_hw *hw = &adapter->hw; + u8 fhft_index; + u32 fhftsl; + + if (input->index >= MAX_FLEX_FILTER) { + dev_err(&adapter->pdev->dev, "Wrong Flex Filter index selected!\n"); + return -EINVAL; + } + + /* Indirect table select register */ + fhftsl = rd32(IGC_FHFTSL); + fhftsl &= ~IGC_FHFTSL_FTSL_MASK; + switch (input->index) { + case 0 ... 7: + fhftsl |= 0x00; + break; + case 8 ... 15: + fhftsl |= 0x01; + break; + case 16 ... 23: + fhftsl |= 0x02; + break; + case 24 ... 31: + fhftsl |= 0x03; + break; + } + wr32(IGC_FHFTSL, fhftsl); + + /* Normalize index down to host table register */ + fhft_index = input->index % 8; + + *fhft = (fhft_index < 4) ? IGC_FHFT(fhft_index) : + IGC_FHFT_EXT(fhft_index - 4); + + return 0; +} + +static int igc_write_flex_filter_ll(struct igc_adapter *adapter, + struct igc_flex_filter *input) +{ + struct device *dev = &adapter->pdev->dev; + struct igc_hw *hw = &adapter->hw; + u8 *data = input->data; + u8 *mask = input->mask; + u32 queuing; + u32 fhft; + u32 wufc; + int ret; + int i; + + /* Length has to be aligned to 8. Otherwise the filter will fail. Bail + * out early to avoid surprises later. + */ + if (input->length % 8 != 0) { + dev_err(dev, "The length of a flex filter has to be 8 byte aligned!\n"); + return -EINVAL; + } + + /* Select corresponding flex filter register and get base for host table. */ + ret = igc_flex_filter_select(adapter, input, &fhft); + if (ret) + return ret; + + /* When adding a filter globally disable flex filter feature. That is + * recommended within the datasheet. + */ + wufc = rd32(IGC_WUFC); + wufc &= ~IGC_WUFC_FLEX_HQ; + wr32(IGC_WUFC, wufc); + + /* Configure filter */ + queuing = input->length & IGC_FHFT_LENGTH_MASK; + queuing |= (input->rx_queue << IGC_FHFT_QUEUE_SHIFT) & IGC_FHFT_QUEUE_MASK; + queuing |= (input->prio << IGC_FHFT_PRIO_SHIFT) & IGC_FHFT_PRIO_MASK; + + if (input->immediate_irq) + queuing |= IGC_FHFT_IMM_INT; + + if (input->drop) + queuing |= IGC_FHFT_DROP; + + wr32(fhft + 0xFC, queuing); + + /* Write data (128 byte) and mask (128 bit) */ + for (i = 0; i < 16; ++i) { + const size_t data_idx = i * 8; + const size_t row_idx = i * 16; + u32 dw0 = + (data[data_idx + 0] << 0) | + (data[data_idx + 1] << 8) | + (data[data_idx + 2] << 16) | + (data[data_idx + 3] << 24); + u32 dw1 = + (data[data_idx + 4] << 0) | + (data[data_idx + 5] << 8) | + (data[data_idx + 6] << 16) | + (data[data_idx + 7] << 24); + u32 tmp; + + /* Write row: dw0, dw1 and mask */ + wr32(fhft + row_idx, dw0); + wr32(fhft + row_idx + 4, dw1); + + /* mask is only valid for MASK(7, 0) */ + tmp = rd32(fhft + row_idx + 8); + tmp &= ~GENMASK(7, 0); + tmp |= mask[i]; + wr32(fhft + row_idx + 8, tmp); + } + + /* Enable filter. */ + wufc |= IGC_WUFC_FLEX_HQ; + if (input->index > 8) { + /* Filter 0-7 are enabled via WUFC. The other 24 filters are not. 
*/ + u32 wufc_ext = rd32(IGC_WUFC_EXT); + + wufc_ext |= (IGC_WUFC_EXT_FLX8 << (input->index - 8)); + + wr32(IGC_WUFC_EXT, wufc_ext); + } else { + wufc |= (IGC_WUFC_FLX0 << input->index); + } + wr32(IGC_WUFC, wufc); + + dev_dbg(&adapter->pdev->dev, "Added flex filter %u to HW.\n", + input->index); + + return 0; +} + +static void igc_flex_filter_add_field(struct igc_flex_filter *flex, + const void *src, unsigned int offset, + size_t len, const void *mask) +{ + int i; + + /* data */ + memcpy(&flex->data[offset], src, len); + + /* mask */ + for (i = 0; i < len; ++i) { + const unsigned int idx = i + offset; + const u8 *ptr = mask; + + if (mask) { + if (ptr[i] & 0xff) + flex->mask[idx / 8] |= BIT(idx % 8); + + continue; + } + + flex->mask[idx / 8] |= BIT(idx % 8); + } +} + +static int igc_find_avail_flex_filter_slot(struct igc_adapter *adapter) +{ + struct igc_hw *hw = &adapter->hw; + u32 wufc, wufc_ext; + int i; + + wufc = rd32(IGC_WUFC); + wufc_ext = rd32(IGC_WUFC_EXT); + + for (i = 0; i < MAX_FLEX_FILTER; i++) { + if (i < 8) { + if (!(wufc & (IGC_WUFC_FLX0 << i))) + return i; + } else { + if (!(wufc_ext & (IGC_WUFC_EXT_FLX8 << (i - 8)))) + return i; + } + } + + return -ENOSPC; +} + +static bool igc_flex_filter_in_use(struct igc_adapter *adapter) +{ + struct igc_hw *hw = &adapter->hw; + u32 wufc, wufc_ext; + + wufc = rd32(IGC_WUFC); + wufc_ext = rd32(IGC_WUFC_EXT); + + if (wufc & IGC_WUFC_FILTER_MASK) + return true; + + if (wufc_ext & IGC_WUFC_EXT_FILTER_MASK) + return true; + + return false; +} + +static int igc_add_flex_filter(struct igc_adapter *adapter, + struct igc_nfc_rule *rule) +{ + struct igc_flex_filter flex = { }; + struct igc_nfc_filter *filter = &rule->filter; + unsigned int eth_offset, user_offset; + int ret, index; + bool vlan; + + index = igc_find_avail_flex_filter_slot(adapter); + if (index < 0) + return -ENOSPC; + + /* Construct the flex filter: + * -> dest_mac [6] + * -> src_mac [6] + * -> tpid [2] + * -> vlan tci [2] + * -> ether type [2] + * -> user data [8] + * -> = 26 bytes => 32 length + */ + flex.index = index; + flex.length = 32; + flex.rx_queue = rule->action; + + vlan = rule->filter.vlan_tci || rule->filter.vlan_etype; + eth_offset = vlan ? 16 : 12; + user_offset = vlan ? 18 : 14; + + /* Add destination MAC */ + if (rule->filter.match_flags & IGC_FILTER_FLAG_DST_MAC_ADDR) + igc_flex_filter_add_field(&flex, &filter->dst_addr, 0, + ETH_ALEN, NULL); + + /* Add source MAC */ + if (rule->filter.match_flags & IGC_FILTER_FLAG_SRC_MAC_ADDR) + igc_flex_filter_add_field(&flex, &filter->src_addr, 6, + ETH_ALEN, NULL); + + /* Add VLAN etype */ + if (rule->filter.match_flags & IGC_FILTER_FLAG_VLAN_ETYPE) + igc_flex_filter_add_field(&flex, &filter->vlan_etype, 12, + sizeof(filter->vlan_etype), + NULL); + + /* Add VLAN TCI */ + if (rule->filter.match_flags & IGC_FILTER_FLAG_VLAN_TCI) + igc_flex_filter_add_field(&flex, &filter->vlan_tci, 14, + sizeof(filter->vlan_tci), NULL); + + /* Add Ether type */ + if (rule->filter.match_flags & IGC_FILTER_FLAG_ETHER_TYPE) { + __be16 etype = cpu_to_be16(filter->etype); + + igc_flex_filter_add_field(&flex, &etype, eth_offset, + sizeof(etype), NULL); + } + + /* Add user data */ + if (rule->filter.match_flags & IGC_FILTER_FLAG_USER_DATA) + igc_flex_filter_add_field(&flex, &filter->user_data, + user_offset, + sizeof(filter->user_data), + filter->user_mask); + + /* Add it down to the hardware and enable it. 
*/ + ret = igc_write_flex_filter_ll(adapter, &flex); + if (ret) + return ret; + + filter->flex_index = index; + + return 0; +} + +static void igc_del_flex_filter(struct igc_adapter *adapter, + u16 reg_index) +{ + struct igc_hw *hw = &adapter->hw; + u32 wufc; + + /* Just disable the filter. The filter table itself is kept + * intact. Another flex_filter_add() should override the "old" data + * then. + */ + if (reg_index > 8) { + u32 wufc_ext = rd32(IGC_WUFC_EXT); + + wufc_ext &= ~(IGC_WUFC_EXT_FLX8 << (reg_index - 8)); + wr32(IGC_WUFC_EXT, wufc_ext); + } else { + wufc = rd32(IGC_WUFC); + + wufc &= ~(IGC_WUFC_FLX0 << reg_index); + wr32(IGC_WUFC, wufc); + } + + if (igc_flex_filter_in_use(adapter)) + return; + + /* No filters are in use, we may disable flex filters */ + wufc = rd32(IGC_WUFC); + wufc &= ~IGC_WUFC_FLEX_HQ; + wr32(IGC_WUFC, wufc); +} + static int igc_enable_nfc_rule(struct igc_adapter *adapter, - const struct igc_nfc_rule *rule) + struct igc_nfc_rule *rule) { int err;
+ if (rule->flex) { + return igc_add_flex_filter(adapter, rule); + } + if (rule->filter.match_flags & IGC_FILTER_FLAG_ETHER_TYPE) { err = igc_add_etype_filter(adapter, rule->filter.etype, rule->action); @@@ -3427,11 -3119,6 +3430,11 @@@ static void igc_disable_nfc_rule(struct igc_adapter *adapter, const struct igc_nfc_rule *rule) { + if (rule->flex) { + igc_del_flex_filter(adapter, rule->filter.flex_index); + return; + } + if (rule->filter.match_flags & IGC_FILTER_FLAG_ETHER_TYPE) igc_del_etype_filter(adapter, rule->filter.etype);
@@@ -4765,26 -4452,29 +4768,29 @@@ void igc_down(struct igc_adapter *adapt
igc_ptp_suspend(adapter);
- /* disable receives in the hardware */ - rctl = rd32(IGC_RCTL); - wr32(IGC_RCTL, rctl & ~IGC_RCTL_EN); - /* flush and sleep below */ - + if (pci_device_is_present(adapter->pdev)) { + /* disable receives in the hardware */ + rctl = rd32(IGC_RCTL); + wr32(IGC_RCTL, rctl & ~IGC_RCTL_EN); + /* flush and sleep below */ + } /* set trans_start so we don't get spurious watchdogs during reset */ netif_trans_update(netdev);
netif_carrier_off(netdev); netif_tx_stop_all_queues(netdev);
- /* disable transmits in the hardware */ - tctl = rd32(IGC_TCTL); - tctl &= ~IGC_TCTL_EN; - wr32(IGC_TCTL, tctl); - /* flush both disables and wait for them to finish */ - wrfl(); - usleep_range(10000, 20000); + if (pci_device_is_present(adapter->pdev)) { + /* disable transmits in the hardware */ + tctl = rd32(IGC_TCTL); + tctl &= ~IGC_TCTL_EN; + wr32(IGC_TCTL, tctl); + /* flush both disables and wait for them to finish */ + wrfl(); + usleep_range(10000, 20000);
- igc_irq_disable(adapter); + igc_irq_disable(adapter); + }
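The igc_down() changes above guard MMIO behind pci_device_is_present(), the usual surprise-removal pattern. Sketched in isolation (register names reused from the hunk):

	/* After hot-unplug, config-space reads return all-ones and MMIO
	 * writes go nowhere; pci_device_is_present() checks the vendor ID
	 * first so dead hardware is never touched.
	 */
	if (pci_device_is_present(adapter->pdev))
		wr32(IGC_RCTL, rd32(IGC_RCTL) & ~IGC_RCTL_EN);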
adapter->flags &= ~IGC_FLAG_NEED_LINK_UPDATE;
@@@ -5127,7 -4817,6 +5133,7 @@@ static irqreturn_t igc_msix_ring(int ir */ static int igc_request_msix(struct igc_adapter *adapter) { + unsigned int num_q_vectors = adapter->num_q_vectors; int i = 0, err = 0, vector = 0, free_vector = 0; struct net_device *netdev = adapter->netdev;
@@@ -5136,13 -4825,7 +5142,13 @@@ if (err) goto err_out;
- for (i = 0; i < adapter->num_q_vectors; i++) { + if (num_q_vectors > MAX_Q_VECTORS) { + num_q_vectors = MAX_Q_VECTORS; + dev_warn(&adapter->pdev->dev, + "The number of queue vectors (%d) is higher than max allowed (%d)\n", + adapter->num_q_vectors, MAX_Q_VECTORS); + } + for (i = 0; i < num_q_vectors; i++) { struct igc_q_vector *q_vector = adapter->q_vector[i];
vector++; @@@ -5221,12 -4904,20 +5227,12 @@@ bool igc_has_link(struct igc_adapter *a * false until the igc_check_for_link establishes link * for copper adapters ONLY */ - switch (hw->phy.media_type) { - case igc_media_type_copper: - if (!hw->mac.get_link_status) - return true; - hw->mac.ops.check_for_link(hw); - link_active = !hw->mac.get_link_status; - break; - default: - case igc_media_type_unknown: - break; - } + if (!hw->mac.get_link_status) + return true; + hw->mac.ops.check_for_link(hw); + link_active = !hw->mac.get_link_status;
- if (hw->mac.type == igc_i225 && - hw->phy.id == I225_I_PHY_ID) { + if (hw->mac.type == igc_i225) { if (!netif_carrier_ok(adapter->netdev)) { adapter->flags &= ~IGC_FLAG_NEED_LINK_UPDATE; } else if (!(adapter->flags & IGC_FLAG_NEED_LINK_UPDATE)) { @@@ -5314,9 -5005,7 +5320,9 @@@ static void igc_watchdog_task(struct wo adapter->tx_timeout_factor = 14; break; case SPEED_100: - /* maybe add some timeout factor ? */ + case SPEED_1000: + case SPEED_2500: + adapter->tx_timeout_factor = 7; break; }
@@@ -5806,7 -5495,7 +5812,7 @@@ static bool validate_schedule(struct ig if (e->command != TC_TAPRIO_CMD_SET_GATES) return false;
- for (i = 0; i < IGC_MAX_TX_QUEUES; i++) { + for (i = 0; i < adapter->num_tx_queues; i++) { if (e->gate_mask & BIT(i)) queue_uses[i]++;
@@@ -5863,7 -5552,7 +5869,7 @@@ static int igc_save_qbv_schedule(struc
end_time += e->interval;
- for (i = 0; i < IGC_MAX_TX_QUEUES; i++) { + for (i = 0; i < adapter->num_tx_queues; i++) { struct igc_ring *ring = adapter->tx_ring[i];
if (!(e->gate_mask & BIT(i))) @@@ -6015,7 -5704,7 +6021,7 @@@ static const struct net_device_ops igc_ .ndo_fix_features = igc_fix_features, .ndo_set_features = igc_set_features, .ndo_features_check = igc_features_check, - .ndo_do_ioctl = igc_ioctl, + .ndo_eth_ioctl = igc_ioctl, .ndo_setup_tc = igc_setup_tc, .ndo_bpf = igc_bpf, .ndo_xdp_xmit = igc_xdp_xmit, @@@ -6176,10 -5865,6 +6182,10 @@@ static int igc_probe(struct pci_dev *pd
pci_enable_pcie_error_reporting(pdev);
+ err = pci_enable_ptm(pdev, NULL); + if (err < 0) + dev_info(&pdev->dev, "PCIe PTM not supported by PCIe bus/controller\n"); + pci_set_master(pdev);
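The pci_enable_ptm() call above is the probe-time half of the PTM-based cross-timestamping added to igc_ptp.c later in this diff; failure is only informational because it merely disables that feature. A sketch of how the two halves connect (condensed from the PTP hunk below):

	/* getcrosststamp is only advertised when PTM negotiation succeeded
	 * somewhere up the PCIe hierarchy (and ART is usable on x86).
	 */
	if (pcie_ptm_enabled(adapter->pdev))
		adapter->ptp_caps.getcrosststamp = igc_ptp_getcrosststamp;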
err = -ENOMEM; diff --combined drivers/net/ethernet/intel/igc/igc_ptp.c index f6848181cdbd,4ae19c6a3247..0f021909b430 --- a/drivers/net/ethernet/intel/igc/igc_ptp.c +++ b/drivers/net/ethernet/intel/igc/igc_ptp.c @@@ -9,8 -9,6 +9,8 @@@ #include <linux/ptp_classify.h> #include <linux/clocksource.h> #include <linux/ktime.h> +#include <linux/delay.h> +#include <linux/iopoll.h>
#define INCVALUE_MASK 0x7fffffff #define ISGN 0x80000000 @@@ -18,9 -16,6 +18,9 @@@ #define IGC_SYSTIM_OVERFLOW_PERIOD (HZ * 60 * 9) #define IGC_PTP_TX_TIMEOUT (HZ * 15)
+#define IGC_PTM_STAT_SLEEP 2 +#define IGC_PTM_STAT_TIMEOUT 100 + /* SYSTIM read access for I225 */ void igc_ptp_read(struct igc_adapter *adapter, struct timespec64 *ts) { @@@ -757,147 -752,6 +757,147 @@@ int igc_ptp_get_ts_config(struct net_de -EFAULT : 0; }
+/* The two conditions below must be met for cross timestamping via + * PCIe PTM: + * + * 1. We have an way to convert the timestamps in the PTM messages + * to something related to the system clocks (right now, only + * X86 systems with support for the Always Running Timer allow that); + * + * 2. We have PTM enabled in the path from the device to the PCIe root port. + */ +static bool igc_is_crosststamp_supported(struct igc_adapter *adapter) +{ + return IS_ENABLED(CONFIG_X86_TSC) ? pcie_ptm_enabled(adapter->pdev) : false; +} + +static struct system_counterval_t igc_device_tstamp_to_system(u64 tstamp) +{ +#if IS_ENABLED(CONFIG_X86_TSC) + return convert_art_ns_to_tsc(tstamp); +#else + return (struct system_counterval_t) { }; +#endif +} + +static void igc_ptm_log_error(struct igc_adapter *adapter, u32 ptm_stat) +{ + struct net_device *netdev = adapter->netdev; + + switch (ptm_stat) { + case IGC_PTM_STAT_RET_ERR: + netdev_err(netdev, "PTM Error: Root port timeout\n"); + break; + case IGC_PTM_STAT_BAD_PTM_RES: + netdev_err(netdev, "PTM Error: Bad response, PTM Response Data expected\n"); + break; + case IGC_PTM_STAT_T4M1_OVFL: + netdev_err(netdev, "PTM Error: T4 minus T1 overflow\n"); + break; + case IGC_PTM_STAT_ADJUST_1ST: + netdev_err(netdev, "PTM Error: 1588 timer adjusted during first PTM cycle\n"); + break; + case IGC_PTM_STAT_ADJUST_CYC: + netdev_err(netdev, "PTM Error: 1588 timer adjusted during non-first PTM cycle\n"); + break; + default: + netdev_err(netdev, "PTM Error: Unknown error (%#x)\n", ptm_stat); + break; + } +} + +static int igc_phc_get_syncdevicetime(ktime_t *device, + struct system_counterval_t *system, + void *ctx) +{ + u32 stat, t2_curr_h, t2_curr_l, ctrl; + struct igc_adapter *adapter = ctx; + struct igc_hw *hw = &adapter->hw; + int err, count = 100; + ktime_t t1, t2_curr; + + /* Get a snapshot of system clocks to use as historic value. */ + ktime_get_snapshot(&adapter->snapshot); + + do { + /* Doing this in a loop because in the event of a + * badly timed (ha!) system clock adjustment, we may + * get PTM errors from the PCI root, but these errors + * are transitory. Repeating the process returns valid + * data eventually. + */ + + /* To "manually" start the PTM cycle we need to clear and + * then set again the TRIG bit. + */ + ctrl = rd32(IGC_PTM_CTRL); + ctrl &= ~IGC_PTM_CTRL_TRIG; + wr32(IGC_PTM_CTRL, ctrl); + ctrl |= IGC_PTM_CTRL_TRIG; + wr32(IGC_PTM_CTRL, ctrl); + + /* The cycle only starts "for real" when software notifies + * that it has read the registers, this is done by setting + * VALID bit. + */ + wr32(IGC_PTM_STAT, IGC_PTM_STAT_VALID); + + err = readx_poll_timeout(rd32, IGC_PTM_STAT, stat, + stat, IGC_PTM_STAT_SLEEP, + IGC_PTM_STAT_TIMEOUT); + if (err < 0) { + netdev_err(adapter->netdev, "Timeout reading IGC_PTM_STAT register\n"); + return err; + } + + if ((stat & IGC_PTM_STAT_VALID) == IGC_PTM_STAT_VALID) + break; + + if (stat & ~IGC_PTM_STAT_VALID) { + /* An error occurred, log it. */ + igc_ptm_log_error(adapter, stat); + /* The STAT register is write-1-to-clear (W1C), + * so write the previous error status to clear it. 
+ */ + wr32(IGC_PTM_STAT, stat); + continue; + } + } while (--count); + + if (!count) { + netdev_err(adapter->netdev, "Exceeded number of tries for PTM cycle\n"); + return -ETIMEDOUT; + } + + t1 = ktime_set(rd32(IGC_PTM_T1_TIM0_H), rd32(IGC_PTM_T1_TIM0_L)); + + t2_curr_l = rd32(IGC_PTM_CURR_T2_L); + t2_curr_h = rd32(IGC_PTM_CURR_T2_H); + + /* FIXME: When the register that tells the endianness of the + * PTM registers are implemented, check them here and add the + * appropriate conversion. + */ + t2_curr_h = swab32(t2_curr_h); + + t2_curr = ((s64)t2_curr_h << 32 | t2_curr_l); + + *device = t1; + *system = igc_device_tstamp_to_system(t2_curr); + + return 0; +} + +static int igc_ptp_getcrosststamp(struct ptp_clock_info *ptp, + struct system_device_crosststamp *cts) +{ + struct igc_adapter *adapter = container_of(ptp, struct igc_adapter, + ptp_caps); + + return get_device_system_crosststamp(igc_phc_get_syncdevicetime, + adapter, &adapter->snapshot, cts); +} + /** * igc_ptp_init - Initialize PTP functionality * @adapter: Board private structure @@@ -934,11 -788,6 +934,11 @@@ void igc_ptp_init(struct igc_adapter *a adapter->ptp_caps.n_per_out = IGC_N_PEROUT; adapter->ptp_caps.n_pins = IGC_N_SDP; adapter->ptp_caps.verify = igc_ptp_verify_pin; + + if (!igc_is_crosststamp_supported(adapter)) + break; + + adapter->ptp_caps.getcrosststamp = igc_ptp_getcrosststamp; break; default: adapter->ptp_clock = NULL; @@@ -1000,7 -849,8 +1000,8 @@@ void igc_ptp_suspend(struct igc_adapte adapter->ptp_tx_skb = NULL; clear_bit_unlock(__IGC_PTP_TX_IN_PROGRESS, &adapter->state);
- igc_ptp_time_save(adapter); + if (pci_device_is_present(adapter->pdev)) + igc_ptp_time_save(adapter); }
/** @@@ -1029,9 -879,7 +1030,9 @@@ void igc_ptp_stop(struct igc_adapter *a void igc_ptp_reset(struct igc_adapter *adapter) { struct igc_hw *hw = &adapter->hw; + u32 cycle_ctrl, ctrl; unsigned long flags; + u32 timadj;
/* reset the tstamp_config */ igc_ptp_set_timestamp_mode(adapter, &adapter->tstamp_config); @@@ -1040,38 -888,12 +1041,38 @@@
switch (adapter->hw.mac.type) { case igc_i225: + timadj = rd32(IGC_TIMADJ); + timadj |= IGC_TIMADJ_ADJUST_METH; + wr32(IGC_TIMADJ, timadj); + wr32(IGC_TSAUXC, 0x0); wr32(IGC_TSSDP, 0x0); wr32(IGC_TSIM, IGC_TSICR_INTERRUPTS | (adapter->pps_sys_wrap_on ? IGC_TSICR_SYS_WRAP : 0)); wr32(IGC_IMS, IGC_IMS_TS); + + if (!igc_is_crosststamp_supported(adapter)) + break; + + wr32(IGC_PCIE_DIG_DELAY, IGC_PCIE_DIG_DELAY_DEFAULT); + wr32(IGC_PCIE_PHY_DELAY, IGC_PCIE_PHY_DELAY_DEFAULT); + + cycle_ctrl = IGC_PTM_CYCLE_CTRL_CYC_TIME(IGC_PTM_CYC_TIME_DEFAULT); + + wr32(IGC_PTM_CYCLE_CTRL, cycle_ctrl); + + ctrl = IGC_PTM_CTRL_EN | + IGC_PTM_CTRL_START_NOW | + IGC_PTM_CTRL_SHRT_CYC(IGC_PTM_SHORT_CYC_DEFAULT) | + IGC_PTM_CTRL_PTM_TO(IGC_PTM_TIMEOUT_DEFAULT) | + IGC_PTM_CTRL_TRIG; + + wr32(IGC_PTM_CTRL, ctrl); + + /* Force the first cycle to run. */ + wr32(IGC_PTM_STAT, IGC_PTM_STAT_VALID); + break; default: /* No work to do. */ diff --combined drivers/net/ethernet/marvell/mvneta.c index 0e6d40701862,de32e5b49053..9d460a270601 --- a/drivers/net/ethernet/marvell/mvneta.c +++ b/drivers/net/ethernet/marvell/mvneta.c @@@ -105,7 -105,7 +105,7 @@@ #define MVNETA_VLAN_PRIO_TO_RXQ 0x2440 #define MVNETA_VLAN_PRIO_RXQ_MAP(prio, rxq) ((rxq) << ((prio) * 3)) #define MVNETA_PORT_STATUS 0x2444 - #define MVNETA_TX_IN_PRGRS BIT(1) + #define MVNETA_TX_IN_PRGRS BIT(0) #define MVNETA_TX_FIFO_EMPTY BIT(8) #define MVNETA_RX_MIN_FRAME_SIZE 0x247c /* Only exists on Armada XP and Armada 370 */ @@@ -2327,7 -2327,7 +2327,7 @@@ mvneta_swbm_build_skb(struct mvneta_por if (!skb) return ERR_PTR(-ENOMEM);
- skb_mark_for_recycle(skb, virt_to_page(xdp->data), pool); + skb_mark_for_recycle(skb);
skb_reserve(skb, xdp->data - xdp->data_hard_start); skb_put(skb, xdp->data_end - xdp->data); @@@ -2339,6 -2339,10 +2339,6 @@@ skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, skb_frag_page(frag), skb_frag_off(frag), skb_frag_size(frag), PAGE_SIZE); - /* We don't need to reset pp_recycle here. It's already set, so - * just mark fragments for recycling. - */ - page_pool_store_mem_info(skb_frag_page(frag), pool); }
return skb; @@@ -2662,7 -2666,7 +2662,7 @@@ static int mvneta_tx_tso(struct sk_buf return 0;
if (skb_headlen(skb) < (skb_transport_offset(skb) + tcp_hdrlen(skb))) { - pr_info("*** Is this even possible???!?!?\n"); + pr_info("*** Is this even possible?\n"); return 0; }
@@@ -3828,20 -3832,12 +3828,20 @@@ static void mvneta_validate(struct phyl struct mvneta_port *pp = netdev_priv(ndev); __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
- /* We only support QSGMII, SGMII, 802.3z and RGMII modes */ - if (state->interface != PHY_INTERFACE_MODE_NA && - state->interface != PHY_INTERFACE_MODE_QSGMII && - state->interface != PHY_INTERFACE_MODE_SGMII && - !phy_interface_mode_is_8023z(state->interface) && - !phy_interface_mode_is_rgmii(state->interface)) { + /* We only support QSGMII, SGMII, 802.3z and RGMII modes. + * When in 802.3z mode, we must have AN enabled: + * "Bit 2 Field InBandAnEn In-band Auto-Negotiation enable. ... + * When <PortType> = 1 (1000BASE-X) this field must be set to 1." + */ + if (phy_interface_mode_is_8023z(state->interface)) { + if (!phylink_test(state->advertising, Autoneg)) { + bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS); + return; + } + } else if (state->interface != PHY_INTERFACE_MODE_NA && + state->interface != PHY_INTERFACE_MODE_QSGMII && + state->interface != PHY_INTERFACE_MODE_SGMII && + !phy_interface_mode_is_rgmii(state->interface)) { bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS); return; } @@@ -4500,11 -4496,8 +4500,11 @@@ static int mvneta_ethtool_nway_reset(st }
/* Set interrupt coalescing for ethtools */ -static int mvneta_ethtool_set_coalesce(struct net_device *dev, - struct ethtool_coalesce *c) +static int +mvneta_ethtool_set_coalesce(struct net_device *dev, + struct ethtool_coalesce *c, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) { struct mvneta_port *pp = netdev_priv(dev); int queue; @@@ -4527,11 -4520,8 +4527,11 @@@ }
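The set_coalesce/get_coalesce rework here and in the next hunk follows a tree-wide ethtool change: both callbacks gained a struct kernel_ethtool_coalesce (netlink-only extensions) and a struct netlink_ext_ack. A driver that only uses the legacy fields can accept the new arguments and ignore them; hedged sketch with a hypothetical foo_get_coalesce():

static int foo_get_coalesce(struct net_device *dev,
			    struct ethtool_coalesce *c,
			    struct kernel_ethtool_coalesce *kernel_coal,
			    struct netlink_ext_ack *extack)
{
	/* Legacy parameters still travel in *c; kernel_coal and
	 * extack may be left untouched by drivers that do not
	 * support the extended attributes.
	 */
	c->rx_coalesce_usecs = 10;	/* illustrative value */
	return 0;
}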
/* get coalescing for ethtools */ -static int mvneta_ethtool_get_coalesce(struct net_device *dev, - struct ethtool_coalesce *c) +static int +mvneta_ethtool_get_coalesce(struct net_device *dev, + struct ethtool_coalesce *c, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) { struct mvneta_port *pp = netdev_priv(dev);
@@@ -4996,7 -4986,7 +4996,7 @@@ static const struct net_device_ops mvne .ndo_change_mtu = mvneta_change_mtu, .ndo_fix_features = mvneta_fix_features, .ndo_get_stats64 = mvneta_get_stats64, - .ndo_do_ioctl = mvneta_ioctl, + .ndo_eth_ioctl = mvneta_ioctl, .ndo_bpf = mvneta_xdp, .ndo_xdp_xmit = mvneta_xdp_xmit, .ndo_setup_tc = mvneta_setup_tc, diff --combined drivers/net/ethernet/qlogic/qed/qed_main.c index 6871d892eabf,6bb9ec98a12b..15ef59aa34ff --- a/drivers/net/ethernet/qlogic/qed/qed_main.c +++ b/drivers/net/ethernet/qlogic/qed/qed_main.c @@@ -49,10 -49,11 +49,10 @@@ #define QED_NVM_CFG_MAX_ATTRS 50
static char version[] = - "QLogic FastLinQ 4xxxx Core Module qed " DRV_MODULE_VERSION "\n"; + "QLogic FastLinQ 4xxxx Core Module qed\n";
MODULE_DESCRIPTION("QLogic FastLinQ 4xxxx Core Module"); MODULE_LICENSE("GPL"); -MODULE_VERSION(DRV_MODULE_VERSION);
#define FW_FILE_VERSION \ __stringify(FW_MAJOR_VERSION) "." \ @@@ -615,7 -616,12 +615,12 @@@ static int qed_enable_msix(struct qed_d rc = cnt; }
- if (rc > 0) { + /* For VFs, we should return with an error in case we didn't get the + * exact number of msix vectors as we requested. + * Not doing that will lead to a crash when starting queues for + * this VF. + */ + if ((IS_PF(cdev) && rc > 0) || (IS_VF(cdev) && rc == cnt)) { /* MSI-x configuration was achieved */ int_params->out.int_mode = QED_INT_MODE_MSIX; int_params->out.num_vectors = rc; @@@ -1215,10 -1221,6 +1220,10 @@@ static void qed_slowpath_task(struct wo
if (test_and_clear_bit(QED_SLOWPATH_PERIODIC_DB_REC, &hwfn->slowpath_task_flags)) { + /* skip qed_db_rec_handler during recovery/unload */ + if (hwfn->cdev->recov_in_prog || !hwfn->slowpath_wq_active) + goto out; + qed_db_rec_handler(hwfn, ptt); if (hwfn->periodic_db_rec_count--) qed_slowpath_delayed_work(hwfn, @@@ -1226,7 -1228,6 +1231,7 @@@ QED_PERIODIC_DB_REC_INTERVAL); }
+out: qed_ptt_release(hwfn, ptt); }
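Background for the qed_enable_msix() hunk above: pci_enable_msix_range() may legitimately return fewer vectors than requested, which a PF can work with but a VF here cannot, since VF queue setup assumes the exact count. A reduced sketch of the accept/reject decision (cnt is the requested vector count; the msix_table field is an assumption about the driver's int_params layout):

rc = pci_enable_msix_range(cdev->pdev, int_params->msix_table, 1, cnt);

if ((IS_PF(cdev) && rc > 0) || (IS_VF(cdev) && rc == cnt)) {
	/* PF: accept any partial allocation; VF: demand all of it */
	int_params->out.int_mode = QED_INT_MODE_MSIX;
	int_params->out.num_vectors = rc;
	rc = 0;
} else {
	rc = -ENOSPC;	/* illustrative error, not the driver's exact path */
}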
diff --combined drivers/net/ethernet/qlogic/qede/qede_main.c index 4877cb88c31a,1c7f9ed6f1c1..9837bdb89cd4 --- a/drivers/net/ethernet/qlogic/qede/qede_main.c +++ b/drivers/net/ethernet/qlogic/qede/qede_main.c @@@ -39,8 -39,12 +39,8 @@@ #include "qede.h" #include "qede_ptp.h"
-static char version[] = - "QLogic FastLinQ 4xxxx Ethernet Driver qede " DRV_MODULE_VERSION "\n"; - MODULE_DESCRIPTION("QLogic FastLinQ 4xxxx Ethernet Driver"); MODULE_LICENSE("GPL"); -MODULE_VERSION(DRV_MODULE_VERSION);
static uint debug; module_param(debug, uint, 0); @@@ -254,7 -258,7 +254,7 @@@ int __init qede_init(void { int ret;
- pr_info("qede_init: %s\n", version); + pr_info("qede init: QLogic FastLinQ 4xxxx Ethernet Driver qede\n");
qede_forced_speed_maps_init();
@@@ -640,7 -644,7 +640,7 @@@ static const struct net_device_ops qede .ndo_set_mac_address = qede_set_mac_addr, .ndo_validate_addr = eth_validate_addr, .ndo_change_mtu = qede_change_mtu, - .ndo_do_ioctl = qede_ioctl, + .ndo_eth_ioctl = qede_ioctl, .ndo_tx_timeout = qede_tx_timeout, #ifdef CONFIG_QED_SRIOV .ndo_set_vf_mac = qede_set_vf_mac, @@@ -1153,6 -1157,10 +1153,6 @@@ static int __qede_probe(struct pci_dev /* Start the Slowpath-process */ memset(&sp_params, 0, sizeof(sp_params)); sp_params.int_mode = QED_INT_MODE_MSIX; - sp_params.drv_major = QEDE_MAJOR_VERSION; - sp_params.drv_minor = QEDE_MINOR_VERSION; - sp_params.drv_rev = QEDE_REVISION_VERSION; - sp_params.drv_eng = QEDE_ENGINEERING_VERSION; strlcpy(sp_params.name, "qede LAN", QED_DRV_VER_STR_SIZE); rc = qed_ops->common->slowpath_start(cdev, &sp_params); if (rc) { @@@ -1866,6 -1874,7 +1866,7 @@@ static void qede_sync_free_irqs(struct }
edev->int_info.used_cnt = 0; + edev->int_info.msix_cnt = 0; }
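The msix_cnt reset added above pairs with a removal further down (at the err4 label): the old cleanup did memset(&edev->int_info.msix_cnt, 0, sizeof(struct qed_int_info)), i.e. it started at one member but sized the whole structure, writing past the member. The anti-pattern in isolation, for illustration:

struct qed_int_info info;

/* BUG: begins at &info.msix_cnt yet spans sizeof(struct qed_int_info),
 * so it clobbers whatever follows msix_cnt in memory.
 */
memset(&info.msix_cnt, 0, sizeof(struct qed_int_info));

/* Correct alternatives: clear the member, or the whole struct. */
info.msix_cnt = 0;
memset(&info, 0, sizeof(info));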
static int qede_req_msix_irqs(struct qede_dev *edev) @@@ -1898,12 -1907,6 +1899,12 @@@ &edev->fp_array[i]); if (rc) { DP_ERR(edev, "Request fp %d irq failed\n", i); +#ifdef CONFIG_RFS_ACCEL + if (edev->ndev->rx_cpu_rmap) + free_irq_cpu_rmap(edev->ndev->rx_cpu_rmap); + + edev->ndev->rx_cpu_rmap = NULL; +#endif qede_sync_free_irqs(edev); return rc; } @@@ -2296,15 -2299,6 +2297,15 @@@ static void qede_unload(struct qede_de
rc = qede_stop_queues(edev); if (rc) { +#ifdef CONFIG_RFS_ACCEL + if (edev->dev_info.common.b_arfs_capable) { + qede_poll_for_freeing_arfs_filters(edev); + if (edev->ndev->rx_cpu_rmap) + free_irq_cpu_rmap(edev->ndev->rx_cpu_rmap); + + edev->ndev->rx_cpu_rmap = NULL; + } +#endif qede_sync_free_irqs(edev); goto out; } @@@ -2434,7 -2428,6 +2435,6 @@@ static int qede_load(struct qede_dev *e goto out; err4: qede_sync_free_irqs(edev); - memset(&edev->int_info.msix_cnt, 0, sizeof(struct qed_int_info)); err3: qede_napi_disable_remove(edev); err2: @@@ -2635,10 -2628,8 +2635,10 @@@ static void qede_generic_hw_err_handler "Generic sleepable HW error handling started - err_flags 0x%lx\n", edev->err_flags);
- if (edev->devlink) + if (edev->devlink) { + DP_NOTICE(edev, "Reporting fatal error to devlink\n"); edev->ops->common->report_fatal_error(edev->devlink, edev->last_err_type); + }
clear_bit(QEDE_ERR_IS_HANDLED, &edev->err_flags);
@@@ -2660,8 -2651,6 +2660,8 @@@ static void qede_set_hw_err_flags(struc case QED_HW_ERR_FW_ASSERT: set_bit(QEDE_ERR_ATTN_CLR_EN, &err_flags); set_bit(QEDE_ERR_GET_DBG_INFO, &err_flags); + /* make this error as recoverable and start recovery*/ + set_bit(QEDE_ERR_IS_RECOVERABLE, &err_flags); break;
default: diff --combined drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index 7b3fcf558603,fa90bcdf4e45..ed0cd3920171 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@@ -2500,7 -2500,6 +2500,7 @@@ static int stmmac_tx_clean(struct stmma } else { priv->dev->stats.tx_packets++; priv->xstats.tx_pkt_n++; + priv->xstats.txq_stats[queue].tx_pkt_n++; } if (skb) stmmac_get_tx_hwtstamp(priv, p, skb); @@@ -4915,6 -4914,10 +4915,10 @@@ read_again
prefetch(np);
+ /* Ensure a valid XSK buffer before proceed */ + if (!buf->xdp) + break; + if (priv->extend_desc) stmmac_rx_extended_status(priv, &priv->dev->stats, &priv->xstats, @@@ -4935,10 -4938,6 +4939,6 @@@ continue; }
- /* Ensure a valid XSK buffer before proceed */ - if (!buf->xdp) - break; - /* XSK pool expects RX frame 1:1 mapped to XSK buffer */ if (likely(status & rx_not_ls)) { xsk_buff_free(buf->xdp); @@@ -5001,9 -5000,6 +5001,9 @@@
stmmac_finalize_xdp_rx(priv, xdp_status);
+ priv->xstats.rx_pkt_n += count; + priv->xstats.rxq_stats[queue].rx_pkt_n += count; + if (xsk_uses_need_wakeup(rx_q->xsk_pool)) { if (failure || stmmac_rx_dirty(priv, queue) > 0) xsk_set_rx_need_wakeup(rx_q->xsk_pool); @@@ -5291,7 -5287,6 +5291,7 @@@ drain_data stmmac_rx_refill(priv, queue);
priv->xstats.rx_pkt_n += count; + priv->xstats.rxq_stats[queue].rx_pkt_n += count;
return count; } @@@ -6456,7 -6451,7 +6456,7 @@@ static const struct net_device_ops stmm .ndo_set_features = stmmac_set_features, .ndo_set_rx_mode = stmmac_set_rx_mode, .ndo_tx_timeout = stmmac_tx_timeout, - .ndo_do_ioctl = stmmac_ioctl, + .ndo_eth_ioctl = stmmac_ioctl, .ndo_setup_tc = stmmac_setup_tc, .ndo_select_queue = stmmac_select_queue, #ifdef CONFIG_NET_POLL_CONTROLLER diff --combined drivers/net/mhi_net.c index 975f7f9bdf4c,e60e38c1f09d..d127eb6e9257 --- a/drivers/net/mhi_net.c +++ b/drivers/net/mhi_net.c @@@ -11,42 -11,28 +11,42 @@@ #include <linux/netdevice.h> #include <linux/skbuff.h> #include <linux/u64_stats_sync.h> -#include <linux/wwan.h> - -#include "mhi.h"
#define MHI_NET_MIN_MTU ETH_MIN_MTU #define MHI_NET_MAX_MTU 0xffff #define MHI_NET_DEFAULT_MTU 0x4000
-/* When set to false, the default netdev (link 0) is not created, and it's up - * to user to create the link (via wwan rtnetlink). - */ -static bool create_default_iface = true; -module_param(create_default_iface, bool, 0); +struct mhi_net_stats { + u64_stats_t rx_packets; + u64_stats_t rx_bytes; + u64_stats_t rx_errors; + u64_stats_t tx_packets; + u64_stats_t tx_bytes; + u64_stats_t tx_errors; + u64_stats_t tx_dropped; + struct u64_stats_sync tx_syncp; + struct u64_stats_sync rx_syncp; +}; + +struct mhi_net_dev { + struct mhi_device *mdev; + struct net_device *ndev; + struct sk_buff *skbagg_head; + struct sk_buff *skbagg_tail; + struct delayed_work rx_refill; + struct mhi_net_stats stats; + u32 rx_queue_sz; + int msg_enable; + unsigned int mru; +};
struct mhi_device_info { const char *netname; - const struct mhi_net_proto *proto; };
static int mhi_ndo_open(struct net_device *ndev) { - struct mhi_net_dev *mhi_netdev = wwan_netdev_drvpriv(ndev); + struct mhi_net_dev *mhi_netdev = netdev_priv(ndev);
/* Feed the rx buffer pool */ schedule_delayed_work(&mhi_netdev->rx_refill, 0); @@@ -61,7 -47,7 +61,7 @@@
static int mhi_ndo_stop(struct net_device *ndev) { - struct mhi_net_dev *mhi_netdev = wwan_netdev_drvpriv(ndev); + struct mhi_net_dev *mhi_netdev = netdev_priv(ndev);
netif_stop_queue(ndev); netif_carrier_off(ndev); @@@ -72,10 -58,17 +72,10 @@@
static netdev_tx_t mhi_ndo_xmit(struct sk_buff *skb, struct net_device *ndev) { - struct mhi_net_dev *mhi_netdev = wwan_netdev_drvpriv(ndev); - const struct mhi_net_proto *proto = mhi_netdev->proto; + struct mhi_net_dev *mhi_netdev = netdev_priv(ndev); struct mhi_device *mdev = mhi_netdev->mdev; int err;
- if (proto && proto->tx_fixup) { - skb = proto->tx_fixup(mhi_netdev, skb); - if (unlikely(!skb)) - goto exit_drop; - } - err = mhi_queue_skb(mdev, DMA_TO_DEVICE, skb, skb->len, MHI_EOT); if (unlikely(err)) { net_err_ratelimited("%s: Failed to queue TX buf (%d)\n", @@@ -100,7 -93,7 +100,7 @@@ exit_drop static void mhi_ndo_get_stats64(struct net_device *ndev, struct rtnl_link_stats64 *stats) { - struct mhi_net_dev *mhi_netdev = wwan_netdev_drvpriv(ndev); + struct mhi_net_dev *mhi_netdev = netdev_priv(ndev); unsigned int start;
do { @@@ -108,6 -101,8 +108,6 @@@ stats->rx_packets = u64_stats_read(&mhi_netdev->stats.rx_packets); stats->rx_bytes = u64_stats_read(&mhi_netdev->stats.rx_bytes); stats->rx_errors = u64_stats_read(&mhi_netdev->stats.rx_errors); - stats->rx_dropped = u64_stats_read(&mhi_netdev->stats.rx_dropped); - stats->rx_length_errors = u64_stats_read(&mhi_netdev->stats.rx_length_errors); } while (u64_stats_fetch_retry_irq(&mhi_netdev->stats.rx_syncp, start));
do { @@@ -170,6 -165,7 +170,6 @@@ static void mhi_net_dl_callback(struct struct mhi_result *mhi_res) { struct mhi_net_dev *mhi_netdev = dev_get_drvdata(&mhi_dev->dev); - const struct mhi_net_proto *proto = mhi_netdev->proto; struct sk_buff *skb = mhi_res->buf_addr; int free_desc_count;
@@@ -209,6 -205,11 +209,6 @@@ mhi_netdev->skbagg_head = NULL; }
- u64_stats_update_begin(&mhi_netdev->stats.rx_syncp); - u64_stats_inc(&mhi_netdev->stats.rx_packets); - u64_stats_add(&mhi_netdev->stats.rx_bytes, skb->len); - u64_stats_update_end(&mhi_netdev->stats.rx_syncp); - switch (skb->data[0] & 0xf0) { case 0x40: skb->protocol = htons(ETH_P_IP); @@@ -221,11 -222,10 +221,11 @@@ break; }
- if (proto && proto->rx) - proto->rx(mhi_netdev, skb); - else - netif_rx(skb); + u64_stats_update_begin(&mhi_netdev->stats.rx_syncp); + u64_stats_inc(&mhi_netdev->stats.rx_packets); + u64_stats_add(&mhi_netdev->stats.rx_bytes, skb->len); + u64_stats_update_end(&mhi_netdev->stats.rx_syncp); + netif_rx(skb); }
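Note the ordering in the rewritten RX completion above: the u64_stats update happens while the driver still owns the skb, and netif_rx() comes last, because the skb must not be touched once handed to the stack. The generic writer/reader pairing, as a sketch detached from this driver:

/* writer (datapath) */
u64_stats_update_begin(&stats->rx_syncp);
u64_stats_inc(&stats->rx_packets);
u64_stats_add(&stats->rx_bytes, skb->len);
u64_stats_update_end(&stats->rx_syncp);
netif_rx(skb);	/* skb ownership passes to the stack here */

/* reader (ndo_get_stats64) retries if it races a writer */
do {
	start = u64_stats_fetch_begin_irq(&stats->rx_syncp);
	packets = u64_stats_read(&stats->rx_packets);
	bytes = u64_stats_read(&stats->rx_bytes);
} while (u64_stats_fetch_retry_irq(&stats->rx_syncp, start));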
/* Refill if RX buffers queue becomes low */ @@@ -248,6 -248,7 +248,6 @@@ static void mhi_net_ul_callback(struct
u64_stats_update_begin(&mhi_netdev->stats.tx_syncp); if (unlikely(mhi_res->transaction_status)) { - /* MHI layer stopping/resetting the UL channel */ if (mhi_res->transaction_status == -ENOTCONN) { u64_stats_update_end(&mhi_netdev->stats.tx_syncp); @@@ -301,47 -302,78 +301,47 @@@ static void mhi_net_rx_refill_work(stru schedule_delayed_work(&mhi_netdev->rx_refill, HZ / 2); }
-static int mhi_net_newlink(void *ctxt, struct net_device *ndev, u32 if_id, - struct netlink_ext_ack *extack) +static int mhi_net_newlink(struct mhi_device *mhi_dev, struct net_device *ndev) { - const struct mhi_device_info *info; - struct mhi_device *mhi_dev = ctxt; struct mhi_net_dev *mhi_netdev; int err;
- info = (struct mhi_device_info *)mhi_dev->id->driver_data; - - /* For now we only support one link (link context 0), driver must be - * reworked to break 1:1 relationship for net MBIM and to forward setup - * call to rmnet(QMAP) otherwise. - */ - if (if_id != 0) - return -EINVAL; - - if (dev_get_drvdata(&mhi_dev->dev)) - return -EBUSY; - - mhi_netdev = wwan_netdev_drvpriv(ndev); + mhi_netdev = netdev_priv(ndev);
dev_set_drvdata(&mhi_dev->dev, mhi_netdev); mhi_netdev->ndev = ndev; mhi_netdev->mdev = mhi_dev; mhi_netdev->skbagg_head = NULL; - mhi_netdev->proto = info->proto; + mhi_netdev->mru = mhi_dev->mhi_cntrl->mru;
INIT_DELAYED_WORK(&mhi_netdev->rx_refill, mhi_net_rx_refill_work); u64_stats_init(&mhi_netdev->stats.rx_syncp); u64_stats_init(&mhi_netdev->stats.tx_syncp);
/* Start MHI channels */ - err = mhi_prepare_for_transfer(mhi_dev, 0); + err = mhi_prepare_for_transfer(mhi_dev); if (err) goto out_err;
/* Number of transfer descriptors determines size of the queue */ mhi_netdev->rx_queue_sz = mhi_get_free_desc_count(mhi_dev, DMA_FROM_DEVICE);
- if (extack) - err = register_netdevice(ndev); - else - err = register_netdev(ndev); + err = register_netdev(ndev); if (err) - goto out_err; - - if (mhi_netdev->proto) { - err = mhi_netdev->proto->init(mhi_netdev); - if (err) - goto out_err_proto; - } + return err;
return 0;
-out_err_proto: - unregister_netdevice(ndev); out_err: free_netdev(ndev); return err; }
-static void mhi_net_dellink(void *ctxt, struct net_device *ndev, - struct list_head *head) +static void mhi_net_dellink(struct mhi_device *mhi_dev, struct net_device *ndev) { - struct mhi_net_dev *mhi_netdev = wwan_netdev_drvpriv(ndev); - struct mhi_device *mhi_dev = ctxt; + struct mhi_net_dev *mhi_netdev = netdev_priv(ndev);
- if (head) - unregister_netdevice_queue(ndev, head); - else - unregister_netdev(ndev); + unregister_netdev(ndev);
mhi_unprepare_from_transfer(mhi_dev);
@@@ -350,34 -382,65 +350,34 @@@ dev_set_drvdata(&mhi_dev->dev, NULL); }
-static const struct wwan_ops mhi_wwan_ops = { - .priv_size = sizeof(struct mhi_net_dev), - .setup = mhi_net_setup, - .newlink = mhi_net_newlink, - .dellink = mhi_net_dellink, -}; - static int mhi_net_probe(struct mhi_device *mhi_dev, const struct mhi_device_id *id) { const struct mhi_device_info *info = (struct mhi_device_info *)id->driver_data; - struct mhi_controller *cntrl = mhi_dev->mhi_cntrl; struct net_device *ndev; int err;
- err = wwan_register_ops(&cntrl->mhi_dev->dev, &mhi_wwan_ops, mhi_dev, - WWAN_NO_DEFAULT_LINK); - if (err) - return err; - - if (!create_default_iface) - return 0; - - /* Create a default interface which is used as either RMNET real-dev, - * MBIM link 0 or ip link 0) - */ ndev = alloc_netdev(sizeof(struct mhi_net_dev), info->netname, NET_NAME_PREDICTABLE, mhi_net_setup); - if (!ndev) { - err = -ENOMEM; - goto err_unregister; - } + if (!ndev) + return -ENOMEM;
SET_NETDEV_DEV(ndev, &mhi_dev->dev);
- err = mhi_net_newlink(mhi_dev, ndev, 0, NULL); - if (err) - goto err_release; + err = mhi_net_newlink(mhi_dev, ndev); + if (err) { + free_netdev(ndev); + return err; + }
return 0; - -err_release: - free_netdev(ndev); -err_unregister: - wwan_unregister_ops(&cntrl->mhi_dev->dev); - - return err; }
static void mhi_net_remove(struct mhi_device *mhi_dev) { struct mhi_net_dev *mhi_netdev = dev_get_drvdata(&mhi_dev->dev); - struct mhi_controller *cntrl = mhi_dev->mhi_cntrl; - - /* WWAN core takes care of removing remaining links */ - wwan_unregister_ops(&cntrl->mhi_dev->dev);
- if (create_default_iface) - mhi_net_dellink(mhi_dev, mhi_netdev->ndev, NULL); + mhi_net_dellink(mhi_dev, mhi_netdev->ndev); }
static const struct mhi_device_info mhi_hwip0 = { @@@ -388,11 -451,18 +388,11 @@@ static const struct mhi_device_info mhi .netname = "mhi_swip%d", };
-static const struct mhi_device_info mhi_hwip0_mbim = { - .netname = "mhi_mbim%d", - .proto = &proto_mbim, -}; - static const struct mhi_device_id mhi_net_id_table[] = { /* Hardware accelerated data PATH (to modem IPA), protocol agnostic */ { .chan = "IP_HW0", .driver_data = (kernel_ulong_t)&mhi_hwip0 }, /* Software data PATH (to modem CPU) */ { .chan = "IP_SW0", .driver_data = (kernel_ulong_t)&mhi_swip0 }, - /* Hardware accelerated data PATH (to modem IPA), MBIM protocol */ - { .chan = "IP_HW0_MBIM", .driver_data = (kernel_ulong_t)&mhi_hwip0_mbim }, {} }; MODULE_DEVICE_TABLE(mhi, mhi_net_id_table); diff --combined drivers/net/usb/asix_devices.c index cb01897c7a5d,dc87e8caf954..30821f6a6d7a --- a/drivers/net/usb/asix_devices.c +++ b/drivers/net/usb/asix_devices.c @@@ -197,7 -197,7 +197,7 @@@ static const struct net_device_ops ax88 .ndo_get_stats64 = dev_get_tstats64, .ndo_set_mac_address = eth_mac_addr, .ndo_validate_addr = eth_validate_addr, - .ndo_do_ioctl = asix_ioctl, + .ndo_eth_ioctl = asix_ioctl, .ndo_set_rx_mode = ax88172_set_multicast, };
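Several drivers in this merge (mvneta, qede, stmmac, asix, pegasus) are converted off .ndo_do_ioctl, which was split into two narrower hooks: .ndo_eth_ioctl for the standard MII/timestamping requests and .ndo_siocdevprivate for the SIOCDEVPRIVATE range. A sketch of the division with a hypothetical foo driver:

static int foo_siocdevprivate(struct net_device *dev, struct ifreq *ifr,
			      void __user *data, int cmd)
{
	/* Only SIOCDEVPRIVATE..SIOCDEVPRIVATE + 15 arrive here */
	if (cmd != SIOCDEVPRIVATE + 1)
		return -EOPNOTSUPP;
	/* ... handle the driver-private command ... */
	return 0;
}

static const struct net_device_ops foo_netdev_ops = {
	.ndo_eth_ioctl		= phy_do_ioctl_running, /* MII via PHY lib */
	.ndo_siocdevprivate	= foo_siocdevprivate,
};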
@@@ -354,24 -354,23 +354,23 @@@ out static int ax88772_hw_reset(struct usbnet *dev, int in_pm) { struct asix_data *data = (struct asix_data *)&dev->data; - int ret, embd_phy; + struct asix_common_private *priv = dev->driver_priv; u16 rx_ctl; + int ret;
ret = asix_write_gpio(dev, AX_GPIO_RSE | AX_GPIO_GPO_2 | AX_GPIO_GPO2EN, 5, in_pm); if (ret < 0) goto out;
- embd_phy = ((dev->mii.phy_id & 0x1f) == 0x10 ? 1 : 0); - - ret = asix_write_cmd(dev, AX_CMD_SW_PHY_SELECT, embd_phy, + ret = asix_write_cmd(dev, AX_CMD_SW_PHY_SELECT, priv->embd_phy, 0, 0, NULL, in_pm); if (ret < 0) { netdev_dbg(dev->net, "Select PHY #1 failed: %d\n", ret); goto out; }
- if (embd_phy) { + if (priv->embd_phy) { ret = asix_sw_reset(dev, AX_SWRESET_IPPD, in_pm); if (ret < 0) goto out; @@@ -449,17 -448,16 +448,16 @@@ out static int ax88772a_hw_reset(struct usbnet *dev, int in_pm) { struct asix_data *data = (struct asix_data *)&dev->data; - int ret, embd_phy; + struct asix_common_private *priv = dev->driver_priv; u16 rx_ctl, phy14h, phy15h, phy16h; u8 chipcode = 0; + int ret;
ret = asix_write_gpio(dev, AX_GPIO_RSE, 5, in_pm); if (ret < 0) goto out;
- embd_phy = ((dev->mii.phy_id & 0x1f) == 0x10 ? 1 : 0); - - ret = asix_write_cmd(dev, AX_CMD_SW_PHY_SELECT, embd_phy | + ret = asix_write_cmd(dev, AX_CMD_SW_PHY_SELECT, priv->embd_phy | AX_PHYSEL_SSEN, 0, 0, NULL, in_pm); if (ret < 0) { netdev_dbg(dev->net, "Select PHY #1 failed: %d\n", ret); @@@ -589,7 -587,7 +587,7 @@@ static const struct net_device_ops ax88 .ndo_get_stats64 = dev_get_tstats64, .ndo_set_mac_address = asix_set_mac_address, .ndo_validate_addr = eth_validate_addr, - .ndo_do_ioctl = phy_do_ioctl_running, + .ndo_eth_ioctl = phy_do_ioctl_running, .ndo_set_rx_mode = asix_set_multicast, };
@@@ -683,12 -681,6 +681,6 @@@ static int ax88772_init_phy(struct usbn struct asix_common_private *priv = dev->driver_priv; int ret;
- ret = asix_read_phy_addr(dev, true); - if (ret < 0) - return ret; - - priv->phy_addr = ret; - snprintf(priv->phy_name, sizeof(priv->phy_name), PHY_ID_FMT, priv->mdio->id, priv->phy_addr);
@@@ -714,7 -706,14 +706,13 @@@ static int ax88772_bind(struct usbnet * u8 buf[ETH_ALEN] = {0}, chipcode = 0; struct asix_common_private *priv; int ret, i; - u32 phyid;
+ priv = devm_kzalloc(&dev->udev->dev, sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + dev->driver_priv = priv; + usbnet_get_endpoints(dev, intf);
/* Maybe the boot loader passed the MAC address via device tree */ @@@ -750,6 -749,13 +748,13 @@@ dev->net->needed_headroom = 4; /* cf asix_tx_fixup() */ dev->net->needed_tailroom = 4; /* cf asix_tx_fixup() */
+ ret = asix_read_phy_addr(dev, true); + if (ret < 0) + return ret; + + priv->phy_addr = ret; + priv->embd_phy = ((priv->phy_addr & 0x1f) == 0x10); + asix_read_cmd(dev, AX_CMD_STATMNGSTS_REG, 0, 0, 1, &chipcode, 0); chipcode &= AX_CHIPCODE_MASK;
@@@ -761,6 -767,10 +766,6 @@@ return ret; }
- /* Read PHYID register *AFTER* the PHY was reset properly */ - phyid = asix_get_phyid(dev); - netdev_dbg(dev->net, "PHYID=0x%08x\n", phyid); - /* Asix framing packs multiple eth frames into a 2K usb bulk transfer */ if (dev->driver_info->flags & FLAG_FRAMING_AX) { /* hard_mtu is still the default - the device does not support @@@ -768,12 -778,6 +773,6 @@@ dev->rx_urb_size = 2048; }
- priv = devm_kzalloc(&dev->udev->dev, sizeof(*priv), GFP_KERNEL); - if (!priv) - return -ENOMEM; - - dev->driver_priv = priv; - priv->presvd_phy_bmcr = 0; priv->presvd_phy_advertise = 0; if (chipcode == AX_AX88772_CHIPCODE) { @@@ -812,6 -816,12 +811,12 @@@ static void ax88772_unbind(struct usbne asix_rx_fixup_common_free(dev->driver_priv); }
+ static void ax88178_unbind(struct usbnet *dev, struct usb_interface *intf) + { + asix_rx_fixup_common_free(dev->driver_priv); + kfree(dev->driver_priv); + } + static const struct ethtool_ops ax88178_ethtool_ops = { .get_drvinfo = asix_get_drvinfo, .get_link = asix_get_link, @@@ -1095,7 -1105,7 +1100,7 @@@ static const struct net_device_ops ax88 .ndo_set_mac_address = asix_set_mac_address, .ndo_validate_addr = eth_validate_addr, .ndo_set_rx_mode = asix_set_multicast, - .ndo_do_ioctl = asix_ioctl, + .ndo_eth_ioctl = asix_ioctl, .ndo_change_mtu = ax88178_change_mtu, };
@@@ -1210,7 -1220,6 +1215,7 @@@ static const struct driver_info ax88772 .unbind = ax88772_unbind, .status = asix_status, .reset = ax88772_reset, + .stop = ax88772_stop, .flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_LINK_INTR | FLAG_MULTI_PACKET, .rx_fixup = asix_rx_fixup_common, @@@ -1221,7 -1230,7 +1226,7 @@@ static const struct driver_info ax88178_info = { .description = "ASIX AX88178 USB 2.0 Ethernet", .bind = ax88178_bind, - .unbind = ax88772_unbind, + .unbind = ax88178_unbind, .status = asix_status, .link_reset = ax88178_link_reset, .reset = ax88178_reset, diff --combined drivers/net/usb/pegasus.c index 36dafcb3d04a,9f9dd0de33cb..6a92a3fef75e --- a/drivers/net/usb/pegasus.c +++ b/drivers/net/usb/pegasus.c @@@ -446,7 -446,7 +446,7 @@@ static int enable_net_traffic(struct ne write_mii_word(pegasus, 0, 0x1b, &auxmode); }
- return 0; + return ret; fail: netif_dbg(pegasus, drv, pegasus->net, "%s failed\n", __func__); return ret; @@@ -835,7 -835,7 +835,7 @@@ static int pegasus_open(struct net_devi if (!pegasus->rx_skb) goto exit;
- res = set_registers(pegasus, EthID, 6, net->dev_addr); + set_registers(pegasus, EthID, 6, net->dev_addr);
usb_fill_bulk_urb(pegasus->rx_urb, pegasus->usb, usb_rcvbulkpipe(pegasus->usb, 1), @@@ -1001,8 -1001,7 +1001,8 @@@ static const struct ethtool_ops ops = .set_link_ksettings = pegasus_set_link_ksettings, };
-static int pegasus_ioctl(struct net_device *net, struct ifreq *rq, int cmd) +static int pegasus_siocdevprivate(struct net_device *net, struct ifreq *rq, + void __user *udata, int cmd) { __u16 *data = (__u16 *) &rq->ifr_ifru; pegasus_t *pegasus = netdev_priv(net); @@@ -1270,7 -1269,7 +1270,7 @@@ static int pegasus_resume(struct usb_in static const struct net_device_ops pegasus_netdev_ops = { .ndo_open = pegasus_open, .ndo_stop = pegasus_close, - .ndo_do_ioctl = pegasus_ioctl, + .ndo_siocdevprivate = pegasus_siocdevprivate, .ndo_start_xmit = pegasus_start_xmit, .ndo_set_rx_mode = pegasus_set_multicast, .ndo_tx_timeout = pegasus_tx_timeout, diff --combined drivers/net/wwan/mhi_wwan_mbim.c index 377529bbf124,000000000000..71bf9b4f769f mode 100644,000000..100644 --- a/drivers/net/wwan/mhi_wwan_mbim.c +++ b/drivers/net/wwan/mhi_wwan_mbim.c @@@ -1,658 -1,0 +1,658 @@@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* MHI MBIM Network driver - Network/MBIM over MHI bus + * + * Copyright (C) 2021 Linaro Ltd loic.poulain@linaro.org + * + * This driver copy some code from cdc_ncm, which is: + * Copyright (C) ST-Ericsson 2010-2012 + * and cdc_mbim, which is: + * Copyright (c) 2012 Smith Micro Software, Inc. + * Copyright (c) 2012 Bjørn Mork bjorn@mork.no + * + */ + +#include <linux/ethtool.h> +#include <linux/if_arp.h> +#include <linux/if_vlan.h> +#include <linux/ip.h> +#include <linux/mhi.h> +#include <linux/mii.h> +#include <linux/mod_devicetable.h> +#include <linux/module.h> +#include <linux/netdevice.h> +#include <linux/skbuff.h> +#include <linux/u64_stats_sync.h> +#include <linux/usb.h> +#include <linux/usb/cdc.h> +#include <linux/usb/usbnet.h> +#include <linux/usb/cdc_ncm.h> +#include <linux/wwan.h> + +/* 3500 allows to optimize skb allocation, the skbs will basically fit in + * one 4K page. Large MBIM packets will simply be split over several MHI + * transfers and chained by the MHI net layer (zerocopy).
+ */ +#define MHI_DEFAULT_MRU 3500 + +#define MHI_MBIM_DEFAULT_MTU 1500 +#define MHI_MAX_BUF_SZ 0xffff + +#define MBIM_NDP16_SIGN_MASK 0x00ffffff + +#define MHI_MBIM_LINK_HASH_SIZE 8 +#define LINK_HASH(session) ((session) % MHI_MBIM_LINK_HASH_SIZE) + +struct mhi_mbim_link { + struct mhi_mbim_context *mbim; + struct net_device *ndev; + unsigned int session; + + /* stats */ + u64_stats_t rx_packets; + u64_stats_t rx_bytes; + u64_stats_t rx_errors; + u64_stats_t tx_packets; + u64_stats_t tx_bytes; + u64_stats_t tx_errors; + u64_stats_t tx_dropped; + struct u64_stats_sync tx_syncp; + struct u64_stats_sync rx_syncp; + + struct hlist_node hlnode; +}; + +struct mhi_mbim_context { + struct mhi_device *mdev; + struct sk_buff *skbagg_head; + struct sk_buff *skbagg_tail; + unsigned int mru; + u32 rx_queue_sz; + u16 rx_seq; + u16 tx_seq; + struct delayed_work rx_refill; + spinlock_t tx_lock; + struct hlist_head link_list[MHI_MBIM_LINK_HASH_SIZE]; +}; + +struct mbim_tx_hdr { + struct usb_cdc_ncm_nth16 nth16; + struct usb_cdc_ncm_ndp16 ndp16; + struct usb_cdc_ncm_dpe16 dpe16[2]; +} __packed; + +static struct mhi_mbim_link *mhi_mbim_get_link_rcu(struct mhi_mbim_context *mbim, + unsigned int session) +{ + struct mhi_mbim_link *link; + + hlist_for_each_entry_rcu(link, &mbim->link_list[LINK_HASH(session)], hlnode) { + if (link->session == session) + return link; + } + + return NULL; +} + +static struct sk_buff *mbim_tx_fixup(struct sk_buff *skb, unsigned int session, + u16 tx_seq) +{ + unsigned int dgram_size = skb->len; + struct usb_cdc_ncm_nth16 *nth16; + struct usb_cdc_ncm_ndp16 *ndp16; + struct mbim_tx_hdr *mbim_hdr; + + /* Only one NDP is sent, containing the IP packet (no aggregation) */ + + /* Ensure we have enough headroom for crafting MBIM header */ + if (skb_cow_head(skb, sizeof(struct mbim_tx_hdr))) { + dev_kfree_skb_any(skb); + return NULL; + } + + mbim_hdr = skb_push(skb, sizeof(struct mbim_tx_hdr)); + + /* Fill NTB header */ + nth16 = &mbim_hdr->nth16; + nth16->dwSignature = cpu_to_le32(USB_CDC_NCM_NTH16_SIGN); + nth16->wHeaderLength = cpu_to_le16(sizeof(struct usb_cdc_ncm_nth16)); + nth16->wSequence = cpu_to_le16(tx_seq); + nth16->wBlockLength = cpu_to_le16(skb->len); + nth16->wNdpIndex = cpu_to_le16(sizeof(struct usb_cdc_ncm_nth16)); + + /* Fill the unique NDP */ + ndp16 = &mbim_hdr->ndp16; + ndp16->dwSignature = cpu_to_le32(USB_CDC_MBIM_NDP16_IPS_SIGN | (session << 24)); + ndp16->wLength = cpu_to_le16(sizeof(struct usb_cdc_ncm_ndp16) + + sizeof(struct usb_cdc_ncm_dpe16) * 2); + ndp16->wNextNdpIndex = 0; + + /* Datagram follows the mbim header */ + ndp16->dpe16[0].wDatagramIndex = cpu_to_le16(sizeof(struct mbim_tx_hdr)); + ndp16->dpe16[0].wDatagramLength = cpu_to_le16(dgram_size); + + /* null termination */ + ndp16->dpe16[1].wDatagramIndex = 0; + ndp16->dpe16[1].wDatagramLength = 0; + + return skb; +} + +static netdev_tx_t mhi_mbim_ndo_xmit(struct sk_buff *skb, struct net_device *ndev) +{ + struct mhi_mbim_link *link = wwan_netdev_drvpriv(ndev); + struct mhi_mbim_context *mbim = link->mbim; + unsigned long flags; + int err = -ENOMEM; + + /* Serialize MHI channel queuing and MBIM seq */ + spin_lock_irqsave(&mbim->tx_lock, flags); + + skb = mbim_tx_fixup(skb, link->session, mbim->tx_seq); + if (unlikely(!skb)) + goto exit_unlock; + + err = mhi_queue_skb(mbim->mdev, DMA_TO_DEVICE, skb, skb->len, MHI_EOT); + + if (mhi_queue_is_full(mbim->mdev, DMA_TO_DEVICE)) + netif_stop_queue(ndev); + + if (!err) + mbim->tx_seq++; + +exit_unlock: + spin_unlock_irqrestore(&mbim->tx_lock, flags); + + if 
(unlikely(err)) { + net_err_ratelimited("%s: Failed to queue TX buf (%d)\n", + ndev->name, err); + dev_kfree_skb_any(skb); + goto exit_drop; + } + + return NETDEV_TX_OK; + +exit_drop: + u64_stats_update_begin(&link->tx_syncp); + u64_stats_inc(&link->tx_dropped); + u64_stats_update_end(&link->tx_syncp); + + return NETDEV_TX_OK; +} + +static int mbim_rx_verify_nth16(struct mhi_mbim_context *mbim, struct sk_buff *skb) +{ + struct usb_cdc_ncm_nth16 *nth16; + int len; + + if (skb->len < sizeof(struct usb_cdc_ncm_nth16) + + sizeof(struct usb_cdc_ncm_ndp16)) { + net_err_ratelimited("frame too short\n"); + return -EINVAL; + } + + nth16 = (struct usb_cdc_ncm_nth16 *)skb->data; + + if (nth16->dwSignature != cpu_to_le32(USB_CDC_NCM_NTH16_SIGN)) { + net_err_ratelimited("invalid NTH16 signature <%#010x>\n", + le32_to_cpu(nth16->dwSignature)); + return -EINVAL; + } + + /* No limit on the block length, except the size of the data pkt */ + len = le16_to_cpu(nth16->wBlockLength); + if (len > skb->len) { + net_err_ratelimited("NTB does not fit into the skb %u/%u\n", + len, skb->len); + return -EINVAL; + } + + if (mbim->rx_seq + 1 != le16_to_cpu(nth16->wSequence) && + (mbim->rx_seq || le16_to_cpu(nth16->wSequence)) && + !(mbim->rx_seq == 0xffff && !le16_to_cpu(nth16->wSequence))) { + net_err_ratelimited("sequence number glitch prev=%d curr=%d\n", + mbim->rx_seq, le16_to_cpu(nth16->wSequence)); + } + mbim->rx_seq = le16_to_cpu(nth16->wSequence); + + return le16_to_cpu(nth16->wNdpIndex); +} + +static int mbim_rx_verify_ndp16(struct sk_buff *skb, struct usb_cdc_ncm_ndp16 *ndp16) +{ + int ret; + + if (le16_to_cpu(ndp16->wLength) < USB_CDC_NCM_NDP16_LENGTH_MIN) { + net_err_ratelimited("invalid DPT16 length <%u>\n", + le16_to_cpu(ndp16->wLength)); + return -EINVAL; + } + + ret = ((le16_to_cpu(ndp16->wLength) - sizeof(struct usb_cdc_ncm_ndp16)) + / sizeof(struct usb_cdc_ncm_dpe16)); + ret--; /* Last entry is always a NULL terminator */ + + if (sizeof(struct usb_cdc_ncm_ndp16) + + ret * sizeof(struct usb_cdc_ncm_dpe16) > skb->len) { + net_err_ratelimited("Invalid nframes = %d\n", ret); + return -EINVAL; + } + + return ret; +} + +static void mhi_mbim_rx(struct mhi_mbim_context *mbim, struct sk_buff *skb) +{ + int ndpoffset; + + /* Check NTB header and retrieve first NDP offset */ + ndpoffset = mbim_rx_verify_nth16(mbim, skb); + if (ndpoffset < 0) { + net_err_ratelimited("mbim: Incorrect NTB header\n"); + goto error; + } + + /* Process each NDP */ + while (1) { + struct usb_cdc_ncm_ndp16 ndp16; + struct usb_cdc_ncm_dpe16 dpe16; + struct mhi_mbim_link *link; + int nframes, n, dpeoffset; + unsigned int session; + + if (skb_copy_bits(skb, ndpoffset, &ndp16, sizeof(ndp16))) { + net_err_ratelimited("mbim: Incorrect NDP offset (%u)\n", + ndpoffset); + goto error; + } + + /* Check NDP header and retrieve number of datagrams */ + nframes = mbim_rx_verify_ndp16(skb, &ndp16); + if (nframes < 0) { + net_err_ratelimited("mbim: Incorrect NDP16\n"); + goto error; + } + + /* Only IP data type supported, no DSS in MHI context */ + if ((ndp16.dwSignature & cpu_to_le32(MBIM_NDP16_SIGN_MASK)) + != cpu_to_le32(USB_CDC_MBIM_NDP16_IPS_SIGN)) { + net_err_ratelimited("mbim: Unsupported NDP type\n"); + goto next_ndp; + } + + session = (le32_to_cpu(ndp16.dwSignature) & ~MBIM_NDP16_SIGN_MASK) >> 24; + + rcu_read_lock(); + + link = mhi_mbim_get_link_rcu(mbim, session); + if (!link) { + net_err_ratelimited("mbim: bad packet session (%u)\n", session); + goto unlock; + } + + /* de-aggregate and deliver IP packets */ + dpeoffset = ndpoffset + 
sizeof(struct usb_cdc_ncm_ndp16); + for (n = 0; n < nframes; n++, dpeoffset += sizeof(dpe16)) { + u16 dgram_offset, dgram_len; + struct sk_buff *skbn; + + if (skb_copy_bits(skb, dpeoffset, &dpe16, sizeof(dpe16))) + break; + + dgram_offset = le16_to_cpu(dpe16.wDatagramIndex); + dgram_len = le16_to_cpu(dpe16.wDatagramLength); + + if (!dgram_offset || !dgram_len) + break; /* null terminator */ + + skbn = netdev_alloc_skb(link->ndev, dgram_len); + if (!skbn) + continue; + + skb_put(skbn, dgram_len); + skb_copy_bits(skb, dgram_offset, skbn->data, dgram_len); + + switch (skbn->data[0] & 0xf0) { + case 0x40: + skbn->protocol = htons(ETH_P_IP); + break; + case 0x60: + skbn->protocol = htons(ETH_P_IPV6); + break; + default: + net_err_ratelimited("%s: unknown protocol\n", + link->ndev->name); + dev_kfree_skb_any(skbn); + u64_stats_update_begin(&link->rx_syncp); + u64_stats_inc(&link->rx_errors); + u64_stats_update_end(&link->rx_syncp); + continue; + } + + u64_stats_update_begin(&link->rx_syncp); + u64_stats_inc(&link->rx_packets); + u64_stats_add(&link->rx_bytes, skbn->len); + u64_stats_update_end(&link->rx_syncp); + + netif_rx(skbn); + } +unlock: + rcu_read_unlock(); +next_ndp: + /* Other NDP to process? */ + ndpoffset = (int)le16_to_cpu(ndp16.wNextNdpIndex); + if (!ndpoffset) + break; + } + + /* free skb */ + dev_consume_skb_any(skb); + return; +error: + dev_kfree_skb_any(skb); +} + +static struct sk_buff *mhi_net_skb_agg(struct mhi_mbim_context *mbim, + struct sk_buff *skb) +{ + struct sk_buff *head = mbim->skbagg_head; + struct sk_buff *tail = mbim->skbagg_tail; + + /* This is non-paged skb chaining using frag_list */ + if (!head) { + mbim->skbagg_head = skb; + return skb; + } + + if (!skb_shinfo(head)->frag_list) + skb_shinfo(head)->frag_list = skb; + else + tail->next = skb; + + head->len += skb->len; + head->data_len += skb->len; + head->truesize += skb->truesize; + + mbim->skbagg_tail = skb; + + return mbim->skbagg_head; +} + +static void mhi_net_rx_refill_work(struct work_struct *work) +{ + struct mhi_mbim_context *mbim = container_of(work, struct mhi_mbim_context, + rx_refill.work); + struct mhi_device *mdev = mbim->mdev; + int err; + + while (!mhi_queue_is_full(mdev, DMA_FROM_DEVICE)) { + struct sk_buff *skb = alloc_skb(MHI_DEFAULT_MRU, GFP_KERNEL); + + if (unlikely(!skb)) + break; + + err = mhi_queue_skb(mdev, DMA_FROM_DEVICE, skb, + MHI_DEFAULT_MRU, MHI_EOT); + if (unlikely(err)) { + kfree_skb(skb); + break; + } + + /* Do not hog the CPU if rx buffers are consumed faster than + * queued (unlikely). 
+ */ + cond_resched(); + } + + /* If we're still starved of rx buffers, reschedule later */ + if (mhi_get_free_desc_count(mdev, DMA_FROM_DEVICE) == mbim->rx_queue_sz) + schedule_delayed_work(&mbim->rx_refill, HZ / 2); +} + +static void mhi_mbim_dl_callback(struct mhi_device *mhi_dev, + struct mhi_result *mhi_res) +{ + struct mhi_mbim_context *mbim = dev_get_drvdata(&mhi_dev->dev); + struct sk_buff *skb = mhi_res->buf_addr; + int free_desc_count; + + free_desc_count = mhi_get_free_desc_count(mhi_dev, DMA_FROM_DEVICE); + + if (unlikely(mhi_res->transaction_status)) { + switch (mhi_res->transaction_status) { + case -EOVERFLOW: + /* Packet has been split over multiple transfers */ + skb_put(skb, mhi_res->bytes_xferd); + mhi_net_skb_agg(mbim, skb); + break; + case -ENOTCONN: + /* MHI layer stopping/resetting the DL channel */ + dev_kfree_skb_any(skb); + return; + default: + /* Unknown error, simply drop */ + dev_kfree_skb_any(skb); + } + } else { + skb_put(skb, mhi_res->bytes_xferd); + + if (mbim->skbagg_head) { + /* Aggregate the final fragment */ + skb = mhi_net_skb_agg(mbim, skb); + mbim->skbagg_head = NULL; + } + + mhi_mbim_rx(mbim, skb); + } + + /* Refill if RX buffers queue becomes low */ + if (free_desc_count >= mbim->rx_queue_sz / 2) + schedule_delayed_work(&mbim->rx_refill, 0); +} + +static void mhi_mbim_ndo_get_stats64(struct net_device *ndev, + struct rtnl_link_stats64 *stats) +{ + struct mhi_mbim_link *link = wwan_netdev_drvpriv(ndev); + unsigned int start; + + do { + start = u64_stats_fetch_begin_irq(&link->rx_syncp); + stats->rx_packets = u64_stats_read(&link->rx_packets); + stats->rx_bytes = u64_stats_read(&link->rx_bytes); + stats->rx_errors = u64_stats_read(&link->rx_errors); + } while (u64_stats_fetch_retry_irq(&link->rx_syncp, start)); + + do { + start = u64_stats_fetch_begin_irq(&link->tx_syncp); + stats->tx_packets = u64_stats_read(&link->tx_packets); + stats->tx_bytes = u64_stats_read(&link->tx_bytes); + stats->tx_errors = u64_stats_read(&link->tx_errors); + stats->tx_dropped = u64_stats_read(&link->tx_dropped); + } while (u64_stats_fetch_retry_irq(&link->tx_syncp, start)); +} + +static void mhi_mbim_ul_callback(struct mhi_device *mhi_dev, + struct mhi_result *mhi_res) +{ + struct mhi_mbim_context *mbim = dev_get_drvdata(&mhi_dev->dev); + struct sk_buff *skb = mhi_res->buf_addr; + struct net_device *ndev = skb->dev; + struct mhi_mbim_link *link = wwan_netdev_drvpriv(ndev); + + /* Hardware has consumed the buffer, so free the skb (which is not + * freed by the MHI stack) and perform accounting. + */ + dev_consume_skb_any(skb); + + u64_stats_update_begin(&link->tx_syncp); + if (unlikely(mhi_res->transaction_status)) { + /* MHI layer stopping/resetting the UL channel */ + if (mhi_res->transaction_status == -ENOTCONN) { + u64_stats_update_end(&link->tx_syncp); + return; + } + + u64_stats_inc(&link->tx_errors); + } else { + u64_stats_inc(&link->tx_packets); + u64_stats_add(&link->tx_bytes, mhi_res->bytes_xferd); + } + u64_stats_update_end(&link->tx_syncp); + + if (netif_queue_stopped(ndev) && !mhi_queue_is_full(mbim->mdev, DMA_TO_DEVICE)) + netif_wake_queue(ndev); +} + +static int mhi_mbim_ndo_open(struct net_device *ndev) +{ + struct mhi_mbim_link *link = wwan_netdev_drvpriv(ndev); + + /* Feed the MHI rx buffer pool */ + schedule_delayed_work(&link->mbim->rx_refill, 0); + + /* Carrier is established via out-of-band channel (e.g. 
qmi) */ + netif_carrier_on(ndev); + + netif_start_queue(ndev); + + return 0; +} + +static int mhi_mbim_ndo_stop(struct net_device *ndev) +{ + netif_stop_queue(ndev); + netif_carrier_off(ndev); + + return 0; +} + +static const struct net_device_ops mhi_mbim_ndo = { + .ndo_open = mhi_mbim_ndo_open, + .ndo_stop = mhi_mbim_ndo_stop, + .ndo_start_xmit = mhi_mbim_ndo_xmit, + .ndo_get_stats64 = mhi_mbim_ndo_get_stats64, +}; + +static int mhi_mbim_newlink(void *ctxt, struct net_device *ndev, u32 if_id, + struct netlink_ext_ack *extack) +{ + struct mhi_mbim_link *link = wwan_netdev_drvpriv(ndev); + struct mhi_mbim_context *mbim = ctxt; + + link->session = if_id; + link->mbim = mbim; + link->ndev = ndev; + u64_stats_init(&link->rx_syncp); + u64_stats_init(&link->tx_syncp); + + rcu_read_lock(); + if (mhi_mbim_get_link_rcu(mbim, if_id)) { + rcu_read_unlock(); + return -EEXIST; + } + rcu_read_unlock(); + + /* Already protected by RTNL lock */ + hlist_add_head_rcu(&link->hlnode, &mbim->link_list[LINK_HASH(if_id)]); + + return register_netdevice(ndev); +} + +static void mhi_mbim_dellink(void *ctxt, struct net_device *ndev, + struct list_head *head) +{ + struct mhi_mbim_link *link = wwan_netdev_drvpriv(ndev); + + hlist_del_init_rcu(&link->hlnode); + synchronize_rcu(); + + unregister_netdevice_queue(ndev, head); +} + +static void mhi_mbim_setup(struct net_device *ndev) +{ + ndev->header_ops = NULL; /* No header */ + ndev->type = ARPHRD_RAWIP; + ndev->needed_headroom = sizeof(struct mbim_tx_hdr); + ndev->hard_header_len = 0; + ndev->addr_len = 0; + ndev->flags = IFF_POINTOPOINT | IFF_NOARP; + ndev->netdev_ops = &mhi_mbim_ndo; + ndev->mtu = MHI_MBIM_DEFAULT_MTU; + ndev->min_mtu = ETH_MIN_MTU; + ndev->max_mtu = MHI_MAX_BUF_SZ - ndev->needed_headroom; + ndev->tx_queue_len = 1000; +} + +static const struct wwan_ops mhi_mbim_wwan_ops = { + .priv_size = sizeof(struct mhi_mbim_link), + .setup = mhi_mbim_setup, + .newlink = mhi_mbim_newlink, + .dellink = mhi_mbim_dellink, +}; + +static int mhi_mbim_probe(struct mhi_device *mhi_dev, const struct mhi_device_id *id) +{ + struct mhi_controller *cntrl = mhi_dev->mhi_cntrl; + struct mhi_mbim_context *mbim; + int err; + + mbim = devm_kzalloc(&mhi_dev->dev, sizeof(*mbim), GFP_KERNEL); + if (!mbim) + return -ENOMEM; + + spin_lock_init(&mbim->tx_lock); + dev_set_drvdata(&mhi_dev->dev, mbim); + mbim->mdev = mhi_dev; + mbim->mru = mhi_dev->mhi_cntrl->mru ? 
mhi_dev->mhi_cntrl->mru : MHI_DEFAULT_MRU; + + INIT_DELAYED_WORK(&mbim->rx_refill, mhi_net_rx_refill_work); + + /* Start MHI channels */ - err = mhi_prepare_for_transfer(mhi_dev, 0); ++ err = mhi_prepare_for_transfer(mhi_dev); + if (err) + return err; + + /* Number of transfer descriptors determines size of the queue */ + mbim->rx_queue_sz = mhi_get_free_desc_count(mhi_dev, DMA_FROM_DEVICE); + + /* Register wwan link ops with MHI controller representing WWAN instance */ + return wwan_register_ops(&cntrl->mhi_dev->dev, &mhi_mbim_wwan_ops, mbim, 0); +} + +static void mhi_mbim_remove(struct mhi_device *mhi_dev) +{ + struct mhi_mbim_context *mbim = dev_get_drvdata(&mhi_dev->dev); + struct mhi_controller *cntrl = mhi_dev->mhi_cntrl; + + mhi_unprepare_from_transfer(mhi_dev); + cancel_delayed_work_sync(&mbim->rx_refill); + wwan_unregister_ops(&cntrl->mhi_dev->dev); + kfree_skb(mbim->skbagg_head); + dev_set_drvdata(&mhi_dev->dev, NULL); +} + +static const struct mhi_device_id mhi_mbim_id_table[] = { + /* Hardware accelerated data PATH (to modem IPA), MBIM protocol */ + { .chan = "IP_HW0_MBIM", .driver_data = 0 }, + {} +}; +MODULE_DEVICE_TABLE(mhi, mhi_mbim_id_table); + +static struct mhi_driver mhi_mbim_driver = { + .probe = mhi_mbim_probe, + .remove = mhi_mbim_remove, + .dl_xfer_cb = mhi_mbim_dl_callback, + .ul_xfer_cb = mhi_mbim_ul_callback, + .id_table = mhi_mbim_id_table, + .driver = { + .name = "mhi_wwan_mbim", + .owner = THIS_MODULE, + }, +}; + +module_mhi_driver(mhi_mbim_driver); + +MODULE_AUTHOR("Loic Poulain loic.poulain@linaro.org"); +MODULE_DESCRIPTION("Network/MBIM over MHI"); +MODULE_LICENSE("GPL v2"); diff --combined include/linux/memcontrol.h index f0ee30881ca9,24797929d8a1..20151c4f1e0e --- a/include/linux/memcontrol.h +++ b/include/linux/memcontrol.h @@@ -612,12 -612,15 +612,15 @@@ static inline bool mem_cgroup_disabled( return !cgroup_subsys_enabled(memory_cgrp_subsys); }
- static inline unsigned long mem_cgroup_protection(struct mem_cgroup *root, - struct mem_cgroup *memcg, - bool in_low_reclaim) + static inline void mem_cgroup_protection(struct mem_cgroup *root, + struct mem_cgroup *memcg, + unsigned long *min, + unsigned long *low) { + *min = *low = 0; + if (mem_cgroup_disabled()) - return 0; + return;
/* * There is no reclaim protection applied to a targeted reclaim. @@@ -653,13 -656,10 +656,10 @@@ * */ if (root == memcg) - return 0; - - if (in_low_reclaim) - return READ_ONCE(memcg->memory.emin); + return;
- return max(READ_ONCE(memcg->memory.emin), - READ_ONCE(memcg->memory.elow)); + *min = READ_ONCE(memcg->memory.emin); + *low = READ_ONCE(memcg->memory.elow); }
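The mem_cgroup_protection() change above swaps a mode-dependent return value for two explicit outputs, so the reclaim path reads both thresholds once and chooses locally. Sketch of the consuming side (variable names assumed, not taken from this patch):

unsigned long min, low, protection;

mem_cgroup_protection(target_memcg, memcg, &min, &low);

/* Equivalent of the old helper's in_low_reclaim switch */
if (in_low_reclaim)
	protection = min;
else
	protection = max(min, low);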
void mem_cgroup_calculate_protection(struct mem_cgroup *root, @@@ -1147,11 -1147,12 +1147,12 @@@ static inline void memcg_memory_event_m { }
- static inline unsigned long mem_cgroup_protection(struct mem_cgroup *root, - struct mem_cgroup *memcg, - bool in_low_reclaim) + static inline void mem_cgroup_protection(struct mem_cgroup *root, + struct mem_cgroup *memcg, + unsigned long *min, + unsigned long *low) { - return 0; + *min = *low = 0; }
static inline void mem_cgroup_calculate_protection(struct mem_cgroup *root, @@@ -1581,8 -1582,7 +1582,8 @@@ static inline void mem_cgroup_flush_for #endif /* CONFIG_CGROUP_WRITEBACK */
struct sock; -bool mem_cgroup_charge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages); +bool mem_cgroup_charge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages, + gfp_t gfp_mask); void mem_cgroup_uncharge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages); #ifdef CONFIG_MEMCG extern struct static_key_false memcg_sockets_enabled_key; diff --combined include/linux/mhi.h index c493a80cb453,944aa3aa3035..beb918328eef --- a/include/linux/mhi.h +++ b/include/linux/mhi.h @@@ -356,7 -356,6 +356,7 @@@ struct mhi_controller_config * @fbc_download: MHI host needs to do complete image transfer (optional) * @wake_set: Device wakeup set flag * @irq_flags: irq flags passed to request_irq (optional) + * @mru: the default MRU for the MHI device * * Fields marked as (required) need to be populated by the controller driver * before calling mhi_register_controller(). For the fields marked as (optional) @@@ -449,7 -448,6 +449,7 @@@ struct mhi_controller bool fbc_download; bool wake_set; unsigned long irq_flags; + u32 mru; };
/** @@@ -721,13 -719,8 +721,8 @@@ void mhi_device_put(struct mhi_device * * host and device execution environments match and * channels are in a DISABLED state. * @mhi_dev: Device associated with the channels - * @flags: MHI channel flags */ - int mhi_prepare_for_transfer(struct mhi_device *mhi_dev, - unsigned int flags); - - /* Automatically allocate and queue inbound buffers */ - #define MHI_CH_INBOUND_ALLOC_BUFS BIT(0) + int mhi_prepare_for_transfer(struct mhi_device *mhi_dev);
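With the flags argument and MHI_CH_INBOUND_ALLOC_BUFS gone, a client passes only the device, and feeding inbound buffers remains the client's job, as mhi_net does above. Minimal sketch:

err = mhi_prepare_for_transfer(mhi_dev);	/* was (mhi_dev, 0) */
if (err)
	return err;

/* Client still queues its own DMA_FROM_DEVICE buffers */
skb = alloc_skb(mru, GFP_KERNEL);
if (skb && mhi_queue_skb(mhi_dev, DMA_FROM_DEVICE, skb, mru, MHI_EOT))
	kfree_skb(skb);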
/** * mhi_unprepare_from_transfer - Reset UL and DL channels for data transfer. diff --combined kernel/bpf/verifier.c index e5f2b23bb7c9,49f07e2bf23b..9134aedfdb7d --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@@ -255,7 -255,6 +255,7 @@@ struct bpf_call_arg_meta int mem_size; u64 msize_max_value; int ref_obj_id; + int map_uid; int func_id; struct btf *btf; u32 btf_id; @@@ -735,10 -734,6 +735,10 @@@ static void print_verifier_state(struc if (state->refs[i].id) verbose(env, ",%d", state->refs[i].id); } + if (state->in_callback_fn) + verbose(env, " cb"); + if (state->in_async_callback_fn) + verbose(env, " async_cb"); verbose(env, "\n"); }
@@@ -1140,10 -1135,6 +1140,10 @@@ static void mark_ptr_not_null_reg(struc if (map->inner_map_meta) { reg->type = CONST_PTR_TO_MAP; reg->map_ptr = map->inner_map_meta; + /* transfer reg's id which is unique for every map_lookup_elem + * as UID of the inner map. + */ + reg->map_uid = reg->id; } else if (map->map_type == BPF_MAP_TYPE_XSKMAP) { reg->type = PTR_TO_XDP_SOCK; } else if (map->map_type == BPF_MAP_TYPE_SOCKMAP || @@@ -1531,54 -1522,6 +1531,54 @@@ static void init_func_state(struct bpf_ init_reg_state(env, state); }
+/* Similar to push_stack(), but for async callbacks */ +static struct bpf_verifier_state *push_async_cb(struct bpf_verifier_env *env, + int insn_idx, int prev_insn_idx, + int subprog) +{ + struct bpf_verifier_stack_elem *elem; + struct bpf_func_state *frame; + + elem = kzalloc(sizeof(struct bpf_verifier_stack_elem), GFP_KERNEL); + if (!elem) + goto err; + + elem->insn_idx = insn_idx; + elem->prev_insn_idx = prev_insn_idx; + elem->next = env->head; + elem->log_pos = env->log.len_used; + env->head = elem; + env->stack_size++; + if (env->stack_size > BPF_COMPLEXITY_LIMIT_JMP_SEQ) { + verbose(env, + "The sequence of %d jumps is too complex for async cb.\n", + env->stack_size); + goto err; + } + /* Unlike push_stack() do not copy_verifier_state(). + * The caller state doesn't matter. + * This is async callback. It starts in a fresh stack. + * Initialize it similar to do_check_common(). + */ + elem->st.branches = 1; + frame = kzalloc(sizeof(*frame), GFP_KERNEL); + if (!frame) + goto err; + init_func_state(env, frame, + BPF_MAIN_FUNC /* callsite */, + 0 /* frameno within this callchain */, + subprog /* subprog number within this prog */); + elem->st.frame[0] = frame; + return &elem->st; +err: + free_verifier_state(env->cur_state, true); + env->cur_state = NULL; + /* pop all elements and return */ + while (!pop_stack(env, NULL, NULL, false)); + return NULL; +} + + enum reg_arg_type { SRC_OP, /* register is used as source operand */ DST_OP, /* register is used as destination operand */ @@@ -3274,15 -3217,6 +3274,15 @@@ static int check_map_access(struct bpf_ return -EACCES; } } + if (map_value_has_timer(map)) { + u32 t = map->timer_off; + + if (reg->smin_value + off < t + sizeof(struct bpf_timer) && + t < reg->umax_value + off + size) { + verbose(env, "bpf_timer cannot be accessed directly by load/store\n"); + return -EACCES; + } + } return err; }
@@@ -3685,8 -3619,6 +3685,8 @@@ process_func continue_func: subprog_end = subprog[idx + 1].start; for (; i < subprog_end; i++) { + int next_insn; + if (!bpf_pseudo_call(insn + i) && !bpf_pseudo_func(insn + i)) continue; /* remember insn and function to return to */ @@@ -3694,22 -3626,13 +3694,22 @@@ ret_prog[frame] = idx;
/* find the callee */ - i = i + insn[i].imm + 1; - idx = find_subprog(env, i); + next_insn = i + insn[i].imm + 1; + idx = find_subprog(env, next_insn); if (idx < 0) { WARN_ONCE(1, "verifier bug. No program starts at insn %d\n", - i); + next_insn); return -EFAULT; } + if (subprog[idx].is_async_cb) { + if (subprog[idx].has_tail_call) { + verbose(env, "verifier bug. subprog has tail_call and async cb\n"); + return -EFAULT; + } + /* async callbacks don't increase bpf prog stack size */ + continue; + } + i = next_insn;
if (subprog[idx].has_tail_call) tail_call_reachable = true; @@@ -4711,54 -4634,6 +4711,54 @@@ static int process_spin_lock(struct bpf return 0; }
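process_timer_func(), added in the next hunk, insists that a struct bpf_timer sit at one constant, BTF-described offset inside the map value. From the program side the expected shape looks roughly like this (sketch using libbpf map definition macros):

struct map_val {
	__u64 payload;
	struct bpf_timer timer;	/* exactly one, at a fixed offset */
};

struct {
	__uint(type, BPF_MAP_TYPE_HASH);
	__uint(max_entries, 64);
	__type(key, __u32);
	__type(value, struct map_val);
} timer_map SEC(".maps");

/* &val->timer is then PTR_TO_MAP_VALUE at a constant offset, which is
 * what the new ARG_PTR_TO_TIMER checks accept.
 */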
+static int process_timer_func(struct bpf_verifier_env *env, int regno, + struct bpf_call_arg_meta *meta) +{ + struct bpf_reg_state *regs = cur_regs(env), *reg = ®s[regno]; + bool is_const = tnum_is_const(reg->var_off); + struct bpf_map *map = reg->map_ptr; + u64 val = reg->var_off.value; + + if (!is_const) { + verbose(env, + "R%d doesn't have constant offset. bpf_timer has to be at the constant offset\n", + regno); + return -EINVAL; + } + if (!map->btf) { + verbose(env, "map '%s' has to have BTF in order to use bpf_timer\n", + map->name); + return -EINVAL; + } + if (!map_value_has_timer(map)) { + if (map->timer_off == -E2BIG) + verbose(env, + "map '%s' has more than one 'struct bpf_timer'\n", + map->name); + else if (map->timer_off == -ENOENT) + verbose(env, + "map '%s' doesn't have 'struct bpf_timer'\n", + map->name); + else + verbose(env, + "map '%s' is not a struct type or bpf_timer is mangled\n", + map->name); + return -EINVAL; + } + if (map->timer_off != val + reg->off) { + verbose(env, "off %lld doesn't point to 'struct bpf_timer' that is at %d\n", + val + reg->off, map->timer_off); + return -EINVAL; + } + if (meta->map_ptr) { + verbose(env, "verifier bug. Two map pointers in a timer helper\n"); + return -EFAULT; + } + meta->map_uid = reg->map_uid; + meta->map_ptr = map; + return 0; +} + static bool arg_type_is_mem_ptr(enum bpf_arg_type type) { return type == ARG_PTR_TO_MEM || @@@ -4891,7 -4766,6 +4891,7 @@@ static const struct bpf_reg_types percp static const struct bpf_reg_types func_ptr_types = { .types = { PTR_TO_FUNC } }; static const struct bpf_reg_types stack_ptr_types = { .types = { PTR_TO_STACK } }; static const struct bpf_reg_types const_str_ptr_types = { .types = { PTR_TO_MAP_VALUE } }; +static const struct bpf_reg_types timer_types = { .types = { PTR_TO_MAP_VALUE } };
static const struct bpf_reg_types *compatible_reg_types[__BPF_ARG_TYPE_MAX] = { [ARG_PTR_TO_MAP_KEY] = &map_key_value_types, @@@ -4923,7 -4797,6 +4923,7 @@@ [ARG_PTR_TO_FUNC] = &func_ptr_types, [ARG_PTR_TO_STACK_OR_NULL] = &stack_ptr_types, [ARG_PTR_TO_CONST_STR] = &const_str_ptr_types, + [ARG_PTR_TO_TIMER] = &timer_types, };
static int check_reg_type(struct bpf_verifier_env *env, u32 regno, @@@ -5053,29 -4926,7 +5053,29 @@@ skip_type_check
if (arg_type == ARG_CONST_MAP_PTR) { /* bpf_map_xxx(map_ptr) call: remember that map_ptr */ + if (meta->map_ptr) { + /* Use map_uid (which is unique id of inner map) to reject: + * inner_map1 = bpf_map_lookup_elem(outer_map, key1) + * inner_map2 = bpf_map_lookup_elem(outer_map, key2) + * if (inner_map1 && inner_map2) { + * timer = bpf_map_lookup_elem(inner_map1); + * if (timer) + * // mismatch would have been allowed + * bpf_timer_init(timer, inner_map2); + * } + * + * Comparing map_ptr is enough to distinguish normal and outer maps. + */ + if (meta->map_ptr != reg->map_ptr || + meta->map_uid != reg->map_uid) { + verbose(env, + "timer pointer in R1 map_uid=%d doesn't match map pointer in R2 map_uid=%d\n", + meta->map_uid, reg->map_uid); + return -EINVAL; + } + } meta->map_ptr = reg->map_ptr; + meta->map_uid = reg->map_uid; } else if (arg_type == ARG_PTR_TO_MAP_KEY) { /* bpf_map_xxx(..., map_ptr, ..., key) call: * check that [key, key + map->key_size) are within @@@ -5127,9 -4978,6 +5127,9 @@@ verbose(env, "verifier internal error\n"); return -EFAULT; } + } else if (arg_type == ARG_PTR_TO_TIMER) { + if (process_timer_func(env, regno, meta)) + return -EACCES; } else if (arg_type == ARG_PTR_TO_FUNC) { meta->subprogno = reg->subprogno; } else if (arg_type_is_mem_ptr(arg_type)) { @@@ -5302,8 -5150,6 +5302,6 @@@ static int check_map_func_compatibility case BPF_MAP_TYPE_RINGBUF: if (func_id != BPF_FUNC_ringbuf_output && func_id != BPF_FUNC_ringbuf_reserve && - func_id != BPF_FUNC_ringbuf_submit && - func_id != BPF_FUNC_ringbuf_discard && func_id != BPF_FUNC_ringbuf_query) goto error; break; @@@ -5412,6 -5258,12 +5410,12 @@@ if (map->map_type != BPF_MAP_TYPE_PERF_EVENT_ARRAY) goto error; break; + case BPF_FUNC_ringbuf_output: + case BPF_FUNC_ringbuf_reserve: + case BPF_FUNC_ringbuf_query: + if (map->map_type != BPF_MAP_TYPE_RINGBUF) + goto error; + break; case BPF_FUNC_get_stackid: if (map->map_type != BPF_MAP_TYPE_STACK_TRACE) goto error; @@@ -5745,31 -5597,6 +5749,31 @@@ static int __check_func_call(struct bpf } }
+ if (insn->code == (BPF_JMP | BPF_CALL) && + insn->imm == BPF_FUNC_timer_set_callback) { + struct bpf_verifier_state *async_cb; + + /* there is no real recursion here. timer callbacks are async */ + env->subprog_info[subprog].is_async_cb = true; + async_cb = push_async_cb(env, env->subprog_info[subprog].start, + *insn_idx, subprog); + if (!async_cb) + return -EFAULT; + callee = async_cb->frame[0]; + callee->async_entry_cnt = caller->async_entry_cnt + 1; + + /* Convert bpf_timer_set_callback() args into timer callback args */ + err = set_callee_state_cb(env, caller, callee, *insn_idx); + if (err) + return err; + + clear_caller_saved_regs(env, caller->regs); + mark_reg_unknown(env, caller->regs, BPF_REG_0); + caller->regs[BPF_REG_0].subreg_def = DEF_NOT_SUBREG; + /* continue with next insn after call */ + return 0; + } + callee = kzalloc(sizeof(*callee), GFP_KERNEL); if (!callee) return -ENOMEM; @@@ -5897,35 -5724,6 +5901,35 @@@ static int set_map_elem_callback_state( return 0; }
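set_timer_callback_state(), added just below, types the callback's first three arguments as the map, key and value of the timer's map. The matching program-side usage, continuing the sketch above (helper signatures per the bpf_timer UAPI):

static int timer_cb(void *map, __u32 *key, struct map_val *val)
{
	/* Async callbacks must return 0; check_return_code() later in
	 * this patch enforces that on R0.
	 */
	return 0;
}

/* inside some program, after looking up val in timer_map: */
bpf_timer_init(&val->timer, &timer_map, CLOCK_MONOTONIC);
bpf_timer_set_callback(&val->timer, timer_cb);
bpf_timer_start(&val->timer, 0 /* nsecs */, 0 /* flags */);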
+static int set_timer_callback_state(struct bpf_verifier_env *env, + struct bpf_func_state *caller, + struct bpf_func_state *callee, + int insn_idx) +{ + struct bpf_map *map_ptr = caller->regs[BPF_REG_1].map_ptr; + + /* bpf_timer_set_callback(struct bpf_timer *timer, void *callback_fn); + * callback_fn(struct bpf_map *map, void *key, void *value); + */ + callee->regs[BPF_REG_1].type = CONST_PTR_TO_MAP; + __mark_reg_known_zero(&callee->regs[BPF_REG_1]); + callee->regs[BPF_REG_1].map_ptr = map_ptr; + + callee->regs[BPF_REG_2].type = PTR_TO_MAP_KEY; + __mark_reg_known_zero(&callee->regs[BPF_REG_2]); + callee->regs[BPF_REG_2].map_ptr = map_ptr; + + callee->regs[BPF_REG_3].type = PTR_TO_MAP_VALUE; + __mark_reg_known_zero(&callee->regs[BPF_REG_3]); + callee->regs[BPF_REG_3].map_ptr = map_ptr; + + /* unused */ + __mark_reg_not_init(env, &callee->regs[BPF_REG_4]); + __mark_reg_not_init(env, &callee->regs[BPF_REG_5]); + callee->in_async_callback_fn = true; + return 0; +} + static int prepare_func_exit(struct bpf_verifier_env *env, int *insn_idx) { struct bpf_verifier_state *state = env->cur_state; @@@ -6139,29 -5937,6 +6143,29 @@@ static int check_bpf_snprintf_call(stru return err; }
+static int check_get_func_ip(struct bpf_verifier_env *env) +{ + enum bpf_attach_type eatype = env->prog->expected_attach_type; + enum bpf_prog_type type = resolve_prog_type(env->prog); + int func_id = BPF_FUNC_get_func_ip; + + if (type == BPF_PROG_TYPE_TRACING) { + if (eatype != BPF_TRACE_FENTRY && eatype != BPF_TRACE_FEXIT && + eatype != BPF_MODIFY_RETURN) { + verbose(env, "func %s#%d supported only for fentry/fexit/fmod_ret programs\n", + func_id_name(func_id), func_id); + return -ENOTSUPP; + } + return 0; + } else if (type == BPF_PROG_TYPE_KPROBE) { + return 0; + } + + verbose(env, "func %s#%d not supported for program type %d\n", + func_id_name(func_id), func_id, type); + return -ENOTSUPP; +} + static int check_helper_call(struct bpf_verifier_env *env, struct bpf_insn *insn, int *insn_idx_p) { @@@ -6276,13 -6051,6 +6280,13 @@@ return -EINVAL; }
+ if (func_id == BPF_FUNC_timer_set_callback) { + err = __check_func_call(env, insn, insn_idx_p, meta.subprogno, + set_timer_callback_state); + if (err < 0) + return -EINVAL; + } + if (func_id == BPF_FUNC_snprintf) { err = check_bpf_snprintf_call(env, regs); if (err < 0) @@@ -6318,7 -6086,6 +6322,7 @@@ return -EINVAL; } regs[BPF_REG_0].map_ptr = meta.map_ptr; + regs[BPF_REG_0].map_uid = meta.map_uid; if (fn->ret_type == RET_PTR_TO_MAP_VALUE) { regs[BPF_REG_0].type = PTR_TO_MAP_VALUE; if (map_value_has_spin_lock(meta.map_ptr)) @@@ -6440,12 -6207,6 +6444,12 @@@ if (func_id == BPF_FUNC_get_stackid || func_id == BPF_FUNC_get_stack) env->prog->call_get_stack = true;
+ if (func_id == BPF_FUNC_get_func_ip) { + if (check_get_func_ip(env)) + return -ENOTSUPP; + env->prog->call_get_func_ip = true; + } + if (changes_data) clear_all_pkt_pointers(env); return 0; @@@ -9326,8 -9087,7 +9330,8 @@@ static int check_return_code(struct bpf struct tnum range = tnum_range(0, 1); enum bpf_prog_type prog_type = resolve_prog_type(env->prog); int err; - const bool is_subprog = env->cur_state->frame[0]->subprogno; + struct bpf_func_state *frame = env->cur_state->frame[0]; + const bool is_subprog = frame->subprogno;
/* LSM and struct_ops func-ptr's return type could be "void" */ if (!is_subprog && @@@ -9352,22 -9112,6 +9356,22 @@@ }
reg = cur_regs(env) + BPF_REG_0; + + if (frame->in_async_callback_fn) { + /* enforce return zero from async callbacks like timer */ + if (reg->type != SCALAR_VALUE) { + verbose(env, "In async callback the register R0 is not a known value (%s)\n", + reg_type_str[reg->type]); + return -EINVAL; + } + + if (!tnum_in(tnum_const(0), reg->var_off)) { + verbose_invalid_scalar(env, reg, &range, "async callback", "R0"); + return -EINVAL; + } + return 0; + } + if (is_subprog) { if (reg->type != SCALAR_VALUE) { verbose(env, "At subprogram exit the register R0 is not a scalar value (%s)\n", @@@ -9582,12 -9326,8 +9586,12 @@@ static int visit_func_call_insn(int t, init_explored_state(env, t + 1); if (visit_callee) { init_explored_state(env, t); - ret = push_insn(t, t + insns[t].imm + 1, BRANCH, - env, false); + ret = push_insn(t, t + insns[t].imm + 1, BRANCH, env, + /* It's ok to allow recursion from CFG point of + * view. __check_func_call() will do the actual + * check. + */ + bpf_pseudo_func(insns + t)); } return ret; } @@@ -9615,13 -9355,6 +9619,13 @@@ static int visit_insn(int t, int insn_c return DONE_EXPLORING;
case BPF_CALL: + if (insns[t].imm == BPF_FUNC_timer_set_callback) + /* Mark this call insn to trigger is_state_visited() check + * before call itself is processed by __check_func_call(). + * Otherwise new async state will be pushed for further + * exploration. + */ + init_explored_state(env, t); return visit_func_call_insn(t, insn_cnt, insns, env, insns[t].src_reg == BPF_PSEUDO_CALL);
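The check_return_code() hunk a few hunks above has a program-visible consequence: R0 of an async callback must be a scalar the verifier can prove equal to 0. A hedged sketch, reusing the hypothetical map element type from the earlier timer example:

/* rejected: R0 is a scalar, but not provably the constant 0 */
static int bad_cb(void *map, int *key, struct elem *val)
{
	return *key;	/* unknown scalar -> verifier refuses the program */
}

/* accepted: work happens via side effects, R0 stays a constant 0 */
static int good_cb(void *map, int *key, struct elem *val)
{
	bpf_timer_start(&val->timer, 1000000000, 0);	/* re-arm: periodic timer */
	return 0;
}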
@@@ -10630,25 -10363,9 +10634,25 @@@ static int is_state_visited(struct bpf_ states_cnt++; if (sl->state.insn_idx != insn_idx) goto next; + if (sl->state.branches) { - if (states_maybe_looping(&sl->state, cur) && - states_equal(env, &sl->state, cur)) { + struct bpf_func_state *frame = sl->state.frame[sl->state.curframe]; + + if (frame->in_async_callback_fn && + frame->async_entry_cnt != cur->frame[cur->curframe]->async_entry_cnt) { + /* Different async_entry_cnt means that the verifier is + * processing another entry into async callback. + * Seeing the same state is not an indication of infinite + * loop or infinite recursion. + * But finding the same state doesn't mean that it's safe + * to stop processing the current state. The previous state + * hasn't yet reached bpf_exit, since state.branches > 0. + * Checking in_async_callback_fn alone is not enough either. + * Since the verifier still needs to catch infinite loops + * inside async callbacks. + */ + } else if (states_maybe_looping(&sl->state, cur) && + states_equal(env, &sl->state, cur)) { verbose_linfo(env, insn_idx, "; "); verbose(env, "infinite loop detected at insn %d\n", insn_idx); return -EINVAL; @@@ -11697,11 -11414,10 +11701,11 @@@ static void convert_pseudo_ld_imm64(str * insni[off, off + cnt). Adjust corresponding insn_aux_data by copying * [0, off) and [off, end) to new locations, so the patched range stays zero */ -static int adjust_insn_aux_data(struct bpf_verifier_env *env, - struct bpf_prog *new_prog, u32 off, u32 cnt) +static void adjust_insn_aux_data(struct bpf_verifier_env *env, + struct bpf_insn_aux_data *new_data, + struct bpf_prog *new_prog, u32 off, u32 cnt) { - struct bpf_insn_aux_data *new_data, *old_data = env->insn_aux_data; + struct bpf_insn_aux_data *old_data = env->insn_aux_data; struct bpf_insn *insn = new_prog->insnsi; u32 old_seen = old_data[off].seen; u32 prog_len; @@@ -11714,9 -11430,12 +11718,9 @@@ old_data[off].zext_dst = insn_has_def32(env, insn + off + cnt - 1);
if (cnt == 1) - return 0; + return; prog_len = new_prog->len; - new_data = vzalloc(array_size(prog_len, - sizeof(struct bpf_insn_aux_data))); - if (!new_data) - return -ENOMEM; + memcpy(new_data, old_data, sizeof(struct bpf_insn_aux_data) * off); memcpy(new_data + off + cnt - 1, old_data + off, sizeof(struct bpf_insn_aux_data) * (prog_len - off - cnt + 1)); @@@ -11727,6 -11446,7 +11731,6 @@@ } env->insn_aux_data = new_data; vfree(old_data); - return 0; }
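adjust_insn_aux_data() becomes infallible here because its only fallible step, the vzalloc(), moves up into bpf_patch_insn_data() (next hunk) ahead of the actual patching. That "reserve first, commit later" shape deserves a generic sketch; the following is illustrative userspace C under my own naming, not kernel code:

#include <stdlib.h>
#include <string.h>

/* Grow *tbl to new_n entries without ever leaving it half-updated:
 * the fallible allocation happens before any state is touched.
 */
static int grow_table(int **tbl, size_t old_n, size_t new_n)
{
	int *fresh = calloc(new_n, sizeof(*fresh));	/* may fail: nothing changed yet */

	if (!fresh)
		return -1;				/* *tbl is still fully valid */
	memcpy(fresh, *tbl, old_n * sizeof(*fresh));	/* commit phase: cannot fail */
	free(*tbl);
	*tbl = fresh;
	return 0;
}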
static void adjust_subprog_starts(struct bpf_verifier_env *env, u32 off, u32 len) @@@ -11761,14 -11481,6 +11765,14 @@@ static struct bpf_prog *bpf_patch_insn_ const struct bpf_insn *patch, u32 len) { struct bpf_prog *new_prog; + struct bpf_insn_aux_data *new_data = NULL; + + if (len > 1) { + new_data = vzalloc(array_size(env->prog->len + len - 1, + sizeof(struct bpf_insn_aux_data))); + if (!new_data) + return NULL; + }
new_prog = bpf_patch_insn_single(env->prog, off, patch, len); if (IS_ERR(new_prog)) { @@@ -11776,10 -11488,10 +11780,10 @@@ verbose(env, "insn %d cannot be patched due to 16-bit range\n", env->insn_aux_data[off].orig_idx); + vfree(new_data); return NULL; } - if (adjust_insn_aux_data(env, new_prog, off, len)) - return NULL; + adjust_insn_aux_data(env, new_data, new_prog, off, len); adjust_subprog_starts(env, off, len); adjust_poke_descs(new_prog, off, len); return new_prog; @@@ -12630,7 -12342,6 +12634,7 @@@ static int do_misc_fixups(struct bpf_ve { struct bpf_prog *prog = env->prog; bool expect_blinding = bpf_jit_blinding_enabled(prog); + enum bpf_prog_type prog_type = resolve_prog_type(prog); struct bpf_insn *insn = prog->insnsi; const struct bpf_func_proto *fn; const int insn_cnt = prog->len; @@@ -12848,39 -12559,6 +12852,39 @@@ continue; }
+ if (insn->imm == BPF_FUNC_timer_set_callback) { + /* The verifier will process callback_fn as many times as necessary + * with different maps and the register states prepared by + * set_timer_callback_state will be accurate. + * + * The following use case is valid: + * map1 is shared by prog1, prog2, prog3. + * prog1 calls bpf_timer_init for some map1 elements + * prog2 calls bpf_timer_set_callback for some map1 elements. + * Those that were not bpf_timer_init-ed will return -EINVAL. + * prog3 calls bpf_timer_start for some map1 elements. + * Those that were not both bpf_timer_init-ed and + * bpf_timer_set_callback-ed will return -EINVAL. + */ + struct bpf_insn ld_addrs[2] = { + BPF_LD_IMM64(BPF_REG_3, (long)prog->aux), + }; + + insn_buf[0] = ld_addrs[0]; + insn_buf[1] = ld_addrs[1]; + insn_buf[2] = *insn; + cnt = 3; + + new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt); + if (!new_prog) + return -ENOMEM; + + delta += cnt - 1; + env->prog = prog = new_prog; + insn = new_prog->insnsi + i + delta; + goto patch_call_imm; + } + /* BPF_EMIT_CALL() assumptions in some of the map_gen_lookup * and other inlining handlers are currently limited to 64 bit * only. @@@ -12997,21 -12675,6 +13001,21 @@@ patch_map_ops_generic continue; }
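What the three-instruction patch amounts to, in sketch form: the two-slot BPF_LD_IMM64 materializes the 64-bit prog->aux pointer into R3 right before the call, so the in-kernel bpf_timer_set_callback() receives a third argument the two-argument BPF-side prototype never exposes. Illustrative annotation, not generated output:

/* Instruction stream before and after the fixup (illustrative):
 *
 *   before:                        after:
 *     r1 = &timer                    r1 = &timer
 *     r2 = callback_fn               r2 = callback_fn
 *     call timer_set_callback        r3 = prog->aux   ; ld_imm64, 2 slots
 *                                    call timer_set_callback
 *
 * The aux pointer lets the helper resolve callback_fn to its owning
 * program and keep that program alive while the timer is armed.
 */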
+ /* Implement bpf_get_func_ip inline. */ + if (prog_type == BPF_PROG_TYPE_TRACING && + insn->imm == BPF_FUNC_get_func_ip) { + /* Load IP address from ctx - 8 */ + insn_buf[0] = BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8); + + new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, 1); + if (!new_prog) + return -ENOMEM; + + env->prog = prog = new_prog; + insn = new_prog->insnsi + i + delta; + continue; + } + patch_call_imm: fn = env->ops->get_func_proto(insn->imm, env->prog); /* all functions that have prototype and verifier allowed diff --combined kernel/fork.c index e8b41e212110,44f4c2d83763..c97e85245dfc --- a/kernel/fork.c +++ b/kernel/fork.c @@@ -828,10 -828,10 +828,10 @@@ void __init fork_init(void for (i = 0; i < MAX_PER_NAMESPACE_UCOUNTS; i++) init_user_ns.ucount_max[i] = max_threads/2;
- set_rlimit_ucount_max(&init_user_ns, UCOUNT_RLIMIT_NPROC, task_rlimit(&init_task, RLIMIT_NPROC)); - set_rlimit_ucount_max(&init_user_ns, UCOUNT_RLIMIT_MSGQUEUE, task_rlimit(&init_task, RLIMIT_MSGQUEUE)); - set_rlimit_ucount_max(&init_user_ns, UCOUNT_RLIMIT_SIGPENDING, task_rlimit(&init_task, RLIMIT_SIGPENDING)); - set_rlimit_ucount_max(&init_user_ns, UCOUNT_RLIMIT_MEMLOCK, task_rlimit(&init_task, RLIMIT_MEMLOCK)); + set_rlimit_ucount_max(&init_user_ns, UCOUNT_RLIMIT_NPROC, RLIM_INFINITY); + set_rlimit_ucount_max(&init_user_ns, UCOUNT_RLIMIT_MSGQUEUE, RLIM_INFINITY); + set_rlimit_ucount_max(&init_user_ns, UCOUNT_RLIMIT_SIGPENDING, RLIM_INFINITY); + set_rlimit_ucount_max(&init_user_ns, UCOUNT_RLIMIT_MEMLOCK, RLIM_INFINITY);
#ifdef CONFIG_VMAP_STACK cpuhp_setup_state(CPUHP_BP_PREPARE_DYN, "fork:vm_stack_cache", @@@ -2083,7 -2083,6 +2083,7 @@@ static __latent_entropy struct task_str #endif #ifdef CONFIG_BPF_SYSCALL RCU_INIT_POINTER(p->bpf_storage, NULL); + p->bpf_ctx = NULL; #endif
/* Perform scheduler related setup. Assign this task to a CPU. */ diff --combined net/core/rtnetlink.c index 2dcf1c084b20,662eb1c37f47..972c8cb303a5 --- a/net/core/rtnetlink.c +++ b/net/core/rtnetlink.c @@@ -710,8 -710,15 +710,8 @@@ out int rtnetlink_send(struct sk_buff *skb, struct net *net, u32 pid, unsigned int group, int echo) { struct sock *rtnl = net->rtnl; - int err = 0; - - NETLINK_CB(skb).dst_group = group; - if (echo) - refcount_inc(&skb->users); - netlink_broadcast(rtnl, skb, pid, group, GFP_KERNEL); - if (echo) - err = netlink_unicast(rtnl, skb, pid, MSG_DONTWAIT); - return err; + + return nlmsg_notify(rtnl, skb, pid, group, echo, GFP_KERNEL); }
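The conversion is behavior-preserving: nlmsg_notify() performs the same broadcast-plus-optional-echo sequence the deleted lines open-coded. A simplified sketch of that sequence, based directly on the removed code (the real helper additionally skips the broadcast when no group is set, excludes the requester from the multicast, and aggregates errors):

/* simplified restatement of the removed logic, illustrative only */
static int notify_sketch(struct sock *rtnl, struct sk_buff *skb,
			 u32 pid, unsigned int group, int echo)
{
	int err = 0;

	if (echo)				/* caller wants the skb echoed back */
		refcount_inc(&skb->users);	/* keep it alive past the broadcast */
	netlink_broadcast(rtnl, skb, pid, group, GFP_KERNEL);
	if (echo)
		err = netlink_unicast(rtnl, skb, pid, MSG_DONTWAIT);
	return err;
}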
int rtnl_unicast(struct sk_buff *skb, struct net *net, u32 pid) @@@ -726,8 -733,12 +726,8 @@@ void rtnl_notify(struct sk_buff *skb, s struct nlmsghdr *nlh, gfp_t flags) { struct sock *rtnl = net->rtnl; - int report = 0; - - if (nlh) - report = nlmsg_report(nlh);
- nlmsg_notify(rtnl, skb, pid, group, report, flags); + nlmsg_notify(rtnl, skb, pid, group, nlmsg_report(nlh), flags); } EXPORT_SYMBOL(rtnl_notify);
@@@ -1959,13 -1970,6 +1959,13 @@@ static bool link_master_filtered(struc return false;
master = netdev_master_upper_dev_get(dev); + + /* 0 is already used to denote IFLA_MASTER wasn't passed, therefore need + * another invalid value for ifindex to denote "no master". + */ + if (master_idx == -1) + return !!master; + if (!master || master->ifindex != master_idx) return true;
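Restating the filter's three cases as one illustrative helper (not kernel code; master_idx is the value carried by the dump request's IFLA_MASTER attribute):

/* true when the link dump should skip this device */
static bool skip_by_master(const struct net_device *master, int master_idx)
{
	if (master_idx == 0)		/* IFLA_MASTER absent: no filtering */
		return false;
	if (master_idx == -1)		/* new: "only devices without a master" */
		return master != NULL;
	/* classic case: keep only devices enslaved to that ifindex */
	return !master || master->ifindex != master_idx;
}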
@@@ -2264,8 -2268,7 +2264,8 @@@ invalid_attr return -EINVAL; }
-static int validate_linkmsg(struct net_device *dev, struct nlattr *tb[]) +static int validate_linkmsg(struct net_device *dev, struct nlattr *tb[], + struct netlink_ext_ack *extack) { if (dev) { if (tb[IFLA_ADDRESS] && @@@ -2292,7 -2295,7 +2292,7 @@@ return -EOPNOTSUPP;
if (af_ops->validate_link_af) { - err = af_ops->validate_link_af(dev, af); + err = af_ops->validate_link_af(dev, af, extack); if (err < 0) return err; } @@@ -2600,11 -2603,12 +2600,12 @@@ static int do_setlink(const struct sk_b const struct net_device_ops *ops = dev->netdev_ops; int err;
- err = validate_linkmsg(dev, tb); + err = validate_linkmsg(dev, tb, extack); if (err < 0) return err;
if (tb[IFLA_NET_NS_PID] || tb[IFLA_NET_NS_FD] || tb[IFLA_TARGET_NETNSID]) { + const char *pat = ifname && ifname[0] ? ifname : NULL; struct net *net; int new_ifindex;
@@@ -2620,7 -2624,7 +2621,7 @@@ else new_ifindex = 0;
- err = __dev_change_net_namespace(dev, net, ifname, new_ifindex); + err = __dev_change_net_namespace(dev, net, pat, new_ifindex); put_net(net); if (err) goto errout; @@@ -3298,7 -3302,7 +3299,7 @@@ replay m_ops = master_dev->rtnl_link_ops; }
- err = validate_linkmsg(dev, tb); + err = validate_linkmsg(dev, tb, extack); if (err < 0) return err;
diff --combined net/ipv4/ip_gre.c index 6ebf05859acb,95419b7adf5c..177d26d8fb9c --- a/net/ipv4/ip_gre.c +++ b/net/ipv4/ip_gre.c @@@ -473,6 -473,8 +473,8 @@@ static void __gre_xmit(struct sk_buff *
static int gre_handle_offloads(struct sk_buff *skb, bool csum) { + if (csum && skb_checksum_start(skb) < skb->data) + return -EINVAL; return iptunnel_handle_offloads(skb, csum ? SKB_GSO_GRE_CSUM : SKB_GSO_GRE); }
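skb_checksum_start() resolves to skb->head + skb->csum_start, the point from which a CHECKSUM_PARTIAL checksum must be computed. If header pulls have advanced skb->data past that point, the later local-checksum-offload pointer arithmetic on this path would yield a bogus, effectively negative length, so such packets are now refused before any GRE work happens. Annotated, as I read the check:

/*
 *   skb->head          csum_start              skb->data
 *      |------------------|----(already pulled)----|---- payload ----|
 *
 * csum_start left of skb->data means the bytes the checksum must
 * start from are no longer part of the packet data; reject with
 * -EINVAL instead of letting the offset math underflow later.
 */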
@@@ -923,7 -925,7 +925,7 @@@ static const struct net_device_ops ipgr .ndo_stop = ipgre_close, #endif .ndo_start_xmit = ipgre_xmit, - .ndo_do_ioctl = ip_tunnel_ioctl, + .ndo_siocdevprivate = ip_tunnel_siocdevprivate, .ndo_change_mtu = ip_tunnel_change_mtu, .ndo_get_stats64 = dev_get_tstats64, .ndo_get_iflink = ip_tunnel_get_iflink, diff --combined net/ipv4/route.c index b181773d7ad3,a6f20ee35335..1e3b18797070 --- a/net/ipv4/route.c +++ b/net/ipv4/route.c @@@ -276,13 -276,12 +276,13 @@@ static int rt_cpu_seq_show(struct seq_f struct rt_cache_stat *st = v;
if (v == SEQ_START_TOKEN) { - seq_printf(seq, "entries in_hit in_slow_tot in_slow_mc in_no_route in_brd in_martian_dst in_martian_src out_hit out_slow_tot out_slow_mc gc_total gc_ignored gc_goal_miss gc_dst_overflow in_hlist_search out_hlist_search\n"); + seq_puts(seq, "entries in_hit in_slow_tot in_slow_mc in_no_route in_brd in_martian_dst in_martian_src out_hit out_slow_tot out_slow_mc gc_total gc_ignored gc_goal_miss gc_dst_overflow in_hlist_search out_hlist_search\n"); return 0; }
- seq_printf(seq,"%08x %08x %08x %08x %08x %08x %08x %08x " - " %08x %08x %08x %08x %08x %08x %08x %08x %08x \n", + seq_printf(seq, "%08x %08x %08x %08x %08x %08x %08x " + "%08x %08x %08x %08x %08x %08x " + "%08x %08x %08x %08x\n", dst_entries_get_slow(&ipv4_dst_ops), 0, /* st->in_hit */ st->in_slow_tot, @@@ -601,14 -600,14 +601,14 @@@ static struct fib_nh_exception *fnhe_ol return oldest; }
- static inline u32 fnhe_hashfun(__be32 daddr) + static u32 fnhe_hashfun(__be32 daddr) { - static u32 fnhe_hashrnd __read_mostly; - u32 hval; + static siphash_key_t fnhe_hash_key __read_mostly; + u64 hval;
- net_get_random_once(&fnhe_hashrnd, sizeof(fnhe_hashrnd)); - hval = jhash_1word((__force u32)daddr, fnhe_hashrnd); - return hash_32(hval, FNHE_HASH_SHIFT); + net_get_random_once(&fnhe_hash_key, sizeof(fnhe_hash_key)); + hval = siphash_1u32((__force u32)daddr, &fnhe_hash_key); + return hash_64(hval, FNHE_HASH_SHIFT); }
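jhash with a once-seeded random value is fast but not cryptographically keyed: an attacker who can feed chosen destinations and observe behavior (route exceptions show up via PMTU handling) can find collisions and flood a single chain. siphash is a keyed PRF, so bucket indices stay unpredictable without the 128-bit key. The conversion pattern in miniature, as a kernel-style sketch with my own names:

#include <linux/hash.h>
#include <linux/once.h>
#include <linux/siphash.h>

/* illustrative: unpredictable bucket index for a 32-bit flow key */
static u32 bucket_for(__be32 daddr, unsigned int shift)
{
	static siphash_key_t key __read_mostly;
	u64 h;

	net_get_random_once(&key, sizeof(key));		/* 128-bit key, seeded lazily */
	h = siphash_1u32((__force u32)daddr, &key);	/* keyed 64-bit PRF output */
	return hash_64(h, shift);			/* fold into 2^shift buckets */
}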
static void fill_route_from_fnhe(struct rtable *rt, struct fib_nh_exception *fnhe) @@@ -1300,7 -1299,26 +1300,7 @@@ static unsigned int ipv4_default_advmss
INDIRECT_CALLABLE_SCOPE unsigned int ipv4_mtu(const struct dst_entry *dst) { - const struct rtable *rt = (const struct rtable *)dst; - unsigned int mtu = rt->rt_pmtu; - - if (!mtu || time_after_eq(jiffies, rt->dst.expires)) - mtu = dst_metric_raw(dst, RTAX_MTU); - - if (mtu) - goto out; - - mtu = READ_ONCE(dst->dev->mtu); - - if (unlikely(ip_mtu_locked(dst))) { - if (rt->rt_uses_gateway && mtu > 576) - mtu = 576; - } - -out: - mtu = min_t(unsigned int, mtu, IP_MAX_MTU); - - return mtu - lwtunnel_headroom(dst->lwtstate, mtu); + return ip_dst_mtu_maybe_forward(dst, false); } EXPORT_INDIRECT_CALLABLE(ipv4_mtu);
@@@ -2813,7 -2831,8 +2813,7 @@@ struct dst_entry *ipv4_blackhole_route( new->output = dst_discard_out;
new->dev = net->loopback_dev; - if (new->dev) - dev_hold(new->dev); + dev_hold(new->dev);
rt->rt_is_input = ort->rt_is_input; rt->rt_iif = ort->rt_iif; diff --combined net/ipv6/ip6_fib.c index a8f118e469b7,ef75c9b05f17..1bec5b22f80d --- a/net/ipv6/ip6_fib.c +++ b/net/ipv6/ip6_fib.c @@@ -1341,7 -1341,7 +1341,7 @@@ static void __fib6_update_sernum_upto_r struct fib6_node *fn = rcu_dereference_protected(rt->fib6_node, lockdep_is_held(&rt->fib6_table->tb6_lock));
- /* paired with smp_rmb() in rt6_get_cookie_safe() */ + /* paired with smp_rmb() in fib6_get_cookie_safe() */ smp_wmb(); while (fn) { fn->fn_sernum = sernum; @@@ -2449,8 -2449,8 +2449,8 @@@ int __init fib6_init(void int ret = -ENOMEM;
fib6_node_kmem = kmem_cache_create("fib6_nodes", - sizeof(struct fib6_node), - 0, SLAB_HWCACHE_ALIGN, + sizeof(struct fib6_node), 0, + SLAB_HWCACHE_ALIGN | SLAB_ACCOUNT, NULL); if (!fib6_node_kmem) goto out; diff --combined net/ipv6/ip6_gre.c index 3ad201d372d8,7a5e90e09363..7baf41d160f5 --- a/net/ipv6/ip6_gre.c +++ b/net/ipv6/ip6_gre.c @@@ -629,6 -629,8 +629,8 @@@ drop
static int gre_handle_offloads(struct sk_buff *skb, bool csum) { + if (csum && skb_checksum_start(skb) < skb->data) + return -EINVAL; return iptunnel_handle_offloads(skb, csum ? SKB_GSO_GRE_CSUM : SKB_GSO_GRE); } @@@ -1244,9 -1246,8 +1246,9 @@@ static void ip6gre_tnl_parm_to_user(str memcpy(u->name, p->name, sizeof(u->name)); }
-static int ip6gre_tunnel_ioctl(struct net_device *dev, - struct ifreq *ifr, int cmd) +static int ip6gre_tunnel_siocdevprivate(struct net_device *dev, + struct ifreq *ifr, void __user *data, + int cmd) { int err = 0; struct ip6_tnl_parm2 p; @@@ -1260,7 -1261,7 +1262,7 @@@ switch (cmd) { case SIOCGETTUNNEL: if (dev == ign->fb_tunnel_dev) { - if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p))) { + if (copy_from_user(&p, data, sizeof(p))) { err = -EFAULT; break; } @@@ -1271,7 -1272,7 +1273,7 @@@ } memset(&p, 0, sizeof(p)); ip6gre_tnl_parm_to_user(&p, &t->parms); - if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof(p))) + if (copy_to_user(data, &p, sizeof(p))) err = -EFAULT; break;
@@@ -1282,7 -1283,7 +1284,7 @@@ goto done;
err = -EFAULT; - if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p))) + if (copy_from_user(&p, data, sizeof(p))) goto done;
err = -EINVAL; @@@ -1319,7 -1320,7 +1321,7 @@@
memset(&p, 0, sizeof(p)); ip6gre_tnl_parm_to_user(&p, &t->parms); - if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof(p))) + if (copy_to_user(data, &p, sizeof(p))) err = -EFAULT; } else err = (cmd == SIOCADDTUNNEL ? -ENOBUFS : -ENOENT); @@@ -1332,7 -1333,7 +1334,7 @@@
if (dev == ign->fb_tunnel_dev) { err = -EFAULT; - if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p))) + if (copy_from_user(&p, data, sizeof(p))) goto done; err = -ENOENT; ip6gre_tnl_parm_from_user(&p1, &p); @@@ -1399,7 -1400,7 +1401,7 @@@ static const struct net_device_ops ip6g .ndo_init = ip6gre_tunnel_init, .ndo_uninit = ip6gre_tunnel_uninit, .ndo_start_xmit = ip6gre_tunnel_xmit, - .ndo_do_ioctl = ip6gre_tunnel_ioctl, + .ndo_siocdevprivate = ip6gre_tunnel_siocdevprivate, .ndo_change_mtu = ip6_tnl_change_mtu, .ndo_get_stats64 = dev_get_tstats64, .ndo_get_iflink = ip6_tnl_get_iflink, diff --combined net/ipv6/route.c index 6cf4bb89ca69,c5e8ecb96426..f34137d5bf85 --- a/net/ipv6/route.c +++ b/net/ipv6/route.c @@@ -41,6 -41,7 +41,7 @@@ #include <linux/nsproxy.h> #include <linux/slab.h> #include <linux/jhash.h> + #include <linux/siphash.h> #include <net/net_namespace.h> #include <net/snmp.h> #include <net/ipv6.h> @@@ -1484,17 -1485,24 +1485,24 @@@ static void rt6_exception_remove_oldest static u32 rt6_exception_hash(const struct in6_addr *dst, const struct in6_addr *src) { - static u32 seed __read_mostly; - u32 val; + static siphash_key_t rt6_exception_key __read_mostly; + struct { + struct in6_addr dst; + struct in6_addr src; + } __aligned(SIPHASH_ALIGNMENT) combined = { + .dst = *dst, + }; + u64 val;
- net_get_random_once(&seed, sizeof(seed)); - val = jhash2((const u32 *)dst, sizeof(*dst)/sizeof(u32), seed); + net_get_random_once(&rt6_exception_key, sizeof(rt6_exception_key));
#ifdef CONFIG_IPV6_SUBTREES if (src) - val = jhash2((const u32 *)src, sizeof(*src)/sizeof(u32), val); + combined.src = *src; #endif - return hash_32(val, FIB6_EXCEPTION_BUCKET_SIZE_SHIFT); + val = siphash(&combined, sizeof(combined), &rt6_exception_key); + + return hash_64(val, FIB6_EXCEPTION_BUCKET_SIZE_SHIFT); }
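A detail worth calling out in the IPv6 variant: both addresses are copied into one stack blob declared __aligned(SIPHASH_ALIGNMENT), which lets siphash() take its word-at-a-time aligned path instead of the slower byte-wise fallback, and hashes dst and src in a single keyed pass. The same pattern for any multi-field key, sketched with hypothetical names:

#include <linux/in6.h>
#include <linux/siphash.h>

/* illustrative: hash a compound key as one aligned buffer */
struct flow_key {
	struct in6_addr dst;
	struct in6_addr src;
} __aligned(SIPHASH_ALIGNMENT);

static u64 flow_hash(const struct flow_key *k, const siphash_key_t *key)
{
	/* one pass over both addresses; unused fields must be zeroed
	 * by the caller so equal flows hash equally
	 */
	return siphash(k, sizeof(*k), key);
}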
/* Helper function to find the cached rt in the hash table @@@ -3201,7 -3209,25 +3209,7 @@@ static unsigned int ip6_default_advmss(
INDIRECT_CALLABLE_SCOPE unsigned int ip6_mtu(const struct dst_entry *dst) { - struct inet6_dev *idev; - unsigned int mtu; - - mtu = dst_metric_raw(dst, RTAX_MTU); - if (mtu) - goto out; - - mtu = IPV6_MIN_MTU; - - rcu_read_lock(); - idev = __in6_dev_get(dst->dev); - if (idev) - mtu = idev->cnf.mtu6; - rcu_read_unlock(); - -out: - mtu = min_t(unsigned int, mtu, IP6_MAX_MTU); - - return mtu - lwtunnel_headroom(dst->lwtstate, mtu); + return ip6_dst_mtu_maybe_forward(dst, false); } EXPORT_INDIRECT_CALLABLE(ip6_mtu);
@@@ -3626,7 -3652,8 +3634,7 @@@ out if (err) { lwtstate_put(fib6_nh->fib_nh_lws); fib6_nh->fib_nh_lws = NULL; - if (dev) - dev_put(dev); + dev_put(dev); }
return err; @@@ -6619,7 -6646,7 +6627,7 @@@ int __init ip6_route_init(void ret = -ENOMEM; ip6_dst_ops_template.kmem_cachep = kmem_cache_create("ip6_dst_cache", sizeof(struct rt6_info), 0, - SLAB_HWCACHE_ALIGN, NULL); + SLAB_HWCACHE_ALIGN | SLAB_ACCOUNT, NULL); if (!ip6_dst_ops_template.kmem_cachep) goto out;
diff --combined net/qrtr/qrtr.c index 6c61b7b1838f,0c30908628ba..b8508e35d20e --- a/net/qrtr/qrtr.c +++ b/net/qrtr/qrtr.c @@@ -493,7 -493,7 +493,7 @@@ int qrtr_endpoint_post(struct qrtr_endp goto err; }
- if (len != ALIGN(size, 4) + hdrlen) + if (!size || len != ALIGN(size, 4) + hdrlen) goto err;
if (cb->dst_port != QRTR_PORT_CTRL && cb->type != QRTR_TYPE_DATA && @@@ -1157,14 -1157,14 +1157,14 @@@ static int qrtr_ioctl(struct socket *so rc = put_user(len, (int __user *)argp); break; case SIOCGIFADDR: - if (copy_from_user(&ifr, argp, sizeof(ifr))) { + if (get_user_ifreq(&ifr, NULL, argp)) { rc = -EFAULT; break; }
sq = (struct sockaddr_qrtr *)&ifr.ifr_addr; *sq = ipc->us; - if (copy_to_user(argp, &ifr, sizeof(ifr))) { + if (put_user_ifreq(&ifr, argp)) { rc = -EFAULT; break; } diff --combined net/sched/sch_ets.c index 925924fab1ab,c76701ac35ab..1f857ffd1ac2 --- a/net/sched/sch_ets.c +++ b/net/sched/sch_ets.c @@@ -390,7 -390,7 +390,7 @@@ static struct ets_class *ets_classify(s *qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS; if (TC_H_MAJ(skb->priority) != sch->handle) { fl = rcu_dereference_bh(q->filter_list); - err = tcf_classify(skb, fl, &res, false); + err = tcf_classify(skb, NULL, fl, &res, false); #ifdef CONFIG_NET_CLS_ACT switch (err) { case TC_ACT_STOLEN: @@@ -660,6 -660,13 +660,13 @@@ static int ets_qdisc_change(struct Qdis sch_tree_lock(sch);
q->nbands = nbands; + for (i = nstrict; i < q->nstrict; i++) { + INIT_LIST_HEAD(&q->classes[i].alist); + if (q->classes[i].qdisc->q.qlen) { + list_add_tail(&q->classes[i].alist, &q->active); + q->classes[i].deficit = quanta[i]; + } + } q->nstrict = nstrict; memcpy(q->prio2band, priomap, sizeof(priomap));
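The loop added above fixes a reconfiguration corner case: shrinking nstrict turns the bands between the new and old values from strict-priority into DRR bands, and DRR dequeue only walks q->active. A demoted band that is already backlogged therefore has to be linked into the active list with a full quantum of deficit, or its queued packets would never be dequeued again. An illustrative annotation of the hunk:

/* ets reconfig, old nstrict = 4 -> new nstrict = 2 (illustrative):
 *
 *   band:      0      1      2      3      4 ...
 *   before:  strict strict strict strict  DRR
 *   after:   strict strict  DRR    DRR    DRR
 *
 * bands 2 and 3 now need alist initialized; if their qdiscs hold
 * packets they join q->active with deficit = quantum so the DRR
 * scan can reach them.
 */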