The following commit has been merged in the master branch:
commit a06ee256e5d6f03fffbd088de9bf84035658cc5a
Merge: bd6207202db8974ca3d3183ca0d5611d45b2973c 846e8dd47c264e0b359afed28ea88e0acdee6818
Author: David S. Miller <davem@davemloft.net>
Date:   Tue Sep 25 10:35:29 2018 -0700

Merge ra.kernel.org:/pub/scm/linux/kernel/git/davem/net
Version bump conflict in batman-adv, take what's in net-next.
iavf conflict, adjustment of netdev_ops in net-next conflicting with poll controller method removal in net.
Signed-off-by: David S. Miller <davem@davemloft.net>
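
For readers tracking the second conflict: the branch coming in from net makes ndo_poll_controller optional and deletes the per-driver hooks (the netpoll core can now schedule each driver's NAPI contexts directly), while net-next touches the same net_device_ops tables. A minimal sketch of the pattern being removed, using hypothetical example_* names rather than any one driver:

  #ifdef CONFIG_NET_POLL_CONTROLLER
  /* The hook every affected driver carried: kick NAPI on each queue so
   * netconsole can make progress with interrupts disabled.  The core
   * now does this generically, so the hook and its #ifdef block go away.
   */
  static void example_netpoll(struct net_device *dev)
  {
  	struct example_priv *priv = netdev_priv(dev);	/* hypothetical priv struct */
  	int i;

  	for (i = 0; i < priv->num_queues; i++)
  		napi_schedule(&priv->q_vectors[i].napi);
  }
  #endif

The resolution below therefore keeps net-next's new ops entries (e.g. bnx2x's .ndo_set_vf_spoofchk) while dropping the CONFIG_NET_POLL_CONTROLLER blocks removed in net.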
diff --combined MAINTAINERS
index 7233a9ed0f5b,02a39617ec82..15565de091af
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@@ -4528,9 -4528,9 +4528,9 @@@ F:	drivers/soc/fsl/dpi
  
  DPAA2 ETHERNET DRIVER
  M:	Ioana Radulescu <ruxandra.radulescu@nxp.com>
 -L:	linux-kernel@vger.kernel.org
 +L:	netdev@vger.kernel.org
  S:	Maintained
 -F:	drivers/staging/fsl-dpaa2/ethernet
 +F:	drivers/net/ethernet/freescale/dpaa2
  
  DPAA2 ETHERNET SWITCH DRIVER
  M:	Ioana Radulescu <ruxandra.radulescu@nxp.com>
@@@ -7348,7 -7348,7 +7348,7 @@@ F:	Documentation/networking/ixgb.tx
  F:	Documentation/networking/ixgbe.txt
  F:	Documentation/networking/ixgbevf.txt
  F:	Documentation/networking/i40e.txt
 -F:	Documentation/networking/i40evf.txt
 +F:	Documentation/networking/iavf.txt
  F:	Documentation/networking/ice.txt
  F:	drivers/net/ethernet/intel/
  F:	drivers/net/ethernet/intel/*/
@@@ -8183,15 -8183,6 +8183,15 @@@ S:	Maintaine
  F:	net/l3mdev
  F:	include/net/l3mdev.h
  
 +LANTIQ / INTEL Ethernet drivers
 +M:	Hauke Mehrtens <hauke@hauke-m.de>
 +L:	netdev@vger.kernel.org
 +S:	Maintained
 +F:	net/dsa/tag_gswip.c
 +F:	drivers/net/ethernet/lantiq_xrx200.c
 +F:	drivers/net/dsa/lantiq_pce.h
 +F:	drivers/net/dsa/intel_gswip.c
 +
  LANTIQ MIPS ARCHITECTURE
  M:	John Crispin <john@phrozen.org>
  L:	linux-mips@linux-mips.org
@@@ -12269,6 -12260,7 +12269,7 @@@ F:	Documentation/networking/rds.tx
  
  RDT - RESOURCE ALLOCATION
  M:	Fenghua Yu <fenghua.yu@intel.com>
+ M:	Reinette Chatre <reinette.chatre@intel.com>
  L:	linux-kernel@vger.kernel.org
  S:	Supported
  F:	arch/x86/kernel/cpu/intel_rdt*
@@@ -13458,9 -13450,8 +13459,8 @@@ F:	drivers/i2c/busses/i2c-synquacer.
  F:	Documentation/devicetree/bindings/i2c/i2c-synquacer.txt
  
  SOCIONEXT UNIPHIER SOUND DRIVER
- M:	Katsuhiro Suzuki <suzuki.katsuhiro@socionext.com>
  L:	alsa-devel@alsa-project.org (moderated for non-subscribers)
- S:	Maintained
+ S:	Orphan
  F:	sound/soc/uniphier/
  
  SOEKRIS NET48XX LED SUPPORT
@@@ -15922,6 -15913,7 +15922,7 @@@ F:	net/x25
  X86 ARCHITECTURE (32-BIT AND 64-BIT)
  M:	Thomas Gleixner <tglx@linutronix.de>
  M:	Ingo Molnar <mingo@redhat.com>
+ M:	Borislav Petkov <bp@alien8.de>
  R:	"H. Peter Anvin" <hpa@zytor.com>
  M:	x86@kernel.org
  L:	linux-kernel@vger.kernel.org
@@@ -15950,6 -15942,15 +15951,15 @@@ M:	Borislav Petkov <bp@alien8.de
  S:	Maintained
  F:	arch/x86/kernel/cpu/microcode/*
  
+ X86 MM
+ M:	Dave Hansen <dave.hansen@linux.intel.com>
+ M:	Andy Lutomirski <luto@kernel.org>
+ M:	Peter Zijlstra <peterz@infradead.org>
+ L:	linux-kernel@vger.kernel.org
+ T:	git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git x86/mm
+ S:	Maintained
+ F:	arch/x86/mm/
+ 
  X86 PLATFORM DRIVERS
  M:	Darren Hart <dvhart@infradead.org>
  M:	Andy Shevchenko <andy@infradead.org>
diff --combined drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index 16f64c68193d,fcc2328bb0d9..40093d88353f
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@@ -3536,16 -3536,6 +3536,16 @@@ static void bnx2x_drv_info_iscsi_stat(s
   */
  static void bnx2x_config_mf_bw(struct bnx2x *bp)
  {
 +	/* Workaround for MFW bug.
 +	 * MFW is not supposed to generate BW attention in
 +	 * single function mode.
 +	 */
 +	if (!IS_MF(bp)) {
 +		DP(BNX2X_MSG_MCP,
 +		   "Ignoring MF BW config in single function mode\n");
 +		return;
 +	}
 +
  	if (bp->link_vars.link_up) {
  		bnx2x_cmng_fns_init(bp, true, CMNG_FNS_MINMAX);
  		bnx2x_link_sync_notify(bp);
@@@ -12904,19 -12894,6 +12904,6 @@@ static int bnx2x_ioctl(struct net_devic
  	}
  }
  
- #ifdef CONFIG_NET_POLL_CONTROLLER
- static void poll_bnx2x(struct net_device *dev)
- {
- 	struct bnx2x *bp = netdev_priv(dev);
- 	int i;
- 
- 	for_each_eth_queue(bp, i) {
- 		struct bnx2x_fastpath *fp = &bp->fp[i];
- 		napi_schedule(&bnx2x_fp(bp, fp->index, napi));
- 	}
- }
- #endif
- 
  static int bnx2x_validate_addr(struct net_device *dev)
  {
  	struct bnx2x *bp = netdev_priv(dev);
@@@ -13123,15 -13100,11 +13110,12 @@@ static const struct net_device_ops bnx2
  	.ndo_tx_timeout		= bnx2x_tx_timeout,
  	.ndo_vlan_rx_add_vid	= bnx2x_vlan_rx_add_vid,
  	.ndo_vlan_rx_kill_vid	= bnx2x_vlan_rx_kill_vid,
- #ifdef CONFIG_NET_POLL_CONTROLLER
- 	.ndo_poll_controller	= poll_bnx2x,
- #endif
  	.ndo_setup_tc		= __bnx2x_setup_tc,
  #ifdef CONFIG_BNX2X_SRIOV
  	.ndo_set_vf_mac		= bnx2x_set_vf_mac,
  	.ndo_set_vf_vlan	= bnx2x_set_vf_vlan,
  	.ndo_get_vf_config	= bnx2x_get_vf_config,
 +	.ndo_set_vf_spoofchk	= bnx2x_set_vf_spoofchk,
  #endif
  #ifdef NETDEV_FCOE_WWNN
  	.ndo_fcoe_get_wwn	= bnx2x_fcoe_get_wwn,
diff --combined drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
index f4ba9b3f8819,e1594c9df4c6..749f63beddd8
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
@@@ -75,17 -75,23 +75,23 @@@ static int bnxt_tc_parse_redir(struct b
  	return 0;
  }
  
- static void bnxt_tc_parse_vlan(struct bnxt *bp,
- 			       struct bnxt_tc_actions *actions,
- 			       const struct tc_action *tc_act)
+ static int bnxt_tc_parse_vlan(struct bnxt *bp,
+ 			      struct bnxt_tc_actions *actions,
+ 			      const struct tc_action *tc_act)
  {
- 	if (tcf_vlan_action(tc_act) == TCA_VLAN_ACT_POP) {
+ 	switch (tcf_vlan_action(tc_act)) {
+ 	case TCA_VLAN_ACT_POP:
  		actions->flags |= BNXT_TC_ACTION_FLAG_POP_VLAN;
- 	} else if (tcf_vlan_action(tc_act) == TCA_VLAN_ACT_PUSH) {
+ 		break;
+ 	case TCA_VLAN_ACT_PUSH:
  		actions->flags |= BNXT_TC_ACTION_FLAG_PUSH_VLAN;
  		actions->push_vlan_tci = htons(tcf_vlan_push_vid(tc_act));
  		actions->push_vlan_tpid = tcf_vlan_push_proto(tc_act);
+ 		break;
+ 	default:
+ 		return -EOPNOTSUPP;
  	}
+ 	return 0;
  }
  
  static int bnxt_tc_parse_tunnel_set(struct bnxt *bp,
@@@ -134,7 -140,9 +140,9 @@@ static int bnxt_tc_parse_actions(struc
  
  		/* Push/pop VLAN */
  		if (is_tcf_vlan(tc_act)) {
- 			bnxt_tc_parse_vlan(bp, actions, tc_act);
+ 			rc = bnxt_tc_parse_vlan(bp, actions, tc_act);
+ 			if (rc)
+ 				return rc;
  			continue;
  		}
  
@@@ -181,6 -189,7 +189,6 @@@ static int bnxt_tc_parse_flow(struct bn
  				struct bnxt_tc_flow *flow)
  {
  	struct flow_dissector *dissector = tc_flow_cmd->dissector;
 -	u16 addr_type = 0;
  
  	/* KEY_CONTROL and KEY_BASIC are needed for forming a meaningful key */
  	if ((dissector->used_keys & BIT(FLOW_DISSECTOR_KEY_CONTROL)) == 0 ||
@@@ -190,6 -199,13 +198,6 @@@
  		return -EOPNOTSUPP;
  	}
  
 -	if (dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_CONTROL)) {
 -		struct flow_dissector_key_control *key =
 -			GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_CONTROL);
 -
 -		addr_type = key->addr_type;
 -	}
 -
  	if (dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_BASIC)) {
  		struct flow_dissector_key_basic *key =
  			GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_BASIC);
@@@ -285,6 -301,13 +293,6 @@@
  		flow->l4_mask.icmp.code = mask->code;
  	}
  
 -	if (dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_ENC_CONTROL)) {
 -		struct flow_dissector_key_control *key =
 -			GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_ENC_CONTROL);
 -
 -		addr_type = key->addr_type;
 -	}
 -
  	if (dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS)) {
  		struct flow_dissector_key_ipv4_addrs *key =
  			GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS);
diff --combined drivers/net/ethernet/ibm/emac/core.c
index 5107c9450a19,129f4e9f38da..760b2ad8e295
--- a/drivers/net/ethernet/ibm/emac/core.c
+++ b/drivers/net/ethernet/ibm/emac/core.c
@@@ -423,7 -423,7 +423,7 @@@ static void emac_hash_mc(struct emac_in
  {
  	const int regs = EMAC_XAHT_REGS(dev);
  	u32 *gaht_base = emac_gaht_base(dev);
 -	u32 gaht_temp[regs];
 +	u32 gaht_temp[EMAC_XAHT_MAX_REGS];
  	struct netdev_hw_addr *ha;
  	int i;
@@@ -1409,7 -1409,7 +1409,7 @@@ static inline u16 emac_tx_csum(struct e
  	return 0;
  }
  
 -static inline int emac_xmit_finish(struct emac_instance *dev, int len)
 +static inline netdev_tx_t emac_xmit_finish(struct emac_instance *dev, int len)
  {
  	struct emac_regs __iomem *p = dev->emacp;
  	struct net_device *ndev = dev->ndev;
@@@ -1436,7 -1436,7 +1436,7 @@@
  }
  
  /* Tx lock BH */
 -static int emac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 +static netdev_tx_t emac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
  {
  	struct emac_instance *dev = netdev_priv(ndev);
  	unsigned int len = skb->len;
@@@ -1494,8 -1494,7 +1494,8 @@@ static inline int emac_xmit_split(struc
  }
  
  /* Tx lock BH disabled (SG version for TAH equipped EMACs) */
 -static int emac_start_xmit_sg(struct sk_buff *skb, struct net_device *ndev)
 +static netdev_tx_t
 +emac_start_xmit_sg(struct sk_buff *skb, struct net_device *ndev)
  {
  	struct emac_instance *dev = netdev_priv(ndev);
  	int nr_frags = skb_shinfo(skb)->nr_frags;
@@@ -2678,12 -2677,17 +2678,17 @@@ static int emac_init_phy(struct emac_in
  	if (of_phy_is_fixed_link(np)) {
  		int res = emac_dt_mdio_probe(dev);
  
- 		if (!res) {
- 			res = of_phy_register_fixed_link(np);
- 			if (res)
- 				mdiobus_unregister(dev->mii_bus);
+ 		if (res)
+ 			return res;
+ 
+ 		res = of_phy_register_fixed_link(np);
+ 		dev->phy_dev = of_phy_find_device(np);
+ 		if (res || !dev->phy_dev) {
+ 			mdiobus_unregister(dev->mii_bus);
+ 			return res ? res : -EINVAL;
  		}
- 		return res;
+ 		emac_adjust_link(dev->ndev);
+ 		put_device(&dev->phy_dev->mdio.dev);
  	}
  	return 0;
  }
@@@ -2965,10 -2969,6 +2970,10 @@@ static int emac_init_config(struct emac
  		dev->xaht_width_shift = EMAC4_XAHT_WIDTH_SHIFT;
  	}
  
 +	/* This should never happen */
 +	if (WARN_ON(EMAC_XAHT_REGS(dev) > EMAC_XAHT_MAX_REGS))
 +		return -ENXIO;
 +
  	DBG(dev, "features     : 0x%08x / 0x%08x\n", dev->features, EMAC_FTRS_POSSIBLE);
  	DBG(dev, "tx_fifo_size : %d (%d gige)\n", dev->tx_fifo_size, dev->tx_fifo_size_gige);
  	DBG(dev, "rx_fifo_size : %d (%d gige)\n", dev->rx_fifo_size, dev->rx_fifo_size_gige);
diff --combined drivers/net/ethernet/intel/iavf/iavf_main.c
index 8c71c698a2ef,fef6d892ed4c..9f2b7b7adf6b
--- a/drivers/net/ethernet/intel/iavf/iavf_main.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_main.c
@@@ -1,38 -1,38 +1,38 @@@
  // SPDX-License-Identifier: GPL-2.0
  /* Copyright(c) 2013 - 2018 Intel Corporation. */
  
 -#include "i40evf.h"
 -#include "i40e_prototype.h"
 -#include "i40evf_client.h"
 -/* All i40evf tracepoints are defined by the include below, which must
 +#include "iavf.h"
 +#include "iavf_prototype.h"
 +#include "iavf_client.h"
 +/* All iavf tracepoints are defined by the include below, which must
   * be included exactly once across the whole kernel with
   * CREATE_TRACE_POINTS defined
   */
  #define CREATE_TRACE_POINTS
 -#include "i40e_trace.h"
 +#include "iavf_trace.h"
  
 -static int i40evf_setup_all_tx_resources(struct i40evf_adapter *adapter);
 -static int i40evf_setup_all_rx_resources(struct i40evf_adapter *adapter);
 -static int i40evf_close(struct net_device *netdev);
 +static int iavf_setup_all_tx_resources(struct iavf_adapter *adapter);
 +static int iavf_setup_all_rx_resources(struct iavf_adapter *adapter);
 +static int iavf_close(struct net_device *netdev);
  
 -char i40evf_driver_name[] = "i40evf";
 -static const char i40evf_driver_string[] =
 -	"Intel(R) 40-10 Gigabit Virtual Function Network Driver";
 +char iavf_driver_name[] = "iavf";
 +static const char iavf_driver_string[] =
 +	"Intel(R) Ethernet Adaptive Virtual Function Network Driver";
  
  #define DRV_KERN "-k"
  
  #define DRV_VERSION_MAJOR 3
  #define DRV_VERSION_MINOR 2
 -#define DRV_VERSION_BUILD 2
 +#define DRV_VERSION_BUILD 3
  #define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
  	     __stringify(DRV_VERSION_MINOR) "." \
  	     __stringify(DRV_VERSION_BUILD) \
  	     DRV_KERN
 -const char i40evf_driver_version[] = DRV_VERSION;
 -static const char i40evf_copyright[] =
 -	"Copyright (c) 2013 - 2015 Intel Corporation.";
 +const char iavf_driver_version[] = DRV_VERSION;
 +static const char iavf_copyright[] =
 +	"Copyright (c) 2013 - 2018 Intel Corporation.";
  
 -/* i40evf_pci_tbl - PCI Device ID Table
 +/* iavf_pci_tbl - PCI Device ID Table
   *
   * Wildcard entries (PCI_ANY_ID) should come last
   * Last entry must be all 0s
@@@ -40,37 -40,36 +40,37 @@@
   * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
   *   Class, Class Mask, private data (not used) }
   */
 -static const struct pci_device_id i40evf_pci_tbl[] = {
 -	{PCI_VDEVICE(INTEL, I40E_DEV_ID_VF), 0},
 -	{PCI_VDEVICE(INTEL, I40E_DEV_ID_VF_HV), 0},
 -	{PCI_VDEVICE(INTEL, I40E_DEV_ID_X722_VF), 0},
 -	{PCI_VDEVICE(INTEL, I40E_DEV_ID_ADAPTIVE_VF), 0},
 +static const struct pci_device_id iavf_pci_tbl[] = {
 +	{PCI_VDEVICE(INTEL, IAVF_DEV_ID_VF), 0},
 +	{PCI_VDEVICE(INTEL, IAVF_DEV_ID_VF_HV), 0},
 +	{PCI_VDEVICE(INTEL, IAVF_DEV_ID_X722_VF), 0},
 +	{PCI_VDEVICE(INTEL, IAVF_DEV_ID_ADAPTIVE_VF), 0},
  	/* required last entry */
  	{0, }
  };
  
 -MODULE_DEVICE_TABLE(pci, i40evf_pci_tbl);
 +MODULE_DEVICE_TABLE(pci, iavf_pci_tbl);
  
 +MODULE_ALIAS("i40evf");
  MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
 -MODULE_DESCRIPTION("Intel(R) XL710 X710 Virtual Function Network Driver");
 -MODULE_LICENSE("GPL");
 +MODULE_DESCRIPTION("Intel(R) Ethernet Adaptive Virtual Function Network Driver");
 +MODULE_LICENSE("GPL v2");
  MODULE_VERSION(DRV_VERSION);
  
 -static struct workqueue_struct *i40evf_wq;
 +static struct workqueue_struct *iavf_wq;
/** - * i40evf_allocate_dma_mem_d - OS specific memory alloc for shared code + * iavf_allocate_dma_mem_d - OS specific memory alloc for shared code * @hw: pointer to the HW structure * @mem: ptr to mem struct to fill out * @size: size of memory requested * @alignment: what to align the allocation to **/ -i40e_status i40evf_allocate_dma_mem_d(struct i40e_hw *hw, - struct i40e_dma_mem *mem, - u64 size, u32 alignment) +iavf_status iavf_allocate_dma_mem_d(struct iavf_hw *hw, + struct iavf_dma_mem *mem, + u64 size, u32 alignment) { - struct i40evf_adapter *adapter = (struct i40evf_adapter *)hw->back; + struct iavf_adapter *adapter = (struct iavf_adapter *)hw->back;
if (!mem) return I40E_ERR_PARAM; @@@ -85,13 -84,13 +85,13 @@@ }
/** - * i40evf_free_dma_mem_d - OS specific memory free for shared code + * iavf_free_dma_mem_d - OS specific memory free for shared code * @hw: pointer to the HW structure * @mem: ptr to mem struct to free **/ -i40e_status i40evf_free_dma_mem_d(struct i40e_hw *hw, struct i40e_dma_mem *mem) +iavf_status iavf_free_dma_mem_d(struct iavf_hw *hw, struct iavf_dma_mem *mem) { - struct i40evf_adapter *adapter = (struct i40evf_adapter *)hw->back; + struct iavf_adapter *adapter = (struct iavf_adapter *)hw->back;
if (!mem || !mem->va) return I40E_ERR_PARAM; @@@ -101,13 -100,13 +101,13 @@@ }
/** - * i40evf_allocate_virt_mem_d - OS specific memory alloc for shared code + * iavf_allocate_virt_mem_d - OS specific memory alloc for shared code * @hw: pointer to the HW structure * @mem: ptr to mem struct to fill out * @size: size of memory requested **/ -i40e_status i40evf_allocate_virt_mem_d(struct i40e_hw *hw, - struct i40e_virt_mem *mem, u32 size) +iavf_status iavf_allocate_virt_mem_d(struct iavf_hw *hw, + struct iavf_virt_mem *mem, u32 size) { if (!mem) return I40E_ERR_PARAM; @@@ -122,11 -121,12 +122,11 @@@ }
/** - * i40evf_free_virt_mem_d - OS specific memory free for shared code + * iavf_free_virt_mem_d - OS specific memory free for shared code * @hw: pointer to the HW structure * @mem: ptr to mem struct to free **/ -i40e_status i40evf_free_virt_mem_d(struct i40e_hw *hw, - struct i40e_virt_mem *mem) +iavf_status iavf_free_virt_mem_d(struct iavf_hw *hw, struct iavf_virt_mem *mem) { if (!mem) return I40E_ERR_PARAM; @@@ -138,17 -138,17 +138,17 @@@ }
/** - * i40evf_debug_d - OS dependent version of debug printing + * iavf_debug_d - OS dependent version of debug printing * @hw: pointer to the HW structure * @mask: debug level mask * @fmt_str: printf-type format description **/ -void i40evf_debug_d(void *hw, u32 mask, char *fmt_str, ...) +void iavf_debug_d(void *hw, u32 mask, char *fmt_str, ...) { char buf[512]; va_list argptr;
- if (!(mask & ((struct i40e_hw *)hw)->debug_mask)) + if (!(mask & ((struct iavf_hw *)hw)->debug_mask)) return;
va_start(argptr, fmt_str); @@@ -160,131 -160,134 +160,131 @@@ }
/** - * i40evf_schedule_reset - Set the flags and schedule a reset event + * iavf_schedule_reset - Set the flags and schedule a reset event * @adapter: board private structure **/ -void i40evf_schedule_reset(struct i40evf_adapter *adapter) +void iavf_schedule_reset(struct iavf_adapter *adapter) { if (!(adapter->flags & - (I40EVF_FLAG_RESET_PENDING | I40EVF_FLAG_RESET_NEEDED))) { - adapter->flags |= I40EVF_FLAG_RESET_NEEDED; + (IAVF_FLAG_RESET_PENDING | IAVF_FLAG_RESET_NEEDED))) { + adapter->flags |= IAVF_FLAG_RESET_NEEDED; schedule_work(&adapter->reset_task); } }
/** - * i40evf_tx_timeout - Respond to a Tx Hang + * iavf_tx_timeout - Respond to a Tx Hang * @netdev: network interface device structure **/ -static void i40evf_tx_timeout(struct net_device *netdev) +static void iavf_tx_timeout(struct net_device *netdev) { - struct i40evf_adapter *adapter = netdev_priv(netdev); + struct iavf_adapter *adapter = netdev_priv(netdev);
adapter->tx_timeout_count++; - i40evf_schedule_reset(adapter); + iavf_schedule_reset(adapter); }
/** - * i40evf_misc_irq_disable - Mask off interrupt generation on the NIC + * iavf_misc_irq_disable - Mask off interrupt generation on the NIC * @adapter: board private structure **/ -static void i40evf_misc_irq_disable(struct i40evf_adapter *adapter) +static void iavf_misc_irq_disable(struct iavf_adapter *adapter) { - struct i40e_hw *hw = &adapter->hw; + struct iavf_hw *hw = &adapter->hw;
if (!adapter->msix_entries) return;
- wr32(hw, I40E_VFINT_DYN_CTL01, 0); + wr32(hw, IAVF_VFINT_DYN_CTL01, 0);
- /* read flush */ - rd32(hw, I40E_VFGEN_RSTAT); + iavf_flush(hw);
synchronize_irq(adapter->msix_entries[0].vector); }
/** - * i40evf_misc_irq_enable - Enable default interrupt generation settings + * iavf_misc_irq_enable - Enable default interrupt generation settings * @adapter: board private structure **/ -static void i40evf_misc_irq_enable(struct i40evf_adapter *adapter) +static void iavf_misc_irq_enable(struct iavf_adapter *adapter) { - struct i40e_hw *hw = &adapter->hw; + struct iavf_hw *hw = &adapter->hw;
- wr32(hw, I40E_VFINT_DYN_CTL01, I40E_VFINT_DYN_CTL01_INTENA_MASK | - I40E_VFINT_DYN_CTL01_ITR_INDX_MASK); - wr32(hw, I40E_VFINT_ICR0_ENA1, I40E_VFINT_ICR0_ENA1_ADMINQ_MASK); + wr32(hw, IAVF_VFINT_DYN_CTL01, IAVF_VFINT_DYN_CTL01_INTENA_MASK | + IAVF_VFINT_DYN_CTL01_ITR_INDX_MASK); + wr32(hw, IAVF_VFINT_ICR0_ENA1, IAVF_VFINT_ICR0_ENA1_ADMINQ_MASK);
- /* read flush */ - rd32(hw, I40E_VFGEN_RSTAT); + iavf_flush(hw); }
/** - * i40evf_irq_disable - Mask off interrupt generation on the NIC + * iavf_irq_disable - Mask off interrupt generation on the NIC * @adapter: board private structure **/ -static void i40evf_irq_disable(struct i40evf_adapter *adapter) +static void iavf_irq_disable(struct iavf_adapter *adapter) { int i; - struct i40e_hw *hw = &adapter->hw; + struct iavf_hw *hw = &adapter->hw;
if (!adapter->msix_entries) return;
for (i = 1; i < adapter->num_msix_vectors; i++) { - wr32(hw, I40E_VFINT_DYN_CTLN1(i - 1), 0); + wr32(hw, IAVF_VFINT_DYN_CTLN1(i - 1), 0); synchronize_irq(adapter->msix_entries[i].vector); } - /* read flush */ - rd32(hw, I40E_VFGEN_RSTAT); + iavf_flush(hw); }
/** - * i40evf_irq_enable_queues - Enable interrupt for specified queues + * iavf_irq_enable_queues - Enable interrupt for specified queues * @adapter: board private structure * @mask: bitmap of queues to enable **/ -void i40evf_irq_enable_queues(struct i40evf_adapter *adapter, u32 mask) +void iavf_irq_enable_queues(struct iavf_adapter *adapter, u32 mask) { - struct i40e_hw *hw = &adapter->hw; + struct iavf_hw *hw = &adapter->hw; int i;
for (i = 1; i < adapter->num_msix_vectors; i++) { if (mask & BIT(i - 1)) { - wr32(hw, I40E_VFINT_DYN_CTLN1(i - 1), - I40E_VFINT_DYN_CTLN1_INTENA_MASK | - I40E_VFINT_DYN_CTLN1_ITR_INDX_MASK); + wr32(hw, IAVF_VFINT_DYN_CTLN1(i - 1), + IAVF_VFINT_DYN_CTLN1_INTENA_MASK | + IAVF_VFINT_DYN_CTLN1_ITR_INDX_MASK); } } }
/** - * i40evf_irq_enable - Enable default interrupt generation settings + * iavf_irq_enable - Enable default interrupt generation settings * @adapter: board private structure * @flush: boolean value whether to run rd32() **/ -void i40evf_irq_enable(struct i40evf_adapter *adapter, bool flush) +void iavf_irq_enable(struct iavf_adapter *adapter, bool flush) { - struct i40e_hw *hw = &adapter->hw; + struct iavf_hw *hw = &adapter->hw;
- i40evf_misc_irq_enable(adapter); - i40evf_irq_enable_queues(adapter, ~0); + iavf_misc_irq_enable(adapter); + iavf_irq_enable_queues(adapter, ~0);
if (flush) - rd32(hw, I40E_VFGEN_RSTAT); + iavf_flush(hw); }
/** - * i40evf_msix_aq - Interrupt handler for vector 0 + * iavf_msix_aq - Interrupt handler for vector 0 * @irq: interrupt number * @data: pointer to netdev **/ -static irqreturn_t i40evf_msix_aq(int irq, void *data) +static irqreturn_t iavf_msix_aq(int irq, void *data) { struct net_device *netdev = data; - struct i40evf_adapter *adapter = netdev_priv(netdev); - struct i40e_hw *hw = &adapter->hw; + struct iavf_adapter *adapter = netdev_priv(netdev); + struct iavf_hw *hw = &adapter->hw;
/* handle non-queue interrupts, these reads clear the registers */ - rd32(hw, I40E_VFINT_ICR01); - rd32(hw, I40E_VFINT_ICR0_ENA1); + rd32(hw, IAVF_VFINT_ICR01); + rd32(hw, IAVF_VFINT_ICR0_ENA1);
/* schedule work on the private workqueue */ schedule_work(&adapter->adminq_task); @@@ -293,13 -296,13 +293,13 @@@ }
/** - * i40evf_msix_clean_rings - MSIX mode Interrupt Handler + * iavf_msix_clean_rings - MSIX mode Interrupt Handler * @irq: interrupt number * @data: pointer to a q_vector **/ -static irqreturn_t i40evf_msix_clean_rings(int irq, void *data) +static irqreturn_t iavf_msix_clean_rings(int irq, void *data) { - struct i40e_q_vector *q_vector = data; + struct iavf_q_vector *q_vector = data;
if (!q_vector->tx.ring && !q_vector->rx.ring) return IRQ_HANDLED; @@@ -310,17 -313,17 +310,17 @@@ }
/** - * i40evf_map_vector_to_rxq - associate irqs with rx queues + * iavf_map_vector_to_rxq - associate irqs with rx queues * @adapter: board private structure * @v_idx: interrupt number * @r_idx: queue number **/ static void -i40evf_map_vector_to_rxq(struct i40evf_adapter *adapter, int v_idx, int r_idx) +iavf_map_vector_to_rxq(struct iavf_adapter *adapter, int v_idx, int r_idx) { - struct i40e_q_vector *q_vector = &adapter->q_vectors[v_idx]; - struct i40e_ring *rx_ring = &adapter->rx_rings[r_idx]; - struct i40e_hw *hw = &adapter->hw; + struct iavf_q_vector *q_vector = &adapter->q_vectors[v_idx]; + struct iavf_ring *rx_ring = &adapter->rx_rings[r_idx]; + struct iavf_hw *hw = &adapter->hw;
rx_ring->q_vector = q_vector; rx_ring->next = q_vector->rx.ring; @@@ -330,23 -333,23 +330,23 @@@ q_vector->rx.next_update = jiffies + 1; q_vector->rx.target_itr = ITR_TO_REG(rx_ring->itr_setting); q_vector->ring_mask |= BIT(r_idx); - wr32(hw, I40E_VFINT_ITRN1(I40E_RX_ITR, q_vector->reg_idx), + wr32(hw, IAVF_VFINT_ITRN1(IAVF_RX_ITR, q_vector->reg_idx), q_vector->rx.current_itr); q_vector->rx.current_itr = q_vector->rx.target_itr; }
/** - * i40evf_map_vector_to_txq - associate irqs with tx queues + * iavf_map_vector_to_txq - associate irqs with tx queues * @adapter: board private structure * @v_idx: interrupt number * @t_idx: queue number **/ static void -i40evf_map_vector_to_txq(struct i40evf_adapter *adapter, int v_idx, int t_idx) +iavf_map_vector_to_txq(struct iavf_adapter *adapter, int v_idx, int t_idx) { - struct i40e_q_vector *q_vector = &adapter->q_vectors[v_idx]; - struct i40e_ring *tx_ring = &adapter->tx_rings[t_idx]; - struct i40e_hw *hw = &adapter->hw; + struct iavf_q_vector *q_vector = &adapter->q_vectors[v_idx]; + struct iavf_ring *tx_ring = &adapter->tx_rings[t_idx]; + struct iavf_hw *hw = &adapter->hw;
tx_ring->q_vector = q_vector; tx_ring->next = q_vector->tx.ring; @@@ -356,13 -359,13 +356,13 @@@ q_vector->tx.next_update = jiffies + 1; q_vector->tx.target_itr = ITR_TO_REG(tx_ring->itr_setting); q_vector->num_ringpairs++; - wr32(hw, I40E_VFINT_ITRN1(I40E_TX_ITR, q_vector->reg_idx), + wr32(hw, IAVF_VFINT_ITRN1(IAVF_TX_ITR, q_vector->reg_idx), q_vector->tx.target_itr); q_vector->tx.current_itr = q_vector->tx.target_itr; }
/** - * i40evf_map_rings_to_vectors - Maps descriptor rings to vectors + * iavf_map_rings_to_vectors - Maps descriptor rings to vectors * @adapter: board private structure to initialize * * This function maps descriptor rings to the queue-specific vectors @@@ -371,7 -374,7 +371,7 @@@ * group the rings as "efficiently" as possible. You would add new * mapping configurations in here. **/ -static void i40evf_map_rings_to_vectors(struct i40evf_adapter *adapter) +static void iavf_map_rings_to_vectors(struct iavf_adapter *adapter) { int rings_remaining = adapter->num_active_queues; int ridx = 0, vidx = 0; @@@ -380,8 -383,8 +380,8 @@@ q_vectors = adapter->num_msix_vectors - NONQ_VECS;
for (; ridx < rings_remaining; ridx++) { - i40evf_map_vector_to_rxq(adapter, vidx, ridx); - i40evf_map_vector_to_txq(adapter, vidx, ridx); + iavf_map_vector_to_rxq(adapter, vidx, ridx); + iavf_map_vector_to_txq(adapter, vidx, ridx);
/* In the case where we have more queues than vectors, continue * round-robin on vectors until all queues are mapped. @@@ -390,61 -393,38 +390,38 @@@ vidx = 0; }
- adapter->aq_required |= I40EVF_FLAG_AQ_MAP_VECTORS; + adapter->aq_required |= IAVF_FLAG_AQ_MAP_VECTORS; }
- #ifdef CONFIG_NET_POLL_CONTROLLER
- /**
-  * iavf_netpoll - A Polling 'interrupt' handler
-  * @netdev: network interface device structure
-  *
-  * This is used by netconsole to send skbs without having to re-enable
-  * interrupts. It's not called while the normal interrupt routine is executing.
-  **/
- static void iavf_netpoll(struct net_device *netdev)
- {
- 	struct iavf_adapter *adapter = netdev_priv(netdev);
- 	int q_vectors = adapter->num_msix_vectors - NONQ_VECS;
- 	int i;
- 
- 	/* if interface is down do nothing */
- 	if (test_bit(__IAVF_VSI_DOWN, adapter->vsi.state))
- 		return;
- 
- 	for (i = 0; i < q_vectors; i++)
- 		iavf_msix_clean_rings(0, &adapter->q_vectors[i]);
- }
- 
- #endif
  /**
 - * i40evf_irq_affinity_notify - Callback for affinity changes
 + * iavf_irq_affinity_notify - Callback for affinity changes
   * @notify: context as to what irq was changed
   * @mask: the new affinity mask
   *
   * This is a callback function used by the irq_set_affinity_notifier function
   * so that we may register to receive changes to the irq affinity masks.
   **/
 -static void i40evf_irq_affinity_notify(struct irq_affinity_notify *notify,
 -				       const cpumask_t *mask)
 +static void iavf_irq_affinity_notify(struct irq_affinity_notify *notify,
 +				     const cpumask_t *mask)
  {
 -	struct i40e_q_vector *q_vector =
 -		container_of(notify, struct i40e_q_vector, affinity_notify);
 +	struct iavf_q_vector *q_vector =
 +		container_of(notify, struct iavf_q_vector, affinity_notify);
cpumask_copy(&q_vector->affinity_mask, mask); }
/** - * i40evf_irq_affinity_release - Callback for affinity notifier release + * iavf_irq_affinity_release - Callback for affinity notifier release * @ref: internal core kernel usage * * This is a callback function used by the irq_set_affinity_notifier function * to inform the current notification subscriber that they will no longer * receive notifications. **/ -static void i40evf_irq_affinity_release(struct kref *ref) {} +static void iavf_irq_affinity_release(struct kref *ref) {}
/** - * i40evf_request_traffic_irqs - Initialize MSI-X interrupts + * iavf_request_traffic_irqs - Initialize MSI-X interrupts * @adapter: board private structure * @basename: device basename * @@@ -452,38 -432,37 +429,38 @@@ * interrupts from the kernel. **/ static int -i40evf_request_traffic_irqs(struct i40evf_adapter *adapter, char *basename) +iavf_request_traffic_irqs(struct iavf_adapter *adapter, char *basename) { unsigned int vector, q_vectors; unsigned int rx_int_idx = 0, tx_int_idx = 0; int irq_num, err; int cpu;
- i40evf_irq_disable(adapter); + iavf_irq_disable(adapter); /* Decrement for Other and TCP Timer vectors */ q_vectors = adapter->num_msix_vectors - NONQ_VECS;
for (vector = 0; vector < q_vectors; vector++) { - struct i40e_q_vector *q_vector = &adapter->q_vectors[vector]; + struct iavf_q_vector *q_vector = &adapter->q_vectors[vector]; + irq_num = adapter->msix_entries[vector + NONQ_VECS].vector;
if (q_vector->tx.ring && q_vector->rx.ring) { snprintf(q_vector->name, sizeof(q_vector->name), - "i40evf-%s-TxRx-%d", basename, rx_int_idx++); + "iavf-%s-TxRx-%d", basename, rx_int_idx++); tx_int_idx++; } else if (q_vector->rx.ring) { snprintf(q_vector->name, sizeof(q_vector->name), - "i40evf-%s-rx-%d", basename, rx_int_idx++); + "iavf-%s-rx-%d", basename, rx_int_idx++); } else if (q_vector->tx.ring) { snprintf(q_vector->name, sizeof(q_vector->name), - "i40evf-%s-tx-%d", basename, tx_int_idx++); + "iavf-%s-tx-%d", basename, tx_int_idx++); } else { /* skip this unused q_vector */ continue; } err = request_irq(irq_num, - i40evf_msix_clean_rings, + iavf_msix_clean_rings, 0, q_vector->name, q_vector); @@@ -493,9 -472,9 +470,9 @@@ goto free_queue_irqs; } /* register for affinity change notifications */ - q_vector->affinity_notify.notify = i40evf_irq_affinity_notify; + q_vector->affinity_notify.notify = iavf_irq_affinity_notify; q_vector->affinity_notify.release = - i40evf_irq_affinity_release; + iavf_irq_affinity_release; irq_set_affinity_notifier(irq_num, &q_vector->affinity_notify); /* Spread the IRQ affinity hints across online CPUs. Note that * get_cpu_mask returns a mask with a permanent lifetime so @@@ -519,23 -498,23 +496,23 @@@ free_queue_irqs }
/** - * i40evf_request_misc_irq - Initialize MSI-X interrupts + * iavf_request_misc_irq - Initialize MSI-X interrupts * @adapter: board private structure * * Allocates MSI-X vector 0 and requests interrupts from the kernel. This * vector is only for the admin queue, and stays active even when the netdev * is closed. **/ -static int i40evf_request_misc_irq(struct i40evf_adapter *adapter) +static int iavf_request_misc_irq(struct iavf_adapter *adapter) { struct net_device *netdev = adapter->netdev; int err;
snprintf(adapter->misc_vector_name, - sizeof(adapter->misc_vector_name) - 1, "i40evf-%s:mbx", + sizeof(adapter->misc_vector_name) - 1, "iavf-%s:mbx", dev_name(&adapter->pdev->dev)); err = request_irq(adapter->msix_entries[0].vector, - &i40evf_msix_aq, 0, + &iavf_msix_aq, 0, adapter->misc_vector_name, netdev); if (err) { dev_err(&adapter->pdev->dev, @@@ -547,12 -526,12 +524,12 @@@ }
/** - * i40evf_free_traffic_irqs - Free MSI-X interrupts + * iavf_free_traffic_irqs - Free MSI-X interrupts * @adapter: board private structure * * Frees all MSI-X vectors other than 0. **/ -static void i40evf_free_traffic_irqs(struct i40evf_adapter *adapter) +static void iavf_free_traffic_irqs(struct iavf_adapter *adapter) { int vector, irq_num, q_vectors;
@@@ -570,12 -549,12 +547,12 @@@ }
/** - * i40evf_free_misc_irq - Free MSI-X miscellaneous vector + * iavf_free_misc_irq - Free MSI-X miscellaneous vector * @adapter: board private structure * * Frees MSI-X vector 0. **/ -static void i40evf_free_misc_irq(struct i40evf_adapter *adapter) +static void iavf_free_misc_irq(struct iavf_adapter *adapter) { struct net_device *netdev = adapter->netdev;
@@@ -586,58 -565,58 +563,58 @@@ }
/** - * i40evf_configure_tx - Configure Transmit Unit after Reset + * iavf_configure_tx - Configure Transmit Unit after Reset * @adapter: board private structure * * Configure the Tx unit of the MAC after a reset. **/ -static void i40evf_configure_tx(struct i40evf_adapter *adapter) +static void iavf_configure_tx(struct iavf_adapter *adapter) { - struct i40e_hw *hw = &adapter->hw; + struct iavf_hw *hw = &adapter->hw; int i;
for (i = 0; i < adapter->num_active_queues; i++) - adapter->tx_rings[i].tail = hw->hw_addr + I40E_QTX_TAIL1(i); + adapter->tx_rings[i].tail = hw->hw_addr + IAVF_QTX_TAIL1(i); }
/** - * i40evf_configure_rx - Configure Receive Unit after Reset + * iavf_configure_rx - Configure Receive Unit after Reset * @adapter: board private structure * * Configure the Rx unit of the MAC after a reset. **/ -static void i40evf_configure_rx(struct i40evf_adapter *adapter) +static void iavf_configure_rx(struct iavf_adapter *adapter) { - unsigned int rx_buf_len = I40E_RXBUFFER_2048; - struct i40e_hw *hw = &adapter->hw; + unsigned int rx_buf_len = IAVF_RXBUFFER_2048; + struct iavf_hw *hw = &adapter->hw; int i;
/* Legacy Rx will always default to a 2048 buffer size. */ #if (PAGE_SIZE < 8192) - if (!(adapter->flags & I40EVF_FLAG_LEGACY_RX)) { + if (!(adapter->flags & IAVF_FLAG_LEGACY_RX)) { struct net_device *netdev = adapter->netdev;
/* For jumbo frames on systems with 4K pages we have to use * an order 1 page, so we might as well increase the size * of our Rx buffer to make better use of the available space */ - rx_buf_len = I40E_RXBUFFER_3072; + rx_buf_len = IAVF_RXBUFFER_3072;
/* We use a 1536 buffer size for configurations with * standard Ethernet mtu. On x86 this gives us enough room * for shared info and 192 bytes of padding. */ - if (!I40E_2K_TOO_SMALL_WITH_PADDING && + if (!IAVF_2K_TOO_SMALL_WITH_PADDING && (netdev->mtu <= ETH_DATA_LEN)) - rx_buf_len = I40E_RXBUFFER_1536 - NET_IP_ALIGN; + rx_buf_len = IAVF_RXBUFFER_1536 - NET_IP_ALIGN; } #endif
for (i = 0; i < adapter->num_active_queues; i++) { - adapter->rx_rings[i].tail = hw->hw_addr + I40E_QRX_TAIL1(i); + adapter->rx_rings[i].tail = hw->hw_addr + IAVF_QRX_TAIL1(i); adapter->rx_rings[i].rx_buf_len = rx_buf_len;
- if (adapter->flags & I40EVF_FLAG_LEGACY_RX) + if (adapter->flags & IAVF_FLAG_LEGACY_RX) clear_ring_build_skb_enabled(&adapter->rx_rings[i]); else set_ring_build_skb_enabled(&adapter->rx_rings[i]); @@@ -645,7 -624,7 +622,7 @@@ }
/** - * i40evf_find_vlan - Search filter list for specific vlan filter + * iavf_find_vlan - Search filter list for specific vlan filter * @adapter: board private structure * @vlan: vlan tag * @@@ -653,9 -632,9 +630,9 @@@ * mac_vlan_list_lock. **/ static struct -i40evf_vlan_filter *i40evf_find_vlan(struct i40evf_adapter *adapter, u16 vlan) +iavf_vlan_filter *iavf_find_vlan(struct iavf_adapter *adapter, u16 vlan) { - struct i40evf_vlan_filter *f; + struct iavf_vlan_filter *f;
list_for_each_entry(f, &adapter->vlan_filter_list, list) { if (vlan == f->vlan) @@@ -665,20 -644,20 +642,20 @@@ }
/** - * i40evf_add_vlan - Add a vlan filter to the list + * iavf_add_vlan - Add a vlan filter to the list * @adapter: board private structure * @vlan: VLAN tag * * Returns ptr to the filter object or NULL when no memory available. **/ static struct -i40evf_vlan_filter *i40evf_add_vlan(struct i40evf_adapter *adapter, u16 vlan) +iavf_vlan_filter *iavf_add_vlan(struct iavf_adapter *adapter, u16 vlan) { - struct i40evf_vlan_filter *f = NULL; + struct iavf_vlan_filter *f = NULL;
spin_lock_bh(&adapter->mac_vlan_list_lock);
- f = i40evf_find_vlan(adapter, vlan); + f = iavf_find_vlan(adapter, vlan); if (!f) { f = kzalloc(sizeof(*f), GFP_KERNEL); if (!f) @@@ -689,7 -668,7 +666,7 @@@ INIT_LIST_HEAD(&f->list); list_add(&f->list, &adapter->vlan_filter_list); f->add = true; - adapter->aq_required |= I40EVF_FLAG_AQ_ADD_VLAN_FILTER; + adapter->aq_required |= IAVF_FLAG_AQ_ADD_VLAN_FILTER; }
clearout: @@@ -698,63 -677,63 +675,63 @@@ }
/** - * i40evf_del_vlan - Remove a vlan filter from the list + * iavf_del_vlan - Remove a vlan filter from the list * @adapter: board private structure * @vlan: VLAN tag **/ -static void i40evf_del_vlan(struct i40evf_adapter *adapter, u16 vlan) +static void iavf_del_vlan(struct iavf_adapter *adapter, u16 vlan) { - struct i40evf_vlan_filter *f; + struct iavf_vlan_filter *f;
spin_lock_bh(&adapter->mac_vlan_list_lock);
- f = i40evf_find_vlan(adapter, vlan); + f = iavf_find_vlan(adapter, vlan); if (f) { f->remove = true; - adapter->aq_required |= I40EVF_FLAG_AQ_DEL_VLAN_FILTER; + adapter->aq_required |= IAVF_FLAG_AQ_DEL_VLAN_FILTER; }
spin_unlock_bh(&adapter->mac_vlan_list_lock); }
/** - * i40evf_vlan_rx_add_vid - Add a VLAN filter to a device + * iavf_vlan_rx_add_vid - Add a VLAN filter to a device * @netdev: network device struct * @proto: unused protocol data * @vid: VLAN tag **/ -static int i40evf_vlan_rx_add_vid(struct net_device *netdev, - __always_unused __be16 proto, u16 vid) +static int iavf_vlan_rx_add_vid(struct net_device *netdev, + __always_unused __be16 proto, u16 vid) { - struct i40evf_adapter *adapter = netdev_priv(netdev); + struct iavf_adapter *adapter = netdev_priv(netdev);
if (!VLAN_ALLOWED(adapter)) return -EIO; - if (i40evf_add_vlan(adapter, vid) == NULL) + if (iavf_add_vlan(adapter, vid) == NULL) return -ENOMEM; return 0; }
/** - * i40evf_vlan_rx_kill_vid - Remove a VLAN filter from a device + * iavf_vlan_rx_kill_vid - Remove a VLAN filter from a device * @netdev: network device struct * @proto: unused protocol data * @vid: VLAN tag **/ -static int i40evf_vlan_rx_kill_vid(struct net_device *netdev, - __always_unused __be16 proto, u16 vid) +static int iavf_vlan_rx_kill_vid(struct net_device *netdev, + __always_unused __be16 proto, u16 vid) { - struct i40evf_adapter *adapter = netdev_priv(netdev); + struct iavf_adapter *adapter = netdev_priv(netdev);
if (VLAN_ALLOWED(adapter)) { - i40evf_del_vlan(adapter, vid); + iavf_del_vlan(adapter, vid); return 0; } return -EIO; }
/** - * i40evf_find_filter - Search filter list for specific mac filter + * iavf_find_filter - Search filter list for specific mac filter * @adapter: board private structure * @macaddr: the MAC address * @@@ -762,10 -741,10 +739,10 @@@ * mac_vlan_list_lock. **/ static struct -i40evf_mac_filter *i40evf_find_filter(struct i40evf_adapter *adapter, - const u8 *macaddr) +iavf_mac_filter *iavf_find_filter(struct iavf_adapter *adapter, + const u8 *macaddr) { - struct i40evf_mac_filter *f; + struct iavf_mac_filter *f;
if (!macaddr) return NULL; @@@ -778,22 -757,22 +755,22 @@@ }
/** - * i40e_add_filter - Add a mac filter to the filter list + * iavf_add_filter - Add a mac filter to the filter list * @adapter: board private structure * @macaddr: the MAC address * * Returns ptr to the filter object or NULL when no memory available. **/ static struct -i40evf_mac_filter *i40evf_add_filter(struct i40evf_adapter *adapter, - const u8 *macaddr) +iavf_mac_filter *iavf_add_filter(struct iavf_adapter *adapter, + const u8 *macaddr) { - struct i40evf_mac_filter *f; + struct iavf_mac_filter *f;
if (!macaddr) return NULL;
- f = i40evf_find_filter(adapter, macaddr); + f = iavf_find_filter(adapter, macaddr); if (!f) { f = kzalloc(sizeof(*f), GFP_ATOMIC); if (!f) @@@ -803,7 -782,7 +780,7 @@@
list_add_tail(&f->list, &adapter->mac_filter_list); f->add = true; - adapter->aq_required |= I40EVF_FLAG_AQ_ADD_MAC_FILTER; + adapter->aq_required |= IAVF_FLAG_AQ_ADD_MAC_FILTER; } else { f->remove = false; } @@@ -812,17 -791,17 +789,17 @@@ }
/** - * i40evf_set_mac - NDO callback to set port mac address + * iavf_set_mac - NDO callback to set port mac address * @netdev: network interface device structure * @p: pointer to an address structure * * Returns 0 on success, negative on failure **/ -static int i40evf_set_mac(struct net_device *netdev, void *p) +static int iavf_set_mac(struct net_device *netdev, void *p) { - struct i40evf_adapter *adapter = netdev_priv(netdev); - struct i40e_hw *hw = &adapter->hw; - struct i40evf_mac_filter *f; + struct iavf_adapter *adapter = netdev_priv(netdev); + struct iavf_hw *hw = &adapter->hw; + struct iavf_mac_filter *f; struct sockaddr *addr = p;
if (!is_valid_ether_addr(addr->sa_data)) @@@ -831,18 -810,18 +808,18 @@@ if (ether_addr_equal(netdev->dev_addr, addr->sa_data)) return 0;
- if (adapter->flags & I40EVF_FLAG_ADDR_SET_BY_PF) + if (adapter->flags & IAVF_FLAG_ADDR_SET_BY_PF) return -EPERM;
spin_lock_bh(&adapter->mac_vlan_list_lock);
- f = i40evf_find_filter(adapter, hw->mac.addr); + f = iavf_find_filter(adapter, hw->mac.addr); if (f) { f->remove = true; - adapter->aq_required |= I40EVF_FLAG_AQ_DEL_MAC_FILTER; + adapter->aq_required |= IAVF_FLAG_AQ_DEL_MAC_FILTER; }
- f = i40evf_add_filter(adapter, addr->sa_data); + f = iavf_add_filter(adapter, addr->sa_data);
spin_unlock_bh(&adapter->mac_vlan_list_lock);
@@@ -855,35 -834,35 +832,35 @@@ }
/** - * i40evf_addr_sync - Callback for dev_(mc|uc)_sync to add address + * iavf_addr_sync - Callback for dev_(mc|uc)_sync to add address * @netdev: the netdevice * @addr: address to add * * Called by __dev_(mc|uc)_sync when an address needs to be added. We call * __dev_(uc|mc)_sync from .set_rx_mode and guarantee to hold the hash lock. */ -static int i40evf_addr_sync(struct net_device *netdev, const u8 *addr) +static int iavf_addr_sync(struct net_device *netdev, const u8 *addr) { - struct i40evf_adapter *adapter = netdev_priv(netdev); + struct iavf_adapter *adapter = netdev_priv(netdev);
- if (i40evf_add_filter(adapter, addr)) + if (iavf_add_filter(adapter, addr)) return 0; else return -ENOMEM; }
/** - * i40evf_addr_unsync - Callback for dev_(mc|uc)_sync to remove address + * iavf_addr_unsync - Callback for dev_(mc|uc)_sync to remove address * @netdev: the netdevice * @addr: address to add * * Called by __dev_(mc|uc)_sync when an address needs to be removed. We call * __dev_(uc|mc)_sync from .set_rx_mode and guarantee to hold the hash lock. */ -static int i40evf_addr_unsync(struct net_device *netdev, const u8 *addr) +static int iavf_addr_unsync(struct net_device *netdev, const u8 *addr) { - struct i40evf_adapter *adapter = netdev_priv(netdev); - struct i40evf_mac_filter *f; + struct iavf_adapter *adapter = netdev_priv(netdev); + struct iavf_mac_filter *f;
/* Under some circumstances, we might receive a request to delete * our own device address from our uc list. Because we store the @@@ -893,50 -872,50 +870,50 @@@ if (ether_addr_equal(addr, netdev->dev_addr)) return 0;
- f = i40evf_find_filter(adapter, addr); + f = iavf_find_filter(adapter, addr); if (f) { f->remove = true; - adapter->aq_required |= I40EVF_FLAG_AQ_DEL_MAC_FILTER; + adapter->aq_required |= IAVF_FLAG_AQ_DEL_MAC_FILTER; } return 0; }
/** - * i40evf_set_rx_mode - NDO callback to set the netdev filters + * iavf_set_rx_mode - NDO callback to set the netdev filters * @netdev: network interface device structure **/ -static void i40evf_set_rx_mode(struct net_device *netdev) +static void iavf_set_rx_mode(struct net_device *netdev) { - struct i40evf_adapter *adapter = netdev_priv(netdev); + struct iavf_adapter *adapter = netdev_priv(netdev);
spin_lock_bh(&adapter->mac_vlan_list_lock); - __dev_uc_sync(netdev, i40evf_addr_sync, i40evf_addr_unsync); - __dev_mc_sync(netdev, i40evf_addr_sync, i40evf_addr_unsync); + __dev_uc_sync(netdev, iavf_addr_sync, iavf_addr_unsync); + __dev_mc_sync(netdev, iavf_addr_sync, iavf_addr_unsync); spin_unlock_bh(&adapter->mac_vlan_list_lock);
if (netdev->flags & IFF_PROMISC && - !(adapter->flags & I40EVF_FLAG_PROMISC_ON)) - adapter->aq_required |= I40EVF_FLAG_AQ_REQUEST_PROMISC; + !(adapter->flags & IAVF_FLAG_PROMISC_ON)) + adapter->aq_required |= IAVF_FLAG_AQ_REQUEST_PROMISC; else if (!(netdev->flags & IFF_PROMISC) && - adapter->flags & I40EVF_FLAG_PROMISC_ON) - adapter->aq_required |= I40EVF_FLAG_AQ_RELEASE_PROMISC; + adapter->flags & IAVF_FLAG_PROMISC_ON) + adapter->aq_required |= IAVF_FLAG_AQ_RELEASE_PROMISC;
if (netdev->flags & IFF_ALLMULTI && - !(adapter->flags & I40EVF_FLAG_ALLMULTI_ON)) - adapter->aq_required |= I40EVF_FLAG_AQ_REQUEST_ALLMULTI; + !(adapter->flags & IAVF_FLAG_ALLMULTI_ON)) + adapter->aq_required |= IAVF_FLAG_AQ_REQUEST_ALLMULTI; else if (!(netdev->flags & IFF_ALLMULTI) && - adapter->flags & I40EVF_FLAG_ALLMULTI_ON) - adapter->aq_required |= I40EVF_FLAG_AQ_RELEASE_ALLMULTI; + adapter->flags & IAVF_FLAG_ALLMULTI_ON) + adapter->aq_required |= IAVF_FLAG_AQ_RELEASE_ALLMULTI; }
/** - * i40evf_napi_enable_all - enable NAPI on all queue vectors + * iavf_napi_enable_all - enable NAPI on all queue vectors * @adapter: board private structure **/ -static void i40evf_napi_enable_all(struct i40evf_adapter *adapter) +static void iavf_napi_enable_all(struct iavf_adapter *adapter) { int q_idx; - struct i40e_q_vector *q_vector; + struct iavf_q_vector *q_vector; int q_vectors = adapter->num_msix_vectors - NONQ_VECS;
for (q_idx = 0; q_idx < q_vectors; q_idx++) { @@@ -949,13 -928,13 +926,13 @@@ }
/** - * i40evf_napi_disable_all - disable NAPI on all queue vectors + * iavf_napi_disable_all - disable NAPI on all queue vectors * @adapter: board private structure **/ -static void i40evf_napi_disable_all(struct i40evf_adapter *adapter) +static void iavf_napi_disable_all(struct iavf_adapter *adapter) { int q_idx; - struct i40e_q_vector *q_vector; + struct iavf_q_vector *q_vector; int q_vectors = adapter->num_msix_vectors - NONQ_VECS;
for (q_idx = 0; q_idx < q_vectors; q_idx++) { @@@ -965,67 -944,67 +942,67 @@@ }
/** - * i40evf_configure - set up transmit and receive data structures + * iavf_configure - set up transmit and receive data structures * @adapter: board private structure **/ -static void i40evf_configure(struct i40evf_adapter *adapter) +static void iavf_configure(struct iavf_adapter *adapter) { struct net_device *netdev = adapter->netdev; int i;
- i40evf_set_rx_mode(netdev); + iavf_set_rx_mode(netdev);
- i40evf_configure_tx(adapter); - i40evf_configure_rx(adapter); - adapter->aq_required |= I40EVF_FLAG_AQ_CONFIGURE_QUEUES; + iavf_configure_tx(adapter); + iavf_configure_rx(adapter); + adapter->aq_required |= IAVF_FLAG_AQ_CONFIGURE_QUEUES;
for (i = 0; i < adapter->num_active_queues; i++) { - struct i40e_ring *ring = &adapter->rx_rings[i]; + struct iavf_ring *ring = &adapter->rx_rings[i];
- i40evf_alloc_rx_buffers(ring, I40E_DESC_UNUSED(ring)); + iavf_alloc_rx_buffers(ring, IAVF_DESC_UNUSED(ring)); } }
/** - * i40evf_up_complete - Finish the last steps of bringing up a connection + * iavf_up_complete - Finish the last steps of bringing up a connection * @adapter: board private structure * - * Expects to be called while holding the __I40EVF_IN_CRITICAL_TASK bit lock. + * Expects to be called while holding the __IAVF_IN_CRITICAL_TASK bit lock. **/ -static void i40evf_up_complete(struct i40evf_adapter *adapter) +static void iavf_up_complete(struct iavf_adapter *adapter) { - adapter->state = __I40EVF_RUNNING; - clear_bit(__I40E_VSI_DOWN, adapter->vsi.state); + adapter->state = __IAVF_RUNNING; + clear_bit(__IAVF_VSI_DOWN, adapter->vsi.state);
- i40evf_napi_enable_all(adapter); + iavf_napi_enable_all(adapter);
- adapter->aq_required |= I40EVF_FLAG_AQ_ENABLE_QUEUES; + adapter->aq_required |= IAVF_FLAG_AQ_ENABLE_QUEUES; if (CLIENT_ENABLED(adapter)) - adapter->flags |= I40EVF_FLAG_CLIENT_NEEDS_OPEN; + adapter->flags |= IAVF_FLAG_CLIENT_NEEDS_OPEN; mod_timer_pending(&adapter->watchdog_timer, jiffies + 1); }
/** - * i40e_down - Shutdown the connection processing + * iavf_down - Shutdown the connection processing * @adapter: board private structure * - * Expects to be called while holding the __I40EVF_IN_CRITICAL_TASK bit lock. + * Expects to be called while holding the __IAVF_IN_CRITICAL_TASK bit lock. **/ -void i40evf_down(struct i40evf_adapter *adapter) +void iavf_down(struct iavf_adapter *adapter) { struct net_device *netdev = adapter->netdev; - struct i40evf_vlan_filter *vlf; - struct i40evf_mac_filter *f; - struct i40evf_cloud_filter *cf; + struct iavf_vlan_filter *vlf; + struct iavf_mac_filter *f; + struct iavf_cloud_filter *cf;
- if (adapter->state <= __I40EVF_DOWN_PENDING) + if (adapter->state <= __IAVF_DOWN_PENDING) return;
netif_carrier_off(netdev); netif_tx_disable(netdev); adapter->link_up = false; - i40evf_napi_disable_all(adapter); - i40evf_irq_disable(adapter); + iavf_napi_disable_all(adapter); + iavf_irq_disable(adapter);
spin_lock_bh(&adapter->mac_vlan_list_lock);
@@@ -1052,25 -1031,25 +1029,25 @@@ } spin_unlock_bh(&adapter->cloud_filter_list_lock);
- if (!(adapter->flags & I40EVF_FLAG_PF_COMMS_FAILED) && - adapter->state != __I40EVF_RESETTING) { + if (!(adapter->flags & IAVF_FLAG_PF_COMMS_FAILED) && + adapter->state != __IAVF_RESETTING) { /* cancel any current operation */ adapter->current_op = VIRTCHNL_OP_UNKNOWN; /* Schedule operations to close down the HW. Don't wait * here for this to complete. The watchdog is still running * and it will take care of this. */ - adapter->aq_required = I40EVF_FLAG_AQ_DEL_MAC_FILTER; - adapter->aq_required |= I40EVF_FLAG_AQ_DEL_VLAN_FILTER; - adapter->aq_required |= I40EVF_FLAG_AQ_DEL_CLOUD_FILTER; - adapter->aq_required |= I40EVF_FLAG_AQ_DISABLE_QUEUES; + adapter->aq_required = IAVF_FLAG_AQ_DEL_MAC_FILTER; + adapter->aq_required |= IAVF_FLAG_AQ_DEL_VLAN_FILTER; + adapter->aq_required |= IAVF_FLAG_AQ_DEL_CLOUD_FILTER; + adapter->aq_required |= IAVF_FLAG_AQ_DISABLE_QUEUES; }
mod_timer_pending(&adapter->watchdog_timer, jiffies + 1); }
/** - * i40evf_acquire_msix_vectors - Setup the MSIX capability + * iavf_acquire_msix_vectors - Setup the MSIX capability * @adapter: board private structure * @vectors: number of vectors to request * @@@ -1079,7 -1058,7 +1056,7 @@@ * Returns 0 on success, negative on failure **/ static int -i40evf_acquire_msix_vectors(struct i40evf_adapter *adapter, int vectors) +iavf_acquire_msix_vectors(struct iavf_adapter *adapter, int vectors) { int err, vector_threshold;
@@@ -1113,12 -1092,12 +1090,12 @@@ }
/** - * i40evf_free_queues - Free memory for all rings + * iavf_free_queues - Free memory for all rings * @adapter: board private structure to initialize * * Free all of the memory associated with queue pairs. **/ -static void i40evf_free_queues(struct i40evf_adapter *adapter) +static void iavf_free_queues(struct iavf_adapter *adapter) { if (!adapter->vsi_res) return; @@@ -1130,14 -1109,14 +1107,14 @@@ }
/** - * i40evf_alloc_queues - Allocate memory for all rings + * iavf_alloc_queues - Allocate memory for all rings * @adapter: board private structure to initialize * * We allocate one ring per queue at run-time since we don't know the * number of queues at compile-time. The polling_netdev array is * intended for Multiqueue, but should work fine with a single queue. **/ -static int i40evf_alloc_queues(struct i40evf_adapter *adapter) +static int iavf_alloc_queues(struct iavf_adapter *adapter) { int i, num_active_queues;
@@@ -1158,17 -1137,17 +1135,17 @@@
adapter->tx_rings = kcalloc(num_active_queues, - sizeof(struct i40e_ring), GFP_KERNEL); + sizeof(struct iavf_ring), GFP_KERNEL); if (!adapter->tx_rings) goto err_out; adapter->rx_rings = kcalloc(num_active_queues, - sizeof(struct i40e_ring), GFP_KERNEL); + sizeof(struct iavf_ring), GFP_KERNEL); if (!adapter->rx_rings) goto err_out;
for (i = 0; i < num_active_queues; i++) { - struct i40e_ring *tx_ring; - struct i40e_ring *rx_ring; + struct iavf_ring *tx_ring; + struct iavf_ring *rx_ring;
tx_ring = &adapter->tx_rings[i];
@@@ -1176,16 -1155,16 +1153,16 @@@ tx_ring->netdev = adapter->netdev; tx_ring->dev = &adapter->pdev->dev; tx_ring->count = adapter->tx_desc_count; - tx_ring->itr_setting = I40E_ITR_TX_DEF; - if (adapter->flags & I40EVF_FLAG_WB_ON_ITR_CAPABLE) - tx_ring->flags |= I40E_TXR_FLAGS_WB_ON_ITR; + tx_ring->itr_setting = IAVF_ITR_TX_DEF; + if (adapter->flags & IAVF_FLAG_WB_ON_ITR_CAPABLE) + tx_ring->flags |= IAVF_TXR_FLAGS_WB_ON_ITR;
rx_ring = &adapter->rx_rings[i]; rx_ring->queue_index = i; rx_ring->netdev = adapter->netdev; rx_ring->dev = &adapter->pdev->dev; rx_ring->count = adapter->rx_desc_count; - rx_ring->itr_setting = I40E_ITR_RX_DEF; + rx_ring->itr_setting = IAVF_ITR_RX_DEF; }
adapter->num_active_queues = num_active_queues; @@@ -1193,18 -1172,18 +1170,18 @@@ return 0;
err_out: - i40evf_free_queues(adapter); + iavf_free_queues(adapter); return -ENOMEM; }
/** - * i40evf_set_interrupt_capability - set MSI-X or FAIL if not supported + * iavf_set_interrupt_capability - set MSI-X or FAIL if not supported * @adapter: board private structure to initialize * * Attempt to configure the interrupts using the best available * capabilities of the hardware and the kernel. **/ -static int i40evf_set_interrupt_capability(struct i40evf_adapter *adapter) +static int iavf_set_interrupt_capability(struct iavf_adapter *adapter) { int vector, v_budget; int pairs = 0; @@@ -1234,7 -1213,7 +1211,7 @@@ for (vector = 0; vector < v_budget; vector++) adapter->msix_entries[vector].entry = vector;
- err = i40evf_acquire_msix_vectors(adapter, v_budget); + err = iavf_acquire_msix_vectors(adapter, v_budget);
out: netif_set_real_num_rx_queues(adapter->netdev, pairs); @@@ -1243,16 -1222,16 +1220,16 @@@ }
/** - * i40e_config_rss_aq - Configure RSS keys and lut by using AQ commands + * iavf_config_rss_aq - Configure RSS keys and lut by using AQ commands * @adapter: board private structure * * Return 0 on success, negative on failure **/ -static int i40evf_config_rss_aq(struct i40evf_adapter *adapter) +static int iavf_config_rss_aq(struct iavf_adapter *adapter) { struct i40e_aqc_get_set_rss_key_data *rss_key = (struct i40e_aqc_get_set_rss_key_data *)adapter->rss_key; - struct i40e_hw *hw = &adapter->hw; + struct iavf_hw *hw = &adapter->hw; int ret = 0;
if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) { @@@ -1262,21 -1241,21 +1239,21 @@@ return -EBUSY; }
- ret = i40evf_aq_set_rss_key(hw, adapter->vsi.id, rss_key); + ret = iavf_aq_set_rss_key(hw, adapter->vsi.id, rss_key); if (ret) { dev_err(&adapter->pdev->dev, "Cannot set RSS key, err %s aq_err %s\n", - i40evf_stat_str(hw, ret), - i40evf_aq_str(hw, hw->aq.asq_last_status)); + iavf_stat_str(hw, ret), + iavf_aq_str(hw, hw->aq.asq_last_status)); return ret;
}
- ret = i40evf_aq_set_rss_lut(hw, adapter->vsi.id, false, - adapter->rss_lut, adapter->rss_lut_size); + ret = iavf_aq_set_rss_lut(hw, adapter->vsi.id, false, + adapter->rss_lut, adapter->rss_lut_size); if (ret) { dev_err(&adapter->pdev->dev, "Cannot set RSS lut, err %s aq_err %s\n", - i40evf_stat_str(hw, ret), - i40evf_aq_str(hw, hw->aq.asq_last_status)); + iavf_stat_str(hw, ret), + iavf_aq_str(hw, hw->aq.asq_last_status)); }
return ret; @@@ -1284,55 -1263,55 +1261,55 @@@ }
/** - * i40evf_config_rss_reg - Configure RSS keys and lut by writing registers + * iavf_config_rss_reg - Configure RSS keys and lut by writing registers * @adapter: board private structure * * Returns 0 on success, negative on failure **/ -static int i40evf_config_rss_reg(struct i40evf_adapter *adapter) +static int iavf_config_rss_reg(struct iavf_adapter *adapter) { - struct i40e_hw *hw = &adapter->hw; + struct iavf_hw *hw = &adapter->hw; u32 *dw; u16 i;
dw = (u32 *)adapter->rss_key; for (i = 0; i <= adapter->rss_key_size / 4; i++) - wr32(hw, I40E_VFQF_HKEY(i), dw[i]); + wr32(hw, IAVF_VFQF_HKEY(i), dw[i]);
dw = (u32 *)adapter->rss_lut; for (i = 0; i <= adapter->rss_lut_size / 4; i++) - wr32(hw, I40E_VFQF_HLUT(i), dw[i]); + wr32(hw, IAVF_VFQF_HLUT(i), dw[i]);
- i40e_flush(hw); + iavf_flush(hw);
return 0; }
/** - * i40evf_config_rss - Configure RSS keys and lut + * iavf_config_rss - Configure RSS keys and lut * @adapter: board private structure * * Returns 0 on success, negative on failure **/ -int i40evf_config_rss(struct i40evf_adapter *adapter) +int iavf_config_rss(struct iavf_adapter *adapter) {
if (RSS_PF(adapter)) { - adapter->aq_required |= I40EVF_FLAG_AQ_SET_RSS_LUT | - I40EVF_FLAG_AQ_SET_RSS_KEY; + adapter->aq_required |= IAVF_FLAG_AQ_SET_RSS_LUT | + IAVF_FLAG_AQ_SET_RSS_KEY; return 0; } else if (RSS_AQ(adapter)) { - return i40evf_config_rss_aq(adapter); + return iavf_config_rss_aq(adapter); } else { - return i40evf_config_rss_reg(adapter); + return iavf_config_rss_reg(adapter); } }
/** - * i40evf_fill_rss_lut - Fill the lut with default values + * iavf_fill_rss_lut - Fill the lut with default values * @adapter: board private structure **/ -static void i40evf_fill_rss_lut(struct i40evf_adapter *adapter) +static void iavf_fill_rss_lut(struct iavf_adapter *adapter) { u16 i;
@@@ -1341,46 -1320,47 +1318,46 @@@ }
/** - * i40evf_init_rss - Prepare for RSS + * iavf_init_rss - Prepare for RSS * @adapter: board private structure * * Return 0 on success, negative on failure **/ -static int i40evf_init_rss(struct i40evf_adapter *adapter) +static int iavf_init_rss(struct iavf_adapter *adapter) { - struct i40e_hw *hw = &adapter->hw; + struct iavf_hw *hw = &adapter->hw; int ret;
if (!RSS_PF(adapter)) { /* Enable PCTYPES for RSS, TCP/UDP with IPv4/IPv6 */ if (adapter->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2) - adapter->hena = I40E_DEFAULT_RSS_HENA_EXPANDED; + adapter->hena = IAVF_DEFAULT_RSS_HENA_EXPANDED; else - adapter->hena = I40E_DEFAULT_RSS_HENA; + adapter->hena = IAVF_DEFAULT_RSS_HENA;
- wr32(hw, I40E_VFQF_HENA(0), (u32)adapter->hena); - wr32(hw, I40E_VFQF_HENA(1), (u32)(adapter->hena >> 32)); + wr32(hw, IAVF_VFQF_HENA(0), (u32)adapter->hena); + wr32(hw, IAVF_VFQF_HENA(1), (u32)(adapter->hena >> 32)); }
- i40evf_fill_rss_lut(adapter); - + iavf_fill_rss_lut(adapter); netdev_rss_key_fill((void *)adapter->rss_key, adapter->rss_key_size); - ret = i40evf_config_rss(adapter); + ret = iavf_config_rss(adapter);
return ret; }
/** - * i40evf_alloc_q_vectors - Allocate memory for interrupt vectors + * iavf_alloc_q_vectors - Allocate memory for interrupt vectors * @adapter: board private structure to initialize * * We allocate one q_vector per queue interrupt. If allocation fails we * return -ENOMEM. **/ -static int i40evf_alloc_q_vectors(struct i40evf_adapter *adapter) +static int iavf_alloc_q_vectors(struct iavf_adapter *adapter) { int q_idx = 0, num_q_vectors; - struct i40e_q_vector *q_vector; + struct iavf_q_vector *q_vector;
num_q_vectors = adapter->num_msix_vectors - NONQ_VECS; adapter->q_vectors = kcalloc(num_q_vectors, sizeof(*q_vector), @@@ -1396,21 -1376,21 +1373,21 @@@ q_vector->reg_idx = q_idx; cpumask_copy(&q_vector->affinity_mask, cpu_possible_mask); netif_napi_add(adapter->netdev, &q_vector->napi, - i40evf_napi_poll, NAPI_POLL_WEIGHT); + iavf_napi_poll, NAPI_POLL_WEIGHT); }
return 0; }
/** - * i40evf_free_q_vectors - Free memory allocated for interrupt vectors + * iavf_free_q_vectors - Free memory allocated for interrupt vectors * @adapter: board private structure to initialize * * This function frees the memory allocated to the q_vectors. In addition if * NAPI is enabled it will delete any references to the NAPI struct prior * to freeing the q_vector. **/ -static void i40evf_free_q_vectors(struct i40evf_adapter *adapter) +static void iavf_free_q_vectors(struct iavf_adapter *adapter) { int q_idx, num_q_vectors; int napi_vectors; @@@ -1422,8 -1402,7 +1399,8 @@@ napi_vectors = adapter->num_active_queues;
for (q_idx = 0; q_idx < num_q_vectors; q_idx++) { - struct i40e_q_vector *q_vector = &adapter->q_vectors[q_idx]; + struct iavf_q_vector *q_vector = &adapter->q_vectors[q_idx]; + if (q_idx < napi_vectors) netif_napi_del(&q_vector->napi); } @@@ -1432,11 -1411,11 +1409,11 @@@ }
/** - * i40evf_reset_interrupt_capability - Reset MSIX setup + * iavf_reset_interrupt_capability - Reset MSIX setup * @adapter: board private structure * **/ -void i40evf_reset_interrupt_capability(struct i40evf_adapter *adapter) +void iavf_reset_interrupt_capability(struct iavf_adapter *adapter) { if (!adapter->msix_entries) return; @@@ -1447,15 -1426,15 +1424,15 @@@ }
/** - * i40evf_init_interrupt_scheme - Determine if MSIX is supported and init + * iavf_init_interrupt_scheme - Determine if MSIX is supported and init * @adapter: board private structure to initialize * **/ -int i40evf_init_interrupt_scheme(struct i40evf_adapter *adapter) +int iavf_init_interrupt_scheme(struct iavf_adapter *adapter) { int err;
- err = i40evf_alloc_queues(adapter); + err = iavf_alloc_queues(adapter); if (err) { dev_err(&adapter->pdev->dev, "Unable to allocate memory for queues\n"); @@@ -1463,7 -1442,7 +1440,7 @@@ }
rtnl_lock(); - err = i40evf_set_interrupt_capability(adapter); + err = iavf_set_interrupt_capability(adapter); rtnl_unlock(); if (err) { dev_err(&adapter->pdev->dev, @@@ -1471,7 -1450,7 +1448,7 @@@ goto err_set_interrupt; }
- err = i40evf_alloc_q_vectors(adapter); + err = iavf_alloc_q_vectors(adapter); if (err) { dev_err(&adapter->pdev->dev, "Unable to allocate memory for queue vectors\n"); @@@ -1494,18 -1473,18 +1471,18 @@@
return 0; err_alloc_q_vectors: - i40evf_reset_interrupt_capability(adapter); + iavf_reset_interrupt_capability(adapter); err_set_interrupt: - i40evf_free_queues(adapter); + iavf_free_queues(adapter); err_alloc_queues: return err; }
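iavf_init_interrupt_scheme() above uses the kernel's standard goto-unwind error handling: each successful acquisition gets a label that releases everything obtained before it, in reverse order. A compact user-space illustration of the same idiom (all names invented):

#include <stdio.h>
#include <stdlib.h>

static int demo_init(void)
{
	void *queues, *vectors;

	queues = malloc(64);		/* think iavf_alloc_queues() */
	if (!queues)
		goto err_alloc_queues;

	vectors = malloc(64);		/* think iavf_alloc_q_vectors() */
	if (!vectors)
		goto err_alloc_vectors;

	/* success: the driver keeps these; the demo frees them only to
	 * stay leak-free */
	free(vectors);
	free(queues);
	return 0;

err_alloc_vectors:
	free(queues);			/* undo in reverse order */
err_alloc_queues:
	return -1;
}

int main(void)
{
	printf("init %s\n", demo_init() ? "failed" : "succeeded");
	return 0;
}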
/** - * i40evf_free_rss - Free memory used by RSS structs + * iavf_free_rss - Free memory used by RSS structs * @adapter: board private structure **/ -static void i40evf_free_rss(struct i40evf_adapter *adapter) +static void iavf_free_rss(struct iavf_adapter *adapter) { kfree(adapter->rss_key); adapter->rss_key = NULL; @@@ -1515,52 -1494,52 +1492,52 @@@ }
/** - * i40evf_reinit_interrupt_scheme - Reallocate queues and vectors + * iavf_reinit_interrupt_scheme - Reallocate queues and vectors * @adapter: board private structure * * Returns 0 on success, negative on failure **/ -static int i40evf_reinit_interrupt_scheme(struct i40evf_adapter *adapter) +static int iavf_reinit_interrupt_scheme(struct iavf_adapter *adapter) { struct net_device *netdev = adapter->netdev; int err;
if (netif_running(netdev)) - i40evf_free_traffic_irqs(adapter); - i40evf_free_misc_irq(adapter); - i40evf_reset_interrupt_capability(adapter); - i40evf_free_q_vectors(adapter); - i40evf_free_queues(adapter); + iavf_free_traffic_irqs(adapter); + iavf_free_misc_irq(adapter); + iavf_reset_interrupt_capability(adapter); + iavf_free_q_vectors(adapter); + iavf_free_queues(adapter);
- err = i40evf_init_interrupt_scheme(adapter); + err = iavf_init_interrupt_scheme(adapter); if (err) goto err;
netif_tx_stop_all_queues(netdev);
- err = i40evf_request_misc_irq(adapter); + err = iavf_request_misc_irq(adapter); if (err) goto err;
- set_bit(__I40E_VSI_DOWN, adapter->vsi.state); + set_bit(__IAVF_VSI_DOWN, adapter->vsi.state);
- i40evf_map_rings_to_vectors(adapter); + iavf_map_rings_to_vectors(adapter);
if (RSS_AQ(adapter)) - adapter->aq_required |= I40EVF_FLAG_AQ_CONFIGURE_RSS; + adapter->aq_required |= IAVF_FLAG_AQ_CONFIGURE_RSS; else - err = i40evf_init_rss(adapter); + err = iavf_init_rss(adapter); err: return err; }
/** - * i40evf_watchdog_timer - Periodic call-back timer + * iavf_watchdog_timer - Periodic call-back timer * @t: pointer to the timer_list containing our adapter **/ -static void i40evf_watchdog_timer(struct timer_list *t) +static void iavf_watchdog_timer(struct timer_list *t) { - struct i40evf_adapter *adapter = from_timer(adapter, t, + struct iavf_adapter *adapter = from_timer(adapter, t, watchdog_timer);
schedule_work(&adapter->watchdog_task); @@@ -1568,31 -1547,31 +1545,31 @@@ }
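iavf_watchdog_timer() runs in softirq context, so it does nothing but schedule the watchdog work item; anything that may sleep or talk to firmware happens later in process context. A minimal module-context sketch of the same timer-to-workqueue deferral; timer_setup(), from_timer(), schedule_work() and mod_timer() are the real kernel APIs, everything else is invented for the demo:

#include <linux/module.h>
#include <linux/timer.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>

struct demo_adapter {
	struct timer_list watchdog_timer;
	struct work_struct watchdog_task;
};

static struct demo_adapter demo;

static void demo_watchdog_task(struct work_struct *work)
{
	/* process context: safe to sleep, take mutexes, talk to firmware */
	pr_info("watchdog work ran\n");
}

static void demo_watchdog_timer(struct timer_list *t)
{
	struct demo_adapter *adapter = from_timer(adapter, t, watchdog_timer);

	/* softirq context: defer the real work, then re-arm */
	schedule_work(&adapter->watchdog_task);
	mod_timer(&adapter->watchdog_timer, jiffies + 2 * HZ);
}

static int __init demo_init(void)
{
	INIT_WORK(&demo.watchdog_task, demo_watchdog_task);
	timer_setup(&demo.watchdog_timer, demo_watchdog_timer, 0);
	mod_timer(&demo.watchdog_timer, jiffies + 2 * HZ);
	return 0;
}

static void __exit demo_exit(void)
{
	del_timer_sync(&demo.watchdog_timer);
	cancel_work_sync(&demo.watchdog_task);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");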
/** - * i40evf_watchdog_task - Periodic call-back task + * iavf_watchdog_task - Periodic call-back task * @work: pointer to work_struct **/ -static void i40evf_watchdog_task(struct work_struct *work) +static void iavf_watchdog_task(struct work_struct *work) { - struct i40evf_adapter *adapter = container_of(work, - struct i40evf_adapter, + struct iavf_adapter *adapter = container_of(work, + struct iavf_adapter, watchdog_task); - struct i40e_hw *hw = &adapter->hw; + struct iavf_hw *hw = &adapter->hw; u32 reg_val;
- if (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section)) + if (test_and_set_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section)) goto restart_watchdog;
- if (adapter->flags & I40EVF_FLAG_PF_COMMS_FAILED) { - reg_val = rd32(hw, I40E_VFGEN_RSTAT) & - I40E_VFGEN_RSTAT_VFR_STATE_MASK; + if (adapter->flags & IAVF_FLAG_PF_COMMS_FAILED) { + reg_val = rd32(hw, IAVF_VFGEN_RSTAT) & + IAVF_VFGEN_RSTAT_VFR_STATE_MASK; if ((reg_val == VIRTCHNL_VFR_VFACTIVE) || (reg_val == VIRTCHNL_VFR_COMPLETED)) { /* A chance for redemption! */ dev_err(&adapter->pdev->dev, "Hardware came out of reset. Attempting reinit.\n"); - adapter->state = __I40EVF_STARTUP; - adapter->flags &= ~I40EVF_FLAG_PF_COMMS_FAILED; + adapter->state = __IAVF_STARTUP; + adapter->flags &= ~IAVF_FLAG_PF_COMMS_FAILED; schedule_delayed_work(&adapter->init_task, 10); - clear_bit(__I40EVF_IN_CRITICAL_TASK, + clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section); /* Don't reschedule the watchdog, since we've restarted * the init task. When init_task contacts the PF and @@@ -1606,15 -1585,15 +1583,15 @@@ goto watchdog_done; }
- if ((adapter->state < __I40EVF_DOWN) || - (adapter->flags & I40EVF_FLAG_RESET_PENDING)) + if ((adapter->state < __IAVF_DOWN) || + (adapter->flags & IAVF_FLAG_RESET_PENDING)) goto watchdog_done;
/* check for reset */ - reg_val = rd32(hw, I40E_VF_ARQLEN1) & I40E_VF_ARQLEN1_ARQENABLE_MASK; - if (!(adapter->flags & I40EVF_FLAG_RESET_PENDING) && !reg_val) { - adapter->state = __I40EVF_RESETTING; - adapter->flags |= I40EVF_FLAG_RESET_PENDING; + reg_val = rd32(hw, IAVF_VF_ARQLEN1) & IAVF_VF_ARQLEN1_ARQENABLE_MASK; + if (!(adapter->flags & IAVF_FLAG_RESET_PENDING) && !reg_val) { + adapter->state = __IAVF_RESETTING; + adapter->flags |= IAVF_FLAG_RESET_PENDING; dev_err(&adapter->pdev->dev, "Hardware reset detected\n"); schedule_work(&adapter->reset_task); adapter->aq_required = 0; @@@ -1626,140 -1605,140 +1603,140 @@@ * here so we don't race on the admin queue. */ if (adapter->current_op) { - if (!i40evf_asq_done(hw)) { + if (!iavf_asq_done(hw)) { dev_dbg(&adapter->pdev->dev, "Admin queue timeout\n"); - i40evf_send_api_ver(adapter); + iavf_send_api_ver(adapter); } goto watchdog_done; } - if (adapter->aq_required & I40EVF_FLAG_AQ_GET_CONFIG) { - i40evf_send_vf_config_msg(adapter); + if (adapter->aq_required & IAVF_FLAG_AQ_GET_CONFIG) { + iavf_send_vf_config_msg(adapter); goto watchdog_done; }
- if (adapter->aq_required & I40EVF_FLAG_AQ_DISABLE_QUEUES) { - i40evf_disable_queues(adapter); + if (adapter->aq_required & IAVF_FLAG_AQ_DISABLE_QUEUES) { + iavf_disable_queues(adapter); goto watchdog_done; }
- if (adapter->aq_required & I40EVF_FLAG_AQ_MAP_VECTORS) { - i40evf_map_queues(adapter); + if (adapter->aq_required & IAVF_FLAG_AQ_MAP_VECTORS) { + iavf_map_queues(adapter); goto watchdog_done; }
- if (adapter->aq_required & I40EVF_FLAG_AQ_ADD_MAC_FILTER) { - i40evf_add_ether_addrs(adapter); + if (adapter->aq_required & IAVF_FLAG_AQ_ADD_MAC_FILTER) { + iavf_add_ether_addrs(adapter); goto watchdog_done; }
- if (adapter->aq_required & I40EVF_FLAG_AQ_ADD_VLAN_FILTER) { - i40evf_add_vlans(adapter); + if (adapter->aq_required & IAVF_FLAG_AQ_ADD_VLAN_FILTER) { + iavf_add_vlans(adapter); goto watchdog_done; }
- if (adapter->aq_required & I40EVF_FLAG_AQ_DEL_MAC_FILTER) { - i40evf_del_ether_addrs(adapter); + if (adapter->aq_required & IAVF_FLAG_AQ_DEL_MAC_FILTER) { + iavf_del_ether_addrs(adapter); goto watchdog_done; }
- if (adapter->aq_required & I40EVF_FLAG_AQ_DEL_VLAN_FILTER) { - i40evf_del_vlans(adapter); + if (adapter->aq_required & IAVF_FLAG_AQ_DEL_VLAN_FILTER) { + iavf_del_vlans(adapter); goto watchdog_done; }
- if (adapter->aq_required & I40EVF_FLAG_AQ_ENABLE_VLAN_STRIPPING) { - i40evf_enable_vlan_stripping(adapter); + if (adapter->aq_required & IAVF_FLAG_AQ_ENABLE_VLAN_STRIPPING) { + iavf_enable_vlan_stripping(adapter); goto watchdog_done; }
- if (adapter->aq_required & I40EVF_FLAG_AQ_DISABLE_VLAN_STRIPPING) { - i40evf_disable_vlan_stripping(adapter); + if (adapter->aq_required & IAVF_FLAG_AQ_DISABLE_VLAN_STRIPPING) { + iavf_disable_vlan_stripping(adapter); goto watchdog_done; }
- if (adapter->aq_required & I40EVF_FLAG_AQ_CONFIGURE_QUEUES) { - i40evf_configure_queues(adapter); + if (adapter->aq_required & IAVF_FLAG_AQ_CONFIGURE_QUEUES) { + iavf_configure_queues(adapter); goto watchdog_done; }
- if (adapter->aq_required & I40EVF_FLAG_AQ_ENABLE_QUEUES) { - i40evf_enable_queues(adapter); + if (adapter->aq_required & IAVF_FLAG_AQ_ENABLE_QUEUES) { + iavf_enable_queues(adapter); goto watchdog_done; }
- if (adapter->aq_required & I40EVF_FLAG_AQ_CONFIGURE_RSS) { + if (adapter->aq_required & IAVF_FLAG_AQ_CONFIGURE_RSS) { /* This message goes straight to the firmware, not the * PF, so we don't have to set current_op as we will * not get a response through the ARQ. */ - i40evf_init_rss(adapter); - adapter->aq_required &= ~I40EVF_FLAG_AQ_CONFIGURE_RSS; + iavf_init_rss(adapter); + adapter->aq_required &= ~IAVF_FLAG_AQ_CONFIGURE_RSS; goto watchdog_done; } - if (adapter->aq_required & I40EVF_FLAG_AQ_GET_HENA) { - i40evf_get_hena(adapter); + if (adapter->aq_required & IAVF_FLAG_AQ_GET_HENA) { + iavf_get_hena(adapter); goto watchdog_done; } - if (adapter->aq_required & I40EVF_FLAG_AQ_SET_HENA) { - i40evf_set_hena(adapter); + if (adapter->aq_required & IAVF_FLAG_AQ_SET_HENA) { + iavf_set_hena(adapter); goto watchdog_done; } - if (adapter->aq_required & I40EVF_FLAG_AQ_SET_RSS_KEY) { - i40evf_set_rss_key(adapter); + if (adapter->aq_required & IAVF_FLAG_AQ_SET_RSS_KEY) { + iavf_set_rss_key(adapter); goto watchdog_done; } - if (adapter->aq_required & I40EVF_FLAG_AQ_SET_RSS_LUT) { - i40evf_set_rss_lut(adapter); + if (adapter->aq_required & IAVF_FLAG_AQ_SET_RSS_LUT) { + iavf_set_rss_lut(adapter); goto watchdog_done; }
- if (adapter->aq_required & I40EVF_FLAG_AQ_REQUEST_PROMISC) { - i40evf_set_promiscuous(adapter, FLAG_VF_UNICAST_PROMISC | + if (adapter->aq_required & IAVF_FLAG_AQ_REQUEST_PROMISC) { + iavf_set_promiscuous(adapter, FLAG_VF_UNICAST_PROMISC | FLAG_VF_MULTICAST_PROMISC); goto watchdog_done; }
- if (adapter->aq_required & I40EVF_FLAG_AQ_REQUEST_ALLMULTI) { - i40evf_set_promiscuous(adapter, FLAG_VF_MULTICAST_PROMISC); + if (adapter->aq_required & IAVF_FLAG_AQ_REQUEST_ALLMULTI) { + iavf_set_promiscuous(adapter, FLAG_VF_MULTICAST_PROMISC); goto watchdog_done; }
- if ((adapter->aq_required & I40EVF_FLAG_AQ_RELEASE_PROMISC) && - (adapter->aq_required & I40EVF_FLAG_AQ_RELEASE_ALLMULTI)) { - i40evf_set_promiscuous(adapter, 0); + if ((adapter->aq_required & IAVF_FLAG_AQ_RELEASE_PROMISC) && + (adapter->aq_required & IAVF_FLAG_AQ_RELEASE_ALLMULTI)) { + iavf_set_promiscuous(adapter, 0); goto watchdog_done; }
- if (adapter->aq_required & I40EVF_FLAG_AQ_ENABLE_CHANNELS) { - i40evf_enable_channels(adapter); + if (adapter->aq_required & IAVF_FLAG_AQ_ENABLE_CHANNELS) { + iavf_enable_channels(adapter); goto watchdog_done; }
- if (adapter->aq_required & I40EVF_FLAG_AQ_DISABLE_CHANNELS) { - i40evf_disable_channels(adapter); + if (adapter->aq_required & IAVF_FLAG_AQ_DISABLE_CHANNELS) { + iavf_disable_channels(adapter); goto watchdog_done; }
- if (adapter->aq_required & I40EVF_FLAG_AQ_ADD_CLOUD_FILTER) { - i40evf_add_cloud_filter(adapter); + if (adapter->aq_required & IAVF_FLAG_AQ_ADD_CLOUD_FILTER) { + iavf_add_cloud_filter(adapter); goto watchdog_done; }
- if (adapter->aq_required & I40EVF_FLAG_AQ_DEL_CLOUD_FILTER) { - i40evf_del_cloud_filter(adapter); + if (adapter->aq_required & IAVF_FLAG_AQ_DEL_CLOUD_FILTER) { + iavf_del_cloud_filter(adapter); goto watchdog_done; }
schedule_delayed_work(&adapter->client_task, msecs_to_jiffies(5));
- if (adapter->state == __I40EVF_RUNNING) - i40evf_request_stats(adapter); + if (adapter->state == __IAVF_RUNNING) + iavf_request_stats(adapter); watchdog_done: - if (adapter->state == __I40EVF_RUNNING) - i40evf_detect_recover_hung(&adapter->vsi); - clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section); + if (adapter->state == __IAVF_RUNNING) + iavf_detect_recover_hung(&adapter->vsi); + clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section); restart_watchdog: - if (adapter->state == __I40EVF_REMOVE) + if (adapter->state == __IAVF_REMOVE) return; if (adapter->aq_required) mod_timer(&adapter->watchdog_timer, @@@ -1769,28 -1748,28 +1746,28 @@@ schedule_work(&adapter->adminq_task); }
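The long if-chain in iavf_watchdog_task() services at most one pending admin-queue request per pass, since every branch ends in goto watchdog_done, so a single virtchnl operation is outstanding at a time. In the driver each iavf_* send helper clears its own flag; the user-space model below clears the flag inline to stay self-contained (the REQ_* bits are stand-ins for the IAVF_FLAG_AQ_* flags):

#include <stdio.h>

/* Stand-ins for a few IAVF_FLAG_AQ_* request bits. */
#define REQ_MAP_VECTORS		(1u << 0)
#define REQ_ADD_MAC_FILTER	(1u << 1)
#define REQ_ENABLE_QUEUES	(1u << 2)

/* One watchdog pass: service at most one request, like the if/goto chain. */
static unsigned int watchdog_pass(unsigned int aq_required)
{
	if (aq_required & REQ_MAP_VECTORS) {
		printf("send: map vectors\n");
		return aq_required & ~REQ_MAP_VECTORS;
	}
	if (aq_required & REQ_ADD_MAC_FILTER) {
		printf("send: add MAC filter\n");
		return aq_required & ~REQ_ADD_MAC_FILTER;
	}
	if (aq_required & REQ_ENABLE_QUEUES) {
		printf("send: enable queues\n");
		return aq_required & ~REQ_ENABLE_QUEUES;
	}
	return aq_required;
}

int main(void)
{
	unsigned int pending = REQ_MAP_VECTORS | REQ_ADD_MAC_FILTER |
			       REQ_ENABLE_QUEUES;
	int tick = 0;

	while (pending) {
		printf("tick %d: ", ++tick);
		pending = watchdog_pass(pending);
	}
	return 0;
}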
-static void i40evf_disable_vf(struct i40evf_adapter *adapter) +static void iavf_disable_vf(struct iavf_adapter *adapter) { - struct i40evf_mac_filter *f, *ftmp; - struct i40evf_vlan_filter *fv, *fvtmp; - struct i40evf_cloud_filter *cf, *cftmp; + struct iavf_mac_filter *f, *ftmp; + struct iavf_vlan_filter *fv, *fvtmp; + struct iavf_cloud_filter *cf, *cftmp;
- adapter->flags |= I40EVF_FLAG_PF_COMMS_FAILED; + adapter->flags |= IAVF_FLAG_PF_COMMS_FAILED;
/* We don't use netif_running() because it may be true prior to * ndo_open() returning, so we can't assume it means all our open * tasks have finished, since we're not holding the rtnl_lock here. */ - if (adapter->state == __I40EVF_RUNNING) { - set_bit(__I40E_VSI_DOWN, adapter->vsi.state); + if (adapter->state == __IAVF_RUNNING) { + set_bit(__IAVF_VSI_DOWN, adapter->vsi.state); netif_carrier_off(adapter->netdev); netif_tx_disable(adapter->netdev); adapter->link_up = false; - i40evf_napi_disable_all(adapter); - i40evf_irq_disable(adapter); - i40evf_free_traffic_irqs(adapter); - i40evf_free_all_tx_resources(adapter); - i40evf_free_all_rx_resources(adapter); + iavf_napi_disable_all(adapter); + iavf_irq_disable(adapter); + iavf_free_traffic_irqs(adapter); + iavf_free_all_tx_resources(adapter); + iavf_free_all_rx_resources(adapter); }
spin_lock_bh(&adapter->mac_vlan_list_lock); @@@ -1816,41 -1795,41 +1793,41 @@@ } spin_unlock_bh(&adapter->cloud_filter_list_lock);
- i40evf_free_misc_irq(adapter); - i40evf_reset_interrupt_capability(adapter); - i40evf_free_queues(adapter); - i40evf_free_q_vectors(adapter); + iavf_free_misc_irq(adapter); + iavf_reset_interrupt_capability(adapter); + iavf_free_queues(adapter); + iavf_free_q_vectors(adapter); kfree(adapter->vf_res); - i40evf_shutdown_adminq(&adapter->hw); + iavf_shutdown_adminq(&adapter->hw); adapter->netdev->flags &= ~IFF_UP; - clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section); - adapter->flags &= ~I40EVF_FLAG_RESET_PENDING; - adapter->state = __I40EVF_DOWN; + clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section); + adapter->flags &= ~IAVF_FLAG_RESET_PENDING; + adapter->state = __IAVF_DOWN; wake_up(&adapter->down_waitqueue); dev_info(&adapter->pdev->dev, "Reset task did not complete, VF disabled\n"); }
-#define I40EVF_RESET_WAIT_MS 10 -#define I40EVF_RESET_WAIT_COUNT 500 +#define IAVF_RESET_WAIT_MS 10 +#define IAVF_RESET_WAIT_COUNT 500 /** - * i40evf_reset_task - Call-back task to handle hardware reset + * iavf_reset_task - Call-back task to handle hardware reset * @work: pointer to work_struct * * During reset we need to shut down and reinitialize the admin queue * before we can use it to communicate with the PF again. We also clear * and reinit the rings because that context is lost as well. **/ -static void i40evf_reset_task(struct work_struct *work) +static void iavf_reset_task(struct work_struct *work) { - struct i40evf_adapter *adapter = container_of(work, - struct i40evf_adapter, + struct iavf_adapter *adapter = container_of(work, + struct iavf_adapter, reset_task); struct virtchnl_vf_resource *vfres = adapter->vf_res; struct net_device *netdev = adapter->netdev; - struct i40e_hw *hw = &adapter->hw; - struct i40evf_vlan_filter *vlf; - struct i40evf_cloud_filter *cf; - struct i40evf_mac_filter *f; + struct iavf_hw *hw = &adapter->hw; + struct iavf_vlan_filter *vlf; + struct iavf_cloud_filter *cf; + struct iavf_mac_filter *f; u32 reg_val; int i = 0, err; bool running; @@@ -1858,63 -1837,63 +1835,63 @@@ /* When device is being removed it doesn't make sense to run the reset * task, just return in such a case. */ - if (test_bit(__I40EVF_IN_REMOVE_TASK, &adapter->crit_section)) + if (test_bit(__IAVF_IN_REMOVE_TASK, &adapter->crit_section)) return;
- while (test_and_set_bit(__I40EVF_IN_CLIENT_TASK, + while (test_and_set_bit(__IAVF_IN_CLIENT_TASK, &adapter->crit_section)) usleep_range(500, 1000); if (CLIENT_ENABLED(adapter)) { - adapter->flags &= ~(I40EVF_FLAG_CLIENT_NEEDS_OPEN | - I40EVF_FLAG_CLIENT_NEEDS_CLOSE | - I40EVF_FLAG_CLIENT_NEEDS_L2_PARAMS | - I40EVF_FLAG_SERVICE_CLIENT_REQUESTED); + adapter->flags &= ~(IAVF_FLAG_CLIENT_NEEDS_OPEN | + IAVF_FLAG_CLIENT_NEEDS_CLOSE | + IAVF_FLAG_CLIENT_NEEDS_L2_PARAMS | + IAVF_FLAG_SERVICE_CLIENT_REQUESTED); cancel_delayed_work_sync(&adapter->client_task); - i40evf_notify_client_close(&adapter->vsi, true); + iavf_notify_client_close(&adapter->vsi, true); } - i40evf_misc_irq_disable(adapter); - if (adapter->flags & I40EVF_FLAG_RESET_NEEDED) { - adapter->flags &= ~I40EVF_FLAG_RESET_NEEDED; + iavf_misc_irq_disable(adapter); + if (adapter->flags & IAVF_FLAG_RESET_NEEDED) { + adapter->flags &= ~IAVF_FLAG_RESET_NEEDED; /* Restart the AQ here. If we have been reset but didn't * detect it, or if the PF had to reinit, our AQ will be hosed. */ - i40evf_shutdown_adminq(hw); - i40evf_init_adminq(hw); - i40evf_request_reset(adapter); + iavf_shutdown_adminq(hw); + iavf_init_adminq(hw); + iavf_request_reset(adapter); } - adapter->flags |= I40EVF_FLAG_RESET_PENDING; + adapter->flags |= IAVF_FLAG_RESET_PENDING;
/* poll until we see the reset actually happen */ - for (i = 0; i < I40EVF_RESET_WAIT_COUNT; i++) { - reg_val = rd32(hw, I40E_VF_ARQLEN1) & - I40E_VF_ARQLEN1_ARQENABLE_MASK; + for (i = 0; i < IAVF_RESET_WAIT_COUNT; i++) { + reg_val = rd32(hw, IAVF_VF_ARQLEN1) & + IAVF_VF_ARQLEN1_ARQENABLE_MASK; if (!reg_val) break; usleep_range(5000, 10000); } - if (i == I40EVF_RESET_WAIT_COUNT) { + if (i == IAVF_RESET_WAIT_COUNT) { dev_info(&adapter->pdev->dev, "Never saw reset\n"); goto continue_reset; /* act like the reset happened */ }
/* wait until the reset is complete and the PF is responding to us */ - for (i = 0; i < I40EVF_RESET_WAIT_COUNT; i++) { + for (i = 0; i < IAVF_RESET_WAIT_COUNT; i++) { /* sleep first to make sure a minimum wait time is met */ - msleep(I40EVF_RESET_WAIT_MS); + msleep(IAVF_RESET_WAIT_MS);
- reg_val = rd32(hw, I40E_VFGEN_RSTAT) & - I40E_VFGEN_RSTAT_VFR_STATE_MASK; + reg_val = rd32(hw, IAVF_VFGEN_RSTAT) & + IAVF_VFGEN_RSTAT_VFR_STATE_MASK; if (reg_val == VIRTCHNL_VFR_VFACTIVE) break; }
pci_set_master(adapter->pdev);
- if (i == I40EVF_RESET_WAIT_COUNT) { + if (i == IAVF_RESET_WAIT_COUNT) { dev_err(&adapter->pdev->dev, "Reset never finished (%x)\n", reg_val); - i40evf_disable_vf(adapter); - clear_bit(__I40EVF_IN_CLIENT_TASK, &adapter->crit_section); + iavf_disable_vf(adapter); + clear_bit(__IAVF_IN_CLIENT_TASK, &adapter->crit_section); return; /* Do not attempt to reinit. It's dead, Jim. */ }
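The reset task polls in two bounded loops: first until the ARQ enable bit drops (the reset really started), then until VFGEN_RSTAT reports the VF active again, giving up after IAVF_RESET_WAIT_COUNT iterations. A stand-alone sketch of that bounded-poll pattern against a simulated status register:

#include <stdio.h>

#define RESET_WAIT_COUNT 500	/* mirrors IAVF_RESET_WAIT_COUNT */

/* Stand-in for rd32(hw, IAVF_VFGEN_RSTAT): report the VF active again
 * after a few polls. */
static unsigned int fake_rstat(int poll)
{
	return poll >= 3 ? 1 /* "VFACTIVE" */ : 0;
}

int main(void)
{
	int i;

	for (i = 0; i < RESET_WAIT_COUNT; i++) {
		/* the driver sleeps IAVF_RESET_WAIT_MS between polls */
		if (fake_rstat(i) == 1)
			break;
	}
	if (i == RESET_WAIT_COUNT)
		printf("reset never finished\n");	/* give up, disable VF */
	else
		printf("VF active after %d polls\n", i + 1);
	return 0;
}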
@@@ -1923,44 -1902,44 +1900,44 @@@ continue_reset * ndo_open() returning, so we can't assume it means all our open * tasks have finished, since we're not holding the rtnl_lock here. */ - running = ((adapter->state == __I40EVF_RUNNING) || - (adapter->state == __I40EVF_RESETTING)); + running = ((adapter->state == __IAVF_RUNNING) || + (adapter->state == __IAVF_RESETTING));
if (running) { netif_carrier_off(netdev); netif_tx_stop_all_queues(netdev); adapter->link_up = false; - i40evf_napi_disable_all(adapter); + iavf_napi_disable_all(adapter); } - i40evf_irq_disable(adapter); + iavf_irq_disable(adapter);
- adapter->state = __I40EVF_RESETTING; - adapter->flags &= ~I40EVF_FLAG_RESET_PENDING; + adapter->state = __IAVF_RESETTING; + adapter->flags &= ~IAVF_FLAG_RESET_PENDING;
/* free the Tx/Rx rings and descriptors, might be better to just * re-use them sometime in the future */ - i40evf_free_all_rx_resources(adapter); - i40evf_free_all_tx_resources(adapter); + iavf_free_all_rx_resources(adapter); + iavf_free_all_tx_resources(adapter);
- adapter->flags |= I40EVF_FLAG_QUEUES_DISABLED; + adapter->flags |= IAVF_FLAG_QUEUES_DISABLED; /* kill and reinit the admin queue */ - i40evf_shutdown_adminq(hw); + iavf_shutdown_adminq(hw); adapter->current_op = VIRTCHNL_OP_UNKNOWN; - err = i40evf_init_adminq(hw); + err = iavf_init_adminq(hw); if (err) dev_info(&adapter->pdev->dev, "Failed to init adminq: %d\n", err); adapter->aq_required = 0;
- if (adapter->flags & I40EVF_FLAG_REINIT_ITR_NEEDED) { - err = i40evf_reinit_interrupt_scheme(adapter); + if (adapter->flags & IAVF_FLAG_REINIT_ITR_NEEDED) { + err = iavf_reinit_interrupt_scheme(adapter); if (err) goto reset_err; }
- adapter->aq_required |= I40EVF_FLAG_AQ_GET_CONFIG; - adapter->aq_required |= I40EVF_FLAG_AQ_MAP_VECTORS; + adapter->aq_required |= IAVF_FLAG_AQ_GET_CONFIG; + adapter->aq_required |= IAVF_FLAG_AQ_MAP_VECTORS;
spin_lock_bh(&adapter->mac_vlan_list_lock);
@@@ -1985,10 -1964,10 +1962,10 @@@ } spin_unlock_bh(&adapter->cloud_filter_list_lock);
- adapter->aq_required |= I40EVF_FLAG_AQ_ADD_MAC_FILTER; - adapter->aq_required |= I40EVF_FLAG_AQ_ADD_VLAN_FILTER; - adapter->aq_required |= I40EVF_FLAG_AQ_ADD_CLOUD_FILTER; - i40evf_misc_irq_enable(adapter); + adapter->aq_required |= IAVF_FLAG_AQ_ADD_MAC_FILTER; + adapter->aq_required |= IAVF_FLAG_AQ_ADD_VLAN_FILTER; + adapter->aq_required |= IAVF_FLAG_AQ_ADD_CLOUD_FILTER; + iavf_misc_irq_enable(adapter);
mod_timer(&adapter->watchdog_timer, jiffies + 2);
@@@ -1997,83 -1976,84 +1974,83 @@@ */ if (running) { /* allocate transmit descriptors */ - err = i40evf_setup_all_tx_resources(adapter); + err = iavf_setup_all_tx_resources(adapter); if (err) goto reset_err;
/* allocate receive descriptors */ - err = i40evf_setup_all_rx_resources(adapter); + err = iavf_setup_all_rx_resources(adapter); if (err) goto reset_err;
- if (adapter->flags & I40EVF_FLAG_REINIT_ITR_NEEDED) { - err = i40evf_request_traffic_irqs(adapter, - netdev->name); + if (adapter->flags & IAVF_FLAG_REINIT_ITR_NEEDED) { + err = iavf_request_traffic_irqs(adapter, netdev->name); if (err) goto reset_err;
- adapter->flags &= ~I40EVF_FLAG_REINIT_ITR_NEEDED; + adapter->flags &= ~IAVF_FLAG_REINIT_ITR_NEEDED; }
- i40evf_configure(adapter); + iavf_configure(adapter);
- i40evf_up_complete(adapter); + iavf_up_complete(adapter);
- i40evf_irq_enable(adapter, true); + iavf_irq_enable(adapter, true); } else { - adapter->state = __I40EVF_DOWN; + adapter->state = __IAVF_DOWN; wake_up(&adapter->down_waitqueue); } - clear_bit(__I40EVF_IN_CLIENT_TASK, &adapter->crit_section); - clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section); + clear_bit(__IAVF_IN_CLIENT_TASK, &adapter->crit_section); + clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section);
return; reset_err: - clear_bit(__I40EVF_IN_CLIENT_TASK, &adapter->crit_section); - clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section); + clear_bit(__IAVF_IN_CLIENT_TASK, &adapter->crit_section); + clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section); dev_err(&adapter->pdev->dev, "failed to allocate resources during reinit\n"); - i40evf_close(netdev); + iavf_close(netdev); }
/** - * i40evf_adminq_task - worker thread to clean the admin queue + * iavf_adminq_task - worker thread to clean the admin queue * @work: pointer to work_struct containing our data **/ -static void i40evf_adminq_task(struct work_struct *work) +static void iavf_adminq_task(struct work_struct *work) { - struct i40evf_adapter *adapter = - container_of(work, struct i40evf_adapter, adminq_task); - struct i40e_hw *hw = &adapter->hw; + struct iavf_adapter *adapter = + container_of(work, struct iavf_adapter, adminq_task); + struct iavf_hw *hw = &adapter->hw; struct i40e_arq_event_info event; enum virtchnl_ops v_op; - i40e_status ret, v_ret; + iavf_status ret, v_ret; u32 val, oldval; u16 pending;
- if (adapter->flags & I40EVF_FLAG_PF_COMMS_FAILED) + if (adapter->flags & IAVF_FLAG_PF_COMMS_FAILED) goto out;
- event.buf_len = I40EVF_MAX_AQ_BUF_SIZE; + event.buf_len = IAVF_MAX_AQ_BUF_SIZE; event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL); if (!event.msg_buf) goto out;
do { - ret = i40evf_clean_arq_element(hw, &event, &pending); + ret = iavf_clean_arq_element(hw, &event, &pending); v_op = (enum virtchnl_ops)le32_to_cpu(event.desc.cookie_high); - v_ret = (i40e_status)le32_to_cpu(event.desc.cookie_low); + v_ret = (iavf_status)le32_to_cpu(event.desc.cookie_low);
if (ret || !v_op) break; /* No event to process or error cleaning ARQ */
- i40evf_virtchnl_completion(adapter, v_op, v_ret, event.msg_buf, - event.msg_len); + iavf_virtchnl_completion(adapter, v_op, v_ret, event.msg_buf, + event.msg_len); if (pending != 0) - memset(event.msg_buf, 0, I40EVF_MAX_AQ_BUF_SIZE); + memset(event.msg_buf, 0, IAVF_MAX_AQ_BUF_SIZE); } while (pending);
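The adminq task drains the receive queue in a do/while loop: each iavf_clean_arq_element() call hands back one event plus a count of what is still pending, and the shared event buffer is rezeroed between events. A small user-space model of that drain loop, with fake_clean_arq() standing in for the real ARQ helper:

#include <stdio.h>
#include <string.h>

#define MAX_AQ_BUF 64

/* Fake ARQ: report one "event" per call and how many are still queued,
 * the way iavf_clean_arq_element() fills *pending. */
static int fake_clean_arq(int *queue_depth, int *opcode, int *pending)
{
	if (*queue_depth == 0)
		return -1;		/* nothing left to process */
	*opcode = 100 + *queue_depth;	/* pretend virtchnl opcode */
	*pending = --(*queue_depth);
	return 0;
}

int main(void)
{
	char buf[MAX_AQ_BUF];
	int depth = 3, op = 0, pending = 0;

	do {
		if (fake_clean_arq(&depth, &op, &pending) || !op)
			break;		/* no event, or error cleaning ARQ */
		printf("completion for opcode %d\n", op);
		if (pending != 0)
			memset(buf, 0, sizeof(buf));	/* reuse the buffer */
	} while (pending);
	return 0;
}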
if ((adapter->flags & - (I40EVF_FLAG_RESET_PENDING | I40EVF_FLAG_RESET_NEEDED)) || - adapter->state == __I40EVF_RESETTING) + (IAVF_FLAG_RESET_PENDING | IAVF_FLAG_RESET_NEEDED)) || + adapter->state == __IAVF_RESETTING) goto freedom;
/* check for error indications */ @@@ -2081,34 -2061,34 +2058,34 @@@ if (val == 0xdeadbeef) /* indicates device in reset */ goto freedom; oldval = val; - if (val & I40E_VF_ARQLEN1_ARQVFE_MASK) { + if (val & IAVF_VF_ARQLEN1_ARQVFE_MASK) { dev_info(&adapter->pdev->dev, "ARQ VF Error detected\n"); - val &= ~I40E_VF_ARQLEN1_ARQVFE_MASK; + val &= ~IAVF_VF_ARQLEN1_ARQVFE_MASK; } - if (val & I40E_VF_ARQLEN1_ARQOVFL_MASK) { + if (val & IAVF_VF_ARQLEN1_ARQOVFL_MASK) { dev_info(&adapter->pdev->dev, "ARQ Overflow Error detected\n"); - val &= ~I40E_VF_ARQLEN1_ARQOVFL_MASK; + val &= ~IAVF_VF_ARQLEN1_ARQOVFL_MASK; } - if (val & I40E_VF_ARQLEN1_ARQCRIT_MASK) { + if (val & IAVF_VF_ARQLEN1_ARQCRIT_MASK) { dev_info(&adapter->pdev->dev, "ARQ Critical Error detected\n"); - val &= ~I40E_VF_ARQLEN1_ARQCRIT_MASK; + val &= ~IAVF_VF_ARQLEN1_ARQCRIT_MASK; } if (oldval != val) wr32(hw, hw->aq.arq.len, val);
val = rd32(hw, hw->aq.asq.len); oldval = val; - if (val & I40E_VF_ATQLEN1_ATQVFE_MASK) { + if (val & IAVF_VF_ATQLEN1_ATQVFE_MASK) { dev_info(&adapter->pdev->dev, "ASQ VF Error detected\n"); - val &= ~I40E_VF_ATQLEN1_ATQVFE_MASK; + val &= ~IAVF_VF_ATQLEN1_ATQVFE_MASK; } - if (val & I40E_VF_ATQLEN1_ATQOVFL_MASK) { + if (val & IAVF_VF_ATQLEN1_ATQOVFL_MASK) { dev_info(&adapter->pdev->dev, "ASQ Overflow Error detected\n"); - val &= ~I40E_VF_ATQLEN1_ATQOVFL_MASK; + val &= ~IAVF_VF_ATQLEN1_ATQOVFL_MASK; } - if (val & I40E_VF_ATQLEN1_ATQCRIT_MASK) { + if (val & IAVF_VF_ATQLEN1_ATQCRIT_MASK) { dev_info(&adapter->pdev->dev, "ASQ Critical Error detected\n"); - val &= ~I40E_VF_ATQLEN1_ATQCRIT_MASK; + val &= ~IAVF_VF_ATQLEN1_ATQCRIT_MASK; } if (oldval != val) wr32(hw, hw->aq.asq.len, val); @@@ -2117,58 -2097,58 +2094,58 @@@ freedom kfree(event.msg_buf); out: /* re-enable Admin queue interrupt cause */ - i40evf_misc_irq_enable(adapter); + iavf_misc_irq_enable(adapter); }
/** - * i40evf_client_task - worker thread to perform client work + * iavf_client_task - worker thread to perform client work * @work: pointer to work_struct containing our data * * This task handles client interactions. Because client calls can be * reentrant, we can't handle them in the watchdog. **/ -static void i40evf_client_task(struct work_struct *work) +static void iavf_client_task(struct work_struct *work) { - struct i40evf_adapter *adapter = - container_of(work, struct i40evf_adapter, client_task.work); + struct iavf_adapter *adapter = + container_of(work, struct iavf_adapter, client_task.work);
/* If we can't get the client bit, just give up. We'll be rescheduled * later. */
- if (test_and_set_bit(__I40EVF_IN_CLIENT_TASK, &adapter->crit_section)) + if (test_and_set_bit(__IAVF_IN_CLIENT_TASK, &adapter->crit_section)) return;
- if (adapter->flags & I40EVF_FLAG_SERVICE_CLIENT_REQUESTED) { - i40evf_client_subtask(adapter); - adapter->flags &= ~I40EVF_FLAG_SERVICE_CLIENT_REQUESTED; + if (adapter->flags & IAVF_FLAG_SERVICE_CLIENT_REQUESTED) { + iavf_client_subtask(adapter); + adapter->flags &= ~IAVF_FLAG_SERVICE_CLIENT_REQUESTED; goto out; } - if (adapter->flags & I40EVF_FLAG_CLIENT_NEEDS_L2_PARAMS) { - i40evf_notify_client_l2_params(&adapter->vsi); - adapter->flags &= ~I40EVF_FLAG_CLIENT_NEEDS_L2_PARAMS; + if (adapter->flags & IAVF_FLAG_CLIENT_NEEDS_L2_PARAMS) { + iavf_notify_client_l2_params(&adapter->vsi); + adapter->flags &= ~IAVF_FLAG_CLIENT_NEEDS_L2_PARAMS; goto out; } - if (adapter->flags & I40EVF_FLAG_CLIENT_NEEDS_CLOSE) { - i40evf_notify_client_close(&adapter->vsi, false); - adapter->flags &= ~I40EVF_FLAG_CLIENT_NEEDS_CLOSE; + if (adapter->flags & IAVF_FLAG_CLIENT_NEEDS_CLOSE) { + iavf_notify_client_close(&adapter->vsi, false); + adapter->flags &= ~IAVF_FLAG_CLIENT_NEEDS_CLOSE; goto out; } - if (adapter->flags & I40EVF_FLAG_CLIENT_NEEDS_OPEN) { - i40evf_notify_client_open(&adapter->vsi); - adapter->flags &= ~I40EVF_FLAG_CLIENT_NEEDS_OPEN; + if (adapter->flags & IAVF_FLAG_CLIENT_NEEDS_OPEN) { + iavf_notify_client_open(&adapter->vsi); + adapter->flags &= ~IAVF_FLAG_CLIENT_NEEDS_OPEN; } out: - clear_bit(__I40EVF_IN_CLIENT_TASK, &adapter->crit_section); + clear_bit(__IAVF_IN_CLIENT_TASK, &adapter->crit_section); }
/** - * i40evf_free_all_tx_resources - Free Tx Resources for All Queues + * iavf_free_all_tx_resources - Free Tx Resources for All Queues * @adapter: board private structure * * Free all transmit software resources **/ -void i40evf_free_all_tx_resources(struct i40evf_adapter *adapter) +void iavf_free_all_tx_resources(struct iavf_adapter *adapter) { int i;
@@@ -2177,11 -2157,11 +2154,11 @@@
for (i = 0; i < adapter->num_active_queues; i++) if (adapter->tx_rings[i].desc) - i40evf_free_tx_resources(&adapter->tx_rings[i]); + iavf_free_tx_resources(&adapter->tx_rings[i]); }
/** - * i40evf_setup_all_tx_resources - allocate all queues Tx resources + * iavf_setup_all_tx_resources - allocate all queues Tx resources * @adapter: board private structure * * If this function returns with an error, then it's possible one or @@@ -2190,13 -2170,13 +2167,13 @@@ * * Return 0 on success, negative on failure **/ -static int i40evf_setup_all_tx_resources(struct i40evf_adapter *adapter) +static int iavf_setup_all_tx_resources(struct iavf_adapter *adapter) { int i, err = 0;
for (i = 0; i < adapter->num_active_queues; i++) { adapter->tx_rings[i].count = adapter->tx_desc_count; - err = i40evf_setup_tx_descriptors(&adapter->tx_rings[i]); + err = iavf_setup_tx_descriptors(&adapter->tx_rings[i]); if (!err) continue; dev_err(&adapter->pdev->dev, @@@ -2208,7 -2188,7 +2185,7 @@@ }
/** - * i40evf_setup_all_rx_resources - allocate all queues Rx resources + * iavf_setup_all_rx_resources - allocate all queues Rx resources * @adapter: board private structure * * If this function returns with an error, then it's possible one or @@@ -2217,13 -2197,13 +2194,13 @@@ * * Return 0 on success, negative on failure **/ -static int i40evf_setup_all_rx_resources(struct i40evf_adapter *adapter) +static int iavf_setup_all_rx_resources(struct iavf_adapter *adapter) { int i, err = 0;
for (i = 0; i < adapter->num_active_queues; i++) { adapter->rx_rings[i].count = adapter->rx_desc_count; - err = i40evf_setup_rx_descriptors(&adapter->rx_rings[i]); + err = iavf_setup_rx_descriptors(&adapter->rx_rings[i]); if (!err) continue; dev_err(&adapter->pdev->dev, @@@ -2234,12 -2214,12 +2211,12 @@@ }
/** - * i40evf_free_all_rx_resources - Free Rx Resources for All Queues + * iavf_free_all_rx_resources - Free Rx Resources for All Queues * @adapter: board private structure * * Free all receive software resources **/ -void i40evf_free_all_rx_resources(struct i40evf_adapter *adapter) +void iavf_free_all_rx_resources(struct iavf_adapter *adapter) { int i;
@@@ -2248,16 -2228,16 +2225,16 @@@
for (i = 0; i < adapter->num_active_queues; i++) if (adapter->rx_rings[i].desc) - i40evf_free_rx_resources(&adapter->rx_rings[i]); + iavf_free_rx_resources(&adapter->rx_rings[i]); }
/** - * i40evf_validate_tx_bandwidth - validate the max Tx bandwidth + * iavf_validate_tx_bandwidth - validate the max Tx bandwidth * @adapter: board private structure * @max_tx_rate: max Tx bw for a tc **/ -static int i40evf_validate_tx_bandwidth(struct i40evf_adapter *adapter, - u64 max_tx_rate) +static int iavf_validate_tx_bandwidth(struct iavf_adapter *adapter, + u64 max_tx_rate) { int speed = 0, ret = 0;
@@@ -2294,7 -2274,7 +2271,7 @@@ }
/** - * i40evf_validate_channel_config - validate queue mapping info + * iavf_validate_ch_config - validate queue mapping info * @adapter: board private structure * @mqprio_qopt: queue parameters * @@@ -2302,15 -2282,15 +2279,15 @@@ * configure queue channels is valid or not. Returns 0 on a valid * config. **/ -static int i40evf_validate_ch_config(struct i40evf_adapter *adapter, - struct tc_mqprio_qopt_offload *mqprio_qopt) +static int iavf_validate_ch_config(struct iavf_adapter *adapter, + struct tc_mqprio_qopt_offload *mqprio_qopt) { u64 total_max_rate = 0; int i, num_qps = 0; u64 tx_rate = 0; int ret = 0;
- if (mqprio_qopt->qopt.num_tc > I40EVF_MAX_TRAFFIC_CLASS || + if (mqprio_qopt->qopt.num_tc > IAVF_MAX_TRAFFIC_CLASS || mqprio_qopt->qopt.num_tc < 1) return -EINVAL;
} /* convert to Mbps */ tx_rate = div_u64(mqprio_qopt->max_rate[i], - I40EVF_MBPS_DIVISOR); + IAVF_MBPS_DIVISOR); total_max_rate += tx_rate; num_qps += mqprio_qopt->qopt.count[i]; } - if (num_qps > I40EVF_MAX_REQ_QUEUES) + if (num_qps > IAVF_MAX_REQ_QUEUES) return -EINVAL;
- ret = i40evf_validate_tx_bandwidth(adapter, total_max_rate); + ret = iavf_validate_tx_bandwidth(adapter, total_max_rate); return ret; }
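mqprio passes max_rate to the driver in bytes per second; the div_u64() above turns that into megabits per second. The sketch below assumes the divisor is 125000 (one million bits divided by eight), matching the old I40EVF_MBPS_DIVISOR definition:

#include <stdint.h>
#include <stdio.h>

#define MBPS_DIVISOR 125000ULL	/* bytes/sec per Mbit/sec: 1e6 bits / 8 */

int main(void)
{
	uint64_t max_rate_bps = 12500000;	/* bytes per second from mqprio */

	printf("%llu B/s = %llu Mbps\n",
	       (unsigned long long)max_rate_bps,
	       (unsigned long long)(max_rate_bps / MBPS_DIVISOR));	/* 100 */
	return 0;
}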
/** - * i40evf_del_all_cloud_filters - delete all cloud filters + * iavf_del_all_cloud_filters - delete all cloud filters * on the traffic classes * @adapter: board private structure **/ -static void i40evf_del_all_cloud_filters(struct i40evf_adapter *adapter) +static void iavf_del_all_cloud_filters(struct iavf_adapter *adapter) { - struct i40evf_cloud_filter *cf, *cftmp; + struct iavf_cloud_filter *cf, *cftmp;
spin_lock_bh(&adapter->cloud_filter_list_lock); list_for_each_entry_safe(cf, cftmp, &adapter->cloud_filter_list, @@@ -2355,7 -2335,7 +2332,7 @@@ }
/** - * __i40evf_setup_tc - configure multiple traffic classes + * __iavf_setup_tc - configure multiple traffic classes * @netdev: network interface device structure * @type_data: tc offload data * @@@ -2365,10 -2345,10 +2342,10 @@@ * * Returns 0 on success. **/ -static int __i40evf_setup_tc(struct net_device *netdev, void *type_data) +static int __iavf_setup_tc(struct net_device *netdev, void *type_data) { struct tc_mqprio_qopt_offload *mqprio_qopt = type_data; - struct i40evf_adapter *adapter = netdev_priv(netdev); + struct iavf_adapter *adapter = netdev_priv(netdev); struct virtchnl_vf_resource *vfres = adapter->vf_res; u8 num_tc = 0, total_qps = 0; int ret = 0, netdev_tc = 0; @@@ -2381,14 -2361,14 +2358,14 @@@
/* delete queue_channel */ if (!mqprio_qopt->qopt.hw) { - if (adapter->ch_config.state == __I40EVF_TC_RUNNING) { + if (adapter->ch_config.state == __IAVF_TC_RUNNING) { /* reset the tc configuration */ netdev_reset_tc(netdev); adapter->num_tc = 0; netif_tx_stop_all_queues(netdev); netif_tx_disable(netdev); - i40evf_del_all_cloud_filters(adapter); - adapter->aq_required = I40EVF_FLAG_AQ_DISABLE_CHANNELS; + iavf_del_all_cloud_filters(adapter); + adapter->aq_required = IAVF_FLAG_AQ_DISABLE_CHANNELS; goto exit; } else { return -EINVAL; @@@ -2401,12 -2381,12 +2378,12 @@@ dev_err(&adapter->pdev->dev, "ADq not supported\n"); return -EOPNOTSUPP; } - if (adapter->ch_config.state != __I40EVF_TC_INVALID) { + if (adapter->ch_config.state != __IAVF_TC_INVALID) { dev_err(&adapter->pdev->dev, "TC configuration already exists\n"); return -EINVAL; }
- ret = i40evf_validate_ch_config(adapter, mqprio_qopt); + ret = iavf_validate_ch_config(adapter, mqprio_qopt); if (ret) return ret; /* Return if same TC config is requested */ @@@ -2414,7 -2394,7 +2391,7 @@@ return 0; adapter->num_tc = num_tc;
- for (i = 0; i < I40EVF_MAX_TRAFFIC_CLASS; i++) { + for (i = 0; i < IAVF_MAX_TRAFFIC_CLASS; i++) { if (i < num_tc) { adapter->ch_config.ch_info[i].count = mqprio_qopt->qopt.count[i]; @@@ -2424,7 -2404,7 +2401,7 @@@ max_tx_rate = mqprio_qopt->max_rate[i]; /* convert to Mbps */ max_tx_rate = div_u64(max_tx_rate, - I40EVF_MBPS_DIVISOR); + IAVF_MBPS_DIVISOR); adapter->ch_config.ch_info[i].max_tx_rate = max_tx_rate; } else { @@@ -2435,11 -2415,11 +2412,11 @@@ adapter->ch_config.total_qps = total_qps; netif_tx_stop_all_queues(netdev); netif_tx_disable(netdev); - adapter->aq_required |= I40EVF_FLAG_AQ_ENABLE_CHANNELS; + adapter->aq_required |= IAVF_FLAG_AQ_ENABLE_CHANNELS; netdev_reset_tc(netdev); /* Report the tc mapping up the stack */ netdev_set_num_tc(adapter->netdev, num_tc); - for (i = 0; i < I40EVF_MAX_TRAFFIC_CLASS; i++) { + for (i = 0; i < IAVF_MAX_TRAFFIC_CLASS; i++) { u16 qcount = mqprio_qopt->qopt.count[i]; u16 qoffset = mqprio_qopt->qopt.offset[i];
@@@ -2453,14 -2433,14 +2430,14 @@@ exit }
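Once the config is accepted, each traffic class is reported up the stack as a contiguous queue range via netdev_set_tc_queue(count, offset). A trivial stand-alone model of that tc-to-queue mapping (values illustrative):

#include <stdio.h>

/* Each tc owns the queue range [offset, offset + count), which is what
 * netdev_set_tc_queue() reports for every class. */
int main(void)
{
	unsigned int count[]  = { 4, 2 };	/* queues per tc (mqprio qopt) */
	unsigned int offset[] = { 0, 4 };
	int tc;

	for (tc = 0; tc < 2; tc++)
		printf("tc %d -> queues %u..%u\n", tc, offset[tc],
		       offset[tc] + count[tc] - 1);
	return 0;
}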
/** - * i40evf_parse_cls_flower - Parse tc flower filters provided by kernel + * iavf_parse_cls_flower - Parse tc flower filters provided by kernel * @adapter: board private structure * @f: pointer to struct tc_cls_flower_offload * @filter: pointer to cloud filter structure */ -static int i40evf_parse_cls_flower(struct i40evf_adapter *adapter, - struct tc_cls_flower_offload *f, - struct i40evf_cloud_filter *filter) +static int iavf_parse_cls_flower(struct iavf_adapter *adapter, + struct tc_cls_flower_offload *f, + struct iavf_cloud_filter *filter) { u16 n_proto_mask = 0; u16 n_proto_key = 0; @@@ -2491,7 -2471,7 +2468,7 @@@ f->mask);
if (mask->keyid != 0) - field_flags |= I40EVF_CLOUD_FIELD_TEN_ID; + field_flags |= IAVF_CLOUD_FIELD_TEN_ID; }
if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_BASIC)) { @@@ -2538,7 -2518,7 +2515,7 @@@ /* use is_broadcast and is_zero to check for all 0xf or 0 */ if (!is_zero_ether_addr(mask->dst)) { if (is_broadcast_ether_addr(mask->dst)) { - field_flags |= I40EVF_CLOUD_FIELD_OMAC; + field_flags |= IAVF_CLOUD_FIELD_OMAC; } else { dev_err(&adapter->pdev->dev, "Bad ether dest mask %pM\n", mask->dst); @@@ -2548,7 -2528,7 +2525,7 @@@
if (!is_zero_ether_addr(mask->src)) { if (is_broadcast_ether_addr(mask->src)) { - field_flags |= I40EVF_CLOUD_FIELD_IMAC; + field_flags |= IAVF_CLOUD_FIELD_IMAC; } else { dev_err(&adapter->pdev->dev, "Bad ether src mask %pM\n", mask->src); @@@ -2589,7 -2569,7 +2566,7 @@@
if (mask->vlan_id) { if (mask->vlan_id == VLAN_VID_MASK) { - field_flags |= I40EVF_CLOUD_FIELD_IVLAN; + field_flags |= IAVF_CLOUD_FIELD_IVLAN; } else { dev_err(&adapter->pdev->dev, "Bad vlan mask %u\n", mask->vlan_id); @@@ -2621,7 -2601,7 +2598,7 @@@
if (mask->dst) { if (mask->dst == cpu_to_be32(0xffffffff)) { - field_flags |= I40EVF_CLOUD_FIELD_IIP; + field_flags |= IAVF_CLOUD_FIELD_IIP; } else { dev_err(&adapter->pdev->dev, "Bad ip dst mask 0x%08x\n", be32_to_cpu(mask->dst)); @@@ -2631,7 -2611,7 +2608,7 @@@
if (mask->src) { if (mask->src == cpu_to_be32(0xffffffff)) { - field_flags |= I40EVF_CLOUD_FIELD_IIP; + field_flags |= IAVF_CLOUD_FIELD_IIP; } else { dev_err(&adapter->pdev->dev, "Bad ip src mask 0x%08x\n", be32_to_cpu(mask->src)); @@@ -2639,7 -2619,7 +2616,7 @@@ } }
- if (field_flags & I40EVF_CLOUD_FIELD_TEN_ID) { + if (field_flags & IAVF_CLOUD_FIELD_TEN_ID) { dev_info(&adapter->pdev->dev, "Tenant id not allowed for ip filter\n"); return I40E_ERR_CONFIG; } @@@ -2680,7 -2660,7 +2657,7 @@@ return I40E_ERR_CONFIG; } if (!ipv6_addr_any(&mask->dst) || !ipv6_addr_any(&mask->src)) - field_flags |= I40EVF_CLOUD_FIELD_IIP; + field_flags |= IAVF_CLOUD_FIELD_IIP;
for (i = 0; i < 4; i++) vf->mask.tcp_spec.dst_ip[i] |= cpu_to_be32(0xffffffff); @@@ -2703,7 -2683,7 +2680,7 @@@
if (mask->src) { if (mask->src == cpu_to_be16(0xffff)) { - field_flags |= I40EVF_CLOUD_FIELD_IIP; + field_flags |= IAVF_CLOUD_FIELD_IIP; } else { dev_err(&adapter->pdev->dev, "Bad src port mask %u\n", be16_to_cpu(mask->src)); @@@ -2713,7 -2693,7 +2690,7 @@@
if (mask->dst) { if (mask->dst == cpu_to_be16(0xffff)) { - field_flags |= I40EVF_CLOUD_FIELD_IIP; + field_flags |= IAVF_CLOUD_FIELD_IIP; } else { dev_err(&adapter->pdev->dev, "Bad dst port mask %u\n", be16_to_cpu(mask->dst)); @@@ -2736,13 -2716,13 +2713,13 @@@ }
/** - * i40evf_handle_tclass - Forward to a traffic class on the device + * iavf_handle_tclass - Forward to a traffic class on the device * @adapter: board private structure * @tc: traffic class index on the device * @filter: pointer to cloud filter structure */ -static int i40evf_handle_tclass(struct i40evf_adapter *adapter, u32 tc, - struct i40evf_cloud_filter *filter) +static int iavf_handle_tclass(struct iavf_adapter *adapter, u32 tc, + struct iavf_cloud_filter *filter) { if (tc == 0) return 0; @@@ -2760,15 -2740,15 +2737,15 @@@ }
/** - * i40evf_configure_clsflower - Add tc flower filters + * iavf_configure_clsflower - Add tc flower filters * @adapter: board private structure * @cls_flower: Pointer to struct tc_cls_flower_offload */ -static int i40evf_configure_clsflower(struct i40evf_adapter *adapter, - struct tc_cls_flower_offload *cls_flower) +static int iavf_configure_clsflower(struct iavf_adapter *adapter, + struct tc_cls_flower_offload *cls_flower) { int tc = tc_classid_to_hwtc(adapter->netdev, cls_flower->classid); - struct i40evf_cloud_filter *filter = NULL; + struct iavf_cloud_filter *filter = NULL; int err = -EINVAL, count = 50;
if (tc < 0) { @@@ -2780,7 -2760,7 +2757,7 @@@ if (!filter) return -ENOMEM;
- while (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK, + while (test_and_set_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section)) { if (--count == 0) goto err; @@@ -2793,11 -2773,11 +2770,11 @@@ memset(&filter->f.mask.tcp_spec, 0, sizeof(struct virtchnl_l4_spec)); /* start out with flow type and eth type IPv4 to begin with */ filter->f.flow_type = VIRTCHNL_TCP_V4_FLOW; - err = i40evf_parse_cls_flower(adapter, cls_flower, filter); + err = iavf_parse_cls_flower(adapter, cls_flower, filter); if (err < 0) goto err;
- err = i40evf_handle_tclass(adapter, tc, filter); + err = iavf_handle_tclass(adapter, tc, filter); if (err < 0) goto err;
@@@ -2806,27 -2786,27 +2783,27 @@@ list_add_tail(&filter->list, &adapter->cloud_filter_list); adapter->num_cloud_filters++; filter->add = true; - adapter->aq_required |= I40EVF_FLAG_AQ_ADD_CLOUD_FILTER; + adapter->aq_required |= IAVF_FLAG_AQ_ADD_CLOUD_FILTER; spin_unlock_bh(&adapter->cloud_filter_list_lock); err: if (err) kfree(filter);
- clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section); + clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section); return err; }
-/* i40evf_find_cf - Find the cloud filter in the list +/* iavf_find_cf - Find the cloud filter in the list * @adapter: Board private structure * @cookie: filter specific cookie * * Returns ptr to the filter object or NULL. Must be called while holding the * cloud_filter_list_lock. */ -static struct i40evf_cloud_filter *i40evf_find_cf(struct i40evf_adapter *adapter, - unsigned long *cookie) +static struct iavf_cloud_filter *iavf_find_cf(struct iavf_adapter *adapter, + unsigned long *cookie) { - struct i40evf_cloud_filter *filter = NULL; + struct iavf_cloud_filter *filter = NULL;
if (!cookie) return NULL; @@@ -2839,21 -2819,21 +2816,21 @@@ }
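iavf_find_cf() keys its lookup on the tc flower cookie and must run under cloud_filter_list_lock. A stand-alone sketch of the same cookie-matched search, using a plain singly linked list in place of the kernel's list_head:

#include <stddef.h>
#include <stdio.h>

struct cloud_filter {
	struct cloud_filter *next;
	unsigned long cookie;
};

/* Walk the filter list and match on the tc flower cookie, as
 * iavf_find_cf() does while holding cloud_filter_list_lock. */
static struct cloud_filter *find_cf(struct cloud_filter *head,
				    const unsigned long *cookie)
{
	struct cloud_filter *f;

	if (!cookie)
		return NULL;
	for (f = head; f; f = f->next)
		if (f->cookie == *cookie)
			return f;
	return NULL;
}

int main(void)
{
	struct cloud_filter c = { NULL, 42 }, b = { &c, 7 }, a = { &b, 1 };
	unsigned long want = 42;

	printf("cookie 42 %s\n", find_cf(&a, &want) ? "found" : "not found");
	return 0;
}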
/** - * i40evf_delete_clsflower - Remove tc flower filters + * iavf_delete_clsflower - Remove tc flower filters * @adapter: board private structure * @cls_flower: Pointer to struct tc_cls_flower_offload */ -static int i40evf_delete_clsflower(struct i40evf_adapter *adapter, - struct tc_cls_flower_offload *cls_flower) +static int iavf_delete_clsflower(struct iavf_adapter *adapter, + struct tc_cls_flower_offload *cls_flower) { - struct i40evf_cloud_filter *filter = NULL; + struct iavf_cloud_filter *filter = NULL; int err = 0;
spin_lock_bh(&adapter->cloud_filter_list_lock); - filter = i40evf_find_cf(adapter, &cls_flower->cookie); + filter = iavf_find_cf(adapter, &cls_flower->cookie); if (filter) { filter->del = true; - adapter->aq_required |= I40EVF_FLAG_AQ_DEL_CLOUD_FILTER; + adapter->aq_required |= IAVF_FLAG_AQ_DEL_CLOUD_FILTER; } else { err = -EINVAL; } @@@ -2863,21 -2843,21 +2840,21 @@@ }
/** - * i40evf_setup_tc_cls_flower - flower classifier offloads + * iavf_setup_tc_cls_flower - flower classifier offloads * @adapter: board private structure * @cls_flower: pointer to struct tc_cls_flower_offload */ -static int i40evf_setup_tc_cls_flower(struct i40evf_adapter *adapter, - struct tc_cls_flower_offload *cls_flower) +static int iavf_setup_tc_cls_flower(struct iavf_adapter *adapter, + struct tc_cls_flower_offload *cls_flower) { if (cls_flower->common.chain_index) return -EOPNOTSUPP;
switch (cls_flower->command) { case TC_CLSFLOWER_REPLACE: - return i40evf_configure_clsflower(adapter, cls_flower); + return iavf_configure_clsflower(adapter, cls_flower); case TC_CLSFLOWER_DESTROY: - return i40evf_delete_clsflower(adapter, cls_flower); + return iavf_delete_clsflower(adapter, cls_flower); case TC_CLSFLOWER_STATS: return -EOPNOTSUPP; default: @@@ -2886,46 -2866,46 +2863,46 @@@ }
/** - * i40evf_setup_tc_block_cb - block callback for tc + * iavf_setup_tc_block_cb - block callback for tc * @type: type of offload * @type_data: offload data * @cb_priv: board private structure * * This function is the block callback for traffic classes **/ -static int i40evf_setup_tc_block_cb(enum tc_setup_type type, void *type_data, - void *cb_priv) +static int iavf_setup_tc_block_cb(enum tc_setup_type type, void *type_data, + void *cb_priv) { switch (type) { case TC_SETUP_CLSFLOWER: - return i40evf_setup_tc_cls_flower(cb_priv, type_data); + return iavf_setup_tc_cls_flower(cb_priv, type_data); default: return -EOPNOTSUPP; } }
/** - * i40evf_setup_tc_block - register callbacks for tc + * iavf_setup_tc_block - register callbacks for tc * @dev: network interface device structure * @f: tc offload data * * This function registers block callbacks for tc * offloads **/ -static int i40evf_setup_tc_block(struct net_device *dev, - struct tc_block_offload *f) +static int iavf_setup_tc_block(struct net_device *dev, + struct tc_block_offload *f) { - struct i40evf_adapter *adapter = netdev_priv(dev); + struct iavf_adapter *adapter = netdev_priv(dev);
if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS) return -EOPNOTSUPP;
switch (f->command) { case TC_BLOCK_BIND: - return tcf_block_cb_register(f->block, i40evf_setup_tc_block_cb, + return tcf_block_cb_register(f->block, iavf_setup_tc_block_cb, adapter, adapter, f->extack); case TC_BLOCK_UNBIND: - tcf_block_cb_unregister(f->block, i40evf_setup_tc_block_cb, + tcf_block_cb_unregister(f->block, iavf_setup_tc_block_cb, adapter); return 0; default: @@@ -2934,7 -2914,7 +2911,7 @@@ }
/** - * i40evf_setup_tc - configure multiple traffic classes + * iavf_setup_tc - configure multiple traffic classes * @netdev: network interface device structure * @type: type of offload * @type_data: tc offload data @@@ -2944,21 -2924,21 +2921,21 @@@ * * Returns 0 on success **/ -static int i40evf_setup_tc(struct net_device *netdev, enum tc_setup_type type, - void *type_data) +static int iavf_setup_tc(struct net_device *netdev, enum tc_setup_type type, + void *type_data) { switch (type) { case TC_SETUP_QDISC_MQPRIO: - return __i40evf_setup_tc(netdev, type_data); + return __iavf_setup_tc(netdev, type_data); case TC_SETUP_BLOCK: - return i40evf_setup_tc_block(netdev, type_data); + return iavf_setup_tc_block(netdev, type_data); default: return -EOPNOTSUPP; } }
/** - * i40evf_open - Called when a network interface is made active + * iavf_open - Called when a network interface is made active * @netdev: network interface device structure * * Returns 0 on success, negative value on failure @@@ -2969,71 -2949,71 +2946,71 @@@ * handler is registered with the OS, the watchdog timer is started, * and the stack is notified that the interface is ready. **/ -static int i40evf_open(struct net_device *netdev) +static int iavf_open(struct net_device *netdev) { - struct i40evf_adapter *adapter = netdev_priv(netdev); + struct iavf_adapter *adapter = netdev_priv(netdev); int err;
- if (adapter->flags & I40EVF_FLAG_PF_COMMS_FAILED) { + if (adapter->flags & IAVF_FLAG_PF_COMMS_FAILED) { dev_err(&adapter->pdev->dev, "Unable to open device due to PF driver failure.\n"); return -EIO; }
- while (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK, + while (test_and_set_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section)) usleep_range(500, 1000);
- if (adapter->state != __I40EVF_DOWN) { + if (adapter->state != __IAVF_DOWN) { err = -EBUSY; goto err_unlock; }
/* allocate transmit descriptors */ - err = i40evf_setup_all_tx_resources(adapter); + err = iavf_setup_all_tx_resources(adapter); if (err) goto err_setup_tx;
/* allocate receive descriptors */ - err = i40evf_setup_all_rx_resources(adapter); + err = iavf_setup_all_rx_resources(adapter); if (err) goto err_setup_rx;
/* clear any pending interrupts, may auto mask */ - err = i40evf_request_traffic_irqs(adapter, netdev->name); + err = iavf_request_traffic_irqs(adapter, netdev->name); if (err) goto err_req_irq;
spin_lock_bh(&adapter->mac_vlan_list_lock);
- i40evf_add_filter(adapter, adapter->hw.mac.addr); + iavf_add_filter(adapter, adapter->hw.mac.addr);
spin_unlock_bh(&adapter->mac_vlan_list_lock);
- i40evf_configure(adapter); + iavf_configure(adapter);
- i40evf_up_complete(adapter); + iavf_up_complete(adapter);
- i40evf_irq_enable(adapter, true); + iavf_irq_enable(adapter, true);
- clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section); + clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section);
return 0;
err_req_irq: - i40evf_down(adapter); - i40evf_free_traffic_irqs(adapter); + iavf_down(adapter); + iavf_free_traffic_irqs(adapter); err_setup_rx: - i40evf_free_all_rx_resources(adapter); + iavf_free_all_rx_resources(adapter); err_setup_tx: - i40evf_free_all_tx_resources(adapter); + iavf_free_all_tx_resources(adapter); err_unlock: - clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section); + clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section);
return err; }
/** - * i40evf_close - Disables a network interface + * iavf_close - Disables a network interface * @netdev: network interface device structure * * Returns 0, this is not allowed to fail @@@ -3043,41 -3023,41 +3020,41 @@@ * needs to be disabled. All IRQs except vector 0 (reserved for admin queue) * are freed, along with all transmit and receive resources. **/ -static int i40evf_close(struct net_device *netdev) +static int iavf_close(struct net_device *netdev) { - struct i40evf_adapter *adapter = netdev_priv(netdev); + struct iavf_adapter *adapter = netdev_priv(netdev); int status;
- if (adapter->state <= __I40EVF_DOWN_PENDING) + if (adapter->state <= __IAVF_DOWN_PENDING) return 0;
- while (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK, + while (test_and_set_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section)) usleep_range(500, 1000);
- set_bit(__I40E_VSI_DOWN, adapter->vsi.state); + set_bit(__IAVF_VSI_DOWN, adapter->vsi.state); if (CLIENT_ENABLED(adapter)) - adapter->flags |= I40EVF_FLAG_CLIENT_NEEDS_CLOSE; + adapter->flags |= IAVF_FLAG_CLIENT_NEEDS_CLOSE;
- i40evf_down(adapter); - adapter->state = __I40EVF_DOWN_PENDING; - i40evf_free_traffic_irqs(adapter); + iavf_down(adapter); + adapter->state = __IAVF_DOWN_PENDING; + iavf_free_traffic_irqs(adapter);
- clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section); + clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section);
/* We explicitly don't free resources here because the hardware is * still active and can DMA into memory. Resources are cleared in - * i40evf_virtchnl_completion() after we get confirmation from the PF + * iavf_virtchnl_completion() after we get confirmation from the PF * driver that the rings have been stopped. * - * Also, we wait for state to transition to __I40EVF_DOWN before - * returning. State change occurs in i40evf_virtchnl_completion() after + * Also, we wait for state to transition to __IAVF_DOWN before + * returning. State change occurs in iavf_virtchnl_completion() after * VF resources are released (which occurs after PF driver processes and * responds to admin queue commands). */
status = wait_event_timeout(adapter->down_waitqueue, - adapter->state == __I40EVF_DOWN, + adapter->state == __IAVF_DOWN, msecs_to_jiffies(200)); if (!status) netdev_warn(netdev, "Device resources not yet released\n"); @@@ -3085,65 -3065,64 +3062,65 @@@ }
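iavf_close() only moves the adapter to __IAVF_DOWN_PENDING, then blocks on down_waitqueue for up to 200 ms while the virtchnl completion path flips the state to __IAVF_DOWN and wakes the queue. A user-space model of that handshake, with a pthread condition variable standing in for wait_event_timeout()/wake_up() (build with -lpthread):

#include <pthread.h>
#include <stdio.h>
#include <time.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static int state_down;		/* becomes 1 when "__IAVF_DOWN" is reached */

/* Plays the role of iavf_virtchnl_completion(): flip state, wake waiter. */
static void *completion_thread(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&lock);
	state_down = 1;
	pthread_cond_signal(&cond);
	pthread_mutex_unlock(&lock);
	return NULL;
}

int main(void)
{
	pthread_t t;
	struct timespec deadline;

	clock_gettime(CLOCK_REALTIME, &deadline);
	deadline.tv_nsec += 200 * 1000000L;	/* ~200 ms budget */
	if (deadline.tv_nsec >= 1000000000L) {
		deadline.tv_sec++;
		deadline.tv_nsec -= 1000000000L;
	}

	pthread_create(&t, NULL, completion_thread, NULL);

	pthread_mutex_lock(&lock);
	while (!state_down)
		if (pthread_cond_timedwait(&cond, &lock, &deadline))
			break;			/* timed out */
	puts(state_down ? "device down" : "resources not yet released");
	pthread_mutex_unlock(&lock);
	pthread_join(t, NULL);
	return 0;
}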
/** - * i40evf_change_mtu - Change the Maximum Transfer Unit + * iavf_change_mtu - Change the Maximum Transfer Unit * @netdev: network interface device structure * @new_mtu: new value for maximum frame size * * Returns 0 on success, negative on failure **/ -static int i40evf_change_mtu(struct net_device *netdev, int new_mtu) +static int iavf_change_mtu(struct net_device *netdev, int new_mtu) { - struct i40evf_adapter *adapter = netdev_priv(netdev); + struct iavf_adapter *adapter = netdev_priv(netdev);
netdev->mtu = new_mtu; if (CLIENT_ENABLED(adapter)) { - i40evf_notify_client_l2_params(&adapter->vsi); - adapter->flags |= I40EVF_FLAG_SERVICE_CLIENT_REQUESTED; + iavf_notify_client_l2_params(&adapter->vsi); + adapter->flags |= IAVF_FLAG_SERVICE_CLIENT_REQUESTED; } - adapter->flags |= I40EVF_FLAG_RESET_NEEDED; + adapter->flags |= IAVF_FLAG_RESET_NEEDED; schedule_work(&adapter->reset_task);
return 0; }
/** - * i40e_set_features - set the netdev feature flags + * iavf_set_features - set the netdev feature flags * @netdev: ptr to the netdev being adjusted * @features: the feature set that the stack is suggesting * Note: expects to be called while under rtnl_lock() **/ -static int i40evf_set_features(struct net_device *netdev, - netdev_features_t features) +static int iavf_set_features(struct net_device *netdev, + netdev_features_t features) { - struct i40evf_adapter *adapter = netdev_priv(netdev); + struct iavf_adapter *adapter = netdev_priv(netdev);
- /* Don't allow changing VLAN_RX flag when VLAN is set for VF - * and return an error in this case + /* Don't allow changing VLAN_RX flag when adapter is not capable + * of VLAN offload */ - if (VLAN_ALLOWED(adapter)) { + if (!VLAN_ALLOWED(adapter)) { + if ((netdev->features ^ features) & NETIF_F_HW_VLAN_CTAG_RX) + return -EINVAL; + } else if ((netdev->features ^ features) & NETIF_F_HW_VLAN_CTAG_RX) { if (features & NETIF_F_HW_VLAN_CTAG_RX) adapter->aq_required |= - I40EVF_FLAG_AQ_ENABLE_VLAN_STRIPPING; + IAVF_FLAG_AQ_ENABLE_VLAN_STRIPPING; else adapter->aq_required |= - I40EVF_FLAG_AQ_DISABLE_VLAN_STRIPPING; - } else if ((netdev->features ^ features) & NETIF_F_HW_VLAN_CTAG_RX) { - return -EINVAL; + IAVF_FLAG_AQ_DISABLE_VLAN_STRIPPING; }
return 0; }
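The (netdev->features ^ features) & NETIF_F_HW_VLAN_CTAG_RX test above is the standard idiom for asking whether one particular offload bit is being toggled: XOR keeps only the differing bits, AND isolates the bit of interest. A short demonstration:

#include <stdio.h>

#define F_VLAN_RX (1u << 3)	/* stand-in for NETIF_F_HW_VLAN_CTAG_RX */

int main(void)
{
	unsigned int old_features = F_VLAN_RX;	/* stripping currently on */
	unsigned int new_features = 0;		/* user wants it off */

	/* XOR keeps only bits that differ; AND isolates the one we care
	 * about, so this reads "is VLAN RX stripping being toggled?" */
	if ((old_features ^ new_features) & F_VLAN_RX)
		printf("queue a %s-stripping request to the PF\n",
		       (new_features & F_VLAN_RX) ? "enable" : "disable");
	return 0;
}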
/** - * i40evf_features_check - Validate encapsulated packet conforms to limits + * iavf_features_check - Validate encapsulated packet conforms to limits * @skb: skb buff * @dev: This physical port's netdev * @features: Offload features that the stack believes apply **/ -static netdev_features_t i40evf_features_check(struct sk_buff *skb, - struct net_device *dev, - netdev_features_t features) +static netdev_features_t iavf_features_check(struct sk_buff *skb, + struct net_device *dev, + netdev_features_t features) { size_t len;
@@@ -3194,16 -3173,16 +3171,16 @@@ out_err }
/** - * i40evf_fix_features - fix up the netdev feature bits + * iavf_fix_features - fix up the netdev feature bits * @netdev: our net device * @features: desired feature bits * * Returns fixed-up features bits **/ -static netdev_features_t i40evf_fix_features(struct net_device *netdev, - netdev_features_t features) +static netdev_features_t iavf_fix_features(struct net_device *netdev, + netdev_features_t features) { - struct i40evf_adapter *adapter = netdev_priv(netdev); + struct iavf_adapter *adapter = netdev_priv(netdev);
if (!(adapter->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN)) features &= ~(NETIF_F_HW_VLAN_CTAG_TX | @@@ -3213,40 -3192,37 +3190,37 @@@ return features; }
-static const struct net_device_ops i40evf_netdev_ops = { - .ndo_open = i40evf_open, - .ndo_stop = i40evf_close, - .ndo_start_xmit = i40evf_xmit_frame, - .ndo_set_rx_mode = i40evf_set_rx_mode, +static const struct net_device_ops iavf_netdev_ops = { + .ndo_open = iavf_open, + .ndo_stop = iavf_close, + .ndo_start_xmit = iavf_xmit_frame, + .ndo_set_rx_mode = iavf_set_rx_mode, .ndo_validate_addr = eth_validate_addr, - .ndo_set_mac_address = i40evf_set_mac, - .ndo_change_mtu = i40evf_change_mtu, - .ndo_tx_timeout = i40evf_tx_timeout, - .ndo_vlan_rx_add_vid = i40evf_vlan_rx_add_vid, - .ndo_vlan_rx_kill_vid = i40evf_vlan_rx_kill_vid, - .ndo_features_check = i40evf_features_check, - .ndo_fix_features = i40evf_fix_features, - .ndo_set_features = i40evf_set_features, - .ndo_setup_tc = i40evf_setup_tc, + .ndo_set_mac_address = iavf_set_mac, + .ndo_change_mtu = iavf_change_mtu, + .ndo_tx_timeout = iavf_tx_timeout, + .ndo_vlan_rx_add_vid = iavf_vlan_rx_add_vid, + .ndo_vlan_rx_kill_vid = iavf_vlan_rx_kill_vid, + .ndo_features_check = iavf_features_check, + .ndo_fix_features = iavf_fix_features, + .ndo_set_features = iavf_set_features, - #ifdef CONFIG_NET_POLL_CONTROLLER - .ndo_poll_controller = iavf_netpoll, - #endif + .ndo_setup_tc = iavf_setup_tc, };
/** - * i40evf_check_reset_complete - check that VF reset is complete + * iavf_check_reset_complete - check that VF reset is complete * @hw: pointer to hw struct * * Returns 0 if device is ready to use, or -EBUSY if it's in reset. **/ -static int i40evf_check_reset_complete(struct i40e_hw *hw) +static int iavf_check_reset_complete(struct iavf_hw *hw) { u32 rstat; int i;
for (i = 0; i < 100; i++) { - rstat = rd32(hw, I40E_VFGEN_RSTAT) & - I40E_VFGEN_RSTAT_VFR_STATE_MASK; + rstat = rd32(hw, IAVF_VFGEN_RSTAT) & + IAVF_VFGEN_RSTAT_VFR_STATE_MASK; if ((rstat == VIRTCHNL_VFR_VFACTIVE) || (rstat == VIRTCHNL_VFR_COMPLETED)) return 0; @@@ -3256,18 -3232,18 +3230,18 @@@ }
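iavf_check_reset_complete() above is the usual bounded-poll idiom: re-read a status register a fixed number of times with a short sleep between reads, succeeding as soon as a ready value appears and failing with -EBUSY otherwise. A self-contained sketch under those assumptions; read_rstat() and sleep_ms() are hypothetical stand-ins for the driver's rd32() and usleep_range():

#include <stdio.h>

#define VFR_COMPLETED 2 /* illustrative "out of reset" value */

/* Hypothetical register read; rd32(hw, ...) in the driver. Reports
 * "ready" from the fourth read onward so the sketch terminates. */
static unsigned int read_rstat(void)
{
    static int calls;
    return (++calls >= 4) ? VFR_COMPLETED : 0;
}

static void sleep_ms(unsigned int ms) { (void)ms; } /* stubbed delay */

static int wait_reset_complete(void)
{
    int i;

    for (i = 0; i < 100; i++) {
        if (read_rstat() == VFR_COMPLETED)
            return 0;
        sleep_ms(10);
    }
    return -16; /* -EBUSY: still in reset after the poll budget */
}

int main(void)
{
    printf("wait_reset_complete() = %d\n", wait_reset_complete());
    return 0;
}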
/** - * i40evf_process_config - Process the config information we got from the PF + * iavf_process_config - Process the config information we got from the PF * @adapter: board private structure * * Verify that we have a valid config struct, and set up our netdev features * and our VSI struct. **/ -int i40evf_process_config(struct i40evf_adapter *adapter) +int iavf_process_config(struct iavf_adapter *adapter) { struct virtchnl_vf_resource *vfres = adapter->vf_res; int i, num_req_queues = adapter->num_req_queues; struct net_device *netdev = adapter->netdev; - struct i40e_vsi *vsi = &adapter->vsi; + struct iavf_vsi *vsi = &adapter->vsi; netdev_features_t hw_enc_features; netdev_features_t hw_features;
@@@ -3291,9 -3267,9 +3265,9 @@@ "Requested %d queues, but PF only gave us %d.\n", num_req_queues, adapter->vsi_res->num_queue_pairs); - adapter->flags |= I40EVF_FLAG_REINIT_ITR_NEEDED; + adapter->flags |= IAVF_FLAG_REINIT_ITR_NEEDED; adapter->num_req_queues = adapter->vsi_res->num_queue_pairs; - i40evf_schedule_reset(adapter); + iavf_schedule_reset(adapter); return -ENODEV; } adapter->num_req_queues = 0; @@@ -3356,8 -3332,6 +3330,8 @@@ if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN) netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
+ netdev->priv_flags |= IFF_UNICAST_FLT; + /* Do not turn on offloads when they are requested to be turned off. * TSO needs minimum 576 bytes to work correctly. */ @@@ -3380,22 -3354,22 +3354,22 @@@
adapter->vsi.back = adapter; adapter->vsi.base_vector = 1; - adapter->vsi.work_limit = I40E_DEFAULT_IRQ_WORK; + adapter->vsi.work_limit = IAVF_DEFAULT_IRQ_WORK; vsi->netdev = adapter->netdev; vsi->qs_handle = adapter->vsi_res->qset_handle; if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF) { adapter->rss_key_size = vfres->rss_key_size; adapter->rss_lut_size = vfres->rss_lut_size; } else { - adapter->rss_key_size = I40EVF_HKEY_ARRAY_SIZE; - adapter->rss_lut_size = I40EVF_HLUT_ARRAY_SIZE; + adapter->rss_key_size = IAVF_HKEY_ARRAY_SIZE; + adapter->rss_lut_size = IAVF_HLUT_ARRAY_SIZE; }
return 0; }
/** - * i40evf_init_task - worker thread to perform delayed initialization + * iavf_init_task - worker thread to perform delayed initialization * @work: pointer to work_struct containing our data * * This task completes the work that was begun in probe. Due to the nature @@@ -3406,65 -3380,65 +3380,65 @@@ * communications with the PF driver and set up our netdev, the watchdog * takes over. **/ -static void i40evf_init_task(struct work_struct *work) +static void iavf_init_task(struct work_struct *work) { - struct i40evf_adapter *adapter = container_of(work, - struct i40evf_adapter, + struct iavf_adapter *adapter = container_of(work, + struct iavf_adapter, init_task.work); struct net_device *netdev = adapter->netdev; - struct i40e_hw *hw = &adapter->hw; + struct iavf_hw *hw = &adapter->hw; struct pci_dev *pdev = adapter->pdev; int err, bufsz;
switch (adapter->state) { - case __I40EVF_STARTUP: + case __IAVF_STARTUP: /* driver loaded, probe complete */ - adapter->flags &= ~I40EVF_FLAG_PF_COMMS_FAILED; - adapter->flags &= ~I40EVF_FLAG_RESET_PENDING; - err = i40e_set_mac_type(hw); + adapter->flags &= ~IAVF_FLAG_PF_COMMS_FAILED; + adapter->flags &= ~IAVF_FLAG_RESET_PENDING; + err = iavf_set_mac_type(hw); if (err) { dev_err(&pdev->dev, "Failed to set MAC type (%d)\n", err); goto err; } - err = i40evf_check_reset_complete(hw); + err = iavf_check_reset_complete(hw); if (err) { dev_info(&pdev->dev, "Device is still in reset (%d), retrying\n", err); goto err; } - hw->aq.num_arq_entries = I40EVF_AQ_LEN; - hw->aq.num_asq_entries = I40EVF_AQ_LEN; - hw->aq.arq_buf_size = I40EVF_MAX_AQ_BUF_SIZE; - hw->aq.asq_buf_size = I40EVF_MAX_AQ_BUF_SIZE; + hw->aq.num_arq_entries = IAVF_AQ_LEN; + hw->aq.num_asq_entries = IAVF_AQ_LEN; + hw->aq.arq_buf_size = IAVF_MAX_AQ_BUF_SIZE; + hw->aq.asq_buf_size = IAVF_MAX_AQ_BUF_SIZE;
- err = i40evf_init_adminq(hw); + err = iavf_init_adminq(hw); if (err) { dev_err(&pdev->dev, "Failed to init Admin Queue (%d)\n", err); goto err; } - err = i40evf_send_api_ver(adapter); + err = iavf_send_api_ver(adapter); if (err) { dev_err(&pdev->dev, "Unable to send to PF (%d)\n", err); - i40evf_shutdown_adminq(hw); + iavf_shutdown_adminq(hw); goto err; } - adapter->state = __I40EVF_INIT_VERSION_CHECK; + adapter->state = __IAVF_INIT_VERSION_CHECK; goto restart; - case __I40EVF_INIT_VERSION_CHECK: - if (!i40evf_asq_done(hw)) { + case __IAVF_INIT_VERSION_CHECK: + if (!iavf_asq_done(hw)) { dev_err(&pdev->dev, "Admin queue command never completed\n"); - i40evf_shutdown_adminq(hw); - adapter->state = __I40EVF_STARTUP; + iavf_shutdown_adminq(hw); + adapter->state = __IAVF_STARTUP; goto err; }
/* aq msg sent, awaiting reply */ - err = i40evf_verify_api_ver(adapter); + err = iavf_verify_api_ver(adapter); if (err) { if (err == I40E_ERR_ADMIN_QUEUE_NO_WORK) - err = i40evf_send_api_ver(adapter); + err = iavf_send_api_ver(adapter); else dev_err(&pdev->dev, "Unsupported PF API version %d.%d, expected %d.%d\n", adapter->pf_version.major, @@@ -3473,34 -3447,34 +3447,34 @@@ VIRTCHNL_VERSION_MINOR); goto err; } - err = i40evf_send_vf_config_msg(adapter); + err = iavf_send_vf_config_msg(adapter); if (err) { dev_err(&pdev->dev, "Unable to send config request (%d)\n", err); goto err; } - adapter->state = __I40EVF_INIT_GET_RESOURCES; + adapter->state = __IAVF_INIT_GET_RESOURCES; goto restart; - case __I40EVF_INIT_GET_RESOURCES: + case __IAVF_INIT_GET_RESOURCES: /* aq msg sent, awaiting reply */ if (!adapter->vf_res) { bufsz = sizeof(struct virtchnl_vf_resource) + - (I40E_MAX_VF_VSI * + (IAVF_MAX_VF_VSI * sizeof(struct virtchnl_vsi_resource)); adapter->vf_res = kzalloc(bufsz, GFP_KERNEL); if (!adapter->vf_res) goto err; } - err = i40evf_get_vf_config(adapter); + err = iavf_get_vf_config(adapter); if (err == I40E_ERR_ADMIN_QUEUE_NO_WORK) { - err = i40evf_send_vf_config_msg(adapter); + err = iavf_send_vf_config_msg(adapter); goto err; } else if (err == I40E_ERR_PARAM) { /* We only get ERR_PARAM if the device is in a very bad * state or if we've been disabled for previous bad * behavior. Either way, we're done now. */ - i40evf_shutdown_adminq(hw); + iavf_shutdown_adminq(hw); dev_err(&pdev->dev, "Unable to get VF config due to PF error condition, not retrying\n"); return; } @@@ -3509,25 -3483,25 +3483,25 @@@ err); goto err_alloc; } - adapter->state = __I40EVF_INIT_SW; + adapter->state = __IAVF_INIT_SW; break; default: goto err_alloc; }
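As the iavf_init_task() comment says, initialization proceeds as a small state machine driven by rescheduled work: each invocation handles one __IAVF_* state and either advances to the next state and reschedules, or drops into the error path. A toy rendering of that shape; the state names and stubbed transitions are illustrative, not the driver's actual logic:

#include <stdio.h>

enum init_state { STARTUP, VERSION_CHECK, GET_RESOURCES, INIT_SW };

/* One pass of a reschedule-driven state machine: perform this state's
 * work (stubbed out here) and advance on success. */
static enum init_state init_step(enum init_state s)
{
    switch (s) {
    case STARTUP:       return VERSION_CHECK;  /* admin queue up */
    case VERSION_CHECK: return GET_RESOURCES;  /* API version ok */
    case GET_RESOURCES: return INIT_SW;        /* VF config in hand */
    default:            return s;
    }
}

int main(void)
{
    enum init_state s = STARTUP;

    while (s != INIT_SW) {
        s = init_step(s); /* the driver reschedules work instead */
        printf("state -> %d\n", s);
    }
    return 0;
}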
- if (i40evf_process_config(adapter)) + if (iavf_process_config(adapter)) goto err_alloc; adapter->current_op = VIRTCHNL_OP_UNKNOWN;
- adapter->flags |= I40EVF_FLAG_RX_CSUM_ENABLED; + adapter->flags |= IAVF_FLAG_RX_CSUM_ENABLED;
- netdev->netdev_ops = &i40evf_netdev_ops; - i40evf_set_ethtool_ops(netdev); + netdev->netdev_ops = &iavf_netdev_ops; + iavf_set_ethtool_ops(netdev); netdev->watchdog_timeo = 5 * HZ;
/* MTU range: 68 - 9710 */ netdev->min_mtu = ETH_MIN_MTU; - netdev->max_mtu = I40E_MAX_RXBUFFER - I40E_PACKET_HDR_PAD; + netdev->max_mtu = IAVF_MAX_RXBUFFER - IAVF_PACKET_HDR_PAD;
if (!is_valid_ether_addr(adapter->hw.mac.addr)) { dev_info(&pdev->dev, "Invalid MAC address %pM, using random\n", @@@ -3535,25 -3509,25 +3509,25 @@@ eth_hw_addr_random(netdev); ether_addr_copy(adapter->hw.mac.addr, netdev->dev_addr); } else { - adapter->flags |= I40EVF_FLAG_ADDR_SET_BY_PF; + adapter->flags |= IAVF_FLAG_ADDR_SET_BY_PF; ether_addr_copy(netdev->dev_addr, adapter->hw.mac.addr); ether_addr_copy(netdev->perm_addr, adapter->hw.mac.addr); }
- timer_setup(&adapter->watchdog_timer, i40evf_watchdog_timer, 0); + timer_setup(&adapter->watchdog_timer, iavf_watchdog_timer, 0); mod_timer(&adapter->watchdog_timer, jiffies + 1);
- adapter->tx_desc_count = I40EVF_DEFAULT_TXD; - adapter->rx_desc_count = I40EVF_DEFAULT_RXD; - err = i40evf_init_interrupt_scheme(adapter); + adapter->tx_desc_count = IAVF_DEFAULT_TXD; + adapter->rx_desc_count = IAVF_DEFAULT_RXD; + err = iavf_init_interrupt_scheme(adapter); if (err) goto err_sw_init; - i40evf_map_rings_to_vectors(adapter); + iavf_map_rings_to_vectors(adapter); if (adapter->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_WB_ON_ITR) - adapter->flags |= I40EVF_FLAG_WB_ON_ITR_CAPABLE; + adapter->flags |= IAVF_FLAG_WB_ON_ITR_CAPABLE;
- err = i40evf_request_misc_irq(adapter); + err = iavf_request_misc_irq(adapter); if (err) goto err_sw_init;
@@@ -3570,7 -3544,7 +3544,7 @@@
netif_tx_stop_all_queues(netdev); if (CLIENT_ALLOWED(adapter)) { - err = i40evf_lan_add_device(adapter); + err = iavf_lan_add_device(adapter); if (err) dev_info(&pdev->dev, "Failed to add VF to client API service list: %d\n", err); @@@ -3580,9 -3554,9 +3554,9 @@@ if (netdev->features & NETIF_F_GRO) dev_info(&pdev->dev, "GRO is enabled\n");
- adapter->state = __I40EVF_DOWN; - set_bit(__I40E_VSI_DOWN, adapter->vsi.state); - i40evf_misc_irq_enable(adapter); + adapter->state = __IAVF_DOWN; + set_bit(__IAVF_VSI_DOWN, adapter->vsi.state); + iavf_misc_irq_enable(adapter); wake_up(&adapter->down_waitqueue);
adapter->rss_key = kzalloc(adapter->rss_key_size, GFP_KERNEL); @@@ -3591,31 -3565,31 +3565,31 @@@ goto err_mem;
if (RSS_AQ(adapter)) { - adapter->aq_required |= I40EVF_FLAG_AQ_CONFIGURE_RSS; + adapter->aq_required |= IAVF_FLAG_AQ_CONFIGURE_RSS; mod_timer_pending(&adapter->watchdog_timer, jiffies + 1); } else { - i40evf_init_rss(adapter); + iavf_init_rss(adapter); } return; restart: schedule_delayed_work(&adapter->init_task, msecs_to_jiffies(30)); return; err_mem: - i40evf_free_rss(adapter); + iavf_free_rss(adapter); err_register: - i40evf_free_misc_irq(adapter); + iavf_free_misc_irq(adapter); err_sw_init: - i40evf_reset_interrupt_capability(adapter); + iavf_reset_interrupt_capability(adapter); err_alloc: kfree(adapter->vf_res); adapter->vf_res = NULL; err: /* Things went into the weeds, so try again later */ - if (++adapter->aq_wait_count > I40EVF_AQ_MAX_ERR) { + if (++adapter->aq_wait_count > IAVF_AQ_MAX_ERR) { dev_err(&pdev->dev, "Failed to communicate with PF; waiting before retry\n"); - adapter->flags |= I40EVF_FLAG_PF_COMMS_FAILED; - i40evf_shutdown_adminq(hw); - adapter->state = __I40EVF_STARTUP; + adapter->flags |= IAVF_FLAG_PF_COMMS_FAILED; + iavf_shutdown_adminq(hw); + adapter->state = __IAVF_STARTUP; schedule_delayed_work(&adapter->init_task, HZ * 5); return; } @@@ -3623,21 -3597,21 +3597,21 @@@ }
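The error path that closes iavf_init_task() implements capped fast retry with a long back-off: the task reschedules itself quickly while a failure counter stays under IAVF_AQ_MAX_ERR, then flags the PF as unreachable and falls back to a slow cadence. A compact sketch of that policy with made-up constants:

#include <stdio.h>

#define AQ_MAX_ERR 5 /* illustrative fast-retry budget */

/* Delay in ms before the next init attempt: fast retries up to the
 * budget, then a long back-off once the PF looks unreachable. */
static unsigned int next_retry_delay_ms(unsigned int *aq_wait_count)
{
    if (++(*aq_wait_count) > AQ_MAX_ERR)
        return 5000; /* comms failed: retry every 5 s */
    return 30;       /* transient error: retry quickly */
}

int main(void)
{
    unsigned int count = 0;
    int attempt;

    for (attempt = 0; attempt < 7; attempt++)
        printf("attempt %d -> retry in %u ms\n", attempt,
               next_retry_delay_ms(&count));
    return 0;
}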
/** - * i40evf_shutdown - Shutdown the device in preparation for a reboot + * iavf_shutdown - Shutdown the device in preparation for a reboot * @pdev: pci device structure **/ -static void i40evf_shutdown(struct pci_dev *pdev) +static void iavf_shutdown(struct pci_dev *pdev) { struct net_device *netdev = pci_get_drvdata(pdev); - struct i40evf_adapter *adapter = netdev_priv(netdev); + struct iavf_adapter *adapter = netdev_priv(netdev);
netif_device_detach(netdev);
if (netif_running(netdev)) - i40evf_close(netdev); + iavf_close(netdev);
/* Prevent the watchdog from running. */ - adapter->state = __I40EVF_REMOVE; + adapter->state = __IAVF_REMOVE; adapter->aq_required = 0;
#ifdef CONFIG_PM @@@ -3648,21 -3622,21 +3622,21 @@@ }
/** - * i40evf_probe - Device Initialization Routine + * iavf_probe - Device Initialization Routine * @pdev: PCI device information struct - * @ent: entry in i40evf_pci_tbl + * @ent: entry in iavf_pci_tbl * * Returns 0 on success, negative on failure * - * i40evf_probe initializes an adapter identified by a pci_dev structure. + * iavf_probe initializes an adapter identified by a pci_dev structure. * The OS initialization, configuring of the adapter private structure, * and a hardware reset occur. **/ -static int i40evf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) +static int iavf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { struct net_device *netdev; - struct i40evf_adapter *adapter = NULL; - struct i40e_hw *hw = NULL; + struct iavf_adapter *adapter = NULL; + struct iavf_hw *hw = NULL; int err;
err = pci_enable_device(pdev); @@@ -3679,7 -3653,7 +3653,7 @@@ } }
- err = pci_request_regions(pdev, i40evf_driver_name); + err = pci_request_regions(pdev, iavf_driver_name); if (err) { dev_err(&pdev->dev, "pci_request_regions failed 0x%x\n", err); @@@ -3690,8 -3664,8 +3664,8 @@@
pci_set_master(pdev);
- netdev = alloc_etherdev_mq(sizeof(struct i40evf_adapter), - I40EVF_MAX_REQ_QUEUES); + netdev = alloc_etherdev_mq(sizeof(struct iavf_adapter), + IAVF_MAX_REQ_QUEUES); if (!netdev) { err = -ENOMEM; goto err_alloc_etherdev; @@@ -3709,7 -3683,7 +3683,7 @@@ hw->back = adapter;
adapter->msg_enable = BIT(DEFAULT_DEBUG_LEVEL_SHIFT) - 1; - adapter->state = __I40EVF_STARTUP; + adapter->state = __IAVF_STARTUP;
/* Call save state here because it relies on the adapter struct. */ pci_save_state(pdev); @@@ -3742,11 -3716,11 +3716,11 @@@ INIT_LIST_HEAD(&adapter->vlan_filter_list); INIT_LIST_HEAD(&adapter->cloud_filter_list);
- INIT_WORK(&adapter->reset_task, i40evf_reset_task); - INIT_WORK(&adapter->adminq_task, i40evf_adminq_task); - INIT_WORK(&adapter->watchdog_task, i40evf_watchdog_task); - INIT_DELAYED_WORK(&adapter->client_task, i40evf_client_task); - INIT_DELAYED_WORK(&adapter->init_task, i40evf_init_task); + INIT_WORK(&adapter->reset_task, iavf_reset_task); + INIT_WORK(&adapter->adminq_task, iavf_adminq_task); + INIT_WORK(&adapter->watchdog_task, iavf_watchdog_task); + INIT_DELAYED_WORK(&adapter->client_task, iavf_client_task); + INIT_DELAYED_WORK(&adapter->init_task, iavf_init_task); schedule_delayed_work(&adapter->init_task, msecs_to_jiffies(5 * (pdev->devfn & 0x07)));
@@@ -3767,33 -3741,33 +3741,33 @@@ err_dma
#ifdef CONFIG_PM /** - * i40evf_suspend - Power management suspend routine + * iavf_suspend - Power management suspend routine * @pdev: PCI device information struct * @state: unused * * Called when the system (VM) is entering sleep/suspend. **/ -static int i40evf_suspend(struct pci_dev *pdev, pm_message_t state) +static int iavf_suspend(struct pci_dev *pdev, pm_message_t state) { struct net_device *netdev = pci_get_drvdata(pdev); - struct i40evf_adapter *adapter = netdev_priv(netdev); + struct iavf_adapter *adapter = netdev_priv(netdev); int retval = 0;
netif_device_detach(netdev);
- while (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK, + while (test_and_set_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section)) usleep_range(500, 1000);
if (netif_running(netdev)) { rtnl_lock(); - i40evf_down(adapter); + iavf_down(adapter); rtnl_unlock(); } - i40evf_free_misc_irq(adapter); - i40evf_reset_interrupt_capability(adapter); + iavf_free_misc_irq(adapter); + iavf_reset_interrupt_capability(adapter);
- clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section); + clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section);
retval = pci_save_state(pdev); if (retval) @@@ -3805,14 -3779,14 +3779,14 @@@ }
/** - * i40evf_resume - Power management resume routine + * iavf_resume - Power management resume routine * @pdev: PCI device information struct * * Called when the system (VM) is resumed from sleep/suspend. **/ -static int i40evf_resume(struct pci_dev *pdev) +static int iavf_resume(struct pci_dev *pdev) { - struct i40evf_adapter *adapter = pci_get_drvdata(pdev); + struct iavf_adapter *adapter = pci_get_drvdata(pdev); struct net_device *netdev = adapter->netdev; u32 err;
@@@ -3831,13 -3805,13 +3805,13 @@@ pci_set_master(pdev);
rtnl_lock(); - err = i40evf_set_interrupt_capability(adapter); + err = iavf_set_interrupt_capability(adapter); if (err) { rtnl_unlock(); dev_err(&pdev->dev, "Cannot enable MSI-X interrupts.\n"); return err; } - err = i40evf_request_misc_irq(adapter); + err = iavf_request_misc_irq(adapter); rtnl_unlock(); if (err) { dev_err(&pdev->dev, "Cannot get interrupt vector.\n"); @@@ -3853,25 -3827,25 +3827,25 @@@
#endif /* CONFIG_PM */ /** - * i40evf_remove - Device Removal Routine + * iavf_remove - Device Removal Routine * @pdev: PCI device information struct * - * i40evf_remove is called by the PCI subsystem to alert the driver + * iavf_remove is called by the PCI subsystem to alert the driver * that it should release a PCI device. This could be caused by a * Hot-Plug event, or because the driver is going to be removed from * memory. **/ -static void i40evf_remove(struct pci_dev *pdev) +static void iavf_remove(struct pci_dev *pdev) { struct net_device *netdev = pci_get_drvdata(pdev); - struct i40evf_adapter *adapter = netdev_priv(netdev); - struct i40evf_vlan_filter *vlf, *vlftmp; - struct i40evf_mac_filter *f, *ftmp; - struct i40evf_cloud_filter *cf, *cftmp; - struct i40e_hw *hw = &adapter->hw; + struct iavf_adapter *adapter = netdev_priv(netdev); + struct iavf_vlan_filter *vlf, *vlftmp; + struct iavf_mac_filter *f, *ftmp; + struct iavf_cloud_filter *cf, *cftmp; + struct iavf_hw *hw = &adapter->hw; int err; /* Indicate we are in remove and not to run reset_task */ - set_bit(__I40EVF_IN_REMOVE_TASK, &adapter->crit_section); + set_bit(__IAVF_IN_REMOVE_TASK, &adapter->crit_section); cancel_delayed_work_sync(&adapter->init_task); cancel_work_sync(&adapter->reset_task); cancel_delayed_work_sync(&adapter->client_task); @@@ -3880,39 -3854,37 +3854,39 @@@ adapter->netdev_registered = false; } if (CLIENT_ALLOWED(adapter)) { - err = i40evf_lan_del_device(adapter); + err = iavf_lan_del_device(adapter); if (err) dev_warn(&pdev->dev, "Failed to delete client device: %d\n", err); }
/* Shut down all the garbage mashers on the detention level */ - adapter->state = __I40EVF_REMOVE; + adapter->state = __IAVF_REMOVE; adapter->aq_required = 0; - adapter->flags &= ~I40EVF_FLAG_REINIT_ITR_NEEDED; - i40evf_request_reset(adapter); + adapter->flags &= ~IAVF_FLAG_REINIT_ITR_NEEDED; + iavf_request_reset(adapter); msleep(50); /* If the FW isn't responding, kick it once, but only once. */ - if (!i40evf_asq_done(hw)) { - i40evf_request_reset(adapter); + if (!iavf_asq_done(hw)) { + iavf_request_reset(adapter); msleep(50); } - i40evf_free_all_tx_resources(adapter); - i40evf_free_all_rx_resources(adapter); - i40evf_misc_irq_disable(adapter); - i40evf_free_misc_irq(adapter); - i40evf_reset_interrupt_capability(adapter); - i40evf_free_q_vectors(adapter); + iavf_free_all_tx_resources(adapter); + iavf_free_all_rx_resources(adapter); + iavf_misc_irq_disable(adapter); + iavf_free_misc_irq(adapter); + iavf_reset_interrupt_capability(adapter); + iavf_free_q_vectors(adapter);
if (adapter->watchdog_timer.function) del_timer_sync(&adapter->watchdog_timer);
- i40evf_free_rss(adapter); + cancel_work_sync(&adapter->adminq_task); + + iavf_free_rss(adapter);
if (hw->aq.asq.count) - i40evf_shutdown_adminq(hw); + iavf_shutdown_adminq(hw);
/* destroy the locks only once, here */ mutex_destroy(&hw->aq.arq_mutex); @@@ -3920,9 -3892,9 +3894,9 @@@
iounmap(hw->hw_addr); pci_release_regions(pdev); - i40evf_free_all_tx_resources(adapter); - i40evf_free_all_rx_resources(adapter); - i40evf_free_queues(adapter); + iavf_free_all_tx_resources(adapter); + iavf_free_all_rx_resources(adapter); + iavf_free_queues(adapter); kfree(adapter->vf_res); spin_lock_bh(&adapter->mac_vlan_list_lock); /* If we got removed before an up/down sequence, we've got a filter @@@ -3954,57 -3926,57 +3928,57 @@@ pci_disable_device(pdev); }
-static struct pci_driver i40evf_driver = { - .name = i40evf_driver_name, - .id_table = i40evf_pci_tbl, - .probe = i40evf_probe, - .remove = i40evf_remove, +static struct pci_driver iavf_driver = { + .name = iavf_driver_name, + .id_table = iavf_pci_tbl, + .probe = iavf_probe, + .remove = iavf_remove, #ifdef CONFIG_PM - .suspend = i40evf_suspend, - .resume = i40evf_resume, + .suspend = iavf_suspend, + .resume = iavf_resume, #endif - .shutdown = i40evf_shutdown, + .shutdown = iavf_shutdown, };
/** - * i40e_init_module - Driver Registration Routine + * iavf_init_module - Driver Registration Routine * - * i40e_init_module is the first routine called when the driver is + * iavf_init_module is the first routine called when the driver is * loaded. All it does is register with the PCI subsystem. **/ -static int __init i40evf_init_module(void) +static int __init iavf_init_module(void) { int ret;
- pr_info("i40evf: %s - version %s\n", i40evf_driver_string, - i40evf_driver_version); + pr_info("iavf: %s - version %s\n", iavf_driver_string, + iavf_driver_version);
- pr_info("%s\n", i40evf_copyright); + pr_info("%s\n", iavf_copyright);
- i40evf_wq = alloc_workqueue("%s", WQ_UNBOUND | WQ_MEM_RECLAIM, 1, - i40evf_driver_name); - if (!i40evf_wq) { - pr_err("%s: Failed to create workqueue\n", i40evf_driver_name); + iavf_wq = alloc_workqueue("%s", WQ_UNBOUND | WQ_MEM_RECLAIM, 1, + iavf_driver_name); + if (!iavf_wq) { + pr_err("%s: Failed to create workqueue\n", iavf_driver_name); return -ENOMEM; } - ret = pci_register_driver(&i40evf_driver); + ret = pci_register_driver(&iavf_driver); return ret; }
-module_init(i40evf_init_module); +module_init(iavf_init_module);
/** - * i40e_exit_module - Driver Exit Cleanup Routine + * iavf_exit_module - Driver Exit Cleanup Routine * - * i40e_exit_module is called just before the driver is removed + * iavf_exit_module is called just before the driver is removed * from memory. **/ -static void __exit i40evf_exit_module(void) +static void __exit iavf_exit_module(void) { - pci_unregister_driver(&i40evf_driver); - destroy_workqueue(i40evf_wq); + pci_unregister_driver(&iavf_driver); + destroy_workqueue(iavf_wq); }
-module_exit(i40evf_exit_module); +module_exit(iavf_exit_module);
-/* i40evf_main.c */ +/* iavf_main.c */ diff --combined drivers/net/ethernet/intel/ice/ice_main.c index d54e63785ff0,3f047bb43348..4f5fe6af6dac --- a/drivers/net/ethernet/intel/ice/ice_main.c +++ b/drivers/net/ethernet/intel/ice/ice_main.c @@@ -7,7 -7,7 +7,7 @@@
#include "ice.h"
-#define DRV_VERSION "ice-0.7.0-k" +#define DRV_VERSION "0.7.1-k" #define DRV_SUMMARY "Intel(R) Ethernet Connection E800 Series Linux Driver" const char ice_drv_ver[] = DRV_VERSION; static const char ice_driver_string[] = DRV_SUMMARY; @@@ -15,7 -15,7 +15,7 @@@ static const char ice_copyright[] = "Co
MODULE_AUTHOR("Intel Corporation, linux.nics@intel.com"); MODULE_DESCRIPTION(DRV_SUMMARY); -MODULE_LICENSE("GPL"); +MODULE_LICENSE("GPL v2"); MODULE_VERSION(DRV_VERSION);
static int debug = -1; @@@ -32,86 -32,10 +32,86 @@@ static const struct net_device_ops ice_ static void ice_pf_dis_all_vsi(struct ice_pf *pf); static void ice_rebuild(struct ice_pf *pf); static int ice_vsi_release(struct ice_vsi *vsi); +static void ice_vsi_release_all(struct ice_pf *pf); static void ice_update_vsi_stats(struct ice_vsi *vsi); static void ice_update_pf_stats(struct ice_pf *pf);
/** + * ice_get_tx_pending - returns number of Tx descriptors not processed + * @ring: the ring of descriptors + */ +static u32 ice_get_tx_pending(struct ice_ring *ring) +{ + u32 head, tail; + + head = ring->next_to_clean; + tail = readl(ring->tail); + + if (head != tail) + return (head < tail) ? + tail - head : (tail + ring->count - head); + return 0; +} + +/** + * ice_check_for_hang_subtask - check for and recover hung queues + * @pf: pointer to PF struct + */ +static void ice_check_for_hang_subtask(struct ice_pf *pf) +{ + struct ice_vsi *vsi = NULL; + unsigned int i; + u32 v, v_idx; + int packets; + + ice_for_each_vsi(pf, v) + if (pf->vsi[v] && pf->vsi[v]->type == ICE_VSI_PF) { + vsi = pf->vsi[v]; + break; + } + + if (!vsi || test_bit(__ICE_DOWN, vsi->state)) + return; + + if (!(vsi->netdev && netif_carrier_ok(vsi->netdev))) + return; + + for (i = 0; i < vsi->num_txq; i++) { + struct ice_ring *tx_ring = vsi->tx_rings[i]; + + if (tx_ring && tx_ring->desc) { + int itr = ICE_ITR_NONE; + + /* If packet counter has not changed the queue is + * likely stalled, so force an interrupt for this + * queue. + * + * prev_pkt would be negative if there was no + * pending work. + */ + packets = tx_ring->stats.pkts & INT_MAX; + if (tx_ring->tx_stats.prev_pkt == packets) { + /* Trigger sw interrupt to revive the queue */ + v_idx = tx_ring->q_vector->v_idx; + wr32(&vsi->back->hw, + GLINT_DYN_CTL(vsi->base_vector + v_idx), + (itr << GLINT_DYN_CTL_ITR_INDX_S) | + GLINT_DYN_CTL_SWINT_TRIG_M | + GLINT_DYN_CTL_INTENA_MSK_M); + continue; + } + + /* Memory barrier between read of packet count and call + * to ice_get_tx_pending() + */ + smp_rmb(); + tx_ring->tx_stats.prev_pkt = + ice_get_tx_pending(tx_ring) ? packets : -1; + } + } +} + +/** * ice_get_free_slot - get the next non-NULL location index in array * @array: array to search * @size: size of the array @@@ -350,63 -274,6 +350,63 @@@ static bool ice_vsi_fltr_changed(struc }
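ice_get_tx_pending() above is plain circular-buffer arithmetic: the number of descriptors between the consumer index (head) and the producer index (tail), accounting for wraparound at the ring size. The same calculation extracted into a standalone, testable form:

#include <assert.h>
#include <stdio.h>

/* Descriptors outstanding between the consumer (head) and producer
 * (tail) indices of a circular ring of 'count' entries. */
static unsigned int ring_pending(unsigned int head, unsigned int tail,
                                 unsigned int count)
{
    if (head == tail)
        return 0;
    return (head < tail) ? tail - head : tail + count - head;
}

int main(void)
{
    assert(ring_pending(3, 7, 16) == 4);  /* no wrap */
    assert(ring_pending(14, 2, 16) == 4); /* wrapped past the end */
    assert(ring_pending(5, 5, 16) == 0);  /* empty ring */
    printf("ring arithmetic ok\n");
    return 0;
}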
/** + * ice_cfg_vlan_pruning - enable or disable VLAN pruning on the VSI + * @vsi: VSI to enable or disable VLAN pruning on + * @ena: set to true to enable VLAN pruning and false to disable it + * + * returns 0 if VSI is updated, negative otherwise + */ +static int ice_cfg_vlan_pruning(struct ice_vsi *vsi, bool ena) +{ + struct ice_vsi_ctx *ctxt; + struct device *dev; + int status; + + if (!vsi) + return -EINVAL; + + dev = &vsi->back->pdev->dev; + ctxt = devm_kzalloc(dev, sizeof(*ctxt), GFP_KERNEL); + if (!ctxt) + return -ENOMEM; + + ctxt->info = vsi->info; + + if (ena) { + ctxt->info.sec_flags |= + ICE_AQ_VSI_SEC_TX_VLAN_PRUNE_ENA << + ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S; + ctxt->info.sw_flags2 |= ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA; + } else { + ctxt->info.sec_flags &= + ~(ICE_AQ_VSI_SEC_TX_VLAN_PRUNE_ENA << + ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S); + ctxt->info.sw_flags2 &= ~ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA; + } + + ctxt->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_SECURITY_VALID | + ICE_AQ_VSI_PROP_SW_VALID); + ctxt->vsi_num = vsi->vsi_num; + status = ice_aq_update_vsi(&vsi->back->hw, ctxt, NULL); + if (status) { + netdev_err(vsi->netdev, "%sabling VLAN pruning on VSI %d failed, err = %d, aq_err = %d\n", + ena ? "Ena" : "Dis", vsi->vsi_num, status, + vsi->back->hw.adminq.sq_last_status); + goto err_out; + } + + vsi->info.sec_flags = ctxt->info.sec_flags; + vsi->info.sw_flags2 = ctxt->info.sw_flags2; + + devm_kfree(dev, ctxt); + return 0; + +err_out: + devm_kfree(dev, ctxt); + return -EIO; +} + +/** * ice_vsi_sync_fltr - Update the VSI filter list to the HW * @vsi: ptr to the VSI * @@@ -589,13 -456,23 +589,13 @@@ static voi ice_prepare_for_reset(struct ice_pf *pf) { struct ice_hw *hw = &pf->hw; - u32 v; - - ice_for_each_vsi(pf, v) - if (pf->vsi[v]) - ice_remove_vsi_fltr(hw, pf->vsi[v]->vsi_num); - - dev_dbg(&pf->pdev->dev, "Tearing down internal switch for reset\n");
/* disable the VSIs and their queues that are not already DOWN */ - /* pf_dis_all_vsi modifies netdev structures -rtnl_lock needed */ ice_pf_dis_all_vsi(pf);
- ice_for_each_vsi(pf, v) - if (pf->vsi[v]) - pf->vsi[v]->vsi_num = 0; - ice_shutdown_all_ctrlq(hw); + + set_bit(__ICE_PREPARED_FOR_RESET, pf->state); }
/** @@@ -613,32 -490,26 +613,32 @@@ static void ice_do_reset(struct ice_pf WARN_ON(in_interrupt());
/* PFR is a bit of a special case because it doesn't result in an OICR - * interrupt. So for PFR, we prepare for reset, issue the reset and - * rebuild sequentially. + * interrupt. Set pending bit here which otherwise gets set in the + * OICR handler. */ - if (reset_type == ICE_RESET_PFR) { + if (reset_type == ICE_RESET_PFR) set_bit(__ICE_RESET_RECOVERY_PENDING, pf->state); - ice_prepare_for_reset(pf); - } + + ice_prepare_for_reset(pf);
/* trigger the reset */ if (ice_reset(hw, reset_type)) { dev_err(dev, "reset %d failed\n", reset_type); set_bit(__ICE_RESET_FAILED, pf->state); clear_bit(__ICE_RESET_RECOVERY_PENDING, pf->state); + clear_bit(__ICE_PREPARED_FOR_RESET, pf->state); return; }
+ /* PFR is a bit of a special case because it doesn't result in an OICR + * interrupt. So for PFR, rebuild after the reset and clear the reset- + * associated state bits. + */ if (reset_type == ICE_RESET_PFR) { pf->pfr_count++; ice_rebuild(pf); clear_bit(__ICE_RESET_RECOVERY_PENDING, pf->state); + clear_bit(__ICE_PREPARED_FOR_RESET, pf->state); } }
@@@ -648,57 -519,48 +648,57 @@@ */ static void ice_reset_subtask(struct ice_pf *pf) { - enum ice_reset_req reset_type; - - rtnl_lock(); + enum ice_reset_req reset_type = ICE_RESET_INVAL;
/* When a CORER/GLOBR/EMPR is about to happen, the hardware triggers an - * OICR interrupt. The OICR handler (ice_misc_intr) determines what - * type of reset happened and sets __ICE_RESET_RECOVERY_PENDING bit in - * pf->state. So if reset/recovery is pending (as indicated by this bit) - * we do a rebuild and return. + * OICR interrupt. The OICR handler (ice_misc_intr) determines what type + * of reset is pending and sets bits in pf->state indicating the reset + * type and __ICE_RESET_RECOVERY_PENDING. So, if the latter bit is set, + * prepare for the pending reset if not already prepared (for PF + * software-initiated global resets the software should already be + * prepared, as indicated by __ICE_PREPARED_FOR_RESET; for global resets + * initiated by firmware or by software on other PFs, that bit is not + * set, so prepare now), then poll for reset completion, rebuild, and + * return. */ if (ice_is_reset_recovery_pending(pf->state)) { clear_bit(__ICE_GLOBR_RECV, pf->state); clear_bit(__ICE_CORER_RECV, pf->state); - ice_prepare_for_reset(pf); + if (!test_bit(__ICE_PREPARED_FOR_RESET, pf->state)) + ice_prepare_for_reset(pf);
/* make sure we are ready to rebuild */ - if (ice_check_reset(&pf->hw)) + if (ice_check_reset(&pf->hw)) { set_bit(__ICE_RESET_FAILED, pf->state); - else + } else { + /* done with reset; start rebuild */ + pf->hw.reset_ongoing = false; ice_rebuild(pf); - clear_bit(__ICE_RESET_RECOVERY_PENDING, pf->state); - goto unlock; + /* clear bit to resume normal operations, but + * ICE_NEEDS_RESTART bit is set in case rebuild failed + */ + clear_bit(__ICE_RESET_RECOVERY_PENDING, pf->state); + clear_bit(__ICE_PREPARED_FOR_RESET, pf->state); + } + + return; }
/* No pending resets to finish processing. Check for new resets */ + if (test_and_clear_bit(__ICE_PFR_REQ, pf->state)) + reset_type = ICE_RESET_PFR; + if (test_and_clear_bit(__ICE_CORER_REQ, pf->state)) + reset_type = ICE_RESET_CORER; if (test_and_clear_bit(__ICE_GLOBR_REQ, pf->state)) reset_type = ICE_RESET_GLOBR; - else if (test_and_clear_bit(__ICE_CORER_REQ, pf->state)) - reset_type = ICE_RESET_CORER; - else if (test_and_clear_bit(__ICE_PFR_REQ, pf->state)) - reset_type = ICE_RESET_PFR; - else - goto unlock; + /* If no valid reset type requested just return */ + if (reset_type == ICE_RESET_INVAL) + return;
- /* reset if not already down or resetting */ + /* reset if not already down or busy */ if (!test_bit(__ICE_DOWN, pf->state) && !test_bit(__ICE_CFG_BUSY, pf->state)) { ice_do_reset(pf, reset_type); } - -unlock: - rtnl_unlock(); }
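ice_reset_subtask() orders recovery as prepare-once (guarded by __ICE_PREPARED_FOR_RESET), poll for hardware completion, rebuild, then clear the state bits. A toy sketch of that sequencing with plain booleans in place of the pf->state bitmap; check_reset_done() is a hypothetical stand-in for ice_check_reset():

#include <stdbool.h>
#include <stdio.h>

static bool recovery_pending = true; /* set by the interrupt handler */
static bool prepared;
static bool reset_failed;

static bool check_reset_done(void) { return true; } /* assume HW ready */

static void reset_subtask(void)
{
    if (!recovery_pending)
        return;

    /* prepare exactly once, even across repeated subtask runs */
    if (!prepared) {
        /* quiesce VSIs, shut down control queues, ... */
        prepared = true;
    }

    if (!check_reset_done()) {
        reset_failed = true; /* rebuild never starts; stays pending */
        return;
    }

    /* rebuild, then clear the bits to resume normal operation */
    recovery_pending = false;
    prepared = false;
}

int main(void)
{
    reset_subtask();
    printf("pending=%d prepared=%d failed=%d\n",
           recovery_pending, prepared, reset_failed);
    return 0;
}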
/** @@@ -1041,9 -903,6 +1041,9 @@@ static int __ice_clean_ctrlq(struct ice dev_err(&pf->pdev->dev, "Could not handle link event\n"); break; + case ice_aqc_opc_fw_logging: + ice_output_fw_log(hw, &event.desc, event.msg_buf); + break; default: dev_dbg(&pf->pdev->dev, "%s Receive Queue unknown event 0x%04x ignored\n", @@@ -1107,9 -966,8 +1107,9 @@@ static void ice_clean_adminq_subtask(st */ static void ice_service_task_schedule(struct ice_pf *pf) { - if (!test_bit(__ICE_DOWN, pf->state) && - !test_and_set_bit(__ICE_SERVICE_SCHED, pf->state)) + if (!test_bit(__ICE_SERVICE_DIS, pf->state) && + !test_and_set_bit(__ICE_SERVICE_SCHED, pf->state) && + !test_bit(__ICE_NEEDS_RESTART, pf->state)) queue_work(ice_wq, &pf->serv_task); }
@@@ -1127,22 -985,6 +1127,22 @@@ static void ice_service_task_complete(s }
/** + * ice_service_task_stop - stop service task and cancel works + * @pf: board private structure + */ +static void ice_service_task_stop(struct ice_pf *pf) +{ + set_bit(__ICE_SERVICE_DIS, pf->state); + + if (pf->serv_tmr.function) + del_timer_sync(&pf->serv_tmr); + if (pf->serv_task.func) + cancel_work_sync(&pf->serv_task); + + clear_bit(__ICE_SERVICE_SCHED, pf->state); +} + +/** * ice_service_timer - timer callback to schedule service task * @t: pointer to timer_list */ @@@ -1155,114 -997,6 +1155,114 @@@ static void ice_service_timer(struct ti }
/** + * ice_handle_mdd_event - handle malicious driver detect event + * @pf: pointer to the PF structure + * + * Called from service task. OICR interrupt handler indicates MDD event + */ +static void ice_handle_mdd_event(struct ice_pf *pf) +{ + struct ice_hw *hw = &pf->hw; + bool mdd_detected = false; + u32 reg; + + if (!test_bit(__ICE_MDD_EVENT_PENDING, pf->state)) + return; + + /* find what triggered the MDD event */ + reg = rd32(hw, GL_MDET_TX_PQM); + if (reg & GL_MDET_TX_PQM_VALID_M) { + u8 pf_num = (reg & GL_MDET_TX_PQM_PF_NUM_M) >> + GL_MDET_TX_PQM_PF_NUM_S; + u16 vf_num = (reg & GL_MDET_TX_PQM_VF_NUM_M) >> + GL_MDET_TX_PQM_VF_NUM_S; + u8 event = (reg & GL_MDET_TX_PQM_MAL_TYPE_M) >> + GL_MDET_TX_PQM_MAL_TYPE_S; + u16 queue = ((reg & GL_MDET_TX_PQM_QNUM_M) >> + GL_MDET_TX_PQM_QNUM_S); + + if (netif_msg_tx_err(pf)) + dev_info(&pf->pdev->dev, "Malicious Driver Detection event %d on TX queue %d PF# %d VF# %d\n", + event, queue, pf_num, vf_num); + wr32(hw, GL_MDET_TX_PQM, 0xffffffff); + mdd_detected = true; + } + + reg = rd32(hw, GL_MDET_TX_TCLAN); + if (reg & GL_MDET_TX_TCLAN_VALID_M) { + u8 pf_num = (reg & GL_MDET_TX_TCLAN_PF_NUM_M) >> + GL_MDET_TX_TCLAN_PF_NUM_S; + u16 vf_num = (reg & GL_MDET_TX_TCLAN_VF_NUM_M) >> + GL_MDET_TX_TCLAN_VF_NUM_S; + u8 event = (reg & GL_MDET_TX_TCLAN_MAL_TYPE_M) >> + GL_MDET_TX_TCLAN_MAL_TYPE_S; + u16 queue = ((reg & GL_MDET_TX_TCLAN_QNUM_M) >> + GL_MDET_TX_TCLAN_QNUM_S); + + if (netif_msg_rx_err(pf)) + dev_info(&pf->pdev->dev, "Malicious Driver Detection event %d on TX queue %d PF# %d VF# %d\n", + event, queue, pf_num, vf_num); + wr32(hw, GL_MDET_TX_TCLAN, 0xffffffff); + mdd_detected = true; + } + + reg = rd32(hw, GL_MDET_RX); + if (reg & GL_MDET_RX_VALID_M) { + u8 pf_num = (reg & GL_MDET_RX_PF_NUM_M) >> + GL_MDET_RX_PF_NUM_S; + u16 vf_num = (reg & GL_MDET_RX_VF_NUM_M) >> + GL_MDET_RX_VF_NUM_S; + u8 event = (reg & GL_MDET_RX_MAL_TYPE_M) >> + GL_MDET_RX_MAL_TYPE_S; + u16 queue = ((reg & GL_MDET_RX_QNUM_M) >> + GL_MDET_RX_QNUM_S); + + if (netif_msg_rx_err(pf)) + dev_info(&pf->pdev->dev, "Malicious Driver Detection event %d on RX queue %d PF# %d VF# %d\n", + event, queue, pf_num, vf_num); + wr32(hw, GL_MDET_RX, 0xffffffff); + mdd_detected = true; + } + + if (mdd_detected) { + bool pf_mdd_detected = false; + + reg = rd32(hw, PF_MDET_TX_PQM); + if (reg & PF_MDET_TX_PQM_VALID_M) { + wr32(hw, PF_MDET_TX_PQM, 0xFFFF); + dev_info(&pf->pdev->dev, "TX driver issue detected, PF reset issued\n"); + pf_mdd_detected = true; + } + + reg = rd32(hw, PF_MDET_TX_TCLAN); + if (reg & PF_MDET_TX_TCLAN_VALID_M) { + wr32(hw, PF_MDET_TX_TCLAN, 0xFFFF); + dev_info(&pf->pdev->dev, "TX driver issue detected, PF reset issued\n"); + pf_mdd_detected = true; + } + + reg = rd32(hw, PF_MDET_RX); + if (reg & PF_MDET_RX_VALID_M) { + wr32(hw, PF_MDET_RX, 0xFFFF); + dev_info(&pf->pdev->dev, "RX driver issue detected, PF reset issued\n"); + pf_mdd_detected = true; + } + /* Queue belongs to the PF initiate a reset */ + if (pf_mdd_detected) { + set_bit(__ICE_NEEDS_RESTART, pf->state); + ice_service_task_schedule(pf); + } + } + + /* re-enable MDD interrupt cause */ + clear_bit(__ICE_MDD_EVENT_PENDING, pf->state); + reg = rd32(hw, PFINT_OICR_ENA); + reg |= PFINT_OICR_MAL_DETECT_M; + wr32(hw, PFINT_OICR_ENA, reg); + ice_flush(hw); +} + +/** * ice_service_task - manage and run subtasks * @work: pointer to work_struct contained by the PF struct */ @@@ -1276,17 -1010,14 +1276,17 @@@ static void ice_service_task(struct wor /* process reset requests first */ ice_reset_subtask(pf);
- /* bail if a reset/recovery cycle is pending */ + /* bail if a reset/recovery cycle is pending or rebuild failed */ if (ice_is_reset_recovery_pending(pf->state) || - test_bit(__ICE_SUSPENDED, pf->state)) { + test_bit(__ICE_SUSPENDED, pf->state) || + test_bit(__ICE_NEEDS_RESTART, pf->state)) { ice_service_task_complete(pf); return; }
+ ice_check_for_hang_subtask(pf); ice_sync_fltr_subtask(pf); + ice_handle_mdd_event(pf); ice_watchdog_subtask(pf); ice_clean_adminq_subtask(pf);
@@@ -1298,7 -1029,6 +1298,7 @@@ * schedule the service task now. */ if (time_after(jiffies, (start_time + pf->serv_tmr_period)) || + test_bit(__ICE_MDD_EVENT_PENDING, pf->state) || test_bit(__ICE_ADMINQ_EVENT_PENDING, pf->state)) mod_timer(&pf->serv_tmr, jiffies); } @@@ -1427,7 -1157,7 +1427,7 @@@ static void ice_vsi_delete(struct ice_v
memcpy(&ctxt.info, &vsi->info, sizeof(struct ice_aqc_vsi_props));
- status = ice_aq_free_vsi(&pf->hw, &ctxt, false, NULL); + status = ice_free_vsi(&pf->hw, vsi->idx, &ctxt, false, NULL); if (status) dev_err(&pf->pdev->dev, "Failed to delete VSI %i in FW\n", vsi->vsi_num); @@@ -1690,13 -1420,13 +1690,13 @@@ static void ice_set_rss_vsi_ctx(struct }
/** - * ice_vsi_add - Create a new VSI or fetch preallocated VSI + * ice_vsi_init - Create and initialize a VSI * @vsi: the VSI being configured * * This initializes a VSI context depending on the VSI type to be added and * passes it down to the add_vsi aq command to create a new VSI. */ -static int ice_vsi_add(struct ice_vsi *vsi) +static int ice_vsi_init(struct ice_vsi *vsi) { struct ice_vsi_ctx ctxt = { 0 }; struct ice_pf *pf = vsi->back; @@@ -1723,17 -1453,13 +1723,17 @@@ ctxt.info.sw_id = vsi->port_info->sw_id; ice_vsi_setup_q_map(vsi, &ctxt);
- ret = ice_aq_add_vsi(hw, &ctxt, NULL); + ret = ice_add_vsi(hw, vsi->idx, &ctxt, NULL); if (ret) { - dev_err(&vsi->back->pdev->dev, - "Add VSI AQ call failed, err %d\n", ret); + dev_err(&pf->pdev->dev, + "Add VSI failed, err %d\n", ret); return -EIO; } + + /* keep context for update VSI operations */ vsi->info = ctxt.info; + + /* record VSI number returned */ vsi->vsi_num = ctxt.vsi_num;
return ret; @@@ -2009,14 -1735,8 +2009,14 @@@ static irqreturn_t ice_misc_intr(int __ oicr = rd32(hw, PFINT_OICR); ena_mask = rd32(hw, PFINT_OICR_ENA);
+ if (oicr & PFINT_OICR_MAL_DETECT_M) { + ena_mask &= ~PFINT_OICR_MAL_DETECT_M; + set_bit(__ICE_MDD_EVENT_PENDING, pf->state); + } + if (oicr & PFINT_OICR_GRST_M) { u32 reset; + /* we have a reset warning */ ena_mask &= ~PFINT_OICR_GRST_M; reset = (rd32(hw, GLGEN_RSTAT) & GLGEN_RSTAT_RESET_TYPE_M) >> @@@ -2034,8 -1754,7 +2034,8 @@@ * We also make note of which reset happened so that peer * devices/drivers can be informed. */ - if (!test_bit(__ICE_RESET_RECOVERY_PENDING, pf->state)) { + if (!test_and_set_bit(__ICE_RESET_RECOVERY_PENDING, + pf->state)) { if (reset == ICE_RESET_CORER) set_bit(__ICE_CORER_RECV, pf->state); else if (reset == ICE_RESET_GLOBR) @@@ -2043,20 -1762,7 +2043,20 @@@ else set_bit(__ICE_EMPR_RECV, pf->state);
- set_bit(__ICE_RESET_RECOVERY_PENDING, pf->state); + /* There are a couple of different bits at play here. + * hw->reset_ongoing indicates whether the hardware is + * in reset. This is set to true when a reset interrupt + * is received and set back to false after the driver + * has determined that the hardware is out of reset. + * + * __ICE_RESET_RECOVERY_PENDING in pf->state indicates + * that a post reset rebuild is required before the + * driver is operational again. This is set above. + * + * As this is the start of the reset/rebuild cycle, set + * both to indicate that. + */ + hw->reset_ongoing = true; } }
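Both ice_misc_intr() here and ice_handle_mdd_event() earlier decode hardware registers with the same (reg & MASK) >> SHIFT idiom, e.g. GLGEN_RSTAT_RESET_TYPE_M/_S and the GL_MDET_* fields. A small standalone illustration; the mask and shift values below are fabricated for the example, not the real register layout:

#include <stdint.h>
#include <stdio.h>

/* Fabricated mask/shift pairs; the real values live in the hardware
 * register headers. */
#define EVT_PF_NUM_M 0x00000007u
#define EVT_PF_NUM_S 0
#define EVT_QUEUE_M  0x0003fff0u
#define EVT_QUEUE_S  4

/* Extract a bit-field from a register snapshot: mask, then shift down. */
static uint32_t field(uint32_t reg, uint32_t mask, unsigned int shift)
{
    return (reg & mask) >> shift;
}

int main(void)
{
    uint32_t reg = 0x00000135; /* fabricated register snapshot */

    printf("pf=%u queue=%u\n",
           (unsigned int)field(reg, EVT_PF_NUM_M, EVT_PF_NUM_S),
           (unsigned int)field(reg, EVT_QUEUE_M, EVT_QUEUE_S));
    return 0;
}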
@@@ -2929,12 -2635,14 +2929,12 @@@ ice_vsi_cfg_rss_exit }
/** - * ice_vsi_reinit_setup - return resource and reallocate resource for a VSI - * @vsi: pointer to the ice_vsi - * - * This reallocates the VSIs queue resources + * ice_vsi_rebuild - Rebuild VSI after reset + * @vsi: vsi to be rebuild * * Returns 0 on success and negative value on failure */ -static int ice_vsi_reinit_setup(struct ice_vsi *vsi) +static int ice_vsi_rebuild(struct ice_vsi *vsi) { u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 }; int ret, i; @@@ -2950,7 -2658,7 +2950,7 @@@ ice_vsi_set_num_qs(vsi);
/* Initialize VSI struct elements and create VSI in FW */ - ret = ice_vsi_add(vsi); + ret = ice_vsi_init(vsi); if (ret < 0) goto err_vsi;
@@@ -2960,7 -2668,19 +2960,7 @@@
switch (vsi->type) { case ICE_VSI_PF: - if (!vsi->netdev) { - ret = ice_cfg_netdev(vsi); - if (ret) - goto err_rings; - - ret = register_netdev(vsi->netdev); - if (ret) - goto err_rings; - - netif_carrier_off(vsi->netdev); - netif_tx_stop_all_queues(vsi->netdev); - } - + /* fall through */ ret = ice_vsi_alloc_q_vectors(vsi); if (ret) goto err_rings; @@@ -3012,23 -2732,21 +3012,23 @@@ err_vsi /** * ice_vsi_setup - Set up a VSI by a given type * @pf: board private structure - * @type: VSI type * @pi: pointer to the port_info instance + * @type: VSI type + * @vf_id: defines VF id to which this VSI connects. This field is meant to be + * used only for ICE_VSI_VF VSI type. For other VSI types, should + * fill-in ICE_INVAL_VFID as input. * * This allocates the sw VSI structure and its queue resources. * - * Returns pointer to the successfully allocated and configure VSI sw struct on - * success, otherwise returns NULL on failure. + * Returns pointer to the successfully allocated and configured VSI sw struct on + * success, NULL on failure. */ static struct ice_vsi * -ice_vsi_setup(struct ice_pf *pf, enum ice_vsi_type type, - struct ice_port_info *pi) +ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi, + enum ice_vsi_type type, u16 __always_unused vf_id) { u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 }; struct device *dev = &pf->pdev->dev; - struct ice_vsi_ctx ctxt = { 0 }; struct ice_vsi *vsi; int ret, i;
@@@ -3051,10 -2769,12 +3051,10 @@@ ice_vsi_set_rss_params(vsi);
/* create the VSI */ - ret = ice_vsi_add(vsi); + ret = ice_vsi_init(vsi); if (ret) goto err_vsi;
- ctxt.vsi_num = vsi->vsi_num; - switch (vsi->type) { case ICE_VSI_PF: ret = ice_cfg_netdev(vsi); @@@ -3123,7 -2843,10 +3123,7 @@@ err_register_netdev vsi->netdev = NULL; } err_cfg_netdev: - ret = ice_aq_free_vsi(&pf->hw, &ctxt, false, NULL); - if (ret) - dev_err(&vsi->back->pdev->dev, - "Free VSI AQ call failed, err %d\n", ret); + ice_vsi_delete(vsi); err_vsi: ice_vsi_put_qs(vsi); err_get_qs: @@@ -3135,20 -2858,6 +3135,20 @@@ }
/** + * ice_pf_vsi_setup - Set up a PF VSI + * @pf: board private structure + * @pi: pointer to the port_info instance + * + * Returns pointer to the successfully allocated VSI sw struct on success, + * otherwise returns NULL on failure. + */ +static struct ice_vsi * +ice_pf_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi) +{ + return ice_vsi_setup(pf, pi, ICE_VSI_PF, ICE_INVAL_VFID); +} + +/** * ice_vsi_add_vlan - Add vsi membership for given vlan * @vsi: the vsi being configured * @vid: vlan id to be added @@@ -3199,7 -2908,7 +3199,7 @@@ static int ice_vlan_rx_add_vid(struct n { struct ice_netdev_priv *np = netdev_priv(netdev); struct ice_vsi *vsi = np->vsi; - int ret = 0; + int ret;
if (vid >= VLAN_N_VID) { netdev_err(netdev, "VLAN id requested %d is out of range %d\n", @@@ -3210,13 -2919,6 +3210,13 @@@ if (vsi->info.pvid) return -EINVAL;
+ /* Enable VLAN pruning when VLAN 0 is added */ + if (unlikely(!vid)) { + ret = ice_cfg_vlan_pruning(vsi, true); + if (ret) + return ret; + } + /* Add all VLAN ids including 0 to the switch filter. VLAN id 0 is * needed to continue allowing all untagged packets since VLAN prune * list is applied to all packets by the switch @@@ -3233,19 -2935,16 +3233,19 @@@ * ice_vsi_kill_vlan - Remove VSI membership for a given VLAN * @vsi: the VSI being configured * @vid: VLAN id to be removed + * + * Returns 0 on success and negative on failure */ -static void ice_vsi_kill_vlan(struct ice_vsi *vsi, u16 vid) +static int ice_vsi_kill_vlan(struct ice_vsi *vsi, u16 vid) { struct ice_fltr_list_entry *list; struct ice_pf *pf = vsi->back; LIST_HEAD(tmp_add_list); + int status = 0;
list = devm_kzalloc(&pf->pdev->dev, sizeof(*list), GFP_KERNEL); if (!list) - return; + return -ENOMEM;
list->fltr_info.lkup_type = ICE_SW_LKUP_VLAN; list->fltr_info.fwd_id.vsi_id = vsi->vsi_num; @@@ -3257,14 -2956,11 +3257,14 @@@ INIT_LIST_HEAD(&list->list_entry); list_add(&list->list_entry, &tmp_add_list);
- if (ice_remove_vlan(&pf->hw, &tmp_add_list)) + if (ice_remove_vlan(&pf->hw, &tmp_add_list)) { dev_err(&pf->pdev->dev, "Error removing VLAN %d on vsi %i\n", vid, vsi->vsi_num); + status = -EIO; + }
ice_free_fltr_list(&pf->pdev->dev, &tmp_add_list); + return status; }
/** @@@ -3280,25 -2976,19 +3280,25 @@@ static int ice_vlan_rx_kill_vid(struct { struct ice_netdev_priv *np = netdev_priv(netdev); struct ice_vsi *vsi = np->vsi; + int status;
if (vsi->info.pvid) return -EINVAL;
- /* return code is ignored as there is nothing a user - * can do about failure to remove and a log message was - * already printed from the other function + /* Make sure ice_vsi_kill_vlan is successful before updating VLAN + * information */ - ice_vsi_kill_vlan(vsi, vid); + status = ice_vsi_kill_vlan(vsi, vid); + if (status) + return status;
clear_bit(vid, vsi->active_vlans);
- return 0; + /* Disable VLAN pruning when VLAN 0 is removed */ + if (unlikely(!vid)) + status = ice_cfg_vlan_pruning(vsi, false); + + return status; }
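The two hunks above couple VLAN pruning to the presence of VLAN ID 0: pruning is switched on when VLAN 0 is added and off when it is removed, and the kill path only updates state after the filter removal succeeds. A simplified sketch of that coupling, with a single flag standing in for the VSI context update and a stub for the switch-filter programming:

#include <stdbool.h>
#include <stdio.h>

static bool pruning_enabled; /* stands in for the VSI context flags */

/* Hypothetical filter programming; returns 0 on success. */
static int program_vlan_filter(unsigned int vid, bool add)
{
    (void)vid; (void)add;
    return 0;
}

static int vlan_add(unsigned int vid)
{
    if (vid == 0)
        pruning_enabled = true; /* enable pruning with VLAN 0 */
    return program_vlan_filter(vid, true);
}

static int vlan_kill(unsigned int vid)
{
    int err = program_vlan_filter(vid, false);

    if (err)
        return err; /* leave state untouched on failure */
    if (vid == 0)
        pruning_enabled = false; /* disable pruning with VLAN 0 */
    return 0;
}

int main(void)
{
    vlan_add(0);
    printf("after add: pruning=%d\n", pruning_enabled);
    vlan_kill(0);
    printf("after kill: pruning=%d\n", pruning_enabled);
    return 0;
}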
/** @@@ -3314,48 -3004,50 +3314,48 @@@ static int ice_setup_pf_sw(struct ice_p struct ice_vsi *vsi; int status = 0;
- if (!ice_is_reset_recovery_pending(pf->state)) { - vsi = ice_vsi_setup(pf, ICE_VSI_PF, pf->hw.port_info); - if (!vsi) { - status = -ENOMEM; - goto error_exit; - } - } else { - vsi = pf->vsi[0]; - status = ice_vsi_reinit_setup(vsi); - if (status < 0) - return -EIO; + if (ice_is_reset_recovery_pending(pf->state)) + return -EBUSY; + + vsi = ice_pf_vsi_setup(pf, pf->hw.port_info); + if (!vsi) { + status = -ENOMEM; + goto unroll_vsi_setup; }
- /* tmp_add_list contains a list of MAC addresses for which MAC - * filters need to be programmed. Add the VSI's unicast MAC to - * this list + /* To add a MAC filter, first add the MAC to a list and then + * pass the list to ice_add_mac. */ + + /* Add a unicast MAC filter so the VSI can get its packets */ status = ice_add_mac_to_list(vsi, &tmp_add_list, vsi->port_info->mac.perm_addr); if (status) - goto error_exit; + goto unroll_vsi_setup;
/* VSI needs to receive broadcast traffic, so add the broadcast - * MAC address to the list. + * MAC address to the list as well. */ eth_broadcast_addr(broadcast); status = ice_add_mac_to_list(vsi, &tmp_add_list, broadcast); if (status) - goto error_exit; + goto free_mac_list;
/* program MAC filters for entries in tmp_add_list */ status = ice_add_mac(&pf->hw, &tmp_add_list); if (status) { dev_err(&pf->pdev->dev, "Could not add MAC filters\n"); status = -ENOMEM; - goto error_exit; + goto free_mac_list; }
ice_free_fltr_list(&pf->pdev->dev, &tmp_add_list); return status;
-error_exit: +free_mac_list: ice_free_fltr_list(&pf->pdev->dev, &tmp_add_list);
+unroll_vsi_setup: if (vsi) { ice_vsi_free_q_vectors(vsi); if (vsi->netdev && vsi->netdev->reg_state == NETREG_REGISTERED) @@@ -3405,7 -3097,10 +3405,7 @@@ static void ice_determine_q_usage(struc */ static void ice_deinit_pf(struct ice_pf *pf) { - if (pf->serv_tmr.function) - del_timer_sync(&pf->serv_tmr); - if (pf->serv_task.func) - cancel_work_sync(&pf->serv_task); + ice_service_task_stop(pf); mutex_destroy(&pf->sw_mutex); mutex_destroy(&pf->avail_q_mutex); } @@@ -3612,8 -3307,6 +3612,8 @@@ static int ice_probe(struct pci_dev *pd pf->pdev = pdev; pci_set_drvdata(pdev, pf); set_bit(__ICE_DOWN, pf->state); + /* Disable service task until DOWN bit is cleared */ + set_bit(__ICE_SERVICE_DIS, pf->state);
hw = &pf->hw; hw->hw_addr = pcim_iomap_table(pdev)[ICE_BAR0]; @@@ -3671,9 -3364,6 +3671,9 @@@ goto err_init_interrupt_unroll; }
+ /* Driver is mostly up */ + clear_bit(__ICE_DOWN, pf->state); + /* In case of MSIX we are going to setup the misc vector right here * to handle admin queue events etc. In case of legacy and MSI * the misc functionality and queue processing is combined in @@@ -3696,11 -3386,7 +3696,11 @@@ goto err_msix_misc_unroll; }
- pf->first_sw->bridge_mode = BRIDGE_MODE_VEB; + if (hw->evb_veb) + pf->first_sw->bridge_mode = BRIDGE_MODE_VEB; + else + pf->first_sw->bridge_mode = BRIDGE_MODE_VEPA; + pf->first_sw->pf = pf;
/* record the sw_id available for later use */ @@@ -3713,7 -3399,8 +3713,7 @@@ goto err_alloc_sw_unroll; }
- /* Driver is mostly up */ - clear_bit(__ICE_DOWN, pf->state); + clear_bit(__ICE_SERVICE_DIS, pf->state);
/* since everything is good, start the service timer */ mod_timer(&pf->serv_tmr, round_jiffies(jiffies + pf->serv_tmr_period)); @@@ -3727,7 -3414,6 +3727,7 @@@ return 0;
err_alloc_sw_unroll: + set_bit(__ICE_SERVICE_DIS, pf->state); set_bit(__ICE_DOWN, pf->state); devm_kfree(&pf->pdev->dev, pf->first_sw); err_msix_misc_unroll: @@@ -3750,14 -3436,24 +3750,14 @@@ err_exit_unroll static void ice_remove(struct pci_dev *pdev) { struct ice_pf *pf = pci_get_drvdata(pdev); - int i = 0; - int err;
if (!pf) return;
set_bit(__ICE_DOWN, pf->state); + ice_service_task_stop(pf);
- for (i = 0; i < pf->num_alloc_vsi; i++) { - if (!pf->vsi[i]) - continue; - - err = ice_vsi_release(pf->vsi[i]); - if (err) - dev_dbg(&pf->pdev->dev, "Failed to release VSI index %d (err %d)\n", - i, err); - } - + ice_vsi_release_all(pf); ice_free_irq_msix_misc(pf); ice_clear_interrupt_scheme(pf); ice_deinit_pf(pf); @@@ -3804,7 -3500,7 +3804,7 @@@ static int __init ice_module_init(void pr_info("%s - version %s\n", ice_driver_string, ice_drv_ver); pr_info("%s\n", ice_copyright);
- ice_wq = alloc_ordered_workqueue("%s", WQ_MEM_RECLAIM, KBUILD_MODNAME); + ice_wq = alloc_workqueue("%s", WQ_MEM_RECLAIM, 0, KBUILD_MODNAME); if (!ice_wq) { pr_err("Failed to create workqueue\n"); return -ENOMEM; @@@ -4489,14 -4185,7 +4489,14 @@@ static int ice_vsi_stop_tx_rings(struc } status = ice_dis_vsi_txq(vsi->port_info, vsi->num_txq, q_ids, q_teids, NULL); - if (status) { + /* if the disable queue command was exercised during an active reset + * flow, ICE_ERR_RESET_ONGOING is returned. This is not an error as + * the reset operation disables queues at the hardware level anyway. + */ + if (status == ICE_ERR_RESET_ONGOING) { + dev_dbg(&pf->pdev->dev, + "Reset in progress. LAN Tx queues already disabled\n"); + } else if (status) { dev_err(&pf->pdev->dev, "Failed to disable LAN Tx queues, error: %d\n", status); @@@ -5117,30 -4806,6 +5117,6 @@@ void ice_get_stats64(struct net_device stats->rx_length_errors = vsi_stats->rx_length_errors; }
- #ifdef CONFIG_NET_POLL_CONTROLLER - /** - * ice_netpoll - polling "interrupt" handler - * @netdev: network interface device structure - * - * Used by netconsole to send skbs without having to re-enable interrupts. - * This is not called in the normal interrupt path. - */ - static void ice_netpoll(struct net_device *netdev) - { - struct ice_netdev_priv *np = netdev_priv(netdev); - struct ice_vsi *vsi = np->vsi; - struct ice_pf *pf = vsi->back; - int i; - - if (test_bit(__ICE_DOWN, vsi->state) || - !test_bit(ICE_FLAG_MSIX_ENA, pf->flags)) - return; - - for (i = 0; i < vsi->num_q_vectors; i++) - ice_msix_clean_rings(0, vsi->q_vectors[i]); - } - #endif /* CONFIG_NET_POLL_CONTROLLER */ - /** * ice_napi_disable_all - Disable NAPI for all q_vectors in the VSI * @vsi: VSI having NAPI disabled @@@ -5391,14 -5056,8 +5367,14 @@@ static int ice_vsi_release(struct ice_v if (!vsi->back) return -ENODEV; pf = vsi->back; - - if (vsi->netdev) { + /* do not unregister and free netdevs while driver is in the reset + * recovery pending state. Since reset/rebuild happens through PF + * service task workqueue, its not a good idea to unregister netdev + * that is associated to the PF that is running the work queue items + * currently. This is done to avoid check_flush_dependency() warning + * on this wq + */ + if (vsi->netdev && !ice_is_reset_recovery_pending(pf->state)) { unregister_netdev(vsi->netdev); free_netdev(vsi->netdev); vsi->netdev = NULL; @@@ -5424,40 -5083,12 +5400,40 @@@ pf->q_left_tx += vsi->alloc_txq; pf->q_left_rx += vsi->alloc_rxq;
- ice_vsi_clear(vsi); + /* retain SW VSI data structure since it is needed to unregister and + * free VSI netdev when PF is not in reset recovery pending state, + * for example during rmmod. + */ + if (!ice_is_reset_recovery_pending(pf->state)) + ice_vsi_clear(vsi);
return 0; }
/** + * ice_vsi_release_all - Delete all VSIs + * @pf: PF from which all VSIs are being removed + */ +static void ice_vsi_release_all(struct ice_pf *pf) +{ + int err, i; + + if (!pf->vsi) + return; + + for (i = 0; i < pf->num_alloc_vsi; i++) { + if (!pf->vsi[i]) + continue; + + err = ice_vsi_release(pf->vsi[i]); + if (err) + dev_dbg(&pf->pdev->dev, + "Failed to release pf->vsi[%d], err %d, vsi_num = %d\n", + i, err, pf->vsi[i]->vsi_num); + } +} + +/** * ice_dis_vsi - pause a VSI * @vsi: the VSI being paused */ @@@ -5469,31 -5100,27 +5445,31 @@@ static void ice_dis_vsi(struct ice_vsi set_bit(__ICE_NEEDS_RESTART, vsi->state);
if (vsi->netdev && netif_running(vsi->netdev) && - vsi->type == ICE_VSI_PF) + vsi->type == ICE_VSI_PF) { + rtnl_lock(); vsi->netdev->netdev_ops->ndo_stop(vsi->netdev); - - ice_vsi_close(vsi); + rtnl_unlock(); + } else { + ice_vsi_close(vsi); + } }
/** * ice_ena_vsi - resume a VSI * @vsi: the VSI being resume */ -static void ice_ena_vsi(struct ice_vsi *vsi) +static int ice_ena_vsi(struct ice_vsi *vsi) { - if (!test_and_clear_bit(__ICE_NEEDS_RESTART, vsi->state)) - return; + int err = 0; + + if (test_and_clear_bit(__ICE_NEEDS_RESTART, vsi->state)) + if (vsi->netdev && netif_running(vsi->netdev)) { + rtnl_lock(); + err = vsi->netdev->netdev_ops->ndo_open(vsi->netdev); + rtnl_unlock(); + }
- if (vsi->netdev && netif_running(vsi->netdev)) - vsi->netdev->netdev_ops->ndo_open(vsi->netdev); - else if (ice_vsi_open(vsi)) - /* this clears the DOWN bit */ - dev_dbg(&vsi->back->pdev->dev, "Failed open VSI 0x%04X on switch 0x%04X\n", - vsi->vsi_num, vsi->vsw->sw_id); + return err; }
/** @@@ -5513,47 -5140,13 +5489,47 @@@ static void ice_pf_dis_all_vsi(struct i * ice_pf_ena_all_vsi - Resume all VSIs on a PF * @pf: the PF */ -static void ice_pf_ena_all_vsi(struct ice_pf *pf) +static int ice_pf_ena_all_vsi(struct ice_pf *pf) { int v;
ice_for_each_vsi(pf, v) if (pf->vsi[v]) - ice_ena_vsi(pf->vsi[v]); + if (ice_ena_vsi(pf->vsi[v])) + return -EIO; + + return 0; +} + +/** + * ice_vsi_rebuild_all - rebuild all VSIs in pf + * @pf: the PF + */ +static int ice_vsi_rebuild_all(struct ice_pf *pf) +{ + int i; + + /* loop through pf->vsi array and reinit the VSI if found */ + for (i = 0; i < pf->num_alloc_vsi; i++) { + int err; + + if (!pf->vsi[i]) + continue; + + err = ice_vsi_rebuild(pf->vsi[i]); + if (err) { + dev_err(&pf->pdev->dev, + "VSI at index %d rebuild failed\n", + pf->vsi[i]->idx); + return err; + } + + dev_info(&pf->pdev->dev, + "VSI at index %d rebuilt. vsi_num = 0x%x\n", + pf->vsi[i]->idx, pf->vsi[i]->vsi_num); + } + + return 0; }
/** @@@ -5575,13 -5168,13 +5551,13 @@@ static void ice_rebuild(struct ice_pf * ret = ice_init_all_ctrlq(hw); if (ret) { dev_err(dev, "control queues init failed %d\n", ret); - goto fail_reset; + goto err_init_ctrlq; }
ret = ice_clear_pf_cfg(hw); if (ret) { dev_err(dev, "clear PF configuration failed %d\n", ret); - goto fail_reset; + goto err_init_ctrlq; }
ice_clear_pxe_mode(hw); @@@ -5589,24 -5182,14 +5565,24 @@@ ret = ice_get_caps(hw); if (ret) { dev_err(dev, "ice_get_caps failed %d\n", ret); - goto fail_reset; + goto err_init_ctrlq; }
- /* basic nic switch setup */ - err = ice_setup_pf_sw(pf); + err = ice_sched_init_port(hw->port_info); + if (err) + goto err_sched_init_port; + + err = ice_vsi_rebuild_all(pf); if (err) { - dev_err(dev, "ice_setup_pf_sw failed\n"); - goto fail_reset; + dev_err(dev, "ice_vsi_rebuild_all failed\n"); + goto err_vsi_rebuild; + } + + ret = ice_replay_all_fltr(&pf->hw); + if (ret) { + dev_err(&pf->pdev->dev, + "error replaying switch filter rules\n"); + goto err_vsi_rebuild; }
/* start misc vector */ @@@ -5614,35 -5197,20 +5590,35 @@@ err = ice_req_irq_msix_misc(pf); if (err) { dev_err(dev, "misc vector setup failed: %d\n", err); - goto fail_reset; + goto err_vsi_rebuild; } }
/* restart the VSIs that were rebuilt and running before the reset */ - ice_pf_ena_all_vsi(pf); + err = ice_pf_ena_all_vsi(pf); + if (err) { + dev_err(&pf->pdev->dev, "error enabling VSIs\n"); + /* no need to disable VSIs in tear down path in ice_rebuild() + * since it's already taken care of in ice_vsi_open() + */ + goto err_vsi_rebuild; + }
+ /* if we get here, reset flow is successful */ + clear_bit(__ICE_RESET_FAILED, pf->state); return;
-fail_reset: +err_vsi_rebuild: + ice_vsi_release_all(pf); +err_sched_init_port: + ice_sched_cleanup_all(hw); +err_init_ctrlq: ice_shutdown_all_ctrlq(hw); set_bit(__ICE_RESET_FAILED, pf->state); clear_recovery: - set_bit(__ICE_RESET_RECOVERY_PENDING, pf->state); + /* set this bit in PF state to control service task scheduling */ + set_bit(__ICE_NEEDS_RESTART, pf->state); + dev_err(dev, "Rebuild failed, unload and reload driver\n"); }
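The reworked error path replaces the single fail_reset label with a proper unwind ladder: each label releases exactly what was set up before the failing step, and control falls through the earlier labels. A self-contained sketch of the idiom, with made-up step names:

#include <stdio.h>

static int step_a(void) { return 0; }
static int step_b(void) { return 0; }
static int step_c(void) { return -1; }	/* force a failure for the demo */

static void undo_a(void) { puts("undo a"); }
static void undo_b(void) { puts("undo b"); }

/* Each label unwinds only what succeeded before the failure, mirroring
 * err_vsi_rebuild/err_sched_init_port/err_init_ctrlq above.
 */
static int rebuild(void)
{
	int err;

	err = step_a();
	if (err)
		goto err_a;
	err = step_b();
	if (err)
		goto err_b;
	err = step_c();
	if (err)
		goto err_c;
	return 0;

err_c:
	undo_b();
err_b:
	undo_a();
err_a:
	fprintf(stderr, "rebuild failed: %d\n", err);
	return err;
}

int main(void) { return rebuild() ? 1 : 0; }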
/** @@@ -5798,232 -5366,6 +5774,232 @@@ int ice_get_rss(struct ice_vsi *vsi, u }
/** + * ice_bridge_getlink - Get the hardware bridge mode + * @skb: skb buff + * @pid: process id + * @seq: RTNL message seq + * @dev: the netdev being configured + * @filter_mask: filter mask passed in + * @nlflags: netlink flags passed in + * + * Return the bridge mode (VEB/VEPA) + */ +static int +ice_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq, + struct net_device *dev, u32 filter_mask, int nlflags) +{ + struct ice_netdev_priv *np = netdev_priv(dev); + struct ice_vsi *vsi = np->vsi; + struct ice_pf *pf = vsi->back; + u16 bmode; + + bmode = pf->first_sw->bridge_mode; + + return ndo_dflt_bridge_getlink(skb, pid, seq, dev, bmode, 0, 0, nlflags, + filter_mask, NULL); +} + +/** + * ice_vsi_update_bridge_mode - Update VSI for switching bridge mode (VEB/VEPA) + * @vsi: Pointer to VSI structure + * @bmode: Hardware bridge mode (VEB/VEPA) + * + * Returns 0 on success, negative on failure + */ +static int ice_vsi_update_bridge_mode(struct ice_vsi *vsi, u16 bmode) +{ + struct device *dev = &vsi->back->pdev->dev; + struct ice_aqc_vsi_props *vsi_props; + struct ice_hw *hw = &vsi->back->hw; + struct ice_vsi_ctx ctxt = { 0 }; + enum ice_status status; + + vsi_props = &vsi->info; + ctxt.info = vsi->info; + + if (bmode == BRIDGE_MODE_VEB) + /* change from VEPA to VEB mode */ + ctxt.info.sw_flags |= ICE_AQ_VSI_SW_FLAG_ALLOW_LB; + else + /* change from VEB to VEPA mode */ + ctxt.info.sw_flags &= ~ICE_AQ_VSI_SW_FLAG_ALLOW_LB; + ctxt.vsi_num = vsi->vsi_num; + ctxt.info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_SW_VALID); + status = ice_aq_update_vsi(hw, &ctxt, NULL); + if (status) { + dev_err(dev, "update VSI for bridge mode failed, bmode = %d err %d aq_err %d\n", + bmode, status, hw->adminq.sq_last_status); + return -EIO; + } + /* Update sw flags for book keeping */ + vsi_props->sw_flags = ctxt.info.sw_flags; + + return 0; +} + +/** + * ice_bridge_setlink - Set the hardware bridge mode + * @dev: the netdev being configured + * @nlh: RTNL message + * @flags: bridge setlink flags + * + * Sets the bridge mode (VEB/VEPA) of the switch to which the netdev (VSI) is + * hooked up. Iterates through the PF VSI list and sets the loopback mode (if + * not already set) for all VSIs connected to this switch. It also updates the + * unicast switch filter rules for the corresponding switch of the netdev.
+ */ +static int +ice_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh, + u16 __always_unused flags) +{ + struct ice_netdev_priv *np = netdev_priv(dev); + struct ice_pf *pf = np->vsi->back; + struct nlattr *attr, *br_spec; + struct ice_hw *hw = &pf->hw; + enum ice_status status; + struct ice_sw *pf_sw; + int rem, v, err = 0; + + pf_sw = pf->first_sw; + /* find the attribute in the netlink message */ + br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC); + + nla_for_each_nested(attr, br_spec, rem) { + __u16 mode; + + if (nla_type(attr) != IFLA_BRIDGE_MODE) + continue; + mode = nla_get_u16(attr); + if (mode != BRIDGE_MODE_VEPA && mode != BRIDGE_MODE_VEB) + return -EINVAL; + /* Continue if bridge mode is not being flipped */ + if (mode == pf_sw->bridge_mode) + continue; + /* Iterates through the PF VSI list and update the loopback + * mode of the VSI + */ + ice_for_each_vsi(pf, v) { + if (!pf->vsi[v]) + continue; + err = ice_vsi_update_bridge_mode(pf->vsi[v], mode); + if (err) + return err; + } + + hw->evb_veb = (mode == BRIDGE_MODE_VEB); + /* Update the unicast switch filter rules for the corresponding + * switch of the netdev + */ + status = ice_update_sw_rule_bridge_mode(hw); + if (status) { + netdev_err(dev, "update SW_RULE for bridge mode failed, = %d err %d aq_err %d\n", + mode, status, hw->adminq.sq_last_status); + /* revert hw->evb_veb */ + hw->evb_veb = (pf_sw->bridge_mode == BRIDGE_MODE_VEB); + return -EIO; + } + + pf_sw->bridge_mode = mode; + } + + return 0; +} + +/** + * ice_tx_timeout - Respond to a Tx Hang + * @netdev: network interface device structure + */ +static void ice_tx_timeout(struct net_device *netdev) +{ + struct ice_netdev_priv *np = netdev_priv(netdev); + struct ice_ring *tx_ring = NULL; + struct ice_vsi *vsi = np->vsi; + struct ice_pf *pf = vsi->back; + u32 head, val = 0, i; + int hung_queue = -1; + + pf->tx_timeout_count++; + + /* find the stopped queue the same way the stack does */ + for (i = 0; i < netdev->num_tx_queues; i++) { + struct netdev_queue *q; + unsigned long trans_start; + + q = netdev_get_tx_queue(netdev, i); + trans_start = q->trans_start; + if (netif_xmit_stopped(q) && + time_after(jiffies, + (trans_start + netdev->watchdog_timeo))) { + hung_queue = i; + break; + } + } + + if (i == netdev->num_tx_queues) { + netdev_info(netdev, "tx_timeout: no netdev hung queue found\n"); + } else { + /* now that we have an index, find the tx_ring struct */ + for (i = 0; i < vsi->num_txq; i++) { + if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc) { + if (hung_queue == + vsi->tx_rings[i]->q_index) { + tx_ring = vsi->tx_rings[i]; + break; + } + } + } + } + + /* Reset recovery level if enough time has elapsed after last timeout. + * Also ensure no new reset action happens before next timeout period. 
+ */ + if (time_after(jiffies, (pf->tx_timeout_last_recovery + HZ * 20))) + pf->tx_timeout_recovery_level = 1; + else if (time_before(jiffies, (pf->tx_timeout_last_recovery + + netdev->watchdog_timeo))) + return; + + if (tx_ring) { + head = tx_ring->next_to_clean; + /* Read interrupt register */ + if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags)) + val = rd32(&pf->hw, + GLINT_DYN_CTL(tx_ring->q_vector->v_idx + + tx_ring->vsi->base_vector - 1)); + + netdev_info(netdev, "tx_timeout: VSI_num: %d, Q %d, NTC: 0x%x, HWB: 0x%x, NTU: 0x%x, TAIL: 0x%x, INT: 0x%x\n", + vsi->vsi_num, hung_queue, tx_ring->next_to_clean, + head, tx_ring->next_to_use, + readl(tx_ring->tail), val); + } + + pf->tx_timeout_last_recovery = jiffies; + netdev_info(netdev, "tx_timeout recovery level %d, hung_queue %d\n", + pf->tx_timeout_recovery_level, hung_queue); + + switch (pf->tx_timeout_recovery_level) { + case 1: + set_bit(__ICE_PFR_REQ, pf->state); + break; + case 2: + set_bit(__ICE_CORER_REQ, pf->state); + break; + case 3: + set_bit(__ICE_GLOBR_REQ, pf->state); + break; + default: + netdev_err(netdev, "tx_timeout recovery unsuccessful, device is in unrecoverable state.\n"); + set_bit(__ICE_DOWN, pf->state); + set_bit(__ICE_NEEDS_RESTART, vsi->state); + set_bit(__ICE_SERVICE_DIS, pf->state); + break; + } + + ice_service_task_schedule(pf); + pf->tx_timeout_recovery_level++; +} + +/** * ice_open - Called when a network interface becomes active * @netdev: network interface device structure * @@@ -6041,11 -5383,6 +6017,11 @@@ static int ice_open(struct net_device * struct ice_vsi *vsi = np->vsi; int err;
+ if (test_bit(__ICE_NEEDS_RESTART, vsi->back->state)) { + netdev_err(netdev, "driver needs to be unloaded and reloaded\n"); + return -EIO; + } + netif_carrier_off(netdev);
err = ice_vsi_open(vsi); @@@ -6136,15 -5473,9 +6112,12 @@@ static const struct net_device_ops ice_ .ndo_validate_addr = eth_validate_addr, .ndo_change_mtu = ice_change_mtu, .ndo_get_stats64 = ice_get_stats64, - #ifdef CONFIG_NET_POLL_CONTROLLER - .ndo_poll_controller = ice_netpoll, - #endif /* CONFIG_NET_POLL_CONTROLLER */ .ndo_vlan_rx_add_vid = ice_vlan_rx_add_vid, .ndo_vlan_rx_kill_vid = ice_vlan_rx_kill_vid, .ndo_set_features = ice_set_features, + .ndo_bridge_getlink = ice_bridge_getlink, + .ndo_bridge_setlink = ice_bridge_setlink, .ndo_fdb_add = ice_fdb_add, .ndo_fdb_del = ice_fdb_del, + .ndo_tx_timeout = ice_tx_timeout, }; diff --combined drivers/net/ethernet/intel/igb/igb_main.c index c18e79112cad,0796cef96fa3..0d29df8accd8 --- a/drivers/net/ethernet/intel/igb/igb_main.c +++ b/drivers/net/ethernet/intel/igb/igb_main.c @@@ -205,10 -205,6 +205,6 @@@ static struct notifier_block dca_notifi .priority = 0 }; #endif - #ifdef CONFIG_NET_POLL_CONTROLLER - /* for netdump / net console */ - static void igb_netpoll(struct net_device *); - #endif #ifdef CONFIG_PCI_IOV static unsigned int max_vfs; module_param(max_vfs, uint, 0); @@@ -243,7 -239,7 +239,7 @@@ static struct pci_driver igb_driver =
MODULE_AUTHOR("Intel Corporation, e1000-devel@lists.sourceforge.net"); MODULE_DESCRIPTION("Intel(R) Gigabit Ethernet Network Driver"); -MODULE_LICENSE("GPL"); +MODULE_LICENSE("GPL v2"); MODULE_VERSION(DRV_VERSION);
#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK) @@@ -2881,9 -2877,6 +2877,6 @@@ static const struct net_device_ops igb_ .ndo_set_vf_spoofchk = igb_ndo_set_vf_spoofchk, .ndo_set_vf_trust = igb_ndo_set_vf_trust, .ndo_get_vf_config = igb_ndo_get_vf_config, - #ifdef CONFIG_NET_POLL_CONTROLLER - .ndo_poll_controller = igb_netpoll, - #endif .ndo_fix_features = igb_fix_features, .ndo_set_features = igb_set_features, .ndo_fdb_add = igb_ndo_fdb_add, @@@ -9053,29 -9046,6 +9046,6 @@@ static int igb_pci_sriov_configure(stru return 0; }
- #ifdef CONFIG_NET_POLL_CONTROLLER - /* Polling 'interrupt' - used by things like netconsole to send skbs - * without having to re-enable interrupts. It's not called while - * the interrupt routine is executing. - */ - static void igb_netpoll(struct net_device *netdev) - { - struct igb_adapter *adapter = netdev_priv(netdev); - struct e1000_hw *hw = &adapter->hw; - struct igb_q_vector *q_vector; - int i; - - for (i = 0; i < adapter->num_q_vectors; i++) { - q_vector = adapter->q_vector[i]; - if (adapter->flags & IGB_FLAG_HAS_MSIX) - wr32(E1000_EIMC, q_vector->eims_value); - else - igb_irq_disable(adapter); - napi_schedule(&q_vector->napi); - } - } - #endif /* CONFIG_NET_POLL_CONTROLLER */ - /** * igb_io_error_detected - called when PCI error is detected * @pdev: Pointer to PCI device diff --combined drivers/net/ethernet/intel/ixgb/ixgb_main.c index 4bb89587cd6a,7722153c4ac2..1d4d1686909a --- a/drivers/net/ethernet/intel/ixgb/ixgb_main.c +++ b/drivers/net/ethernet/intel/ixgb/ixgb_main.c @@@ -81,11 -81,6 +81,6 @@@ static int ixgb_vlan_rx_kill_vid(struc __be16 proto, u16 vid); static void ixgb_restore_vlan(struct ixgb_adapter *adapter);
- #ifdef CONFIG_NET_POLL_CONTROLLER - /* for netdump / net console */ - static void ixgb_netpoll(struct net_device *dev); - #endif - static pci_ers_result_t ixgb_io_error_detected (struct pci_dev *pdev, enum pci_channel_state state); static pci_ers_result_t ixgb_io_slot_reset (struct pci_dev *pdev); @@@ -107,7 -102,7 +102,7 @@@ static struct pci_driver ixgb_driver =
MODULE_AUTHOR("Intel Corporation, linux.nics@intel.com"); MODULE_DESCRIPTION("Intel(R) PRO/10GbE Network Driver"); -MODULE_LICENSE("GPL"); +MODULE_LICENSE("GPL v2"); MODULE_VERSION(DRV_VERSION);
#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK) @@@ -348,9 -343,6 +343,6 @@@ static const struct net_device_ops ixgb .ndo_tx_timeout = ixgb_tx_timeout, .ndo_vlan_rx_add_vid = ixgb_vlan_rx_add_vid, .ndo_vlan_rx_kill_vid = ixgb_vlan_rx_kill_vid, - #ifdef CONFIG_NET_POLL_CONTROLLER - .ndo_poll_controller = ixgb_netpoll, - #endif .ndo_fix_features = ixgb_fix_features, .ndo_set_features = ixgb_set_features, }; @@@ -2195,23 -2187,6 +2187,6 @@@ ixgb_restore_vlan(struct ixgb_adapter * ixgb_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), vid); }
- #ifdef CONFIG_NET_POLL_CONTROLLER - /* - * Polling 'interrupt' - used by things like netconsole to send skbs - * without having to re-enable interrupts. It's not called while - * the interrupt routine is executing. - */ - - static void ixgb_netpoll(struct net_device *dev) - { - struct ixgb_adapter *adapter = netdev_priv(dev); - - disable_irq(adapter->pdev->irq); - ixgb_intr(adapter->pdev->irq, dev); - enable_irq(adapter->pdev->irq); - } - #endif - /** * ixgb_io_error_detected - called when PCI error is detected * @pdev: pointer to pci device with error diff --combined drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index 27a8546c88b2,f27d73a7bf16..140e87a10ff5 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@@ -159,7 -159,7 +159,7 @@@ MODULE_PARM_DESC(debug, "Debug level (0
MODULE_AUTHOR("Intel Corporation, linux.nics@intel.com"); MODULE_DESCRIPTION("Intel(R) 10 Gigabit PCI Express Network Driver"); -MODULE_LICENSE("GPL"); +MODULE_LICENSE("GPL v2"); MODULE_VERSION(DRV_VERSION);
static struct workqueue_struct *ixgbe_wq; @@@ -7775,33 -7775,6 +7775,33 @@@ static void ixgbe_reset_subtask(struct }
/** + * ixgbe_check_fw_error - Check firmware for errors + * @adapter: the adapter private structure + * + * Check firmware errors in register FWSM + */ +static bool ixgbe_check_fw_error(struct ixgbe_adapter *adapter) +{ + struct ixgbe_hw *hw = &adapter->hw; + u32 fwsm; + + /* read fwsm.ext_err_ind register and log errors */ + fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM(hw)); + + if (fwsm & IXGBE_FWSM_EXT_ERR_IND_MASK || + !(fwsm & IXGBE_FWSM_FW_VAL_BIT)) + e_dev_warn("Warning firmware error detected FWSM: 0x%08X\n", + fwsm); + + if (hw->mac.ops.fw_recovery_mode && hw->mac.ops.fw_recovery_mode(hw)) { + e_dev_err("Firmware recovery mode detected. Limiting functionality. Refer to the Intel(R) Ethernet Adapters and Devices User Guide for details on firmware recovery mode.\n"); + return true; + } + + return false; +} + +/** * ixgbe_service_task - manages and runs subtasks * @work: pointer to work_struct containing our data **/ @@@ -7819,15 -7792,6 +7819,15 @@@ static void ixgbe_service_task(struct w ixgbe_service_event_complete(adapter); return; } + if (ixgbe_check_fw_error(adapter)) { + if (!test_bit(__IXGBE_DOWN, &adapter->state)) { + rtnl_lock(); + unregister_netdev(adapter->netdev); + rtnl_unlock(); + } + ixgbe_service_event_complete(adapter); + return; + } if (adapter->flags2 & IXGBE_FLAG2_UDP_TUN_REREG_NEEDED) { rtnl_lock(); adapter->flags2 &= ~IXGBE_FLAG2_UDP_TUN_REREG_NEEDED; @@@ -8804,28 -8768,6 +8804,6 @@@ static int ixgbe_del_sanmac_netdev(stru return err; }
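ixgbe_check_fw_error() turns a firmware fault into an early exit from the service task, unregistering the netdev so the stack stops driving dead hardware. A kernel-style sketch of that detach step; note that unregister_netdevice() is the variant intended for callers already holding the RTNL, while plain unregister_netdev() takes the lock by itself:

#include <linux/netdevice.h>
#include <linux/rtnetlink.h>

/* Illustrative only: detach a netdev whose firmware is unrecoverable. */
static void example_detach_on_fw_error(struct net_device *netdev)
{
	rtnl_lock();
	unregister_netdevice(netdev);	/* RTNL already held here */
	rtnl_unlock();
}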
- #ifdef CONFIG_NET_POLL_CONTROLLER - /* - * Polling 'interrupt' - used by things like netconsole to send skbs - * without having to re-enable interrupts. It's not called while - * the interrupt routine is executing. - */ - static void ixgbe_netpoll(struct net_device *netdev) - { - struct ixgbe_adapter *adapter = netdev_priv(netdev); - int i; - - /* if interface is down do nothing */ - if (test_bit(__IXGBE_DOWN, &adapter->state)) - return; - - /* loop through and schedule all active queues */ - for (i = 0; i < adapter->num_q_vectors; i++) - ixgbe_msix_clean_rings(0, adapter->q_vector[i]); - } - - #endif - static void ixgbe_get_ring_stats64(struct rtnl_link_stats64 *stats, struct ixgbe_ring *ring) { @@@ -10287,9 -10229,6 +10265,6 @@@ static const struct net_device_ops ixgb .ndo_get_vf_config = ixgbe_ndo_get_vf_config, .ndo_get_stats64 = ixgbe_get_stats64, .ndo_setup_tc = __ixgbe_setup_tc, - #ifdef CONFIG_NET_POLL_CONTROLLER - .ndo_poll_controller = ixgbe_netpoll, - #endif #ifdef IXGBE_FCOE .ndo_select_queue = ixgbe_select_queue, .ndo_fcoe_ddp_setup = ixgbe_fcoe_ddp_get, @@@ -10752,11 -10691,6 +10727,11 @@@ skip_sriov if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) netdev->features |= NETIF_F_LRO;
+ if (ixgbe_check_fw_error(adapter)) { + err = -EIO; + goto err_sw_init; + } + /* make sure the EEPROM is good */ if (hw->eeprom.ops.validate_checksum(hw, NULL) < 0) { e_dev_err("The EEPROM Checksum Is Not Valid\n"); diff --combined drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c index ba6562e8cd73,5a228582423b..98707ee11d72 --- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c +++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c @@@ -40,7 -40,7 +40,7 @@@ static const char ixgbevf_driver_string #define DRV_VERSION "4.1.0-k" const char ixgbevf_driver_version[] = DRV_VERSION; static char ixgbevf_copyright[] = - "Copyright (c) 2009 - 2015 Intel Corporation."; + "Copyright (c) 2009 - 2018 Intel Corporation.";
static const struct ixgbevf_info *ixgbevf_info_tbl[] = { [board_82599_vf] = &ixgbevf_82599_vf_info, @@@ -79,7 -79,7 +79,7 @@@ MODULE_DEVICE_TABLE(pci, ixgbevf_pci_tb
MODULE_AUTHOR("Intel Corporation, linux.nics@intel.com"); MODULE_DESCRIPTION("Intel(R) 10 Gigabit Virtual Function Network Driver"); -MODULE_LICENSE("GPL"); +MODULE_LICENSE("GPL v2"); MODULE_VERSION(DRV_VERSION);
#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK) @@@ -268,7 -268,7 +268,7 @@@ static bool ixgbevf_clean_tx_irq(struc struct ixgbevf_adapter *adapter = q_vector->adapter; struct ixgbevf_tx_buffer *tx_buffer; union ixgbe_adv_tx_desc *tx_desc; - unsigned int total_bytes = 0, total_packets = 0; + unsigned int total_bytes = 0, total_packets = 0, total_ipsec = 0; unsigned int budget = tx_ring->count / 2; unsigned int i = tx_ring->next_to_clean;
@@@ -299,8 -299,6 +299,8 @@@ /* update the statistics for this packet */ total_bytes += tx_buffer->bytecount; total_packets += tx_buffer->gso_segs; + if (tx_buffer->tx_flags & IXGBE_TX_FLAGS_IPSEC) + total_ipsec++;
/* free the skb */ if (ring_is_xdp(tx_ring)) @@@ -363,7 -361,6 +363,7 @@@ u64_stats_update_end(&tx_ring->syncp); q_vector->tx.total_bytes += total_bytes; q_vector->tx.total_packets += total_packets; + adapter->tx_ipsec += total_ipsec;
if (check_for_tx_hang(tx_ring) && ixgbevf_check_tx_hang(tx_ring)) { struct ixgbe_hw *hw = &adapter->hw; @@@ -519,9 -516,6 +519,9 @@@ static void ixgbevf_process_skb_fields( __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid); }
+ if (ixgbevf_test_staterr(rx_desc, IXGBE_RXDADV_STAT_SECP)) + ixgbevf_ipsec_rx(rx_ring, rx_desc, skb); + skb->protocol = eth_type_trans(skb, rx_ring->netdev); }
@@@ -1018,7 -1012,7 +1018,7 @@@ static int ixgbevf_xmit_xdp_ring(struc context_desc = IXGBEVF_TX_CTXTDESC(ring, 0); context_desc->vlan_macip_lens = cpu_to_le32(ETH_HLEN << IXGBE_ADVTXD_MACLEN_SHIFT); - context_desc->seqnum_seed = 0; + context_desc->fceof_saidx = 0; context_desc->type_tucmd_mlhl = cpu_to_le32(IXGBE_TXD_CMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT); @@@ -2206,7 -2200,6 +2206,7 @@@ static void ixgbevf_configure(struct ix ixgbevf_set_rx_mode(adapter->netdev);
ixgbevf_restore_vlan(adapter); + ixgbevf_ipsec_restore(adapter);
ixgbevf_configure_tx(adapter); ixgbevf_configure_rx(adapter); @@@ -2253,8 -2246,7 +2253,8 @@@ static void ixgbevf_init_last_counter_s static void ixgbevf_negotiate_api(struct ixgbevf_adapter *adapter) { struct ixgbe_hw *hw = &adapter->hw; - int api[] = { ixgbe_mbox_api_13, + int api[] = { ixgbe_mbox_api_14, + ixgbe_mbox_api_13, ixgbe_mbox_api_12, ixgbe_mbox_api_11, ixgbe_mbox_api_10, @@@ -2613,7 -2605,6 +2613,7 @@@ static void ixgbevf_set_num_queues(stru case ixgbe_mbox_api_11: case ixgbe_mbox_api_12: case ixgbe_mbox_api_13: + case ixgbe_mbox_api_14: if (adapter->xdp_prog && hw->mac.max_tx_queues == rss) rss = rss > 3 ? 2 : 1; @@@ -3709,8 -3700,8 +3709,8 @@@ static void ixgbevf_queue_reset_subtask }
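Adding ixgbe_mbox_api_14 at the head of the api[] table above is enough to prefer it, because negotiation walks the list from newest to oldest and settles on the first version the PF acknowledges. A standalone sketch of that walk, with a stub standing in for the mailbox round trip (here the "PF" speaks nothing newer than API 13):

#include <stdio.h>

/* Hypothetical stand-in for the mailbox negotiation round trip */
static int negotiate_api_version(int version)
{
	return version <= 13 ? 0 : -1;
}

int main(void)
{
	/* Newest first, mirroring the api[] table in the hunk above */
	static const int api[] = { 14, 13, 12, 11, 10, -1 };
	int i = 0;

	while (api[i] != -1) {
		if (!negotiate_api_version(api[i])) {
			printf("negotiated mbox API %d\n", api[i]);
			return 0;
		}
		i++;
	}
	puts("no common mailbox API");
	return 1;
}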
static void ixgbevf_tx_ctxtdesc(struct ixgbevf_ring *tx_ring, - u32 vlan_macip_lens, u32 type_tucmd, - u32 mss_l4len_idx) + u32 vlan_macip_lens, u32 fceof_saidx, + u32 type_tucmd, u32 mss_l4len_idx) { struct ixgbe_adv_tx_context_desc *context_desc; u16 i = tx_ring->next_to_use; @@@ -3724,15 -3715,14 +3724,15 @@@ type_tucmd |= IXGBE_TXD_CMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT;
context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens); - context_desc->seqnum_seed = 0; + context_desc->fceof_saidx = cpu_to_le32(fceof_saidx); context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd); context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx); }
static int ixgbevf_tso(struct ixgbevf_ring *tx_ring, struct ixgbevf_tx_buffer *first, - u8 *hdr_len) + u8 *hdr_len, + struct ixgbevf_ipsec_tx_data *itd) { u32 vlan_macip_lens, type_tucmd, mss_l4len_idx; struct sk_buff *skb = first->skb; @@@ -3746,7 -3736,6 +3746,7 @@@ unsigned char *hdr; } l4; u32 paylen, l4_offset; + u32 fceof_saidx = 0; int err;
if (skb->ip_summed != CHECKSUM_PARTIAL) @@@ -3772,15 -3761,13 +3772,15 @@@ if (ip.v4->version == 4) { unsigned char *csum_start = skb_checksum_start(skb); unsigned char *trans_start = ip.hdr + (ip.v4->ihl * 4); + int len = csum_start - trans_start;
/* IP header will have to cancel out any data that - * is not a part of the outer IP header + * is not a part of the outer IP header, so set to + * a reverse csum if needed, else init check to 0. */ - ip.v4->check = csum_fold(csum_partial(trans_start, - csum_start - trans_start, - 0)); + ip.v4->check = (skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL) ? + csum_fold(csum_partial(trans_start, + len, 0)) : 0; type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4;
ip.v4->tot_len = 0; @@@ -3812,16 -3799,13 +3812,16 @@@ mss_l4len_idx |= skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT; mss_l4len_idx |= (1u << IXGBE_ADVTXD_IDX_SHIFT);
+ fceof_saidx |= itd->pfsa; + type_tucmd |= itd->flags | itd->trailer_len; + /* vlan_macip_lens: HEADLEN, MACLEN, VLAN tag */ vlan_macip_lens = l4.hdr - ip.hdr; vlan_macip_lens |= (ip.hdr - skb->data) << IXGBE_ADVTXD_MACLEN_SHIFT; vlan_macip_lens |= first->tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;
- ixgbevf_tx_ctxtdesc(tx_ring, vlan_macip_lens, - type_tucmd, mss_l4len_idx); + ixgbevf_tx_ctxtdesc(tx_ring, vlan_macip_lens, fceof_saidx, type_tucmd, + mss_l4len_idx);
return 1; } @@@ -3836,12 -3820,10 +3836,12 @@@ static inline bool ixgbevf_ipv6_csum_is }
static void ixgbevf_tx_csum(struct ixgbevf_ring *tx_ring, - struct ixgbevf_tx_buffer *first) + struct ixgbevf_tx_buffer *first, + struct ixgbevf_ipsec_tx_data *itd) { struct sk_buff *skb = first->skb; u32 vlan_macip_lens = 0; + u32 fceof_saidx = 0; u32 type_tucmd = 0;
if (skb->ip_summed != CHECKSUM_PARTIAL) @@@ -3867,10 -3849,6 +3867,10 @@@ skb_checksum_help(skb); goto no_csum; } + + if (first->protocol == htons(ETH_P_IP)) + type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4; + /* update TX checksum flag */ first->tx_flags |= IXGBE_TX_FLAGS_CSUM; vlan_macip_lens = skb_checksum_start_offset(skb) - @@@ -3880,11 -3858,7 +3880,11 @@@ no_csum vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT; vlan_macip_lens |= first->tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;
- ixgbevf_tx_ctxtdesc(tx_ring, vlan_macip_lens, type_tucmd, 0); + fceof_saidx |= itd->pfsa; + type_tucmd |= itd->flags | itd->trailer_len; + + ixgbevf_tx_ctxtdesc(tx_ring, vlan_macip_lens, + fceof_saidx, type_tucmd, 0); }
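With IPsec offload, both the TSO and checksum paths now carry an SA index and trailer length into the Tx context descriptor, and the descriptor's second word is renamed from seqnum_seed to fceof_saidx; judging by the name, the same 32 bits serve the FCoE and IPsec offloads. A reduced, self-contained sketch of the descriptor write, using plain uint32_t where the driver uses __le32 (field layout here is illustrative, not authoritative):

#include <stdint.h>
#include <stdio.h>

/* Same four 32-bit words as the advanced Tx context descriptor */
struct adv_tx_context_desc {
	uint32_t vlan_macip_lens;
	uint32_t fceof_saidx;
	uint32_t type_tucmd_mlhl;
	uint32_t mss_l4len_idx;
};

static void tx_ctxtdesc(struct adv_tx_context_desc *d,
			uint32_t vlan_macip_lens, uint32_t fceof_saidx,
			uint32_t type_tucmd, uint32_t mss_l4len_idx)
{
	d->vlan_macip_lens = vlan_macip_lens;
	d->fceof_saidx = fceof_saidx;	/* cpu_to_le32() in the driver */
	d->type_tucmd_mlhl = type_tucmd;
	d->mss_l4len_idx = mss_l4len_idx;
}

int main(void)
{
	struct adv_tx_context_desc d;

	tx_ctxtdesc(&d, 0, 0x10 /* SA index, as in itd->pfsa */, 0, 0);
	printf("fceof_saidx = 0x%x\n", (unsigned int)d.fceof_saidx);
	return 0;
}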
static __le32 ixgbevf_tx_cmd_type(u32 tx_flags) @@@ -3918,12 -3892,8 +3918,12 @@@ static void ixgbevf_tx_olinfo_status(un if (tx_flags & IXGBE_TX_FLAGS_IPV4) olinfo_status |= cpu_to_le32(IXGBE_ADVTXD_POPTS_IXSM);
- /* use index 1 context for TSO/FSO/FCOE */ - if (tx_flags & IXGBE_TX_FLAGS_TSO) + /* enable IPsec */ + if (tx_flags & IXGBE_TX_FLAGS_IPSEC) + olinfo_status |= cpu_to_le32(IXGBE_ADVTXD_POPTS_IPSEC); + + /* use index 1 context for TSO/FSO/FCOE/IPSEC */ + if (tx_flags & (IXGBE_TX_FLAGS_TSO | IXGBE_TX_FLAGS_IPSEC)) olinfo_status |= cpu_to_le32(1u << IXGBE_ADVTXD_IDX_SHIFT);
/* Check Context must be set if Tx switch is enabled, which it @@@ -4105,7 -4075,6 +4105,7 @@@ static int ixgbevf_xmit_frame_ring(stru int tso; u32 tx_flags = 0; u16 count = TXD_USE_COUNT(skb_headlen(skb)); + struct ixgbevf_ipsec_tx_data ipsec_tx = { 0 }; #if PAGE_SIZE > IXGBE_MAX_DATA_PER_TXD unsigned short f; #endif @@@ -4150,15 -4119,11 +4150,15 @@@ first->tx_flags = tx_flags; first->protocol = vlan_get_protocol(skb);
- tso = ixgbevf_tso(tx_ring, first, &hdr_len); +#ifdef CONFIG_XFRM_OFFLOAD + if (skb->sp && !ixgbevf_ipsec_tx(tx_ring, first, &ipsec_tx)) + goto out_drop; +#endif + tso = ixgbevf_tso(tx_ring, first, &hdr_len, &ipsec_tx); if (tso < 0) goto out_drop; else if (!tso) - ixgbevf_tx_csum(tx_ring, first); + ixgbevf_tx_csum(tx_ring, first, &ipsec_tx);
ixgbevf_tx_map(tx_ring, first, hdr_len);
@@@ -4268,24 -4233,6 +4268,6 @@@ static int ixgbevf_change_mtu(struct ne return 0; }
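The transmit hook a few hunks above gates all IPsec work on skb->sp, the secpath, and only under CONFIG_XFRM_OFFLOAD, so non-IPsec traffic and kernels built without XFRM offload pay no cost. A kernel-style sketch of the gate, with hypothetical helper and type names standing in for ixgbevf_ipsec_tx_data and ixgbevf_ipsec_tx():

#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/types.h>

/* Hypothetical per-packet IPsec Tx metadata */
struct example_ipsec_tx_data { u32 pfsa; };

/* Hypothetical helper: look up the SA, fill index/flags/trailer length;
 * returns nonzero on failure in this sketch.
 */
static int example_prepare_ipsec(struct sk_buff *skb,
				 struct example_ipsec_tx_data *itd)
{
	return 0;
}

static netdev_tx_t example_xmit(struct sk_buff *skb)
{
	struct example_ipsec_tx_data ipsec_tx = { 0 };

#ifdef CONFIG_XFRM_OFFLOAD
	/* Only packets carrying a secpath need IPsec offload state */
	if (skb->sp && example_prepare_ipsec(skb, &ipsec_tx)) {
		dev_kfree_skb_any(skb);	/* drop on SA lookup failure */
		return NETDEV_TX_OK;
	}
#endif
	/* ...TSO/checksum context setup then consumes &ipsec_tx... */
	return NETDEV_TX_OK;
}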
- #ifdef CONFIG_NET_POLL_CONTROLLER - /* Polling 'interrupt' - used by things like netconsole to send skbs - * without having to re-enable interrupts. It's not called while - * the interrupt routine is executing. - */ - static void ixgbevf_netpoll(struct net_device *netdev) - { - struct ixgbevf_adapter *adapter = netdev_priv(netdev); - int i; - - /* if interface is down do nothing */ - if (test_bit(__IXGBEVF_DOWN, &adapter->state)) - return; - for (i = 0; i < adapter->num_rx_queues; i++) - ixgbevf_msix_clean_rings(0, adapter->q_vector[i]); - } - #endif /* CONFIG_NET_POLL_CONTROLLER */ - static int ixgbevf_suspend(struct pci_dev *pdev, pm_message_t state) { struct net_device *netdev = pci_get_drvdata(pdev); @@@ -4517,9 -4464,6 +4499,6 @@@ static const struct net_device_ops ixgb .ndo_tx_timeout = ixgbevf_tx_timeout, .ndo_vlan_rx_add_vid = ixgbevf_vlan_rx_add_vid, .ndo_vlan_rx_kill_vid = ixgbevf_vlan_rx_kill_vid, - #ifdef CONFIG_NET_POLL_CONTROLLER - .ndo_poll_controller = ixgbevf_netpoll, - #endif .ndo_features_check = ixgbevf_features_check, .ndo_bpf = ixgbevf_xdp, }; @@@ -4669,7 -4613,6 +4648,7 @@@ static int ixgbevf_probe(struct pci_de case ixgbe_mbox_api_11: case ixgbe_mbox_api_12: case ixgbe_mbox_api_13: + case ixgbe_mbox_api_14: netdev->max_mtu = IXGBE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN); break; @@@ -4705,7 -4648,6 +4684,7 @@@
pci_set_drvdata(pdev, netdev); netif_carrier_off(netdev); + ixgbevf_init_ipsec_offload(adapter);
ixgbevf_init_last_counter_stats(adapter);
@@@ -4772,7 -4714,6 +4751,7 @@@ static void ixgbevf_remove(struct pci_d if (netdev->reg_state == NETREG_REGISTERED) unregister_netdev(netdev);
+ ixgbevf_stop_ipsec_offload(adapter); ixgbevf_clear_interrupt_scheme(adapter); ixgbevf_reset_interrupt_capability(adapter);
diff --combined drivers/net/ethernet/marvell/mvneta.c index 59ed63102e14,b4ed7d394d07..a66f8f8552e4 --- a/drivers/net/ethernet/marvell/mvneta.c +++ b/drivers/net/ethernet/marvell/mvneta.c @@@ -1890,8 -1890,8 +1890,8 @@@ static void mvneta_rxq_drop_pkts(struc if (!data || !(rx_desc->buf_phys_addr)) continue;
- dma_unmap_single(pp->dev->dev.parent, rx_desc->buf_phys_addr, - MVNETA_RX_BUF_SIZE(pp->pkt_size), DMA_FROM_DEVICE); + dma_unmap_page(pp->dev->dev.parent, rx_desc->buf_phys_addr, + PAGE_SIZE, DMA_FROM_DEVICE); __free_page(data); } } @@@ -2008,8 -2008,8 +2008,8 @@@ static int mvneta_rx_swbm(struct napi_s skb_add_rx_frag(rxq->skb, frag_num, page, frag_offset, frag_size, PAGE_SIZE); - dma_unmap_single(dev->dev.parent, phys_addr, - PAGE_SIZE, DMA_FROM_DEVICE); + dma_unmap_page(dev->dev.parent, phys_addr, + PAGE_SIZE, DMA_FROM_DEVICE); rxq->left_size -= frag_size; } } else { @@@ -2039,9 -2039,8 +2039,8 @@@ frag_offset, frag_size, PAGE_SIZE);
- dma_unmap_single(dev->dev.parent, phys_addr, - PAGE_SIZE, - DMA_FROM_DEVICE); + dma_unmap_page(dev->dev.parent, phys_addr, + PAGE_SIZE, DMA_FROM_DEVICE);
rxq->left_size -= frag_size; } @@@ -2065,7 -2064,10 +2064,7 @@@ /* Linux processing */ rxq->skb->protocol = eth_type_trans(rxq->skb, dev);
- if (dev->features & NETIF_F_GRO) - napi_gro_receive(napi, rxq->skb); - else - netif_receive_skb(rxq->skb); + napi_gro_receive(napi, rxq->skb);
/* clean uncomplete skb pointer in queue */ rxq->skb = NULL; @@@ -2393,7 -2395,7 +2392,7 @@@ error }
/* Main tx processing */ -static int mvneta_tx(struct sk_buff *skb, struct net_device *dev) +static netdev_tx_t mvneta_tx(struct sk_buff *skb, struct net_device *dev) { struct mvneta_port *pp = netdev_priv(dev); u16 txq_id = skb_get_queue_mapping(skb); @@@ -2507,13 -2509,12 +2506,13 @@@ static void mvneta_tx_done_gbe(struct m { struct mvneta_tx_queue *txq; struct netdev_queue *nq; + int cpu = smp_processor_id();
while (cause_tx_done) { txq = mvneta_tx_done_policy(pp, cause_tx_done);
nq = netdev_get_tx_queue(pp->dev, txq->id); - __netif_tx_lock(nq, smp_processor_id()); + __netif_tx_lock(nq, cpu);
if (txq->count) mvneta_txq_done(pp, txq); @@@ -3791,6 -3792,9 +3790,6 @@@ static int mvneta_open(struct net_devic goto err_free_online_hp; }
- /* In default link is down */ - netif_carrier_off(pp->dev); - ret = mvneta_mdio_probe(pp); if (ret < 0) { netdev_err(dev, "cannot probe MDIO bus\n"); @@@ -4593,8 -4597,7 +4592,8 @@@ static int mvneta_probe(struct platform } }
- dev->features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_TSO; + dev->features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | + NETIF_F_TSO | NETIF_F_RXCSUM; dev->hw_features |= dev->features; dev->vlan_features |= dev->features; dev->priv_flags |= IFF_LIVE_ADDR_CHANGE; diff --combined drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c index c2ed71788e4f,38cc01beea79..2373cd41a625 --- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c @@@ -82,19 -82,13 +82,19 @@@ u32 mvpp2_read(struct mvpp2 *priv, u32 return readl(priv->swth_base[0] + offset); }
-u32 mvpp2_read_relaxed(struct mvpp2 *priv, u32 offset) +static u32 mvpp2_read_relaxed(struct mvpp2 *priv, u32 offset) { return readl_relaxed(priv->swth_base[0] + offset); } + +static inline u32 mvpp2_cpu_to_thread(struct mvpp2 *priv, int cpu) +{ + return cpu % priv->nthreads; +} + /* These accessors should be used to access: * - * - per-CPU registers, where each CPU has its own copy of the + * - per-thread registers, where each thread has its own copy of the * register. * * MVPP2_BM_VIRT_ALLOC_REG @@@ -110,8 -104,8 +110,8 @@@ * MVPP2_TXQ_SENT_REG * MVPP2_RXQ_NUM_REG * - * - global registers that must be accessed through a specific CPU - * window, because they are related to an access to a per-CPU + * - global registers that must be accessed through a specific thread + * window, because they are related to an access to a per-thread * register * * MVPP2_BM_PHY_ALLOC_REG (related to MVPP2_BM_VIRT_ALLOC_REG) @@@ -128,28 -122,28 +128,28 @@@ * MVPP2_TXQ_PREF_BUF_REG (related to MVPP2_TXQ_NUM_REG) * MVPP2_TXQ_PREF_BUF_REG (related to MVPP2_TXQ_NUM_REG) */ -void mvpp2_percpu_write(struct mvpp2 *priv, int cpu, +static void mvpp2_thread_write(struct mvpp2 *priv, unsigned int thread, u32 offset, u32 data) { - writel(data, priv->swth_base[cpu] + offset); + writel(data, priv->swth_base[thread] + offset); }
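The renamed accessors make the unit of access explicit: these registers are per s/w thread, not per CPU, and mvpp2_cpu_to_thread() folds the running CPU onto the available threads with a simple modulo. A standalone sketch of the mapping, showing how CPUs wrap once they outnumber threads (which is what later motivates the per-thread locks):

#include <stdio.h>

/* CPUs beyond the number of usable register windows wrap around, so
 * two CPUs can end up sharing a window.
 */
static unsigned int cpu_to_thread(unsigned int nthreads, unsigned int cpu)
{
	return cpu % nthreads;
}

int main(void)
{
	unsigned int nthreads = 4;	/* assumed thread count for the demo */
	unsigned int cpu;

	for (cpu = 0; cpu < 6; cpu++)
		printf("cpu %u -> thread %u\n",
		       cpu, cpu_to_thread(nthreads, cpu));
	return 0;
}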
-u32 mvpp2_percpu_read(struct mvpp2 *priv, int cpu, +static u32 mvpp2_thread_read(struct mvpp2 *priv, unsigned int thread, u32 offset) { - return readl(priv->swth_base[cpu] + offset); + return readl(priv->swth_base[thread] + offset); }
-void mvpp2_percpu_write_relaxed(struct mvpp2 *priv, int cpu, +static void mvpp2_thread_write_relaxed(struct mvpp2 *priv, unsigned int thread, u32 offset, u32 data) { - writel_relaxed(data, priv->swth_base[cpu] + offset); + writel_relaxed(data, priv->swth_base[thread] + offset); }
-static u32 mvpp2_percpu_read_relaxed(struct mvpp2 *priv, int cpu, +static u32 mvpp2_thread_read_relaxed(struct mvpp2 *priv, unsigned int thread, u32 offset) { - return readl_relaxed(priv->swth_base[cpu] + offset); + return readl_relaxed(priv->swth_base[thread] + offset); }
static dma_addr_t mvpp2_txdesc_dma_addr_get(struct mvpp2_port *port, @@@ -391,17 -385,17 +391,17 @@@ static void mvpp2_bm_bufs_get_addrs(str dma_addr_t *dma_addr, phys_addr_t *phys_addr) { - int cpu = get_cpu(); + unsigned int thread = mvpp2_cpu_to_thread(priv, get_cpu());
- *dma_addr = mvpp2_percpu_read(priv, cpu, + *dma_addr = mvpp2_thread_read(priv, thread, MVPP2_BM_PHY_ALLOC_REG(bm_pool->id)); - *phys_addr = mvpp2_percpu_read(priv, cpu, MVPP2_BM_VIRT_ALLOC_REG); + *phys_addr = mvpp2_thread_read(priv, thread, MVPP2_BM_VIRT_ALLOC_REG);
if (priv->hw_version == MVPP22) { u32 val; u32 dma_addr_highbits, phys_addr_highbits;
- val = mvpp2_percpu_read(priv, cpu, MVPP22_BM_ADDR_HIGH_ALLOC); + val = mvpp2_thread_read(priv, thread, MVPP22_BM_ADDR_HIGH_ALLOC); dma_addr_highbits = (val & MVPP22_BM_ADDR_HIGH_PHYS_MASK); phys_addr_highbits = (val & MVPP22_BM_ADDR_HIGH_VIRT_MASK) >> MVPP22_BM_ADDR_HIGH_VIRT_SHIFT; @@@ -632,11 -626,7 +632,11 @@@ static inline void mvpp2_bm_pool_put(st dma_addr_t buf_dma_addr, phys_addr_t buf_phys_addr) { - int cpu = get_cpu(); + unsigned int thread = mvpp2_cpu_to_thread(port->priv, get_cpu()); + unsigned long flags = 0; + + if (test_bit(thread, &port->priv->lock_map)) + spin_lock_irqsave(&port->bm_lock[thread], flags);
if (port->priv->hw_version == MVPP22) { u32 val = 0; @@@ -650,7 -640,7 +650,7 @@@ << MVPP22_BM_ADDR_HIGH_VIRT_RLS_SHIFT) & MVPP22_BM_ADDR_HIGH_VIRT_RLS_MASK;
- mvpp2_percpu_write_relaxed(port->priv, cpu, + mvpp2_thread_write_relaxed(port->priv, thread, MVPP22_BM_ADDR_HIGH_RLS_REG, val); }
@@@ -659,14 -649,11 +659,14 @@@ * descriptor. Instead of storing the virtual address, we * store the physical address */ - mvpp2_percpu_write_relaxed(port->priv, cpu, + mvpp2_thread_write_relaxed(port->priv, thread, MVPP2_BM_VIRT_RLS_REG, buf_phys_addr); - mvpp2_percpu_write_relaxed(port->priv, cpu, + mvpp2_thread_write_relaxed(port->priv, thread, MVPP2_BM_PHY_RLS_REG(pool), buf_dma_addr);
+ if (test_bit(thread, &port->priv->lock_map)) + spin_unlock_irqrestore(&port->bm_lock[thread], flags); + put_cpu(); }
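Because two CPUs can now map to the same thread, the buffer-manager put path brackets its per-thread register writes with a spinlock, but only when that thread is flagged as shared in lock_map; with one CPU per thread the fast path stays lock-free. A kernel-style sketch of the conditional guard, with illustrative types and sizes (locks assumed initialized with spin_lock_init()):

#include <linux/bitops.h>
#include <linux/spinlock.h>

struct example_port {
	spinlock_t lock[8];		/* one lock per s/w thread */
	unsigned long lock_map;		/* bit set = thread shared by CPUs */
};

static void example_locked_access(struct example_port *port,
				  unsigned int thread)
{
	unsigned long flags = 0;

	if (test_bit(thread, &port->lock_map))
		spin_lock_irqsave(&port->lock[thread], flags);

	/* ...per-thread register writes go here... */

	if (test_bit(thread, &port->lock_map))
		spin_unlock_irqrestore(&port->lock[thread], flags);
}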
@@@ -899,7 -886,7 +899,7 @@@ static inline void mvpp2_qvec_interrupt MVPP2_ISR_DISABLE_INTERRUPT(qvec->sw_thread_mask)); }
-/* Mask the current CPU's Rx/Tx interrupts +/* Mask the current thread's Rx/Tx interrupts * Called by on_each_cpu(), guaranteed to run with migration disabled, * using smp_processor_id() is OK. */ @@@ -907,16 -894,11 +907,16 @@@ static void mvpp2_interrupts_mask(void { struct mvpp2_port *port = arg;
- mvpp2_percpu_write(port->priv, smp_processor_id(), + /* If the thread isn't used, don't do anything */ + if (smp_processor_id() > port->priv->nthreads) + return; + + mvpp2_thread_write(port->priv, + mvpp2_cpu_to_thread(port->priv, smp_processor_id()), MVPP2_ISR_RX_TX_MASK_REG(port->id), 0); }
-/* Unmask the current CPU's Rx/Tx interrupts. +/* Unmask the current thread's Rx/Tx interrupts. * Called by on_each_cpu(), guaranteed to run with migration disabled, * using smp_processor_id() is OK. */ @@@ -925,17 -907,12 +925,17 @@@ static void mvpp2_interrupts_unmask(voi struct mvpp2_port *port = arg; u32 val;
+ /* If the thread isn't used, don't do anything */ + if (smp_processor_id() > port->priv->nthreads) + return; + val = MVPP2_CAUSE_MISC_SUM_MASK | - MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK; + MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK(port->priv->hw_version); if (port->has_tx_irqs) val |= MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK;
- mvpp2_percpu_write(port->priv, smp_processor_id(), + mvpp2_thread_write(port->priv, + mvpp2_cpu_to_thread(port->priv, smp_processor_id()), MVPP2_ISR_RX_TX_MASK_REG(port->id), val); }
@@@ -951,7 -928,7 +951,7 @@@ mvpp2_shared_interrupt_mask_unmask(stru if (mask) val = 0; else - val = MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK; + val = MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK(MVPP22);
for (i = 0; i < port->nqvecs; i++) { struct mvpp2_queue_vector *v = port->qvecs + i; @@@ -959,7 -936,7 +959,7 @@@ if (v->type != MVPP2_QUEUE_VECTOR_SHARED) continue;
- mvpp2_percpu_write(port->priv, v->sw_thread_id, + mvpp2_thread_write(port->priv, v->sw_thread_id, MVPP2_ISR_RX_TX_MASK_REG(port->id), val); } } @@@ -1448,9 -1425,6 +1448,9 @@@ static void mvpp2_defaults_set(struct m tx_port_num); mvpp2_write(port->priv, MVPP2_TXP_SCHED_CMD_1_REG, 0);
+ /* Set TXQ scheduling to Round-Robin */ + mvpp2_write(port->priv, MVPP2_TXP_SCHED_FIXED_PRIO_REG, 0); + /* Close bandwidth for all queues */ for (queue = 0; queue < MVPP2_MAX_TXQ; queue++) { ptxq = mvpp2_txq_phys(port->id, queue); @@@ -1650,8 -1624,7 +1650,8 @@@ mvpp2_txq_next_desc_get(struct mvpp2_tx static void mvpp2_aggr_txq_pend_desc_add(struct mvpp2_port *port, int pending) { /* aggregated access - relevant TXQ number is written in TX desc */ - mvpp2_percpu_write(port->priv, smp_processor_id(), + mvpp2_thread_write(port->priv, + mvpp2_cpu_to_thread(port->priv, smp_processor_id()), MVPP2_AGGR_TXQ_UPDATE_REG, pending); }
@@@ -1661,15 -1634,14 +1661,15 @@@ * Called only from mvpp2_tx(), so migration is disabled, using * smp_processor_id() is OK. */ -static int mvpp2_aggr_desc_num_check(struct mvpp2 *priv, +static int mvpp2_aggr_desc_num_check(struct mvpp2_port *port, struct mvpp2_tx_queue *aggr_txq, int num) { if ((aggr_txq->count + num) > MVPP2_AGGR_TXQ_SIZE) { /* Update number of occupied aggregated Tx descriptors */ - int cpu = smp_processor_id(); - u32 val = mvpp2_read_relaxed(priv, - MVPP2_AGGR_TXQ_STATUS_REG(cpu)); + unsigned int thread = + mvpp2_cpu_to_thread(port->priv, smp_processor_id()); + u32 val = mvpp2_read_relaxed(port->priv, + MVPP2_AGGR_TXQ_STATUS_REG(thread));
aggr_txq->count = val & MVPP2_AGGR_TXQ_PENDING_MASK;
@@@ -1685,17 -1657,16 +1685,17 @@@ * only by mvpp2_tx(), so migration is disabled, using * smp_processor_id() is OK. */ -static int mvpp2_txq_alloc_reserved_desc(struct mvpp2 *priv, +static int mvpp2_txq_alloc_reserved_desc(struct mvpp2_port *port, struct mvpp2_tx_queue *txq, int num) { + unsigned int thread = mvpp2_cpu_to_thread(port->priv, smp_processor_id()); + struct mvpp2 *priv = port->priv; u32 val; - int cpu = smp_processor_id();
val = (txq->id << MVPP2_TXQ_RSVD_REQ_Q_OFFSET) | num; - mvpp2_percpu_write_relaxed(priv, cpu, MVPP2_TXQ_RSVD_REQ_REG, val); + mvpp2_thread_write_relaxed(priv, thread, MVPP2_TXQ_RSVD_REQ_REG, val);
- val = mvpp2_percpu_read_relaxed(priv, cpu, MVPP2_TXQ_RSVD_RSLT_REG); + val = mvpp2_thread_read_relaxed(priv, thread, MVPP2_TXQ_RSVD_RSLT_REG);
return val & MVPP2_TXQ_RSVD_RSLT_MASK; } @@@ -1703,13 -1674,12 +1703,13 @@@ /* Check if there are enough reserved descriptors for transmission. * If not, request chunk of reserved descriptors and check again. */ -static int mvpp2_txq_reserved_desc_num_proc(struct mvpp2 *priv, +static int mvpp2_txq_reserved_desc_num_proc(struct mvpp2_port *port, struct mvpp2_tx_queue *txq, struct mvpp2_txq_pcpu *txq_pcpu, int num) { - int req, cpu, desc_count; + int req, desc_count; + unsigned int thread;
if (txq_pcpu->reserved_num >= num) return 0; @@@ -1720,10 -1690,10 +1720,10 @@@
desc_count = 0; /* Compute total of used descriptors */ - for_each_present_cpu(cpu) { + for (thread = 0; thread < port->priv->nthreads; thread++) { struct mvpp2_txq_pcpu *txq_pcpu_aux;
- txq_pcpu_aux = per_cpu_ptr(txq->pcpu, cpu); + txq_pcpu_aux = per_cpu_ptr(txq->pcpu, thread); desc_count += txq_pcpu_aux->count; desc_count += txq_pcpu_aux->reserved_num; } @@@ -1732,10 -1702,10 +1732,10 @@@ desc_count += req;
if (desc_count > - (txq->size - (num_present_cpus() * MVPP2_CPU_DESC_CHUNK))) + (txq->size - (MVPP2_MAX_THREADS * MVPP2_CPU_DESC_CHUNK))) return -ENOMEM;
- txq_pcpu->reserved_num += mvpp2_txq_alloc_reserved_desc(priv, txq, req); + txq_pcpu->reserved_num += mvpp2_txq_alloc_reserved_desc(port, txq, req);
/* OK, the descriptor could have been updated: check again. */ if (txq_pcpu->reserved_num < num) @@@ -1789,7 -1759,7 +1789,7 @@@ static u32 mvpp2_txq_desc_csum(int l3_o
/* Get number of sent descriptors and decrement counter. * The number of sent descriptors is returned. - * Per-CPU access + * Per-thread access * * Called only from mvpp2_txq_done(), called from mvpp2_tx() * (migration disabled) and from the TX completion tasklet (migration @@@ -1801,8 -1771,7 +1801,8 @@@ static inline int mvpp2_txq_sent_desc_p u32 val;
/* Reading status reg resets transmitted descriptor counter */ - val = mvpp2_percpu_read_relaxed(port->priv, smp_processor_id(), + val = mvpp2_thread_read_relaxed(port->priv, + mvpp2_cpu_to_thread(port->priv, smp_processor_id()), MVPP2_TXQ_SENT_REG(txq->id));
return (val & MVPP2_TRANSMITTED_COUNT_MASK) >> @@@ -1817,15 -1786,10 +1817,15 @@@ static void mvpp2_txq_sent_counter_clea struct mvpp2_port *port = arg; int queue;
+ /* If the thread isn't used, don't do anything */ + if (smp_processor_id() > port->priv->nthreads) + return; + for (queue = 0; queue < port->ntxqs; queue++) { int id = port->txqs[queue]->id;
- mvpp2_percpu_read(port->priv, smp_processor_id(), + mvpp2_thread_read(port->priv, + mvpp2_cpu_to_thread(port->priv, smp_processor_id()), MVPP2_TXQ_SENT_REG(id)); } } @@@ -1885,13 -1849,13 +1885,13 @@@ static void mvpp2_txp_max_tx_size_set(s static void mvpp2_rx_pkts_coal_set(struct mvpp2_port *port, struct mvpp2_rx_queue *rxq) { - int cpu = get_cpu(); + unsigned int thread = mvpp2_cpu_to_thread(port->priv, get_cpu());
if (rxq->pkts_coal > MVPP2_OCCUPIED_THRESH_MASK) rxq->pkts_coal = MVPP2_OCCUPIED_THRESH_MASK;
- mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_NUM_REG, rxq->id); - mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_THRESH_REG, + mvpp2_thread_write(port->priv, thread, MVPP2_RXQ_NUM_REG, rxq->id); + mvpp2_thread_write(port->priv, thread, MVPP2_RXQ_THRESH_REG, rxq->pkts_coal);
put_cpu(); @@@ -1901,15 -1865,15 +1901,15 @@@ static void mvpp2_tx_pkts_coal_set(struct mvpp2_port *port, struct mvpp2_tx_queue *txq) { - int cpu = get_cpu(); + unsigned int thread = mvpp2_cpu_to_thread(port->priv, get_cpu()); u32 val;
if (txq->done_pkts_coal > MVPP2_TXQ_THRESH_MASK) txq->done_pkts_coal = MVPP2_TXQ_THRESH_MASK;
val = (txq->done_pkts_coal << MVPP2_TXQ_THRESH_OFFSET); - mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_NUM_REG, txq->id); - mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_THRESH_REG, val); + mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_NUM_REG, txq->id); + mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_THRESH_REG, val);
put_cpu(); } @@@ -2010,7 -1974,7 +2010,7 @@@ static void mvpp2_txq_done(struct mvpp2 struct netdev_queue *nq = netdev_get_tx_queue(port->dev, txq->log_id); int tx_done;
- if (txq_pcpu->cpu != smp_processor_id()) + if (txq_pcpu->thread != mvpp2_cpu_to_thread(port->priv, smp_processor_id())) netdev_err(port->dev, "wrong cpu on the end of Tx processing\n");
tx_done = mvpp2_txq_sent_desc_proc(port, txq); @@@ -2026,7 -1990,7 +2026,7 @@@ }
static unsigned int mvpp2_tx_done(struct mvpp2_port *port, u32 cause, - int cpu) + unsigned int thread) { struct mvpp2_tx_queue *txq; struct mvpp2_txq_pcpu *txq_pcpu; @@@ -2037,7 -2001,7 +2037,7 @@@ if (!txq) break;
- txq_pcpu = per_cpu_ptr(txq->pcpu, cpu); + txq_pcpu = per_cpu_ptr(txq->pcpu, thread);
if (txq_pcpu->count) { mvpp2_txq_done(port, txq, txq_pcpu); @@@ -2053,8 -2017,8 +2053,8 @@@
/* Allocate and initialize descriptors for aggr TXQ */ static int mvpp2_aggr_txq_init(struct platform_device *pdev, - struct mvpp2_tx_queue *aggr_txq, int cpu, - struct mvpp2 *priv) + struct mvpp2_tx_queue *aggr_txq, + unsigned int thread, struct mvpp2 *priv) { u32 txq_dma;
@@@ -2069,7 -2033,7 +2069,7 @@@
/* Aggr TXQ no reset WA */ aggr_txq->next_desc_to_proc = mvpp2_read(priv, - MVPP2_AGGR_TXQ_INDEX_REG(cpu)); + MVPP2_AGGR_TXQ_INDEX_REG(thread));
/* Set Tx descriptors queue starting address indirect * access @@@ -2080,8 -2044,8 +2080,8 @@@ txq_dma = aggr_txq->descs_dma >> MVPP22_AGGR_TXQ_DESC_ADDR_OFFS;
- mvpp2_write(priv, MVPP2_AGGR_TXQ_DESC_ADDR_REG(cpu), txq_dma); - mvpp2_write(priv, MVPP2_AGGR_TXQ_DESC_SIZE_REG(cpu), + mvpp2_write(priv, MVPP2_AGGR_TXQ_DESC_ADDR_REG(thread), txq_dma); + mvpp2_write(priv, MVPP2_AGGR_TXQ_DESC_SIZE_REG(thread), MVPP2_AGGR_TXQ_SIZE);
return 0; @@@ -2092,8 -2056,8 +2092,8 @@@ static int mvpp2_rxq_init(struct mvpp2_ struct mvpp2_rx_queue *rxq)
{ + unsigned int thread; u32 rxq_dma; - int cpu;
rxq->size = port->rx_ring_size;
@@@ -2110,15 -2074,15 +2110,15 @@@ mvpp2_write(port->priv, MVPP2_RXQ_STATUS_REG(rxq->id), 0);
/* Set Rx descriptors queue starting address - indirect access */ - cpu = get_cpu(); - mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_NUM_REG, rxq->id); + thread = mvpp2_cpu_to_thread(port->priv, get_cpu()); + mvpp2_thread_write(port->priv, thread, MVPP2_RXQ_NUM_REG, rxq->id); if (port->priv->hw_version == MVPP21) rxq_dma = rxq->descs_dma; else rxq_dma = rxq->descs_dma >> MVPP22_DESC_ADDR_OFFS; - mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_DESC_ADDR_REG, rxq_dma); - mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_DESC_SIZE_REG, rxq->size); - mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_INDEX_REG, 0); + mvpp2_thread_write(port->priv, thread, MVPP2_RXQ_DESC_ADDR_REG, rxq_dma); + mvpp2_thread_write(port->priv, thread, MVPP2_RXQ_DESC_SIZE_REG, rxq->size); + mvpp2_thread_write(port->priv, thread, MVPP2_RXQ_INDEX_REG, 0); put_cpu();
/* Set Offset */ @@@ -2163,7 -2127,7 +2163,7 @@@ static void mvpp2_rxq_drop_pkts(struct static void mvpp2_rxq_deinit(struct mvpp2_port *port, struct mvpp2_rx_queue *rxq) { - int cpu; + unsigned int thread;
mvpp2_rxq_drop_pkts(port, rxq);
@@@ -2182,10 -2146,10 +2182,10 @@@ * free descriptor number */ mvpp2_write(port->priv, MVPP2_RXQ_STATUS_REG(rxq->id), 0); - cpu = get_cpu(); - mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_NUM_REG, rxq->id); - mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_DESC_ADDR_REG, 0); - mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_DESC_SIZE_REG, 0); + thread = mvpp2_cpu_to_thread(port->priv, get_cpu()); + mvpp2_thread_write(port->priv, thread, MVPP2_RXQ_NUM_REG, rxq->id); + mvpp2_thread_write(port->priv, thread, MVPP2_RXQ_DESC_ADDR_REG, 0); + mvpp2_thread_write(port->priv, thread, MVPP2_RXQ_DESC_SIZE_REG, 0); put_cpu(); }
@@@ -2194,8 -2158,7 +2194,8 @@@ static int mvpp2_txq_init(struct mvpp2_ struct mvpp2_tx_queue *txq) { u32 val; - int cpu, desc, desc_per_txq, tx_port_num; + unsigned int thread; + int desc, desc_per_txq, tx_port_num; struct mvpp2_txq_pcpu *txq_pcpu;
txq->size = port->tx_ring_size; @@@ -2210,18 -2173,18 +2210,18 @@@ txq->last_desc = txq->size - 1;
/* Set Tx descriptors queue starting address - indirect access */ - cpu = get_cpu(); - mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_NUM_REG, txq->id); - mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_DESC_ADDR_REG, + thread = mvpp2_cpu_to_thread(port->priv, get_cpu()); + mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_NUM_REG, txq->id); + mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_DESC_ADDR_REG, txq->descs_dma); - mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_DESC_SIZE_REG, + mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_DESC_SIZE_REG, txq->size & MVPP2_TXQ_DESC_SIZE_MASK); - mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_INDEX_REG, 0); - mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_RSVD_CLR_REG, + mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_INDEX_REG, 0); + mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_RSVD_CLR_REG, txq->id << MVPP2_TXQ_RSVD_CLR_OFFSET); - val = mvpp2_percpu_read(port->priv, cpu, MVPP2_TXQ_PENDING_REG); + val = mvpp2_thread_read(port->priv, thread, MVPP2_TXQ_PENDING_REG); val &= ~MVPP2_TXQ_PENDING_MASK; - mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_PENDING_REG, val); + mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_PENDING_REG, val);
/* Calculate base address in prefetch buffer. We reserve 16 descriptors * for each existing TXQ. @@@ -2232,7 -2195,7 +2232,7 @@@ desc = (port->id * MVPP2_MAX_TXQ * desc_per_txq) + (txq->log_id * desc_per_txq);
- mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_PREF_BUF_REG, + mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_PREF_BUF_REG, MVPP2_PREF_BUF_PTR(desc) | MVPP2_PREF_BUF_SIZE_16 | MVPP2_PREF_BUF_THRESH(desc_per_txq / 2)); put_cpu(); @@@ -2251,8 -2214,8 +2251,8 @@@ mvpp2_write(port->priv, MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq->log_id), val);
- for_each_present_cpu(cpu) { - txq_pcpu = per_cpu_ptr(txq->pcpu, cpu); + for (thread = 0; thread < port->priv->nthreads; thread++) { + txq_pcpu = per_cpu_ptr(txq->pcpu, thread); txq_pcpu->size = txq->size; txq_pcpu->buffs = kmalloc_array(txq_pcpu->size, sizeof(*txq_pcpu->buffs), @@@ -2286,10 -2249,10 +2286,10 @@@ static void mvpp2_txq_deinit(struct mvp struct mvpp2_tx_queue *txq) { struct mvpp2_txq_pcpu *txq_pcpu; - int cpu; + unsigned int thread;
- for_each_present_cpu(cpu) { - txq_pcpu = per_cpu_ptr(txq->pcpu, cpu); + for (thread = 0; thread < port->priv->nthreads; thread++) { + txq_pcpu = per_cpu_ptr(txq->pcpu, thread); kfree(txq_pcpu->buffs);
if (txq_pcpu->tso_headers) @@@ -2315,10 -2278,10 +2315,10 @@@ mvpp2_write(port->priv, MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(txq->id), 0);
/* Set Tx descriptors queue starting address and size */ - cpu = get_cpu(); - mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_NUM_REG, txq->id); - mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_DESC_ADDR_REG, 0); - mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_DESC_SIZE_REG, 0); + thread = mvpp2_cpu_to_thread(port->priv, get_cpu()); + mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_NUM_REG, txq->id); + mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_DESC_ADDR_REG, 0); + mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_DESC_SIZE_REG, 0); put_cpu(); }
@@@ -2326,14 -2289,14 +2326,14 @@@ static void mvpp2_txq_clean(struct mvpp2_port *port, struct mvpp2_tx_queue *txq) { struct mvpp2_txq_pcpu *txq_pcpu; - int delay, pending, cpu; + int delay, pending; + unsigned int thread = mvpp2_cpu_to_thread(port->priv, get_cpu()); u32 val;
- cpu = get_cpu(); - mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_NUM_REG, txq->id); - val = mvpp2_percpu_read(port->priv, cpu, MVPP2_TXQ_PREF_BUF_REG); + mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_NUM_REG, txq->id); + val = mvpp2_thread_read(port->priv, thread, MVPP2_TXQ_PREF_BUF_REG); val |= MVPP2_TXQ_DRAIN_EN_MASK; - mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_PREF_BUF_REG, val); + mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_PREF_BUF_REG, val);
/* The napi queue has been stopped so wait for all packets * to be transmitted. @@@ -2349,17 -2312,17 +2349,17 @@@ mdelay(1); delay++;
- pending = mvpp2_percpu_read(port->priv, cpu, + pending = mvpp2_thread_read(port->priv, thread, MVPP2_TXQ_PENDING_REG); pending &= MVPP2_TXQ_PENDING_MASK; } while (pending);
val &= ~MVPP2_TXQ_DRAIN_EN_MASK; - mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_PREF_BUF_REG, val); + mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_PREF_BUF_REG, val); put_cpu();
- for_each_present_cpu(cpu) { - txq_pcpu = per_cpu_ptr(txq->pcpu, cpu); + for (thread = 0; thread < port->priv->nthreads; thread++) { + txq_pcpu = per_cpu_ptr(txq->pcpu, thread);
/* Release all packets */ mvpp2_txq_bufs_free(port, txq, txq_pcpu, txq_pcpu->count); @@@ -2426,17 -2389,13 +2426,17 @@@ err_cleanup static int mvpp2_setup_txqs(struct mvpp2_port *port) { struct mvpp2_tx_queue *txq; - int queue, err; + int queue, err, cpu;
for (queue = 0; queue < port->ntxqs; queue++) { txq = port->txqs[queue]; err = mvpp2_txq_init(port, txq); if (err) goto err_cleanup; + + /* Assign this queue to a CPU */ + cpu = queue % num_present_cpus(); + netif_set_xps_queue(port->dev, cpumask_of(cpu), queue); }
if (port->has_tx_irqs) { @@@ -2544,20 -2503,16 +2544,20 @@@ static void mvpp2_tx_proc_cb(unsigned l { struct net_device *dev = (struct net_device *)data; struct mvpp2_port *port = netdev_priv(dev); - struct mvpp2_port_pcpu *port_pcpu = this_cpu_ptr(port->pcpu); + struct mvpp2_port_pcpu *port_pcpu; unsigned int tx_todo, cause;
+ port_pcpu = per_cpu_ptr(port->pcpu, + mvpp2_cpu_to_thread(port->priv, smp_processor_id())); + if (!netif_running(dev)) return; port_pcpu->timer_scheduled = false;
/* Process all the Tx queues */ cause = (1 << port->ntxqs) - 1; - tx_todo = mvpp2_tx_done(port, cause, smp_processor_id()); + tx_todo = mvpp2_tx_done(port, cause, + mvpp2_cpu_to_thread(port->priv, smp_processor_id()));
/* Set the timer in case not all the packets were processed */ if (tx_todo) @@@ -2773,8 -2728,7 +2773,8 @@@ static inline voi tx_desc_unmap_put(struct mvpp2_port *port, struct mvpp2_tx_queue *txq, struct mvpp2_tx_desc *desc) { - struct mvpp2_txq_pcpu *txq_pcpu = this_cpu_ptr(txq->pcpu); + unsigned int thread = mvpp2_cpu_to_thread(port->priv, smp_processor_id()); + struct mvpp2_txq_pcpu *txq_pcpu = per_cpu_ptr(txq->pcpu, thread);
dma_addr_t buf_dma_addr = mvpp2_txdesc_dma_addr_get(port, desc); @@@ -2791,8 -2745,7 +2791,8 @@@ static int mvpp2_tx_frag_process(struc struct mvpp2_tx_queue *aggr_txq, struct mvpp2_tx_queue *txq) { - struct mvpp2_txq_pcpu *txq_pcpu = this_cpu_ptr(txq->pcpu); + unsigned int thread = mvpp2_cpu_to_thread(port->priv, smp_processor_id()); + struct mvpp2_txq_pcpu *txq_pcpu = per_cpu_ptr(txq->pcpu, thread); struct mvpp2_tx_desc *tx_desc; int i; dma_addr_t buf_dma_addr; @@@ -2911,8 -2864,9 +2911,8 @@@ static int mvpp2_tx_tso(struct sk_buff int i, len, descs = 0;
/* Check number of available descriptors */ - if (mvpp2_aggr_desc_num_check(port->priv, aggr_txq, - tso_count_descs(skb)) || - mvpp2_txq_reserved_desc_num_proc(port->priv, txq, txq_pcpu, + if (mvpp2_aggr_desc_num_check(port, aggr_txq, tso_count_descs(skb)) || + mvpp2_txq_reserved_desc_num_proc(port, txq, txq_pcpu, tso_count_descs(skb))) return 0;
@@@ -2952,28 -2906,21 +2952,28 @@@ release }
/* Main tx processing */ -static int mvpp2_tx(struct sk_buff *skb, struct net_device *dev) +static netdev_tx_t mvpp2_tx(struct sk_buff *skb, struct net_device *dev) { struct mvpp2_port *port = netdev_priv(dev); struct mvpp2_tx_queue *txq, *aggr_txq; struct mvpp2_txq_pcpu *txq_pcpu; struct mvpp2_tx_desc *tx_desc; dma_addr_t buf_dma_addr; + unsigned long flags = 0; + unsigned int thread; int frags = 0; u16 txq_id; u32 tx_cmd;
+ thread = mvpp2_cpu_to_thread(port->priv, smp_processor_id()); + txq_id = skb_get_queue_mapping(skb); txq = port->txqs[txq_id]; - txq_pcpu = this_cpu_ptr(txq->pcpu); - aggr_txq = &port->priv->aggr_txqs[smp_processor_id()]; + txq_pcpu = per_cpu_ptr(txq->pcpu, thread); + aggr_txq = &port->priv->aggr_txqs[thread]; + + if (test_bit(thread, &port->priv->lock_map)) + spin_lock_irqsave(&port->tx_lock[thread], flags);
if (skb_is_gso(skb)) { frags = mvpp2_tx_tso(skb, dev, txq, aggr_txq, txq_pcpu); @@@ -2982,8 -2929,9 +2982,8 @@@ frags = skb_shinfo(skb)->nr_frags + 1;
/* Check number of available descriptors */ - if (mvpp2_aggr_desc_num_check(port->priv, aggr_txq, frags) || - mvpp2_txq_reserved_desc_num_proc(port->priv, txq, - txq_pcpu, frags)) { + if (mvpp2_aggr_desc_num_check(port, aggr_txq, frags) || + mvpp2_txq_reserved_desc_num_proc(port, txq, txq_pcpu, frags)) { frags = 0; goto out; } @@@ -3025,7 -2973,7 +3025,7 @@@
out: if (frags > 0) { - struct mvpp2_pcpu_stats *stats = this_cpu_ptr(port->stats); + struct mvpp2_pcpu_stats *stats = per_cpu_ptr(port->stats, thread); struct netdev_queue *nq = netdev_get_tx_queue(dev, txq_id);
txq_pcpu->reserved_num -= frags; @@@ -3055,14 -3003,11 +3055,14 @@@ /* Set the timer in case not all frags were processed */ if (!port->has_tx_irqs && txq_pcpu->count <= frags && txq_pcpu->count > 0) { - struct mvpp2_port_pcpu *port_pcpu = this_cpu_ptr(port->pcpu); + struct mvpp2_port_pcpu *port_pcpu = per_cpu_ptr(port->pcpu, thread);
mvpp2_timer_set(port_pcpu); }
+ if (test_bit(thread, &port->priv->lock_map)) + spin_unlock_irqrestore(&port->tx_lock[thread], flags); + return NETDEV_TX_OK; }
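Two things keep the Tx path coherent under the new CPU-to-thread mapping: mvpp2_tx() resolves its per-thread state once up front, taking the shared-thread lock when needed as above, and mvpp2_setup_txqs() earlier pinned each Tx queue to a CPU with netif_set_xps_queue() so the stack's queue choice lines up with those per-thread resources. A kernel-style sketch of that queue spreading; the resulting masks are also visible in /sys/class/net/<iface>/queues/tx-<n>/xps_cpus:

#include <linux/cpumask.h>
#include <linux/netdevice.h>

/* Round-robin ntxqs transmit queues across the present CPUs, wrapping
 * when queues outnumber CPUs, as in the mvpp2_setup_txqs() hunk above.
 */
static void example_spread_txqs(struct net_device *dev, int ntxqs)
{
	int queue, cpu;

	for (queue = 0; queue < ntxqs; queue++) {
		cpu = queue % num_present_cpus();
		netif_set_xps_queue(dev, cpumask_of(cpu), queue);
	}
}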
@@@ -3082,7 -3027,7 +3082,7 @@@ static int mvpp2_poll(struct napi_struc int rx_done = 0; struct mvpp2_port *port = netdev_priv(napi->dev); struct mvpp2_queue_vector *qv; - int cpu = smp_processor_id(); + unsigned int thread = mvpp2_cpu_to_thread(port->priv, smp_processor_id());
qv = container_of(napi, struct mvpp2_queue_vector, napi);
@@@ -3096,7 -3041,7 +3096,7 @@@ * * Each CPU has its own Rx/Tx cause register */ - cause_rx_tx = mvpp2_percpu_read_relaxed(port->priv, qv->sw_thread_id, + cause_rx_tx = mvpp2_thread_read_relaxed(port->priv, qv->sw_thread_id, MVPP2_ISR_RX_TX_CAUSE_REG(port->id));
cause_misc = cause_rx_tx & MVPP2_CAUSE_MISC_SUM_MASK; @@@ -3105,20 -3050,21 +3105,22 @@@
/* Clear the cause register */ mvpp2_write(port->priv, MVPP2_ISR_MISC_CAUSE_REG, 0); - mvpp2_percpu_write(port->priv, cpu, + mvpp2_thread_write(port->priv, thread, MVPP2_ISR_RX_TX_CAUSE_REG(port->id), cause_rx_tx & ~MVPP2_CAUSE_MISC_SUM_MASK); }
- cause_tx = cause_rx_tx & MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK; - if (cause_tx) { - cause_tx >>= MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_OFFSET; - mvpp2_tx_done(port, cause_tx, qv->sw_thread_id); + if (port->has_tx_irqs) { + cause_tx = cause_rx_tx & MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK; + if (cause_tx) { + cause_tx >>= MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_OFFSET; + mvpp2_tx_done(port, cause_tx, qv->sw_thread_id); + } }
/* Process RX packets */ - cause_rx = cause_rx_tx & MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK; + cause_rx = cause_rx_tx & + MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK(port->priv->hw_version); cause_rx <<= qv->first_rxq; cause_rx |= qv->pending_cause_rx; while (cause_rx && budget > 0) { @@@ -3193,13 -3139,14 +3195,13 @@@ static void mvpp2_start_dev(struct mvpp for (i = 0; i < port->nqvecs; i++) napi_enable(&port->qvecs[i].napi);
- /* Enable interrupts on all CPUs */ + /* Enable interrupts on all threads */ mvpp2_interrupts_enable(port);
if (port->priv->hw_version == MVPP22) mvpp22_mode_reconfigure(port);
if (port->phylink) { - netif_carrier_off(port->dev); phylink_start(port->phylink); } else { /* Phylink isn't used as of now for ACPI, so the MAC has to be @@@ -3222,7 -3169,7 +3224,7 @@@ static void mvpp2_stop_dev(struct mvpp2 { int i;
- /* Disable interrupts on all CPUs */ + /* Disable interrupts on all threads */ mvpp2_interrupts_disable(port);
for (i = 0; i < port->nqvecs; i++) @@@ -3302,18 -3249,9 +3304,18 @@@ static int mvpp2_irqs_init(struct mvpp2 if (err) goto err;
- if (qv->type == MVPP2_QUEUE_VECTOR_PRIVATE) - irq_set_affinity_hint(qv->irq, - cpumask_of(qv->sw_thread_id)); + if (qv->type == MVPP2_QUEUE_VECTOR_PRIVATE) { + unsigned long mask = 0; + unsigned int cpu; + + for_each_present_cpu(cpu) { + if (mvpp2_cpu_to_thread(port->priv, cpu) == + qv->sw_thread_id) + mask |= BIT(cpu); + } + + irq_set_affinity_hint(qv->irq, to_cpumask(&mask)); + } }
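Because several CPUs may now share one sw_thread_id, the affinity hint has to be a mask built from all matching CPUs rather than cpumask_of() on a single CPU. A standalone sketch of that mask construction; the modulo mapping is an assumption standing in for mvpp2_cpu_to_thread():

#include <stdio.h>

static unsigned long build_affinity_mask(unsigned int nr_cpus,
					 unsigned int nthreads,
					 unsigned int sw_thread_id)
{
	unsigned long mask = 0;
	unsigned int cpu;

	for (cpu = 0; cpu < nr_cpus; cpu++)
		if (cpu % nthreads == sw_thread_id)	/* assumed fold */
			mask |= 1UL << cpu;
	return mask;
}

int main(void)
{
	/* 8 CPUs folded onto 4 threads: thread 1 serves CPUs 1 and 5 */
	printf("0x%lx\n", build_affinity_mask(8, 4, 1));	/* 0x22 */
	return 0;
}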
return 0; @@@ -3457,11 -3395,11 +3459,11 @@@ static int mvpp2_stop(struct net_devic { struct mvpp2_port *port = netdev_priv(dev); struct mvpp2_port_pcpu *port_pcpu; - int cpu; + unsigned int thread;
mvpp2_stop_dev(port);
- /* Mask interrupts on all CPUs */ + /* Mask interrupts on all threads */ on_each_cpu(mvpp2_interrupts_mask, port, 1); mvpp2_shared_interrupt_mask_unmask(port, true);
@@@ -3472,8 -3410,8 +3474,8 @@@
mvpp2_irqs_deinit(port); if (!port->has_tx_irqs) { - for_each_present_cpu(cpu) { - port_pcpu = per_cpu_ptr(port->pcpu, cpu); + for (thread = 0; thread < port->priv->nthreads; thread++) { + port_pcpu = per_cpu_ptr(port->pcpu, thread);
hrtimer_cancel(&port_pcpu->tx_done_timer); port_pcpu->timer_scheduled = false; @@@ -3618,7 -3556,7 +3620,7 @@@ mvpp2_get_stats64(struct net_device *de { struct mvpp2_port *port = netdev_priv(dev); unsigned int start; - int cpu; + unsigned int cpu;
for_each_possible_cpu(cpu) { struct mvpp2_pcpu_stats *cpu_stats; @@@ -4045,18 -3983,12 +4047,18 @@@ static int mvpp2_simple_queue_vectors_i static int mvpp2_multi_queue_vectors_init(struct mvpp2_port *port, struct device_node *port_node) { + struct mvpp2 *priv = port->priv; struct mvpp2_queue_vector *v; int i, ret;
- port->nqvecs = num_possible_cpus(); - if (queue_mode == MVPP2_QDIST_SINGLE_MODE) - port->nqvecs += 1; + switch (queue_mode) { + case MVPP2_QDIST_SINGLE_MODE: + port->nqvecs = priv->nthreads + 1; + break; + case MVPP2_QDIST_MULTI_MODE: + port->nqvecs = priv->nthreads; + break; + }
for (i = 0; i < port->nqvecs; i++) { char irqname[16]; @@@ -4068,10 -4000,7 +4070,10 @@@ v->sw_thread_id = i; v->sw_thread_mask = BIT(i);
- snprintf(irqname, sizeof(irqname), "tx-cpu%d", i); + if (port->flags & MVPP2_F_DT_COMPAT) + snprintf(irqname, sizeof(irqname), "tx-cpu%d", i); + else + snprintf(irqname, sizeof(irqname), "hif%d", i);
if (queue_mode == MVPP2_QDIST_MULTI_MODE) { v->first_rxq = i * MVPP2_DEFAULT_RXQ; @@@ -4081,9 -4010,7 +4083,9 @@@ v->first_rxq = 0; v->nrxqs = port->nrxqs; v->type = MVPP2_QUEUE_VECTOR_SHARED; - strncpy(irqname, "rx-shared", sizeof(irqname)); + + if (port->flags & MVPP2_F_DT_COMPAT) + strncpy(irqname, "rx-shared", sizeof(irqname)); }
if (port_node) @@@ -4160,8 -4087,7 +4162,8 @@@ static int mvpp2_port_init(struct mvpp2 struct device *dev = port->dev->dev.parent; struct mvpp2 *priv = port->priv; struct mvpp2_txq_pcpu *txq_pcpu; - int queue, cpu, err; + unsigned int thread; + int queue, err;
/* Checks for hardware constraints */ if (port->first_rxq + port->nrxqs > @@@ -4205,9 -4131,9 +4207,9 @@@ txq->id = queue_phy_id; txq->log_id = queue; txq->done_pkts_coal = MVPP2_TXDONE_COAL_PKTS_THRESH; - for_each_present_cpu(cpu) { - txq_pcpu = per_cpu_ptr(txq->pcpu, cpu); - txq_pcpu->cpu = cpu; + for (thread = 0; thread < priv->nthreads; thread++) { + txq_pcpu = per_cpu_ptr(txq->pcpu, thread); + txq_pcpu->thread = thread; }
port->txqs[queue] = txq; @@@ -4280,51 -4206,24 +4282,51 @@@ err_free_percpu return err; }
-/* Checks if the port DT description has the TX interrupts - * described. On PPv2.1, there are no such interrupts. On PPv2.2, - * there are available, but we need to keep support for old DTs. +static bool mvpp22_port_has_legacy_tx_irqs(struct device_node *port_node, + unsigned long *flags) +{ + char *irqs[5] = { "rx-shared", "tx-cpu0", "tx-cpu1", "tx-cpu2", + "tx-cpu3" }; + int i; + + for (i = 0; i < 5; i++) + if (of_property_match_string(port_node, "interrupt-names", + irqs[i]) < 0) + return false; + + *flags |= MVPP2_F_DT_COMPAT; + return true; +} + +/* Checks if the port DT description has the required Tx interrupts: + * - PPv2.1: there are no such interrupts. + * - PPv2.2: + * - The old DTs have: "rx-shared", "tx-cpuX" with X in [0..3] + * - The new ones have: "hifX" with X in [0..8] + * + * All those variants are supported for backward compatibility. + */ -static bool mvpp2_port_has_tx_irqs(struct mvpp2 *priv, - struct device_node *port_node) +static bool mvpp2_port_has_irqs(struct mvpp2 *priv, + struct device_node *port_node, + unsigned long *flags) { - char *irqs[5] = { "rx-shared", "tx-cpu0", "tx-cpu1", - "tx-cpu2", "tx-cpu3" }; - int ret, i; + char name[5]; + int i; + + /* ACPI */ + if (!port_node) + return true;
if (priv->hw_version == MVPP21) return false;
- for (i = 0; i < 5; i++) { - ret = of_property_match_string(port_node, "interrupt-names", - irqs[i]); - if (ret < 0) + if (mvpp22_port_has_legacy_tx_irqs(port_node, flags)) + return true; + + for (i = 0; i < MVPP2_MAX_THREADS; i++) { + snprintf(name, 5, "hif%d", i); + if (of_property_match_string(port_node, "interrupt-names", + name) < 0) return false; }
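The probe order above is deliberate: the legacy names are tried first so old DTBs keep working, and only if they are absent is the full "hifX" scheme required. A self-contained model of the two-scheme probe, where match() plays the role of of_property_match_string() (negative return means the name is absent):

#include <stdio.h>
#include <string.h>

static int match(const char *const *names, int n, const char *s)
{
	int i;

	for (i = 0; i < n; i++)
		if (!strcmp(names[i], s))
			return i;
	return -1;	/* like of_property_match_string() on no match */
}

static int has_tx_irqs(const char *const *names, int n, int max_threads)
{
	static const char *const legacy[] = { "rx-shared", "tx-cpu0",
		"tx-cpu1", "tx-cpu2", "tx-cpu3" };
	char name[5];
	int i;

	for (i = 0; i < 5; i++)
		if (match(names, n, legacy[i]) < 0)
			break;
	if (i == 5)
		return 1;			/* legacy DT naming */

	for (i = 0; i < max_threads; i++) {
		snprintf(name, sizeof(name), "hif%d", i);
		if (match(names, n, name) < 0)
			return 0;
	}
	return 1;				/* new-style DT naming */
}

int main(void)
{
	const char *const new_dt[] = { "hif0", "hif1", "hif2", "hif3" };

	printf("%d\n", has_tx_irqs(new_dt, 4, 4));	/* 1 */
	return 0;
}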
@@@ -4701,21 -4600,23 +4703,21 @@@ static int mvpp2_port_probe(struct plat struct resource *res; struct phylink *phylink; char *mac_from = ""; - unsigned int ntxqs, nrxqs; + unsigned int ntxqs, nrxqs, thread; + unsigned long flags = 0; bool has_tx_irqs; u32 id; int features; int phy_mode; - int err, i, cpu; + int err, i;
- if (port_node) { - has_tx_irqs = mvpp2_port_has_tx_irqs(priv, port_node); - } else { - has_tx_irqs = true; - queue_mode = MVPP2_QDIST_MULTI_MODE; + has_tx_irqs = mvpp2_port_has_irqs(priv, port_node, &flags); + if (!has_tx_irqs && queue_mode == MVPP2_QDIST_MULTI_MODE) { + dev_err(&pdev->dev, + "not enough IRQs to support multi queue mode\n"); + return -EINVAL; }
- if (!has_tx_irqs) - queue_mode = MVPP2_QDIST_SINGLE_MODE; - ntxqs = MVPP2_MAX_TXQ; if (priv->hw_version == MVPP22 && queue_mode == MVPP2_QDIST_MULTI_MODE) nrxqs = MVPP2_DEFAULT_RXQ * num_possible_cpus(); @@@ -4763,7 -4664,6 +4765,7 @@@ port->nrxqs = nrxqs; port->priv = priv; port->has_tx_irqs = has_tx_irqs; + port->flags = flags;
err = mvpp2_queue_vectors_init(port, port_node); if (err) @@@ -4860,8 -4760,8 +4862,8 @@@ }
if (!port->has_tx_irqs) { - for_each_present_cpu(cpu) { - port_pcpu = per_cpu_ptr(port->pcpu, cpu); + for (thread = 0; thread < priv->nthreads; thread++) { + port_pcpu = per_cpu_ptr(port->pcpu, thread);
hrtimer_init(&port_pcpu->tx_done_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_PINNED); @@@ -5145,13 -5045,13 +5147,13 @@@ static int mvpp2_init(struct platform_d }
/* Allocate and initialize aggregated TXQs */ - priv->aggr_txqs = devm_kcalloc(&pdev->dev, num_present_cpus(), + priv->aggr_txqs = devm_kcalloc(&pdev->dev, MVPP2_MAX_THREADS, sizeof(*priv->aggr_txqs), GFP_KERNEL); if (!priv->aggr_txqs) return -ENOMEM;
- for_each_present_cpu(i) { + for (i = 0; i < MVPP2_MAX_THREADS; i++) { priv->aggr_txqs[i].id = i; priv->aggr_txqs[i].size = MVPP2_AGGR_TXQ_SIZE; err = mvpp2_aggr_txq_init(pdev, &priv->aggr_txqs[i], i, priv); @@@ -5198,7 -5098,7 +5200,7 @@@ static int mvpp2_probe(struct platform_ struct mvpp2 *priv; struct resource *res; void __iomem *base; - int i; + int i, shared; int err;
priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL); @@@ -5263,15 -5163,6 +5265,15 @@@
mvpp2_setup_bm_pool();
+ + priv->nthreads = min_t(unsigned int, num_present_cpus(), + MVPP2_MAX_THREADS); + + shared = num_present_cpus() - priv->nthreads; + if (shared > 0) + bitmap_fill(&priv->lock_map, + min_t(int, shared, MVPP2_MAX_THREADS)); + for (i = 0; i < MVPP2_MAX_THREADS; i++) { u32 addr_space_sz;
@@@ -5446,7 -5337,7 +5448,7 @@@ static int mvpp2_remove(struct platform mvpp2_bm_pool_destroy(pdev, priv, bm_pool); }
- for_each_present_cpu(i) { + for (i = 0; i < MVPP2_MAX_THREADS; i++) { struct mvpp2_tx_queue *aggr_txq = &priv->aggr_txqs[i];
dma_free_coherent(&pdev->dev, diff --combined drivers/net/ethernet/mellanox/mlx5/core/en_main.c index d14c4051edd8,54118b77dc1f..5955b4d844cc --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@@ -3049,8 -3049,8 +3049,8 @@@ static int mlx5e_alloc_drop_cq(struct m return mlx5e_alloc_cq_common(mdev, param, cq); }
-static int mlx5e_open_drop_rq(struct mlx5e_priv *priv, - struct mlx5e_rq *drop_rq) +int mlx5e_open_drop_rq(struct mlx5e_priv *priv, + struct mlx5e_rq *drop_rq) { struct mlx5_core_dev *mdev = priv->mdev; struct mlx5e_cq_param cq_param = {}; @@@ -3094,7 -3094,7 +3094,7 @@@ err_free_cq return err; }
-static void mlx5e_close_drop_rq(struct mlx5e_rq *drop_rq) +void mlx5e_close_drop_rq(struct mlx5e_rq *drop_rq) { mlx5e_destroy_rq(drop_rq); mlx5e_free_rq(drop_rq); @@@ -4315,22 -4315,6 +4315,6 @@@ static int mlx5e_xdp(struct net_device } }
- #ifdef CONFIG_NET_POLL_CONTROLLER - /* Fake "interrupt" called by netpoll (eg netconsole) to send skbs without - * reenabling interrupts. - */ - static void mlx5e_netpoll(struct net_device *dev) - { - struct mlx5e_priv *priv = netdev_priv(dev); - struct mlx5e_channels *chs = &priv->channels; - - int i; - - for (i = 0; i < chs->num; i++) - napi_schedule(&chs->c[i]->napi); - } - #endif - static const struct net_device_ops mlx5e_netdev_ops = { .ndo_open = mlx5e_open, .ndo_stop = mlx5e_close, @@@ -4356,9 -4340,6 +4340,6 @@@ #ifdef CONFIG_MLX5_EN_ARFS .ndo_rx_flow_steer = mlx5e_rx_flow_steer, #endif - #ifdef CONFIG_NET_POLL_CONTROLLER - .ndo_poll_controller = mlx5e_netpoll, - #endif #ifdef CONFIG_MLX5_ESWITCH /* SRIOV E-Switch NDOs */ .ndo_set_vf_mac = mlx5e_set_vf_mac, @@@ -4726,7 -4707,7 +4707,7 @@@ static void mlx5e_build_nic_netdev(stru mlx5e_tls_build_netdev(priv); }
-static void mlx5e_create_q_counters(struct mlx5e_priv *priv) +void mlx5e_create_q_counters(struct mlx5e_priv *priv) { struct mlx5_core_dev *mdev = priv->mdev; int err; @@@ -4744,7 -4725,7 +4725,7 @@@ } }
-static void mlx5e_destroy_q_counters(struct mlx5e_priv *priv) +void mlx5e_destroy_q_counters(struct mlx5e_priv *priv) { if (priv->q_counter) mlx5_core_dealloc_q_counter(priv->mdev, priv->q_counter); @@@ -4783,17 -4764,9 +4764,17 @@@ static int mlx5e_init_nic_rx(struct mlx struct mlx5_core_dev *mdev = priv->mdev; int err;
+ mlx5e_create_q_counters(priv); + + err = mlx5e_open_drop_rq(priv, &priv->drop_rq); + if (err) { + mlx5_core_err(mdev, "open drop rq failed, %d\n", err); + goto err_destroy_q_counters; + } + err = mlx5e_create_indirect_rqt(priv); if (err) - return err; + goto err_close_drop_rq;
err = mlx5e_create_direct_rqts(priv); if (err) @@@ -4829,10 -4802,6 +4810,10 @@@ err_destroy_direct_rqts mlx5e_destroy_direct_rqts(priv); err_destroy_indirect_rqts: mlx5e_destroy_rqt(priv, &priv->indir_rqt); +err_close_drop_rq: + mlx5e_close_drop_rq(&priv->drop_rq); +err_destroy_q_counters: + mlx5e_destroy_q_counters(priv); return err; }
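With the drop RQ and Q counters moved into init_rx, the function acquires four resources in sequence, and the new error labels unwind them in exact reverse order. The idiom in miniature, as a compilable sketch with illustrative step names:

#include <stdio.h>

static void create_counters(void) { }
static void destroy_counters(void) { }
static int  open_drop_rq(void) { return 0; }
static void close_drop_rq(void) { }
static int  create_rqt(void) { return -12; }	/* simulate -ENOMEM */

static int init_rx(void)
{
	int err;

	create_counters();

	err = open_drop_rq();
	if (err)
		goto err_destroy_counters;

	err = create_rqt();
	if (err)
		goto err_close_drop_rq;

	return 0;

err_close_drop_rq:
	close_drop_rq();
err_destroy_counters:
	destroy_counters();
	return err;
}

int main(void)
{
	printf("init_rx() = %d\n", init_rx());	/* -12, fully unwound */
	return 0;
}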
@@@ -4844,8 -4813,6 +4825,8 @@@ static void mlx5e_cleanup_nic_rx(struc mlx5e_destroy_indirect_tirs(priv); mlx5e_destroy_direct_rqts(priv); mlx5e_destroy_rqt(priv, &priv->indir_rqt); + mlx5e_close_drop_rq(&priv->drop_rq); + mlx5e_destroy_q_counters(priv); }
static int mlx5e_init_nic_tx(struct mlx5e_priv *priv) @@@ -4989,6 -4956,7 +4970,6 @@@ err_cleanup_nic
int mlx5e_attach_netdev(struct mlx5e_priv *priv) { - struct mlx5_core_dev *mdev = priv->mdev; const struct mlx5e_profile *profile; int err;
@@@ -4999,16 -4967,28 +4980,16 @@@ if (err) goto out;
- mlx5e_create_q_counters(priv); - - err = mlx5e_open_drop_rq(priv, &priv->drop_rq); - if (err) { - mlx5_core_err(mdev, "open drop rq failed, %d\n", err); - goto err_destroy_q_counters; - } - err = profile->init_rx(priv); if (err) - goto err_close_drop_rq; + goto err_cleanup_tx;
if (profile->enable) profile->enable(priv);
return 0;
-err_close_drop_rq: - mlx5e_close_drop_rq(&priv->drop_rq); - -err_destroy_q_counters: - mlx5e_destroy_q_counters(priv); +err_cleanup_tx: profile->cleanup_tx(priv);
out: @@@ -5026,6 -5006,8 +5007,6 @@@ void mlx5e_detach_netdev(struct mlx5e_p flush_workqueue(priv->wq);
profile->cleanup_rx(priv); - mlx5e_close_drop_rq(&priv->drop_rq); - mlx5e_destroy_q_counters(priv); profile->cleanup_tx(priv); cancel_delayed_work_sync(&priv->update_stats_work); } diff --combined drivers/net/ethernet/mellanox/mlxsw/spectrum.c index 33a024274a1f,b492152c8881..88c33a8474eb --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c @@@ -44,8 -44,8 +44,8 @@@ #define MLXSW_SP_FWREV_MINOR_TO_BRANCH(minor) ((minor) / 100)
#define MLXSW_SP1_FWREV_MAJOR 13 - #define MLXSW_SP1_FWREV_MINOR 1702 - #define MLXSW_SP1_FWREV_SUBMINOR 6 + #define MLXSW_SP1_FWREV_MINOR 1703 + #define MLXSW_SP1_FWREV_SUBMINOR 4 #define MLXSW_SP1_FWREV_CAN_RESET_MINOR 1702
static const struct mlxsw_fw_rev mlxsw_sp1_fw_rev = { @@@ -331,10 -331,7 +331,10 @@@ static int mlxsw_sp_fw_rev_validate(str return -EINVAL; } if (MLXSW_SP_FWREV_MINOR_TO_BRANCH(rev->minor) == - MLXSW_SP_FWREV_MINOR_TO_BRANCH(req_rev->minor)) + MLXSW_SP_FWREV_MINOR_TO_BRANCH(req_rev->minor) && + (rev->minor > req_rev->minor || + (rev->minor == req_rev->minor && + rev->subminor >= req_rev->subminor))) return 0;
dev_info(mlxsw_sp->bus_info->dev, "The firmware version %d.%d.%d is incompatible with the driver\n", @@@ -2807,13 -2804,6 +2807,13 @@@ static int mlxsw_sp_port_ets_init(struc MLXSW_REG_QEEC_MAS_DIS); if (err) return err; + + err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port, + MLXSW_REG_QEEC_HIERARCY_TC, + i + 8, i, + MLXSW_REG_QEEC_MAS_DIS); + if (err) + return err; }
/* Map all priorities to traffic class 0. */ diff --combined drivers/net/ethernet/netronome/nfp/nfp_net_common.c index 6dab45c1f21e,8ed38fd5a852..d05e37fcc1b2 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c @@@ -2091,10 -2091,10 +2091,10 @@@ static void nfp_ctrl_poll(unsigned lon { struct nfp_net_r_vector *r_vec = (void *)arg;
- spin_lock_bh(&r_vec->lock); + spin_lock(&r_vec->lock); nfp_net_tx_complete(r_vec->tx_ring, 0); __nfp_ctrl_tx_queued(r_vec); - spin_unlock_bh(&r_vec->lock); + spin_unlock(&r_vec->lock);
nfp_ctrl_rx(r_vec);
@@@ -2180,13 -2180,9 +2180,13 @@@ nfp_net_tx_ring_alloc(struct nfp_net_d
tx_ring->size = array_size(tx_ring->cnt, sizeof(*tx_ring->txds)); tx_ring->txds = dma_zalloc_coherent(dp->dev, tx_ring->size, - &tx_ring->dma, GFP_KERNEL); - if (!tx_ring->txds) + &tx_ring->dma, + GFP_KERNEL | __GFP_NOWARN); + if (!tx_ring->txds) { + netdev_warn(dp->netdev, "failed to allocate TX descriptor ring memory, requested descriptor count: %d, consider lowering descriptor count\n", + tx_ring->cnt); goto err_alloc; + }
tx_ring->txbufs = kvcalloc(tx_ring->cnt, sizeof(*tx_ring->txbufs), GFP_KERNEL); @@@ -2338,13 -2334,9 +2338,13 @@@ nfp_net_rx_ring_alloc(struct nfp_net_d rx_ring->cnt = dp->rxd_cnt; rx_ring->size = array_size(rx_ring->cnt, sizeof(*rx_ring->rxds)); rx_ring->rxds = dma_zalloc_coherent(dp->dev, rx_ring->size, - &rx_ring->dma, GFP_KERNEL); - if (!rx_ring->rxds) + &rx_ring->dma, + GFP_KERNEL | __GFP_NOWARN); + if (!rx_ring->rxds) { + netdev_warn(dp->netdev, "failed to allocate RX descriptor ring memory, requested descriptor count: %d, consider lowering descriptor count\n", + rx_ring->cnt); goto err_alloc; + }
rx_ring->rxbufs = kvcalloc(rx_ring->cnt, sizeof(*rx_ring->rxbufs), GFP_KERNEL); @@@ -3154,28 -3146,12 +3154,13 @@@ nfp_net_vlan_rx_kill_vid(struct net_dev return nfp_net_reconfig_mbox(nn, NFP_NET_CFG_MBOX_CMD_CTAG_FILTER_KILL); }
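Both ring-allocation hunks follow the same recipe: pass __GFP_NOWARN so a failed high-order coherent allocation does not splat the generic OOM backtrace, then print one actionable line that names the tunable (the descriptor count) the user can lower. A hedged userspace sketch of the same shape, with calloc() standing in for dma_zalloc_coherent():

#include <stdio.h>
#include <stdlib.h>

static void *alloc_desc_ring(size_t cnt, size_t desc_sz)
{
	/* kernel: dma_zalloc_coherent(dev, cnt * desc_sz, &dma,
	 *                             GFP_KERNEL | __GFP_NOWARN) */
	void *ring = calloc(cnt, desc_sz);

	if (!ring)
		fprintf(stderr,
			"failed to allocate descriptor ring memory, requested descriptor count: %zu, consider lowering descriptor count\n",
			cnt);
	return ring;
}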
- #ifdef CONFIG_NET_POLL_CONTROLLER - static void nfp_net_netpoll(struct net_device *netdev) - { - struct nfp_net *nn = netdev_priv(netdev); - int i; - - /* nfp_net's NAPIs are statically allocated so even if there is a race - * with reconfig path this will simply try to schedule some disabled - * NAPI instances. - */ - for (i = 0; i < nn->dp.num_stack_tx_rings; i++) - napi_schedule_irqoff(&nn->r_vecs[i].napi); - } - #endif - static void nfp_net_stat64(struct net_device *netdev, struct rtnl_link_stats64 *stats) { struct nfp_net *nn = netdev_priv(netdev); int r;
+ /* Collect software stats */ for (r = 0; r < nn->max_r_vecs; r++) { struct nfp_net_r_vector *r_vec = &nn->r_vecs[r]; u64 data[3]; @@@ -3201,14 -3177,6 +3186,14 @@@ stats->tx_bytes += data[1]; stats->tx_errors += data[2]; } + + /* Add in device stats */ + stats->multicast += nn_readq(nn, NFP_NET_CFG_STATS_RX_MC_FRAMES); + stats->rx_dropped += nn_readq(nn, NFP_NET_CFG_STATS_RX_DISCARDS); + stats->rx_errors += nn_readq(nn, NFP_NET_CFG_STATS_RX_ERRORS); + + stats->tx_dropped += nn_readq(nn, NFP_NET_CFG_STATS_TX_DISCARDS); + stats->tx_errors += nn_readq(nn, NFP_NET_CFG_STATS_TX_ERRORS); }
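The reworked stats path above layers two sources: the per-vector software counters are summed first, then device-maintained counters read through nn_readq() are added on top, so drops and errors that never reach a ring become visible. A compilable model of that layering; the hw_rx_discards() value is a stand-in for the BAR read:

#include <stdint.h>
#include <stdio.h>

struct ring_sw_stats { uint64_t rx_packets, rx_bytes; };

static uint64_t hw_rx_discards(void) { return 7; }	/* stand-in BAR read */

static void fill_stats(const struct ring_sw_stats *r, int nrings,
		       uint64_t *rx_packets, uint64_t *rx_dropped)
{
	int i;

	*rx_packets = 0;
	for (i = 0; i < nrings; i++)		/* software stats */
		*rx_packets += r[i].rx_packets;

	*rx_dropped = hw_rx_discards();		/* device stats on top */
}

int main(void)
{
	struct ring_sw_stats rings[2] = { { 10, 0 }, { 32, 0 } };
	uint64_t pkts, dropped;

	fill_stats(rings, 2, &pkts, &dropped);
	printf("rx_packets=%llu rx_dropped=%llu\n",
	       (unsigned long long)pkts, (unsigned long long)dropped);
	return 0;
}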
static int nfp_net_set_features(struct net_device *netdev, @@@ -3536,9 -3504,6 +3521,6 @@@ const struct net_device_ops nfp_net_net .ndo_get_stats64 = nfp_net_stat64, .ndo_vlan_rx_add_vid = nfp_net_vlan_rx_add_vid, .ndo_vlan_rx_kill_vid = nfp_net_vlan_rx_kill_vid, - #ifdef CONFIG_NET_POLL_CONTROLLER - .ndo_poll_controller = nfp_net_netpoll, - #endif .ndo_set_vf_mac = nfp_app_set_vf_mac, .ndo_set_vf_vlan = nfp_app_set_vf_vlan, .ndo_set_vf_spoofchk = nfp_app_set_vf_spoofchk, @@@ -3779,18 -3744,15 +3761,18 @@@ static void nfp_net_netdev_init(struct } if (nn->cap & NFP_NET_CFG_CTRL_RSS_ANY) netdev->hw_features |= NETIF_F_RXHASH; - if (nn->cap & NFP_NET_CFG_CTRL_VXLAN && - nn->cap & NFP_NET_CFG_CTRL_NVGRE) { + if (nn->cap & NFP_NET_CFG_CTRL_VXLAN) { if (nn->cap & NFP_NET_CFG_CTRL_LSO) - netdev->hw_features |= NETIF_F_GSO_GRE | - NETIF_F_GSO_UDP_TUNNEL; - nn->dp.ctrl |= NFP_NET_CFG_CTRL_VXLAN | NFP_NET_CFG_CTRL_NVGRE; - - netdev->hw_enc_features = netdev->hw_features; + netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL; + nn->dp.ctrl |= NFP_NET_CFG_CTRL_VXLAN; } + if (nn->cap & NFP_NET_CFG_CTRL_NVGRE) { + if (nn->cap & NFP_NET_CFG_CTRL_LSO) + netdev->hw_features |= NETIF_F_GSO_GRE; + nn->dp.ctrl |= NFP_NET_CFG_CTRL_NVGRE; + } + if (nn->cap & (NFP_NET_CFG_CTRL_VXLAN | NFP_NET_CFG_CTRL_NVGRE)) + netdev->hw_enc_features = netdev->hw_features;
netdev->vlan_features = netdev->hw_features;
diff --combined drivers/net/ethernet/qlogic/qed/qed_dcbx.c index 6ce9a762cfc0,f5459de6d60a..8e8fa823d611 --- a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c +++ b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c @@@ -190,10 -190,8 +190,8 @@@ qed_dcbx_dp_protocol(struct qed_hwfn *p
static void qed_dcbx_set_params(struct qed_dcbx_results *p_data, - struct qed_hw_info *p_info, - bool enable, - u8 prio, - u8 tc, + struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, + bool enable, u8 prio, u8 tc, enum dcbx_protocol_type type, enum qed_pci_personality personality) { @@@ -206,19 -204,30 +204,30 @@@ else p_data->arr[type].update = DONT_UPDATE_DCB_DSCP;
+ /* Do not add vlan tag 0 when DCB is enabled and port in UFP/OV mode */ + if ((test_bit(QED_MF_8021Q_TAGGING, &p_hwfn->cdev->mf_bits) || + test_bit(QED_MF_8021AD_TAGGING, &p_hwfn->cdev->mf_bits))) + p_data->arr[type].dont_add_vlan0 = true; + /* QM reconf data */ - if (p_info->personality == personality) - qed_hw_info_set_offload_tc(p_info, tc); + if (p_hwfn->hw_info.personality == personality) + qed_hw_info_set_offload_tc(&p_hwfn->hw_info, tc); + + /* Configure dcbx vlan priority in doorbell block for roce EDPM */ + if (test_bit(QED_MF_UFP_SPECIFIC, &p_hwfn->cdev->mf_bits) && + type == DCBX_PROTOCOL_ROCE) { + qed_wr(p_hwfn, p_ptt, DORQ_REG_TAG1_OVRD_MODE, 1); + qed_wr(p_hwfn, p_ptt, DORQ_REG_PF_PCP_BB_K2, prio << 1); + } }
/* Update app protocol data and hw_info fields with the TLV info */ static void qed_dcbx_update_app_info(struct qed_dcbx_results *p_data, - struct qed_hwfn *p_hwfn, - bool enable, - u8 prio, u8 tc, enum dcbx_protocol_type type) + struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, + bool enable, u8 prio, u8 tc, + enum dcbx_protocol_type type) { - struct qed_hw_info *p_info = &p_hwfn->hw_info; enum qed_pci_personality personality; enum dcbx_protocol_type id; int i; @@@ -231,7 -240,7 +240,7 @@@
personality = qed_dcbx_app_update[i].personality;
- qed_dcbx_set_params(p_data, p_info, enable, + qed_dcbx_set_params(p_data, p_hwfn, p_ptt, enable, prio, tc, type, personality); } } @@@ -253,9 -262,8 +262,9 @@@ qed_dcbx_get_app_protocol_type(struct q *type = DCBX_PROTOCOL_ROCE_V2; } else { *type = DCBX_MAX_PROTOCOL_TYPE; - DP_ERR(p_hwfn, "No action required, App TLV entry = 0x%x\n", - app_prio_bitmap); + DP_VERBOSE(p_hwfn, QED_MSG_DCB, + "No action required, App TLV entry = 0x%x\n", + app_prio_bitmap); return false; }
@@@ -266,7 -274,7 +275,7 @@@ * reconfiguring QM. Get protocol specific data for PF update ramrod command. */ static int - qed_dcbx_process_tlv(struct qed_hwfn *p_hwfn, + qed_dcbx_process_tlv(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, struct qed_dcbx_results *p_data, struct dcbx_app_priority_entry *p_tbl, u32 pri_tc_tbl, int count, u8 dcbx_version) @@@ -310,7 -318,7 +319,7 @@@ enable = true; }
- qed_dcbx_update_app_info(p_data, p_hwfn, enable, + qed_dcbx_update_app_info(p_data, p_hwfn, p_ptt, enable, priority, tc, type); } } @@@ -332,7 -340,7 +341,7 @@@ continue;
enable = (type == DCBX_PROTOCOL_ETH) ? false : !!dcbx_version; - qed_dcbx_update_app_info(p_data, p_hwfn, enable, + qed_dcbx_update_app_info(p_data, p_hwfn, p_ptt, enable, priority, tc, type); }
@@@ -342,7 -350,8 +351,8 @@@ /* Parse app TLV's to update TC information in hw_info structure for * reconfiguring QM. Get protocol specific data for PF update ramrod command. */ - static int qed_dcbx_process_mib_info(struct qed_hwfn *p_hwfn) + static int + qed_dcbx_process_mib_info(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) { struct dcbx_app_priority_feature *p_app; struct dcbx_app_priority_entry *p_tbl; @@@ -366,7 -375,7 +376,7 @@@ p_info = &p_hwfn->hw_info; num_entries = QED_MFW_GET_FIELD(p_app->flags, DCBX_APP_NUM_ENTRIES);
- rc = qed_dcbx_process_tlv(p_hwfn, &data, p_tbl, pri_tc_tbl, + rc = qed_dcbx_process_tlv(p_hwfn, p_ptt, &data, p_tbl, pri_tc_tbl, num_entries, dcbx_version); if (rc) return rc; @@@ -892,7 -901,7 +902,7 @@@ qed_dcbx_mib_update_event(struct qed_hw return rc;
if (type == QED_DCBX_OPERATIONAL_MIB) { - rc = qed_dcbx_process_mib_info(p_hwfn); + rc = qed_dcbx_process_mib_info(p_hwfn, p_ptt); if (!rc) { /* reconfigure tcs of QM queues according * to negotiation results @@@ -955,6 -964,7 +965,7 @@@ static void qed_dcbx_update_protocol_da p_data->dcb_enable_flag = p_src->arr[type].enable; p_data->dcb_priority = p_src->arr[type].priority; p_data->dcb_tc = p_src->arr[type].tc; + p_data->dcb_dont_add_vlan0 = p_src->arr[type].dont_add_vlan0; }
/* Set pf update ramrod command params */ diff --combined drivers/net/ethernet/qlogic/qed/qed_dev.c index 128eb63ca54a,97f073fd3725..0fbeafeef7a0 --- a/drivers/net/ethernet/qlogic/qed/qed_dev.c +++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c @@@ -144,12 -144,6 +144,12 @@@ static void qed_qm_info_free(struct qed qm_info->wfq_data = NULL; }
+static void qed_dbg_user_data_free(struct qed_hwfn *p_hwfn) +{ + kfree(p_hwfn->dbg_user_info); + p_hwfn->dbg_user_info = NULL; +} + void qed_resc_free(struct qed_dev *cdev) { int i; @@@ -189,7 -183,6 +189,7 @@@ qed_l2_free(p_hwfn); qed_dmae_info_free(p_hwfn); qed_dcbx_info_free(p_hwfn); + qed_dbg_user_data_free(p_hwfn); } }
@@@ -1090,10 -1083,6 +1090,10 @@@ int qed_resc_alloc(struct qed_dev *cdev rc = qed_dcbx_info_alloc(p_hwfn); if (rc) goto alloc_err; + + rc = qed_dbg_alloc_user_data(p_hwfn); + if (rc) + goto alloc_err; }
cdev->reset_stats = kzalloc(sizeof(*cdev->reset_stats), GFP_KERNEL); @@@ -1717,7 -1706,7 +1717,7 @@@ static int qed_vf_start(struct qed_hwf int qed_hw_init(struct qed_dev *cdev, struct qed_hw_init_params *p_params) { struct qed_load_req_params load_req_params; - u32 load_code, param, drv_mb_param; + u32 load_code, resp, param, drv_mb_param; bool b_default_mtu = true; struct qed_hwfn *p_hwfn; int rc = 0, mfw_rc, i; @@@ -1863,6 -1852,19 +1863,19 @@@
if (IS_PF(cdev)) { p_hwfn = QED_LEADING_HWFN(cdev); + + /* Get pre-negotiated values for stag, bandwidth etc. */ + DP_VERBOSE(p_hwfn, + QED_MSG_SPQ, + "Sending GET_OEM_UPDATES command to trigger stag/bandwidth attention handling\n"); + drv_mb_param = 1 << DRV_MB_PARAM_DUMMY_OEM_UPDATES_OFFSET; + rc = qed_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt, + DRV_MSG_CODE_GET_OEM_UPDATES, + drv_mb_param, &resp, ¶m); + if (rc) + DP_NOTICE(p_hwfn, + "Failed to send GET_OEM_UPDATES attention request\n"); + drv_mb_param = STORM_FW_VERSION; rc = qed_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt, DRV_MSG_CODE_OV_UPDATE_STORM_FW_VER, diff --combined drivers/net/ethernet/qlogic/qed/qed_hsi.h index 21ec8091a24a,9b3ef00e5782..d4d08383c753 --- a/drivers/net/ethernet/qlogic/qed/qed_hsi.h +++ b/drivers/net/ethernet/qlogic/qed/qed_hsi.h @@@ -274,8 -274,7 +274,8 @@@ struct core_rx_start_ramrod_data u8 mf_si_mcast_accept_all; struct core_rx_action_on_error action_on_error; u8 gsi_offload_flag; - u8 reserved[6]; + u8 wipe_inner_vlan_pri_en; + u8 reserved[5]; };
/* Ramrod data for rx queue stop ramrod */ @@@ -352,8 -351,7 +352,8 @@@ struct core_tx_start_ramrod_data __le16 pbl_size; __le16 qm_pq_id; u8 gsi_offload_flag; - u8 resrved[3]; + u8 vport_id; + u8 resrved[2]; };
/* Ramrod data for tx queue stop ramrod */ @@@ -916,16 -914,6 +916,16 @@@ struct eth_rx_rate_limit __le16 reserved1; };
+/* Update RSS indirection table entry command */ +struct eth_tstorm_rss_update_data { + u8 valid; + u8 vport_id; + u8 ind_table_index; + u8 reserved; + __le16 ind_table_value; + __le16 reserved1; +}; + struct eth_ustorm_per_pf_stat { struct regpair rcv_lb_ucast_bytes; struct regpair rcv_lb_mcast_bytes; @@@ -1253,10 -1241,6 +1253,10 @@@ struct rl_update_ramrod_data u8 rl_id_first; u8 rl_id_last; u8 rl_dc_qcn_flg; + u8 dcqcn_reset_alpha_on_idle; + u8 rl_bc_stage_th; + u8 rl_timer_stage_th; + u8 reserved1; __le32 rl_bc_rate; __le16 rl_max_rate; __le16 rl_r_ai; @@@ -1265,7 -1249,7 +1265,7 @@@ __le32 dcqcn_k_us; __le32 dcqcn_timeuot_us; __le32 qcn_timeuot_us; - __le32 reserved[2]; + __le32 reserved2; };
/* Slowpath Element (SPQE) */ @@@ -3338,25 -3322,6 +3338,25 @@@ enum dbg_status qed_dbg_read_attn(struc enum dbg_status qed_dbg_print_attn(struct qed_hwfn *p_hwfn, struct dbg_attn_block_result *results);
+/******************************* Data Types **********************************/ + +struct mcp_trace_format { + u32 data; +#define MCP_TRACE_FORMAT_MODULE_MASK 0x0000ffff +#define MCP_TRACE_FORMAT_MODULE_SHIFT 0 +#define MCP_TRACE_FORMAT_LEVEL_MASK 0x00030000 +#define MCP_TRACE_FORMAT_LEVEL_SHIFT 16 +#define MCP_TRACE_FORMAT_P1_SIZE_MASK 0x000c0000 +#define MCP_TRACE_FORMAT_P1_SIZE_SHIFT 18 +#define MCP_TRACE_FORMAT_P2_SIZE_MASK 0x00300000 +#define MCP_TRACE_FORMAT_P2_SIZE_SHIFT 20 +#define MCP_TRACE_FORMAT_P3_SIZE_MASK 0x00c00000 +#define MCP_TRACE_FORMAT_P3_SIZE_SHIFT 22 +#define MCP_TRACE_FORMAT_LEN_MASK 0xff000000 +#define MCP_TRACE_FORMAT_LEN_SHIFT 24 + char *format_str; +}; + /******************************** Constants **********************************/
#define MAX_NAME_LEN 16 @@@ -3372,13 -3337,6 +3372,13 @@@ enum dbg_status qed_dbg_user_set_bin_ptr(const u8 * const bin_ptr);
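The mcp_trace_format structure added just above packs module, level, parameter sizes and record length into a single u32 through MASK/SHIFT pairs. Decoding is plain shift-and-mask; a compilable example where GET_FIELD is a local helper in the spirit of the driver's field accessors, not necessarily its exact macro:

#include <stdint.h>
#include <stdio.h>

#define MCP_TRACE_FORMAT_MODULE_MASK	0x0000ffff
#define MCP_TRACE_FORMAT_MODULE_SHIFT	0
#define MCP_TRACE_FORMAT_LEVEL_MASK	0x00030000
#define MCP_TRACE_FORMAT_LEVEL_SHIFT	16
#define MCP_TRACE_FORMAT_LEN_MASK	0xff000000
#define MCP_TRACE_FORMAT_LEN_SHIFT	24

#define GET_FIELD(v, f) (((v) & f##_MASK) >> f##_SHIFT)

int main(void)
{
	uint32_t data = 0x2a020005;	/* len=42, level=2, module=5 */

	printf("module=%u level=%u len=%u\n",
	       GET_FIELD(data, MCP_TRACE_FORMAT_MODULE),
	       GET_FIELD(data, MCP_TRACE_FORMAT_LEVEL),
	       GET_FIELD(data, MCP_TRACE_FORMAT_LEN));
	return 0;
}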
/** + * @brief qed_dbg_alloc_user_data - Allocates user debug data. + * + * @param p_hwfn - HW device data + */ +enum dbg_status qed_dbg_alloc_user_data(struct qed_hwfn *p_hwfn); + +/** * @brief qed_dbg_get_status_str - Returns a string for the specified status. * * @param status - a debug status code. @@@ -3423,7 -3381,8 +3423,7 @@@ enum dbg_status qed_print_idle_chk_resu u32 *num_warnings);
/** - * @brief qed_dbg_mcp_trace_set_meta_data - Sets a pointer to the MCP Trace - * meta data. + * @brief qed_dbg_mcp_trace_set_meta_data - Sets the MCP Trace meta data. * * Needed in case the MCP Trace dump doesn't contain the meta data (e.g. due to * no NVRAM access). @@@ -3431,8 -3390,7 +3431,8 @@@ * @param p_hwfn - HW device data * @param meta_buf - pointer to MCP Trace meta data */ -void qed_dbg_mcp_trace_set_meta_data(u32 *data, u32 size); +void qed_dbg_mcp_trace_set_meta_data(struct qed_hwfn *p_hwfn, + const u32 *meta_buf);
/** * @brief qed_get_mcp_trace_results_buf_size - Returns the required buffer size @@@ -3467,45 -3425,19 +3467,45 @@@ enum dbg_status qed_print_mcp_trace_res char *results_buf);
/** + * @brief qed_print_mcp_trace_results_cont - Prints MCP Trace results, and + * keeps the MCP trace meta data allocated, to support continuous MCP Trace + * parsing. After the continuous parsing ends, qed_mcp_trace_free_meta_data + * should be called to free the meta data. + * + * @param p_hwfn - HW device data + * @param dump_buf - mcp trace dump buffer, starting from the header. + * @param results_buf - buffer for printing the mcp trace results. + * + * @return error if the parsing fails, ok otherwise. + */ +enum dbg_status qed_print_mcp_trace_results_cont(struct qed_hwfn *p_hwfn, + u32 *dump_buf, + char *results_buf); + +/** * @brief qed_print_mcp_trace_line - Prints MCP Trace results for a single line * + * @param p_hwfn - HW device data * @param dump_buf - mcp trace dump buffer, starting from the header. * @param num_dumped_bytes - number of bytes that were dumped. * @param results_buf - buffer for printing the mcp trace results. * * @return error if the parsing fails, ok otherwise. */ -enum dbg_status qed_print_mcp_trace_line(u8 *dump_buf, +enum dbg_status qed_print_mcp_trace_line(struct qed_hwfn *p_hwfn, + u8 *dump_buf, u32 num_dumped_bytes, char *results_buf);
/** + * @brief qed_mcp_trace_free_meta_data - Frees the MCP Trace meta data. + * Should be called after continuous MCP Trace parsing. + * + * @param p_hwfn - HW device data + */ +void qed_mcp_trace_free_meta_data(struct qed_hwfn *p_hwfn); + +/** * @brief qed_get_reg_fifo_results_buf_size - Returns the required buffer size * for reg_fifo results (in bytes). *
+/* RSS indirection table entry update command per PF offset in TSTORM PF BAR0. + * Use eth_tstorm_rss_update_data for update. + */ +#define TSTORM_ETH_RSS_UPDATE_OFFSET(pf_id) \ + (IRO[30].base + ((pf_id) * IRO[30].m1)) +#define TSTORM_ETH_RSS_UPDATE_SIZE (IRO[30].size) + /* Xstorm queue zone */ #define XSTORM_ETH_QUEUE_ZONE_OFFSET(queue_id) \ - (IRO[30].base + ((queue_id) * IRO[30].m1)) -#define XSTORM_ETH_QUEUE_ZONE_SIZE (IRO[30].size) + (IRO[31].base + ((queue_id) * IRO[31].m1)) +#define XSTORM_ETH_QUEUE_ZONE_SIZE (IRO[31].size)
/* Ystorm cqe producer */ #define YSTORM_TOE_CQ_PROD_OFFSET(rss_id) \ - (IRO[31].base + ((rss_id) * IRO[31].m1)) -#define YSTORM_TOE_CQ_PROD_SIZE (IRO[31].size) + (IRO[32].base + ((rss_id) * IRO[32].m1)) +#define YSTORM_TOE_CQ_PROD_SIZE (IRO[32].size)
/* Ustorm cqe producer */ #define USTORM_TOE_CQ_PROD_OFFSET(rss_id) \ - (IRO[32].base + ((rss_id) * IRO[32].m1)) -#define USTORM_TOE_CQ_PROD_SIZE (IRO[32].size) + (IRO[33].base + ((rss_id) * IRO[33].m1)) +#define USTORM_TOE_CQ_PROD_SIZE (IRO[33].size)
/* Ustorm grq producer */ #define USTORM_TOE_GRQ_PROD_OFFSET(pf_id) \ - (IRO[33].base + ((pf_id) * IRO[33].m1)) -#define USTORM_TOE_GRQ_PROD_SIZE (IRO[33].size) + (IRO[34].base + ((pf_id) * IRO[34].m1)) +#define USTORM_TOE_GRQ_PROD_SIZE (IRO[34].size)
/* Tstorm cmdq-cons of given command queue-id */ #define TSTORM_SCSI_CMDQ_CONS_OFFSET(cmdq_queue_id) \ - (IRO[34].base + ((cmdq_queue_id) * IRO[34].m1)) -#define TSTORM_SCSI_CMDQ_CONS_SIZE (IRO[34].size) + (IRO[35].base + ((cmdq_queue_id) * IRO[35].m1)) +#define TSTORM_SCSI_CMDQ_CONS_SIZE (IRO[35].size)
/* Tstorm (reflects M-Storm) bdq-external-producer of given function ID, * BDqueue-id. */ #define TSTORM_SCSI_BDQ_EXT_PROD_OFFSET(func_id, bdq_id) \ - (IRO[35].base + ((func_id) * IRO[35].m1) + ((bdq_id) * IRO[35].m2)) -#define TSTORM_SCSI_BDQ_EXT_PROD_SIZE (IRO[35].size) + (IRO[36].base + ((func_id) * IRO[36].m1) + ((bdq_id) * IRO[36].m2)) +#define TSTORM_SCSI_BDQ_EXT_PROD_SIZE (IRO[36].size)
/* Mstorm bdq-external-producer of given BDQ resource ID, BDqueue-id */ #define MSTORM_SCSI_BDQ_EXT_PROD_OFFSET(func_id, bdq_id) \ - (IRO[36].base + ((func_id) * IRO[36].m1) + ((bdq_id) * IRO[36].m2)) -#define MSTORM_SCSI_BDQ_EXT_PROD_SIZE (IRO[36].size) + (IRO[37].base + ((func_id) * IRO[37].m1) + ((bdq_id) * IRO[37].m2)) +#define MSTORM_SCSI_BDQ_EXT_PROD_SIZE (IRO[37].size)
/* Tstorm iSCSI RX stats */ #define TSTORM_ISCSI_RX_STATS_OFFSET(pf_id) \ - (IRO[37].base + ((pf_id) * IRO[37].m1)) -#define TSTORM_ISCSI_RX_STATS_SIZE (IRO[37].size) + (IRO[38].base + ((pf_id) * IRO[38].m1)) +#define TSTORM_ISCSI_RX_STATS_SIZE (IRO[38].size)
/* Mstorm iSCSI RX stats */ #define MSTORM_ISCSI_RX_STATS_OFFSET(pf_id) \ - (IRO[38].base + ((pf_id) * IRO[38].m1)) -#define MSTORM_ISCSI_RX_STATS_SIZE (IRO[38].size) + (IRO[39].base + ((pf_id) * IRO[39].m1)) +#define MSTORM_ISCSI_RX_STATS_SIZE (IRO[39].size)
/* Ustorm iSCSI RX stats */ #define USTORM_ISCSI_RX_STATS_OFFSET(pf_id) \ - (IRO[39].base + ((pf_id) * IRO[39].m1)) -#define USTORM_ISCSI_RX_STATS_SIZE (IRO[39].size) + (IRO[40].base + ((pf_id) * IRO[40].m1)) +#define USTORM_ISCSI_RX_STATS_SIZE (IRO[40].size)
/* Xstorm iSCSI TX stats */ #define XSTORM_ISCSI_TX_STATS_OFFSET(pf_id) \ - (IRO[40].base + ((pf_id) * IRO[40].m1)) -#define XSTORM_ISCSI_TX_STATS_SIZE (IRO[40].size) + (IRO[41].base + ((pf_id) * IRO[41].m1)) +#define XSTORM_ISCSI_TX_STATS_SIZE (IRO[41].size)
/* Ystorm iSCSI TX stats */ #define YSTORM_ISCSI_TX_STATS_OFFSET(pf_id) \ - (IRO[41].base + ((pf_id) * IRO[41].m1)) -#define YSTORM_ISCSI_TX_STATS_SIZE (IRO[41].size) + (IRO[42].base + ((pf_id) * IRO[42].m1)) +#define YSTORM_ISCSI_TX_STATS_SIZE (IRO[42].size)
/* Pstorm iSCSI TX stats */ #define PSTORM_ISCSI_TX_STATS_OFFSET(pf_id) \ - (IRO[42].base + ((pf_id) * IRO[42].m1)) -#define PSTORM_ISCSI_TX_STATS_SIZE (IRO[42].size) + (IRO[43].base + ((pf_id) * IRO[43].m1)) +#define PSTORM_ISCSI_TX_STATS_SIZE (IRO[43].size)
/* Tstorm FCoE RX stats */ #define TSTORM_FCOE_RX_STATS_OFFSET(pf_id) \ - (IRO[43].base + ((pf_id) * IRO[43].m1)) -#define TSTORM_FCOE_RX_STATS_SIZE (IRO[43].size) + (IRO[44].base + ((pf_id) * IRO[44].m1)) +#define TSTORM_FCOE_RX_STATS_SIZE (IRO[44].size)
/* Pstorm FCoE TX stats */ #define PSTORM_FCOE_TX_STATS_OFFSET(pf_id) \ - (IRO[44].base + ((pf_id) * IRO[44].m1)) -#define PSTORM_FCOE_TX_STATS_SIZE (IRO[44].size) + (IRO[45].base + ((pf_id) * IRO[45].m1)) +#define PSTORM_FCOE_TX_STATS_SIZE (IRO[45].size)
/* Pstorm RDMA queue statistics */ #define PSTORM_RDMA_QUEUE_STAT_OFFSET(rdma_stat_counter_id) \ - (IRO[45].base + ((rdma_stat_counter_id) * IRO[45].m1)) -#define PSTORM_RDMA_QUEUE_STAT_SIZE (IRO[45].size) + (IRO[46].base + ((rdma_stat_counter_id) * IRO[46].m1)) +#define PSTORM_RDMA_QUEUE_STAT_SIZE (IRO[46].size)
/* Tstorm RDMA queue statistics */ #define TSTORM_RDMA_QUEUE_STAT_OFFSET(rdma_stat_counter_id) \ - (IRO[46].base + ((rdma_stat_counter_id) * IRO[46].m1)) -#define TSTORM_RDMA_QUEUE_STAT_SIZE (IRO[46].size) + (IRO[47].base + ((rdma_stat_counter_id) * IRO[47].m1)) +#define TSTORM_RDMA_QUEUE_STAT_SIZE (IRO[47].size)
/* Xstorm error level for assert */ #define XSTORM_RDMA_ASSERT_LEVEL_OFFSET(pf_id) \ - (IRO[47].base + ((pf_id) * IRO[47].m1)) -#define XSTORM_RDMA_ASSERT_LEVEL_SIZE (IRO[47].size) + (IRO[48].base + ((pf_id) * IRO[48].m1)) +#define XSTORM_RDMA_ASSERT_LEVEL_SIZE (IRO[48].size)
/* Ystorm error level for assert */ #define YSTORM_RDMA_ASSERT_LEVEL_OFFSET(pf_id) \ - (IRO[48].base + ((pf_id) * IRO[48].m1)) -#define YSTORM_RDMA_ASSERT_LEVEL_SIZE (IRO[48].size) + (IRO[49].base + ((pf_id) * IRO[49].m1)) +#define YSTORM_RDMA_ASSERT_LEVEL_SIZE (IRO[49].size)
/* Pstorm error level for assert */ #define PSTORM_RDMA_ASSERT_LEVEL_OFFSET(pf_id) \ - (IRO[49].base + ((pf_id) * IRO[49].m1)) -#define PSTORM_RDMA_ASSERT_LEVEL_SIZE (IRO[49].size) + (IRO[50].base + ((pf_id) * IRO[50].m1)) +#define PSTORM_RDMA_ASSERT_LEVEL_SIZE (IRO[50].size)
/* Tstorm error level for assert */ #define TSTORM_RDMA_ASSERT_LEVEL_OFFSET(pf_id) \ - (IRO[50].base + ((pf_id) * IRO[50].m1)) -#define TSTORM_RDMA_ASSERT_LEVEL_SIZE (IRO[50].size) + (IRO[51].base + ((pf_id) * IRO[51].m1)) +#define TSTORM_RDMA_ASSERT_LEVEL_SIZE (IRO[51].size)
/* Mstorm error level for assert */ #define MSTORM_RDMA_ASSERT_LEVEL_OFFSET(pf_id) \ - (IRO[51].base + ((pf_id) * IRO[51].m1)) -#define MSTORM_RDMA_ASSERT_LEVEL_SIZE (IRO[51].size) + (IRO[52].base + ((pf_id) * IRO[52].m1)) +#define MSTORM_RDMA_ASSERT_LEVEL_SIZE (IRO[52].size)
/* Ustorm error level for assert */ #define USTORM_RDMA_ASSERT_LEVEL_OFFSET(pf_id) \ - (IRO[52].base + ((pf_id) * IRO[52].m1)) -#define USTORM_RDMA_ASSERT_LEVEL_SIZE (IRO[52].size) + (IRO[53].base + ((pf_id) * IRO[53].m1)) +#define USTORM_RDMA_ASSERT_LEVEL_SIZE (IRO[53].size)
/* Xstorm iWARP rxmit stats */ #define XSTORM_IWARP_RXMIT_STATS_OFFSET(pf_id) \ - (IRO[53].base + ((pf_id) * IRO[53].m1)) -#define XSTORM_IWARP_RXMIT_STATS_SIZE (IRO[53].size) + (IRO[54].base + ((pf_id) * IRO[54].m1)) +#define XSTORM_IWARP_RXMIT_STATS_SIZE (IRO[54].size)
/* Tstorm RoCE Event Statistics */ #define TSTORM_ROCE_EVENTS_STAT_OFFSET(roce_pf_id) \ - (IRO[54].base + ((roce_pf_id) * IRO[54].m1)) -#define TSTORM_ROCE_EVENTS_STAT_SIZE (IRO[54].size) + (IRO[55].base + ((roce_pf_id) * IRO[55].m1)) +#define TSTORM_ROCE_EVENTS_STAT_SIZE (IRO[55].size)
/* DCQCN Received Statistics */ #define YSTORM_ROCE_DCQCN_RECEIVED_STATS_OFFSET(roce_pf_id) \ - (IRO[55].base + ((roce_pf_id) * IRO[55].m1)) -#define YSTORM_ROCE_DCQCN_RECEIVED_STATS_SIZE (IRO[55].size) + (IRO[56].base + ((roce_pf_id) * IRO[56].m1)) +#define YSTORM_ROCE_DCQCN_RECEIVED_STATS_SIZE (IRO[56].size)
/* RoCE Error Statistics */ #define YSTORM_ROCE_ERROR_STATS_OFFSET(roce_pf_id) \ - (IRO[56].base + ((roce_pf_id) * IRO[56].m1)) -#define YSTORM_ROCE_ERROR_STATS_SIZE (IRO[56].size) + (IRO[57].base + ((roce_pf_id) * IRO[57].m1)) +#define YSTORM_ROCE_ERROR_STATS_SIZE (IRO[57].size)
/* DCQCN Sent Statistics */ #define PSTORM_ROCE_DCQCN_SENT_STATS_OFFSET(roce_pf_id) \ - (IRO[57].base + ((roce_pf_id) * IRO[57].m1)) -#define PSTORM_ROCE_DCQCN_SENT_STATS_SIZE (IRO[57].size) + (IRO[58].base + ((roce_pf_id) * IRO[58].m1)) +#define PSTORM_ROCE_DCQCN_SENT_STATS_SIZE (IRO[58].size)
/* RoCE CQEs Statistics */ #define USTORM_ROCE_CQE_STATS_OFFSET(roce_pf_id) \ - (IRO[58].base + ((roce_pf_id) * IRO[58].m1)) -#define USTORM_ROCE_CQE_STATS_SIZE (IRO[58].size) + (IRO[59].base + ((roce_pf_id) * IRO[59].m1)) +#define USTORM_ROCE_CQE_STATS_SIZE (IRO[59].size)
-static const struct iro iro_arr[59] = { +static const struct iro iro_arr[60] = { {0x0, 0x0, 0x0, 0x0, 0x8}, {0x4cb8, 0x88, 0x0, 0x0, 0x88}, {0x6530, 0x20, 0x0, 0x0, 0x20}, @@@ -4536,14 -4461,14 +4536,14 @@@ {0x84, 0x8, 0x0, 0x0, 0x2}, {0x4c48, 0x0, 0x0, 0x0, 0x78}, {0x3e38, 0x0, 0x0, 0x0, 0x78}, - {0x2b78, 0x0, 0x0, 0x0, 0x78}, + {0x3ef8, 0x0, 0x0, 0x0, 0x78}, {0x4c40, 0x0, 0x0, 0x0, 0x78}, {0x4998, 0x0, 0x0, 0x0, 0x78}, {0x7f50, 0x0, 0x0, 0x0, 0x78}, {0xa28, 0x8, 0x0, 0x0, 0x8}, {0x6210, 0x10, 0x0, 0x0, 0x10}, {0xb820, 0x30, 0x0, 0x0, 0x30}, - {0x96c0, 0x30, 0x0, 0x0, 0x30}, + {0xa990, 0x30, 0x0, 0x0, 0x30}, {0x4b68, 0x80, 0x0, 0x0, 0x40}, {0x1f8, 0x4, 0x0, 0x0, 0x4}, {0x53a8, 0x80, 0x4, 0x0, 0x4}, @@@ -4551,12 -4476,11 +4551,12 @@@ {0x4ba8, 0x80, 0x0, 0x0, 0x20}, {0x8158, 0x40, 0x0, 0x0, 0x30}, {0xe770, 0x60, 0x0, 0x0, 0x60}, - {0x2d10, 0x80, 0x0, 0x0, 0x38}, - {0xf2b8, 0x78, 0x0, 0x0, 0x78}, + {0x4090, 0x80, 0x0, 0x0, 0x38}, + {0xfea8, 0x78, 0x0, 0x0, 0x78}, {0x1f8, 0x4, 0x0, 0x0, 0x4}, {0xaf20, 0x0, 0x0, 0x0, 0xf0}, {0xb010, 0x8, 0x0, 0x0, 0x8}, + {0xc00, 0x8, 0x0, 0x0, 0x8}, {0x1f8, 0x8, 0x0, 0x0, 0x8}, {0xac0, 0x8, 0x0, 0x0, 0x8}, {0x2578, 0x8, 0x0, 0x0, 0x8}, @@@ -4568,23 -4492,23 +4568,23 @@@ {0x12908, 0x18, 0x0, 0x0, 0x10}, {0x11aa8, 0x40, 0x0, 0x0, 0x18}, {0xa588, 0x50, 0x0, 0x0, 0x20}, - {0x8700, 0x40, 0x0, 0x0, 0x28}, - {0x10300, 0x18, 0x0, 0x0, 0x10}, + {0x8f00, 0x40, 0x0, 0x0, 0x28}, + {0x10e30, 0x18, 0x0, 0x0, 0x10}, {0xde48, 0x48, 0x0, 0x0, 0x38}, - {0x10768, 0x20, 0x0, 0x0, 0x20}, - {0x2d48, 0x80, 0x0, 0x0, 0x10}, + {0x11298, 0x20, 0x0, 0x0, 0x20}, + {0x40c8, 0x80, 0x0, 0x0, 0x10}, {0x5048, 0x10, 0x0, 0x0, 0x10}, {0xc748, 0x8, 0x0, 0x0, 0x1}, - {0xa128, 0x8, 0x0, 0x0, 0x1}, - {0x10f00, 0x8, 0x0, 0x0, 0x1}, + {0xa928, 0x8, 0x0, 0x0, 0x1}, + {0x11a30, 0x8, 0x0, 0x0, 0x1}, {0xf030, 0x8, 0x0, 0x0, 0x1}, {0x13028, 0x8, 0x0, 0x0, 0x1}, {0x12c58, 0x8, 0x0, 0x0, 0x1}, {0xc9b8, 0x30, 0x0, 0x0, 0x10}, {0xed90, 0x28, 0x0, 0x0, 0x28}, - {0xa520, 0x18, 0x0, 0x0, 0x18}, - {0xa6a0, 0x8, 0x0, 0x0, 0x8}, - {0x13108, 0x8, 0x0, 0x0, 0x8}, + {0xad20, 0x18, 0x0, 0x0, 0x18}, + {0xaea0, 0x8, 0x0, 0x0, 0x8}, + {0x13c38, 0x8, 0x0, 0x0, 0x8}, {0x13c50, 0x18, 0x0, 0x0, 0x18}, };
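The bulk renumbering above happens because inserting the RSS-update entry shifts every later IRO index and its offsets by one. Each OFFSET macro is just base-plus-stride arithmetic over one table entry; a compilable sketch borrowing one entry's numbers from iro_arr[] above, without claiming which macro indexes it:

#include <stdint.h>
#include <stdio.h>

struct iro { uint32_t base, m1, m2, m3, size; };

/* one entry's values taken from iro_arr[] above, for illustration */
static const struct iro example_iro = { 0x4cb8, 0x88, 0x0, 0x0, 0x88 };

static uint32_t storm_offset(const struct iro *e, uint32_t pf_id)
{
	return e->base + pf_id * e->m1;	/* OFFSET(pf_id) expansion */
}

int main(void)
{
	printf("pf 3 -> 0x%x\n", storm_offset(&example_iro, 3)); /* 0x4e50 */
	return 0;
}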
@@@ -5737,14 -5661,6 +5737,14 @@@ enum eth_filter_type MAX_ETH_FILTER_TYPE };
+/* inner to inner vlan priority translation configurations */ +struct eth_in_to_in_pri_map_cfg { + u8 inner_vlan_pri_remap_en; + u8 reserved[7]; + u8 non_rdma_in_to_in_pri_map[8]; + u8 rdma_in_to_in_pri_map[8]; +}; + /* Eth IPv4 Fragment Type */ enum eth_ipv4_frag_type { ETH_IPV4_NOT_FRAG, @@@ -6102,14 -6018,6 +6102,14 @@@ struct tx_queue_update_ramrod_data struct regpair reserved1[5]; };
+/* Inner to Inner VLAN priority map update mode */ +enum update_in_to_in_pri_map_mode_enum { + ETH_IN_TO_IN_PRI_MAP_UPDATE_DISABLED, + ETH_IN_TO_IN_PRI_MAP_UPDATE_NON_RDMA_TBL, + ETH_IN_TO_IN_PRI_MAP_UPDATE_RDMA_TBL, + MAX_UPDATE_IN_TO_IN_PRI_MAP_MODE_ENUM +}; + /* Ramrod data for vport update ramrod */ struct vport_filter_update_ramrod_data { struct eth_filter_cmd_header filter_cmd_hdr; @@@ -6140,8 -6048,7 +6140,8 @@@ struct vport_start_ramrod_data u8 zero_placement_offset; u8 ctl_frame_mac_check_en; u8 ctl_frame_ethtype_check_en; - u8 reserved[1]; + u8 wipe_inner_vlan_pri_en; + struct eth_in_to_in_pri_map_cfg in_to_in_vlan_pri_map_cfg; };
/* Ramrod data for vport stop ramrod */ @@@ -6193,9 -6100,7 +6193,9 @@@ struct vport_update_ramrod_data_cmn u8 update_ctl_frame_checks_en_flg; u8 ctl_frame_mac_check_en; u8 ctl_frame_ethtype_check_en; - u8 reserved[15]; + u8 update_in_to_in_pri_map_mode; + u8 in_to_in_pri_map[8]; + u8 reserved[6]; };
struct vport_update_ramrod_mcast { @@@ -7024,6 -6929,11 +7024,6 @@@ struct mstorm_rdma_task_st_ctx struct regpair temp[4]; };
-/* The roce task context of Ustorm */ -struct ustorm_rdma_task_st_ctx { - struct regpair temp[2]; -}; - struct e4_ustorm_rdma_task_ag_ctx { u8 reserved; u8 state; @@@ -7097,6 -7007,8 +7097,6 @@@ struct e4_rdma_task_context struct e4_mstorm_rdma_task_ag_ctx mstorm_ag_context; struct mstorm_rdma_task_st_ctx mstorm_st_context; struct rdif_task_context rdif_context; - struct ustorm_rdma_task_st_ctx ustorm_st_context; - struct regpair ustorm_st_padding[2]; struct e4_ustorm_rdma_task_ag_ctx ustorm_ag_context; };
@@@ -7476,7 -7388,7 +7476,7 @@@ struct e4_ustorm_rdma_conn_ag_ctx #define E4_USTORM_RDMA_CONN_AG_CTX_RULE8EN_MASK 0x1 #define E4_USTORM_RDMA_CONN_AG_CTX_RULE8EN_SHIFT 7 u8 byte2; - u8 byte3; + u8 nvmf_only; __le16 conn_dpi; __le16 word1; __le32 cq_cons; @@@ -7919,12 -7831,7 +7919,12 @@@ struct roce_create_qp_req_ramrod_data struct regpair qp_handle_for_cqe; struct regpair qp_handle_for_async; u8 stats_counter_id; - u8 reserved3[7]; + u8 reserved3[6]; + u8 flags2; +#define ROCE_CREATE_QP_REQ_RAMROD_DATA_EDPM_MODE_MASK 0x1 +#define ROCE_CREATE_QP_REQ_RAMROD_DATA_EDPM_MODE_SHIFT 0 +#define ROCE_CREATE_QP_REQ_RAMROD_DATA_RESERVED_MASK 0x7F +#define ROCE_CREATE_QP_REQ_RAMROD_DATA_RESERVED_SHIFT 1 __le16 regular_latency_phy_queue; __le16 dpi; }; @@@ -8047,7 -7954,6 +8047,7 @@@ enum roce_event_opcode ROCE_EVENT_DESTROY_QP, ROCE_EVENT_CREATE_UD_QP, ROCE_EVENT_DESTROY_UD_QP, + ROCE_EVENT_FUNC_UPDATE, MAX_ROCE_EVENT_OPCODE };
@@@ -8056,13 -7962,7 +8056,13 @@@ struct roce_init_func_params u8 ll2_queue_id; u8 cnp_vlan_priority; u8 cnp_dscp; - u8 reserved; + u8 flags; +#define ROCE_INIT_FUNC_PARAMS_DCQCN_NP_EN_MASK 0x1 +#define ROCE_INIT_FUNC_PARAMS_DCQCN_NP_EN_SHIFT 0 +#define ROCE_INIT_FUNC_PARAMS_DCQCN_RP_EN_MASK 0x1 +#define ROCE_INIT_FUNC_PARAMS_DCQCN_RP_EN_SHIFT 1 +#define ROCE_INIT_FUNC_PARAMS_RESERVED0_MASK 0x3F +#define ROCE_INIT_FUNC_PARAMS_RESERVED0_SHIFT 2 __le32 cnp_send_timeout; __le16 rl_offset; u8 rl_count_log; @@@ -8209,24 -8109,9 +8209,24 @@@ enum roce_ramrod_cmd_id ROCE_RAMROD_DESTROY_QP, ROCE_RAMROD_CREATE_UD_QP, ROCE_RAMROD_DESTROY_UD_QP, + ROCE_RAMROD_FUNC_UPDATE, MAX_ROCE_RAMROD_CMD_ID };
+/* RoCE func update ramrod data */ +struct roce_update_func_params { + u8 cnp_vlan_priority; + u8 cnp_dscp; + __le16 flags; +#define ROCE_UPDATE_FUNC_PARAMS_DCQCN_NP_EN_MASK 0x1 +#define ROCE_UPDATE_FUNC_PARAMS_DCQCN_NP_EN_SHIFT 0 +#define ROCE_UPDATE_FUNC_PARAMS_DCQCN_RP_EN_MASK 0x1 +#define ROCE_UPDATE_FUNC_PARAMS_DCQCN_RP_EN_SHIFT 1 +#define ROCE_UPDATE_FUNC_PARAMS_RESERVED0_MASK 0x3FFF +#define ROCE_UPDATE_FUNC_PARAMS_RESERVED0_SHIFT 2 + __le32 cnp_send_timeout; +}; + struct e4_xstorm_roce_conn_ag_ctx_dq_ext_ld_part { u8 reserved0; u8 state; @@@ -12529,6 -12414,7 +12529,7 @@@ struct public_drv_mb #define DRV_MSG_SET_RESOURCE_VALUE_MSG 0x35000000 #define DRV_MSG_CODE_OV_UPDATE_WOL 0x38000000 #define DRV_MSG_CODE_OV_UPDATE_ESWITCH_MODE 0x39000000 + #define DRV_MSG_CODE_GET_OEM_UPDATES 0x41000000
#define DRV_MSG_CODE_BW_UPDATE_ACK 0x32000000 #define DRV_MSG_CODE_NIG_DRAIN 0x30000000 @@@ -12656,6 -12542,9 +12657,9 @@@ #define DRV_MB_PARAM_ESWITCH_MODE_VEB 0x1 #define DRV_MB_PARAM_ESWITCH_MODE_VEPA 0x2
+ #define DRV_MB_PARAM_DUMMY_OEM_UPDATES_MASK 0x1 + #define DRV_MB_PARAM_DUMMY_OEM_UPDATES_OFFSET 0 + #define DRV_MB_PARAM_SET_LED_MODE_OPER 0x0 #define DRV_MB_PARAM_SET_LED_MODE_ON 0x1 #define DRV_MB_PARAM_SET_LED_MODE_OFF 0x2 diff --combined drivers/net/ethernet/realtek/r8169.c index f882be49f493,ab30aaeac6d3..ed8ffd498c88 --- a/drivers/net/ethernet/realtek/r8169.c +++ b/drivers/net/ethernet/realtek/r8169.c @@@ -77,6 -77,8 +77,6 @@@ static const int multicast_filter_limi #define R8169_TX_RING_BYTES (NUM_TX_DESC * sizeof(struct TxDesc)) #define R8169_RX_RING_BYTES (NUM_RX_DESC * sizeof(struct RxDesc))
-#define RTL8169_TX_TIMEOUT (6*HZ) - /* write/read MMIO register */ #define RTL_W8(tp, reg, val8) writeb((val8), tp->mmio_addr + (reg)) #define RTL_W16(tp, reg, val16) writew((val16), tp->mmio_addr + (reg)) @@@ -1352,8 -1354,7 +1352,8 @@@ static void rtl_irq_enable_all(struct r static void rtl8169_irq_mask_and_ack(struct rtl8169_private *tp) { rtl_irq_disable(tp); - rtl_ack_events(tp, RTL_EVENT_NAPI | tp->event_slow); + rtl_ack_events(tp, 0xffff); + /* PCI commit */ RTL_R8(tp, ChipCmd); }
@@@ -4047,17 -4048,38 +4047,26 @@@ static void rtl8169_init_phy(struct net rtl_hw_phy_config(dev);
if (tp->mac_version <= RTL_GIGA_MAC_VER_06) { - netif_dbg(tp, drv, dev, - "Set MAC Reg C+CR Offset 0x82h = 0x01h\n"); - RTL_W8(tp, 0x82, 0x01); - } - - pci_write_config_byte(tp->pci_dev, PCI_LATENCY_TIMER, 0x40); - - if (tp->mac_version <= RTL_GIGA_MAC_VER_06) + pci_write_config_byte(tp->pci_dev, PCI_LATENCY_TIMER, 0x40); pci_write_config_byte(tp->pci_dev, PCI_CACHE_LINE_SIZE, 0x08); - - if (tp->mac_version == RTL_GIGA_MAC_VER_02) { netif_dbg(tp, drv, dev, "Set MAC Reg C+CR Offset 0x82h = 0x01h\n"); RTL_W8(tp, 0x82, 0x01); - netif_dbg(tp, drv, dev, - "Set PHY Reg 0x0bh = 0x00h\n"); - rtl_writephy(tp, 0x0b, 0x0000); //w 0x0b 15 0 0 }
/* We may have called phy_speed_down before */ phy_speed_up(dev->phydev);
genphy_soft_reset(dev->phydev); + + /* It was reported that chip version 33 ends up with 10MBit/Half on a + * 1GBit link after resuming from S3. For whatever reason the PHY on + * this chip doesn't properly start a renegotiation when soft-reset. + * Explicitly requesting a renegotiation fixes this. + */ + if (tp->mac_version == RTL_GIGA_MAC_VER_33 && + dev->phydev->autoneg == AUTONEG_ENABLE) + phy_restart_aneg(dev->phydev); }
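The VER_33 fix above has the usual driver-quirk shape: gate the workaround on the exact affected revision and on the configuration that exposes it (autoneg enabled), leaving every other chip on the plain path. A minimal compilable model with illustrative names:

#include <stdio.h>

enum { AUTONEG_DISABLE, AUTONEG_ENABLE };

struct phy_model { int autoneg; };

static void restart_aneg(void)
{
	printf("renegotiation restarted\n");
}

static void post_soft_reset_fixup(int mac_version, struct phy_model *p)
{
	/* only chip version 33 with autoneg enabled needs the kick */
	if (mac_version == 33 && p->autoneg == AUTONEG_ENABLE)
		restart_aneg();
}

int main(void)
{
	struct phy_model phy = { AUTONEG_ENABLE };

	post_soft_reset_fixup(33, &phy);
	return 0;
}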
static void rtl_rar_set(struct rtl8169_private *tp, u8 *addr) @@@ -7346,9 -7368,11 +7355,9 @@@ static int rtl_init_one(struct pci_dev
tp->cp_cmd = RTL_R16(tp, CPlusCmd);
- if ((sizeof(dma_addr_t) > 4) && - (use_dac == 1 || (use_dac == -1 && pci_is_pcie(pdev) && - tp->mac_version >= RTL_GIGA_MAC_VER_18)) && - !pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) && - !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) { + if (sizeof(dma_addr_t) > 4 && (use_dac == 1 || (use_dac == -1 && + tp->mac_version >= RTL_GIGA_MAC_VER_18)) && + !dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) {
/* CPlusCmd Dual Access Cycle is only needed for non-PCIe */ if (!pci_is_pcie(pdev)) @@@ -7364,12 -7388,14 +7373,12 @@@
rtl_init_rxcfg(tp);
- rtl_irq_disable(tp); + rtl8169_irq_mask_and_ack(tp);
rtl_hw_initialize(tp);
rtl_hw_reset(tp);
- rtl_ack_events(tp, 0xffff); - pci_set_master(pdev);
rtl_init_mdio_ops(tp); @@@ -7409,6 -7435,7 +7418,6 @@@ dev->dev_addr[i] = RTL_R8(tp, MAC0 + i);
dev->ethtool_ops = &rtl8169_ethtool_ops; - dev->watchdog_timeo = RTL8169_TX_TIMEOUT;
netif_napi_add(dev, &tp->napi, rtl8169_poll, NAPI_POLL_WEIGHT);
diff --combined drivers/net/ethernet/renesas/ravb.h index b2b18036380e,9b6bf557a2f5..1c6e4df94f01 --- a/drivers/net/ethernet/renesas/ravb.h +++ b/drivers/net/ethernet/renesas/ravb.h @@@ -428,6 -428,7 +428,7 @@@ enum EIS_BIT EIS_CULF1 = 0x00000080, EIS_TFFF = 0x00000100, EIS_QFS = 0x00010000, + EIS_RESERVED = (GENMASK(31, 17) | GENMASK(15, 11)), };
/* RIC0 */ @@@ -472,6 -473,7 +473,7 @@@ enum RIS0_BIT RIS0_FRF15 = 0x00008000, RIS0_FRF16 = 0x00010000, RIS0_FRF17 = 0x00020000, + RIS0_RESERVED = GENMASK(31, 18), };
/* RIC1 */ @@@ -528,6 -530,7 +530,7 @@@ enum RIS2_BIT RIS2_QFF16 = 0x00010000, RIS2_QFF17 = 0x00020000, RIS2_RFFF = 0x80000000, + RIS2_RESERVED = GENMASK(30, 18), };
/* TIC */ @@@ -544,6 -547,7 +547,7 @@@ enum TIS_BIT TIS_FTF1 = 0x00000002, /* Undocumented? */ TIS_TFUF = 0x00000100, TIS_TFWF = 0x00000200, + TIS_RESERVED = (GENMASK(31, 20) | GENMASK(15, 12) | GENMASK(7, 4)) };
/* ISS */ @@@ -617,6 -621,7 +621,7 @@@ enum GIC_BIT enum GIS_BIT { GIS_PTCF = 0x00000001, /* Undocumented? */ GIS_PTMF = 0x00000004, + GIS_RESERVED = GENMASK(15, 10), };
/* GIE (R-Car Gen3 only) */ @@@ -954,10 -959,7 +959,10 @@@ enum RAVB_QUEUE #define RX_QUEUE_OFFSET 4 #define NUM_RX_QUEUE 2 #define NUM_TX_QUEUE 2 -#define NUM_TX_DESC 2 /* TX descriptors per packet */ + +/* TX descriptors per packet */ +#define NUM_TX_DESC_GEN2 2 +#define NUM_TX_DESC_GEN3 1
struct ravb_tstamp_skb { struct list_head list; @@@ -1036,7 -1038,6 +1041,7 @@@ struct ravb_private unsigned no_avb_link:1; unsigned avb_link_active_low:1; unsigned wol_enabled:1; + int num_tx_desc; /* TX descriptors per packet */ };
static inline u32 ravb_read(struct net_device *ndev, enum ravb_reg reg) diff --combined drivers/net/ethernet/renesas/ravb_main.c index 61d125750db9,d6f753925352..defed0d0c51d --- a/drivers/net/ethernet/renesas/ravb_main.c +++ b/drivers/net/ethernet/renesas/ravb_main.c @@@ -182,7 -182,6 +182,7 @@@ static int ravb_tx_free(struct net_devi { struct ravb_private *priv = netdev_priv(ndev); struct net_device_stats *stats = &priv->stats[q]; + int num_tx_desc = priv->num_tx_desc; struct ravb_tx_desc *desc; int free_num = 0; int entry; @@@ -192,7 -191,7 +192,7 @@@ bool txed;
entry = priv->dirty_tx[q] % (priv->num_tx_ring[q] * - NUM_TX_DESC); + num_tx_desc); desc = &priv->tx_ring[q][entry]; txed = desc->die_dt == DT_FEMPTY; if (free_txed_only && !txed) @@@ -201,12 -200,12 +201,12 @@@ dma_rmb(); size = le16_to_cpu(desc->ds_tagl) & TX_DS; /* Free the original skb. */ - if (priv->tx_skb[q][entry / NUM_TX_DESC]) { + if (priv->tx_skb[q][entry / num_tx_desc]) { dma_unmap_single(ndev->dev.parent, le32_to_cpu(desc->dptr), size, DMA_TO_DEVICE); /* Last packet descriptor? */ - if (entry % NUM_TX_DESC == NUM_TX_DESC - 1) { - entry /= NUM_TX_DESC; + if (entry % num_tx_desc == num_tx_desc - 1) { + entry /= num_tx_desc; dev_kfree_skb_any(priv->tx_skb[q][entry]); priv->tx_skb[q][entry] = NULL; if (txed) @@@ -225,7 -224,6 +225,7 @@@ static void ravb_ring_free(struct net_device *ndev, int q) { struct ravb_private *priv = netdev_priv(ndev); + int num_tx_desc = priv->num_tx_desc; int ring_size; int i;
@@@ -251,7 -249,7 +251,7 @@@ ravb_tx_free(ndev, q, false);
ring_size = sizeof(struct ravb_tx_desc) * - (priv->num_tx_ring[q] * NUM_TX_DESC + 1); + (priv->num_tx_ring[q] * num_tx_desc + 1); dma_free_coherent(ndev->dev.parent, ring_size, priv->tx_ring[q], priv->tx_desc_dma[q]); priv->tx_ring[q] = NULL; @@@ -280,13 -278,12 +280,13 @@@ static void ravb_ring_format(struct net_device *ndev, int q) { struct ravb_private *priv = netdev_priv(ndev); + int num_tx_desc = priv->num_tx_desc; struct ravb_ex_rx_desc *rx_desc; struct ravb_tx_desc *tx_desc; struct ravb_desc *desc; int rx_ring_size = sizeof(*rx_desc) * priv->num_rx_ring[q]; int tx_ring_size = sizeof(*tx_desc) * priv->num_tx_ring[q] * - NUM_TX_DESC; + num_tx_desc; dma_addr_t dma_addr; int i;
@@@ -321,10 -318,8 +321,10 @@@ for (i = 0, tx_desc = priv->tx_ring[q]; i < priv->num_tx_ring[q]; i++, tx_desc++) { tx_desc->die_dt = DT_EEMPTY; - tx_desc++; - tx_desc->die_dt = DT_EEMPTY; + if (num_tx_desc > 1) { + tx_desc++; + tx_desc->die_dt = DT_EEMPTY; + } } tx_desc->dptr = cpu_to_le32((u32)priv->tx_desc_dma[q]); tx_desc->die_dt = DT_LINKFIX; /* type */ @@@ -344,7 -339,6 +344,7 @@@ static int ravb_ring_init(struct net_device *ndev, int q) { struct ravb_private *priv = netdev_priv(ndev); + int num_tx_desc = priv->num_tx_desc; struct sk_buff *skb; int ring_size; int i; @@@ -368,13 -362,11 +368,13 @@@ priv->rx_skb[q][i] = skb; }
- /* Allocate rings for the aligned buffers */ - priv->tx_align[q] = kmalloc(DPTR_ALIGN * priv->num_tx_ring[q] + - DPTR_ALIGN - 1, GFP_KERNEL); - if (!priv->tx_align[q]) - goto error; + if (num_tx_desc > 1) { + /* Allocate rings for the aligned buffers */ + priv->tx_align[q] = kmalloc(DPTR_ALIGN * priv->num_tx_ring[q] + + DPTR_ALIGN - 1, GFP_KERNEL); + if (!priv->tx_align[q]) + goto error; + }
/* Allocate all RX descriptors. */ ring_size = sizeof(struct ravb_ex_rx_desc) * (priv->num_rx_ring[q] + 1); @@@ -388,7 -380,7 +388,7 @@@
/* Allocate all TX descriptors. */ ring_size = sizeof(struct ravb_tx_desc) * - (priv->num_tx_ring[q] * NUM_TX_DESC + 1); + (priv->num_tx_ring[q] * num_tx_desc + 1); priv->tx_ring[q] = dma_alloc_coherent(ndev->dev.parent, ring_size, &priv->tx_desc_dma[q], GFP_KERNEL); @@@ -747,10 -739,11 +747,11 @@@ static void ravb_error_interrupt(struc u32 eis, ris2;
eis = ravb_read(ndev, EIS); - ravb_write(ndev, ~EIS_QFS, EIS); + ravb_write(ndev, ~(EIS_QFS | EIS_RESERVED), EIS); if (eis & EIS_QFS) { ris2 = ravb_read(ndev, RIS2); - ravb_write(ndev, ~(RIS2_QFF0 | RIS2_RFFF), RIS2); + ravb_write(ndev, ~(RIS2_QFF0 | RIS2_RFFF | RIS2_RESERVED), + RIS2);
/* Receive Descriptor Empty int */ if (ris2 & RIS2_QFF0) @@@ -803,7 -796,7 +804,7 @@@ static bool ravb_timestamp_interrupt(st u32 tis = ravb_read(ndev, TIS);
if (tis & TIS_TFUF) { - ravb_write(ndev, ~TIS_TFUF, TIS); + ravb_write(ndev, ~(TIS_TFUF | TIS_RESERVED), TIS); ravb_get_tx_tstamp(ndev); return true; } @@@ -938,7 -931,7 +939,7 @@@ static int ravb_poll(struct napi_struc /* Processing RX Descriptor Ring */ if (ris0 & mask) { /* Clear RX interrupt */ - ravb_write(ndev, ~mask, RIS0); + ravb_write(ndev, ~(mask | RIS0_RESERVED), RIS0); if (ravb_rx(ndev, "a, q)) goto out; } @@@ -946,7 -939,7 +947,7 @@@ if (tis & mask) { spin_lock_irqsave(&priv->lock, flags); /* Clear TX interrupt */ - ravb_write(ndev, ~mask, TIS); + ravb_write(ndev, ~(mask | TIS_RESERVED), TIS); ravb_tx_free(ndev, q, true); netif_wake_subqueue(ndev, q); mmiowb(); @@@ -1081,11 -1074,8 +1082,11 @@@ static int ravb_phy_init(struct net_dev netdev_info(ndev, "limited PHY to 100Mbit/s\n"); }
- /* 10BASE is not supported */ - phydev->supported &= ~PHY_10BT_FEATURES; + /* 10BASE, Pause and Asym Pause are not supported */ + phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_10baseT_Half_BIT); + phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_10baseT_Full_BIT); + phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_Pause_BIT); + phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_Asym_Pause_BIT);
phy_attached_info(phydev);
@@@ -1495,7 -1485,6 +1496,7 @@@ static void ravb_tx_timeout_work(struc static netdev_tx_t ravb_start_xmit(struct sk_buff *skb, struct net_device *ndev) { struct ravb_private *priv = netdev_priv(ndev); + int num_tx_desc = priv->num_tx_desc; u16 q = skb_get_queue_mapping(skb); struct ravb_tstamp_skb *ts_skb; struct ravb_tx_desc *desc; @@@ -1507,7 -1496,7 +1508,7 @@@
spin_lock_irqsave(&priv->lock, flags); if (priv->cur_tx[q] - priv->dirty_tx[q] > (priv->num_tx_ring[q] - 1) * - NUM_TX_DESC) { + num_tx_desc) { netif_err(priv, tx_queued, ndev, "still transmitting with the full ring!\n"); netif_stop_subqueue(ndev, q); @@@ -1518,55 -1507,41 +1519,55 @@@ if (skb_put_padto(skb, ETH_ZLEN)) goto exit;
- entry = priv->cur_tx[q] % (priv->num_tx_ring[q] * NUM_TX_DESC); - priv->tx_skb[q][entry / NUM_TX_DESC] = skb; - - buffer = PTR_ALIGN(priv->tx_align[q], DPTR_ALIGN) + - entry / NUM_TX_DESC * DPTR_ALIGN; - len = PTR_ALIGN(skb->data, DPTR_ALIGN) - skb->data; - /* Zero length DMA descriptors are problematic as they seem to - * terminate DMA transfers. Avoid them by simply using a length of - * DPTR_ALIGN (4) when skb data is aligned to DPTR_ALIGN. - * - * As skb is guaranteed to have at least ETH_ZLEN (60) bytes of - * data by the call to skb_put_padto() above this is safe with - * respect to both the length of the first DMA descriptor (len) - * overflowing the available data and the length of the second DMA - * descriptor (skb->len - len) being negative. - */ - if (len == 0) - len = DPTR_ALIGN; + entry = priv->cur_tx[q] % (priv->num_tx_ring[q] * num_tx_desc); + priv->tx_skb[q][entry / num_tx_desc] = skb; + + if (num_tx_desc > 1) { + buffer = PTR_ALIGN(priv->tx_align[q], DPTR_ALIGN) + + entry / num_tx_desc * DPTR_ALIGN; + len = PTR_ALIGN(skb->data, DPTR_ALIGN) - skb->data; + + /* Zero length DMA descriptors are problematic as they seem + * to terminate DMA transfers. Avoid them by simply using a + * length of DPTR_ALIGN (4) when skb data is aligned to + * DPTR_ALIGN. + * + * As skb is guaranteed to have at least ETH_ZLEN (60) + * bytes of data by the call to skb_put_padto() above this + * is safe with respect to both the length of the first DMA + * descriptor (len) overflowing the available data and the + * length of the second DMA descriptor (skb->len - len) + * being negative. + */ + if (len == 0) + len = DPTR_ALIGN;
- memcpy(buffer, skb->data, len); - dma_addr = dma_map_single(ndev->dev.parent, buffer, len, DMA_TO_DEVICE); - if (dma_mapping_error(ndev->dev.parent, dma_addr)) - goto drop; + memcpy(buffer, skb->data, len); + dma_addr = dma_map_single(ndev->dev.parent, buffer, len, + DMA_TO_DEVICE); + if (dma_mapping_error(ndev->dev.parent, dma_addr)) + goto drop;
- desc = &priv->tx_ring[q][entry]; - desc->ds_tagl = cpu_to_le16(len); - desc->dptr = cpu_to_le32(dma_addr); + desc = &priv->tx_ring[q][entry]; + desc->ds_tagl = cpu_to_le16(len); + desc->dptr = cpu_to_le32(dma_addr);
- buffer = skb->data + len; - len = skb->len - len; - dma_addr = dma_map_single(ndev->dev.parent, buffer, len, DMA_TO_DEVICE); - if (dma_mapping_error(ndev->dev.parent, dma_addr)) - goto unmap; + buffer = skb->data + len; + len = skb->len - len; + dma_addr = dma_map_single(ndev->dev.parent, buffer, len, + DMA_TO_DEVICE); + if (dma_mapping_error(ndev->dev.parent, dma_addr)) + goto unmap;
- desc++; + desc++; + } else { + desc = &priv->tx_ring[q][entry]; + len = skb->len; + dma_addr = dma_map_single(ndev->dev.parent, skb->data, skb->len, + DMA_TO_DEVICE); + if (dma_mapping_error(ndev->dev.parent, dma_addr)) + goto drop; + } desc->ds_tagl = cpu_to_le16(len); desc->dptr = cpu_to_le32(dma_addr);
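The xmit path above now branches on num_tx_desc: R-Car Gen2 keeps the two-descriptor scheme (aligned header copy plus payload), while Gen3 uses a single descriptor per packet. The index arithmetic that makes both layouts work is worth spelling out; a toy model in plain C, with no driver structures:

	#include <stdio.h>

	int main(void)
	{
		const unsigned int ring = 4;

		for (unsigned int num_tx_desc = 1; num_tx_desc <= 2; num_tx_desc++) {
			printf("num_tx_desc=%u:\n", num_tx_desc);
			for (unsigned int cur_tx = 0; cur_tx < 8; cur_tx++) {
				unsigned int entry = cur_tx % (ring * num_tx_desc);

				printf("  cur_tx=%u -> desc entry %u, skb slot %u\n",
				       cur_tx, entry, entry / num_tx_desc);
			}
		}
		return 0;
	}

With num_tx_desc == 2, descriptor entries 0/1 share skb slot 0, entries 2/3 share slot 1, and so on; with num_tx_desc == 1 the division is a no-op.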
@@@ -1574,11 -1549,9 +1575,11 @@@ if (q == RAVB_NC) { ts_skb = kmalloc(sizeof(*ts_skb), GFP_ATOMIC); if (!ts_skb) { - desc--; - dma_unmap_single(ndev->dev.parent, dma_addr, len, - DMA_TO_DEVICE); + if (num_tx_desc > 1) { + desc--; + dma_unmap_single(ndev->dev.parent, dma_addr, + len, DMA_TO_DEVICE); + } goto unmap; } ts_skb->skb = skb; @@@ -1595,18 -1568,15 +1596,18 @@@ skb_tx_timestamp(skb); /* Descriptor type must be set after all the above writes */ dma_wmb(); - desc->die_dt = DT_FEND; - desc--; - desc->die_dt = DT_FSTART; - + if (num_tx_desc > 1) { + desc->die_dt = DT_FEND; + desc--; + desc->die_dt = DT_FSTART; + } else { + desc->die_dt = DT_FSINGLE; + } ravb_modify(ndev, TCCR, TCCR_TSRQ0 << q, TCCR_TSRQ0 << q);
- priv->cur_tx[q] += NUM_TX_DESC; + priv->cur_tx[q] += num_tx_desc; if (priv->cur_tx[q] - priv->dirty_tx[q] > - (priv->num_tx_ring[q] - 1) * NUM_TX_DESC && + (priv->num_tx_ring[q] - 1) * num_tx_desc && !ravb_tx_free(ndev, q, true)) netif_stop_subqueue(ndev, q);
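The occupancy check above relies on cur_tx and dirty_tx being free-running unsigned counters, so their difference gives the number of in-flight descriptors even across wraparound. A sketch of just that test (illustrative, not the driver code):

	#include <stdio.h>

	static int ring_full(unsigned int cur, unsigned int dirty,
			     unsigned int ring, unsigned int num_tx_desc)
	{
		return cur - dirty > (ring - 1) * num_tx_desc;
	}

	int main(void)
	{
		/* 4-entry ring, one descriptor per packet: full at 4 in flight. */
		printf("%d\n", ring_full(3, 0, 4, 1));		/* 0: still room */
		printf("%d\n", ring_full(4, 0, 4, 1));		/* 1: full */
		/* Wraparound: the counters are free-running u32 values. */
		printf("%d\n", ring_full(2, 0xfffffffeu, 4, 1)); /* 1: 4 in flight */
		return 0;
	}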
@@@ -1620,7 -1590,7 +1621,7 @@@ unmap le16_to_cpu(desc->ds_tagl), DMA_TO_DEVICE); drop: dev_kfree_skb_any(skb); - priv->tx_skb[q][entry / NUM_TX_DESC] = NULL; + priv->tx_skb[q][entry / num_tx_desc] = NULL; goto exit; }
@@@ -2106,9 -2076,6 +2107,9 @@@ static int ravb_probe(struct platform_d ndev->max_mtu = 2048 - (ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN); ndev->min_mtu = ETH_MIN_MTU;
+ priv->num_tx_desc = chip_id == RCAR_GEN2 ? + NUM_TX_DESC_GEN2 : NUM_TX_DESC_GEN3; + /* Set function */ ndev->netdev_ops = &ravb_netdev_ops; ndev->ethtool_ops = &ravb_ethtool_ops; diff --combined drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index 3715a0a4af3c,75896d6ba6e2..076a8be18d67 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@@ -148,12 -148,14 +148,14 @@@ static void stmmac_verify_args(void static void stmmac_disable_all_queues(struct stmmac_priv *priv) { u32 rx_queues_cnt = priv->plat->rx_queues_to_use; + u32 tx_queues_cnt = priv->plat->tx_queues_to_use; + u32 maxq = max(rx_queues_cnt, tx_queues_cnt); u32 queue;
- for (queue = 0; queue < rx_queues_cnt; queue++) { - struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue]; + for (queue = 0; queue < maxq; queue++) { + struct stmmac_channel *ch = &priv->channel[queue];
- napi_disable(&rx_q->napi); + napi_disable(&ch->napi); } }
@@@ -164,12 -166,14 +166,14 @@@ static void stmmac_enable_all_queues(struct stmmac_priv *priv) { u32 rx_queues_cnt = priv->plat->rx_queues_to_use; + u32 tx_queues_cnt = priv->plat->tx_queues_to_use; + u32 maxq = max(rx_queues_cnt, tx_queues_cnt); u32 queue;
- for (queue = 0; queue < rx_queues_cnt; queue++) { - struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue]; + for (queue = 0; queue < maxq; queue++) { + struct stmmac_channel *ch = &priv->channel[queue];
- napi_enable(&rx_q->napi); + napi_enable(&ch->napi); } }
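From here on the stmmac driver hangs its NAPI context off a per-channel structure instead of the RX queue, and the enable/disable loops run to max(rx, tx) so TX-only channels are covered too. A userspace sketch of the bookkeeping (the struct and field names follow the patch, everything else is a stand-in and no real NAPI is involved):

	#include <stdbool.h>
	#include <stdio.h>

	#define MAX_Q 8

	struct channel {
		bool has_rx;
		bool has_tx;
	};

	int main(void)
	{
		unsigned int rx_cnt = 2, tx_cnt = 4;
		unsigned int maxq = rx_cnt > tx_cnt ? rx_cnt : tx_cnt;
		struct channel ch[MAX_Q] = { 0 };

		for (unsigned int q = 0; q < maxq; q++) {
			ch[q].has_rx = q < rx_cnt;
			ch[q].has_tx = q < tx_cnt;
			printf("channel %u: rx=%d tx=%d\n", q,
			       ch[q].has_rx, ch[q].has_tx);
		}
		return 0;
	}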
@@@ -987,20 -991,17 +991,20 @@@ static int stmmac_init_phy(struct net_d if ((interface == PHY_INTERFACE_MODE_MII) || (interface == PHY_INTERFACE_MODE_RMII) || (max_speed < 1000 && max_speed > 0)) - phydev->advertising &= ~(SUPPORTED_1000baseT_Half | - SUPPORTED_1000baseT_Full); + phy_set_max_speed(phydev, SPEED_100);
/* * Half-duplex mode not supported with multiqueue * half-duplex can only works with single queue */ - if (tx_cnt > 1) - phydev->supported &= ~(SUPPORTED_1000baseT_Half | - SUPPORTED_100baseT_Half | - SUPPORTED_10baseT_Half); + if (tx_cnt > 1) { + phy_remove_link_mode(phydev, + ETHTOOL_LINK_MODE_10baseT_Half_BIT); + phy_remove_link_mode(phydev, + ETHTOOL_LINK_MODE_100baseT_Half_BIT); + phy_remove_link_mode(phydev, + ETHTOOL_LINK_MODE_1000baseT_Half_BIT); + }
/* * Broken HW is sometimes missing the pull-up resistor on the @@@ -1846,18 -1847,18 +1850,18 @@@ static void stmmac_dma_operation_mode(s * @queue: TX queue index * Description: it reclaims the transmit resources after transmission completes. */ - static void stmmac_tx_clean(struct stmmac_priv *priv, u32 queue) + static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue) { struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue]; unsigned int bytes_compl = 0, pkts_compl = 0; - unsigned int entry; + unsigned int entry, count = 0;
- netif_tx_lock(priv->dev); + __netif_tx_lock_bh(netdev_get_tx_queue(priv->dev, queue));
priv->xstats.tx_clean++;
entry = tx_q->dirty_tx; - while (entry != tx_q->cur_tx) { + while ((entry != tx_q->cur_tx) && (count < budget)) { struct sk_buff *skb = tx_q->tx_skbuff[entry]; struct dma_desc *p; int status; @@@ -1873,6 -1874,8 +1877,8 @@@ if (unlikely(status & tx_dma_own)) break;
+ count++; + /* Make sure descriptor fields are read after reading * the own bit. */ @@@ -1940,7 -1943,10 +1946,10 @@@ stmmac_enable_eee_mode(priv); mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(eee_timer)); } - netif_tx_unlock(priv->dev); + + __netif_tx_unlock_bh(netdev_get_tx_queue(priv->dev, queue)); + + return count; }
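stmmac_tx_clean() now takes a budget and reports how many descriptors it reclaimed, so TX work can be charged against the NAPI budget the same way RX work is. The control flow reduces to a bounded drain loop; a minimal model (the real function also unmaps DMA and frees skbs):

	#include <stdio.h>

	#define RING_SIZE 16

	static int tx_clean(unsigned int *dirty, unsigned int cur, int budget)
	{
		int count = 0;

		while (*dirty != cur && count < budget) {
			/* reclaim descriptor *dirty here */
			*dirty = (*dirty + 1) % RING_SIZE;
			count++;
		}
		return count;
	}

	int main(void)
	{
		unsigned int dirty = 0, cur = 10;

		printf("first pass reclaimed %d\n", tx_clean(&dirty, cur, 4));
		printf("second pass reclaimed %d\n", tx_clean(&dirty, cur, 64));
		return 0;
	}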
/** @@@ -2023,6 -2029,33 +2032,33 @@@ static bool stmmac_safety_feat_interrup return false; }
+ static int stmmac_napi_check(struct stmmac_priv *priv, u32 chan) + { + int status = stmmac_dma_interrupt_status(priv, priv->ioaddr, + &priv->xstats, chan); + struct stmmac_channel *ch = &priv->channel[chan]; + bool needs_work = false; + + if ((status & handle_rx) && ch->has_rx) { + needs_work = true; + } else { + status &= ~handle_rx; + } + + if ((status & handle_tx) && ch->has_tx) { + needs_work = true; + } else { + status &= ~handle_tx; + } + + if (needs_work && napi_schedule_prep(&ch->napi)) { + stmmac_disable_dma_irq(priv, priv->ioaddr, chan); + __napi_schedule(&ch->napi); + } + + return status; + } + /** * stmmac_dma_interrupt - DMA ISR * @priv: driver private structure @@@ -2037,57 -2070,14 +2073,14 @@@ static void stmmac_dma_interrupt(struc u32 channels_to_check = tx_channel_count > rx_channel_count ? tx_channel_count : rx_channel_count; u32 chan; - bool poll_scheduled = false; int status[max_t(u32, MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES)];
/* Make sure we never check beyond our status buffer. */ if (WARN_ON_ONCE(channels_to_check > ARRAY_SIZE(status))) channels_to_check = ARRAY_SIZE(status);
- /* Each DMA channel can be used for rx and tx simultaneously, yet - * napi_struct is embedded in struct stmmac_rx_queue rather than in a - * stmmac_channel struct. - * Because of this, stmmac_poll currently checks (and possibly wakes) - * all tx queues rather than just a single tx queue. - */ for (chan = 0; chan < channels_to_check; chan++) - status[chan] = stmmac_dma_interrupt_status(priv, priv->ioaddr, - &priv->xstats, chan); - - for (chan = 0; chan < rx_channel_count; chan++) { - if (likely(status[chan] & handle_rx)) { - struct stmmac_rx_queue *rx_q = &priv->rx_queue[chan]; - - if (likely(napi_schedule_prep(&rx_q->napi))) { - stmmac_disable_dma_irq(priv, priv->ioaddr, chan); - __napi_schedule(&rx_q->napi); - poll_scheduled = true; - } - } - } - - /* If we scheduled poll, we already know that tx queues will be checked. - * If we didn't schedule poll, see if any DMA channel (used by tx) has a - * completed transmission, if so, call stmmac_poll (once). - */ - if (!poll_scheduled) { - for (chan = 0; chan < tx_channel_count; chan++) { - if (status[chan] & handle_tx) { - /* It doesn't matter what rx queue we choose - * here. We use 0 since it always exists. - */ - struct stmmac_rx_queue *rx_q = - &priv->rx_queue[0]; - - if (likely(napi_schedule_prep(&rx_q->napi))) { - stmmac_disable_dma_irq(priv, - priv->ioaddr, chan); - __napi_schedule(&rx_q->napi); - } - break; - } - } - } + status[chan] = stmmac_napi_check(priv, chan);
for (chan = 0; chan < tx_channel_count; chan++) { if (unlikely(status[chan] & tx_hard_error_bump_tc)) { @@@ -2223,8 -2213,7 +2216,7 @@@ static int stmmac_init_dma_engine(struc stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg, tx_q->dma_tx_phy, chan);
- tx_q->tx_tail_addr = tx_q->dma_tx_phy + - (DMA_TX_SIZE * sizeof(struct dma_desc)); + tx_q->tx_tail_addr = tx_q->dma_tx_phy; stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, chan); } @@@ -2236,6 -2225,13 +2228,13 @@@ return ret; }
+ static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue) + { + struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue]; + + mod_timer(&tx_q->txtimer, STMMAC_COAL_TIMER(priv->tx_coal_timer)); + } + /** * stmmac_tx_timer - mitigation sw timer for tx. * @data: data pointer @@@ -2244,13 -2240,14 +2243,14 @@@ */ static void stmmac_tx_timer(struct timer_list *t) { - struct stmmac_priv *priv = from_timer(priv, t, txtimer); - u32 tx_queues_count = priv->plat->tx_queues_to_use; - u32 queue; + struct stmmac_tx_queue *tx_q = from_timer(tx_q, t, txtimer); + struct stmmac_priv *priv = tx_q->priv_data; + struct stmmac_channel *ch; + + ch = &priv->channel[tx_q->queue_index];
- /* let's scan all the tx queues */ - for (queue = 0; queue < tx_queues_count; queue++) - stmmac_tx_clean(priv, queue); + if (likely(napi_schedule_prep(&ch->napi))) + __napi_schedule(&ch->napi); }
/** @@@ -2263,11 -2260,17 +2263,17 @@@ */ static void stmmac_init_tx_coalesce(struct stmmac_priv *priv) { + u32 tx_channel_count = priv->plat->tx_queues_to_use; + u32 chan; + priv->tx_coal_frames = STMMAC_TX_FRAMES; priv->tx_coal_timer = STMMAC_COAL_TX_TIMER; - timer_setup(&priv->txtimer, stmmac_tx_timer, 0); - priv->txtimer.expires = STMMAC_COAL_TIMER(priv->tx_coal_timer); - add_timer(&priv->txtimer); + + for (chan = 0; chan < tx_channel_count; chan++) { + struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan]; + + timer_setup(&tx_q->txtimer, stmmac_tx_timer, 0); + } }
static void stmmac_set_rings_length(struct stmmac_priv *priv) @@@ -2595,6 -2598,7 +2601,7 @@@ static void stmmac_hw_teardown(struct n static int stmmac_open(struct net_device *dev) { struct stmmac_priv *priv = netdev_priv(dev); + u32 chan; int ret;
stmmac_check_ether_addr(priv); @@@ -2691,7 -2695,9 +2698,9 @@@ irq_error if (dev->phydev) phy_stop(dev->phydev);
- del_timer_sync(&priv->txtimer); + for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++) + del_timer_sync(&priv->tx_queue[chan].txtimer); + stmmac_hw_teardown(dev); init_error: free_dma_desc_resources(priv); @@@ -2711,6 -2717,7 +2720,7 @@@ dma_desc_error static int stmmac_release(struct net_device *dev) { struct stmmac_priv *priv = netdev_priv(dev); + u32 chan;
if (priv->eee_enabled) del_timer_sync(&priv->eee_ctrl_timer); @@@ -2725,7 -2732,8 +2735,8 @@@
stmmac_disable_all_queues(priv);
- del_timer_sync(&priv->txtimer); + for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++) + del_timer_sync(&priv->tx_queue[chan].txtimer);
/* Free the IRQ lines */ free_irq(dev->irq, dev); @@@ -2939,14 -2947,13 +2950,13 @@@ static netdev_tx_t stmmac_tso_xmit(stru priv->xstats.tx_tso_nfrags += nfrags;
/* Manage tx mitigation */ - priv->tx_count_frames += nfrags + 1; - if (likely(priv->tx_coal_frames > priv->tx_count_frames)) { - mod_timer(&priv->txtimer, - STMMAC_COAL_TIMER(priv->tx_coal_timer)); - } else { - priv->tx_count_frames = 0; + tx_q->tx_count_frames += nfrags + 1; + if (priv->tx_coal_frames <= tx_q->tx_count_frames) { stmmac_set_tx_ic(priv, desc); priv->xstats.tx_set_ic_bit++; + tx_q->tx_count_frames = 0; + } else { + stmmac_tx_timer_arm(priv, queue); }
skb_tx_timestamp(skb); @@@ -2995,6 -3002,7 +3005,7 @@@
netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
+ tx_q->tx_tail_addr = tx_q->dma_tx_phy + (tx_q->cur_tx * sizeof(*desc)); stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue);
return NETDEV_TX_OK; @@@ -3149,14 -3157,13 +3160,13 @@@ static netdev_tx_t stmmac_xmit(struct s * This approach takes care about the fragments: desc is the first * element in case of no SG. */ - priv->tx_count_frames += nfrags + 1; - if (likely(priv->tx_coal_frames > priv->tx_count_frames)) { - mod_timer(&priv->txtimer, - STMMAC_COAL_TIMER(priv->tx_coal_timer)); - } else { - priv->tx_count_frames = 0; + tx_q->tx_count_frames += nfrags + 1; + if (priv->tx_coal_frames <= tx_q->tx_count_frames) { stmmac_set_tx_ic(priv, desc); priv->xstats.tx_set_ic_bit++; + tx_q->tx_count_frames = 0; + } else { + stmmac_tx_timer_arm(priv, queue); }
skb_tx_timestamp(skb); @@@ -3202,6 -3209,8 +3212,8 @@@ netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
stmmac_enable_dma_transmission(priv, priv->ioaddr); + + tx_q->tx_tail_addr = tx_q->dma_tx_phy + (tx_q->cur_tx * sizeof(*desc)); stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue);
return NETDEV_TX_OK; @@@ -3322,6 -3331,7 +3334,7 @@@ static inline void stmmac_rx_refill(str static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue) { struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue]; + struct stmmac_channel *ch = &priv->channel[queue]; unsigned int entry = rx_q->cur_rx; int coe = priv->hw->rx_csum; unsigned int next_entry; @@@ -3494,7 -3504,7 +3507,7 @@@ else skb->ip_summed = CHECKSUM_UNNECESSARY;
- napi_gro_receive(&rx_q->napi, skb); + napi_gro_receive(&ch->napi, skb);
priv->dev->stats.rx_packets++; priv->dev->stats.rx_bytes += frame_len; @@@ -3517,27 -3527,33 +3530,33 @@@ * Description : * To look at the incoming frames and clear the tx resources. */ - static int stmmac_poll(struct napi_struct *napi, int budget) + static int stmmac_napi_poll(struct napi_struct *napi, int budget) { - struct stmmac_rx_queue *rx_q = - container_of(napi, struct stmmac_rx_queue, napi); - struct stmmac_priv *priv = rx_q->priv_data; - u32 tx_count = priv->plat->tx_queues_to_use; - u32 chan = rx_q->queue_index; - int work_done = 0; - u32 queue; + struct stmmac_channel *ch = + container_of(napi, struct stmmac_channel, napi); + struct stmmac_priv *priv = ch->priv_data; + int work_done = 0, work_rem = budget; + u32 chan = ch->index;
priv->xstats.napi_poll++;
- /* check all the queues */ - for (queue = 0; queue < tx_count; queue++) - stmmac_tx_clean(priv, queue); + if (ch->has_tx) { + int done = stmmac_tx_clean(priv, work_rem, chan);
- work_done = stmmac_rx(priv, budget, rx_q->queue_index); - if (work_done < budget) { - napi_complete_done(napi, work_done); - stmmac_enable_dma_irq(priv, priv->ioaddr, chan); + work_done += done; + work_rem -= done; + } + + if (ch->has_rx) { + int done = stmmac_rx(priv, work_rem, chan); + + work_done += done; + work_rem -= done; } + + if (work_done < budget && napi_complete_done(napi, work_done)) + stmmac_enable_dma_irq(priv, priv->ioaddr, chan); + return work_done; }
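The combined poll above spends the budget on TX reclaim first and hands the remainder to RX, re-enabling the DMA interrupt only when total work stayed under budget and napi_complete_done() succeeded. The arithmetic in isolation (tx_pending/rx_pending stand in for the real clean and receive calls):

	#include <stdio.h>

	static int poll(int budget, int tx_pending, int rx_pending)
	{
		int work_done = 0, work_rem = budget;

		int done = tx_pending < work_rem ? tx_pending : work_rem;
		work_done += done;
		work_rem -= done;

		done = rx_pending < work_rem ? rx_pending : work_rem;
		work_done += done;
		work_rem -= done;

		if (work_done < budget)
			printf("complete: re-enable IRQ\n");
		return work_done;
	}

	int main(void)
	{
		printf("work_done=%d\n", poll(64, 10, 20));	/* 30: completes */
		printf("work_done=%d\n", poll(64, 60, 20));	/* 64: repoll */
		return 0;
	}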
@@@ -4201,8 -4217,8 +4220,8 @@@ int stmmac_dvr_probe(struct device *dev { struct net_device *ndev = NULL; struct stmmac_priv *priv; + u32 queue, maxq; int ret = 0; - u32 queue;
ndev = alloc_etherdev_mqs(sizeof(struct stmmac_priv), MTL_MAX_TX_QUEUES, @@@ -4325,11 -4341,22 +4344,22 @@@ "Enable RX Mitigation via HW Watchdog Timer\n"); }
- for (queue = 0; queue < priv->plat->rx_queues_to_use; queue++) { - struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue]; + /* Setup channels NAPI */ + maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use);
- netif_napi_add(ndev, &rx_q->napi, stmmac_poll, - (8 * priv->plat->rx_queues_to_use)); + for (queue = 0; queue < maxq; queue++) { + struct stmmac_channel *ch = &priv->channel[queue]; + + ch->priv_data = priv; + ch->index = queue; + + if (queue < priv->plat->rx_queues_to_use) + ch->has_rx = true; + if (queue < priv->plat->tx_queues_to_use) + ch->has_tx = true; + + netif_napi_add(ndev, &ch->napi, stmmac_napi_poll, + NAPI_POLL_WEIGHT); }
mutex_init(&priv->lock); @@@ -4375,10 -4402,10 +4405,10 @@@ error_netdev_register priv->hw->pcs != STMMAC_PCS_RTBI) stmmac_mdio_unregister(ndev); error_mdio_register: - for (queue = 0; queue < priv->plat->rx_queues_to_use; queue++) { - struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue]; + for (queue = 0; queue < maxq; queue++) { + struct stmmac_channel *ch = &priv->channel[queue];
- netif_napi_del(&rx_q->napi); + netif_napi_del(&ch->napi); } error_hw_init: destroy_workqueue(priv->wq); diff --combined drivers/net/tun.c index 2a2cd35853b7,e2648b5a3861..3eb88b7147f0 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c @@@ -113,6 -113,7 +113,6 @@@ do { } while (0) #endif
-#define TUN_HEADROOM 256 #define TUN_RX_PAD (NET_IP_ALIGN + NET_SKB_PAD)
/* TUN device flags */ @@@ -868,9 -869,6 +868,9 @@@ static int tun_attach(struct tun_struc tun_napi_init(tun, tfile, napi); }
+ if (rtnl_dereference(tun->xdp_prog)) + sock_set_flag(&tfile->sk, SOCK_XDP); + tun_set_real_num_queues(tun);
/* device is allowed to go away first, so no need to hold extra @@@ -1155,43 -1153,6 +1155,6 @@@ static netdev_features_t tun_net_fix_fe
return (features & tun->set_features) | (features & ~TUN_USER_FEATURES); } - #ifdef CONFIG_NET_POLL_CONTROLLER - static void tun_poll_controller(struct net_device *dev) - { - /* - * Tun only receives frames when: - * 1) the char device endpoint gets data from user space - * 2) the tun socket gets a sendmsg call from user space - * If NAPI is not enabled, since both of those are synchronous - * operations, we are guaranteed never to have pending data when we poll - * for it so there is nothing to do here but return. - * We need this though so netpoll recognizes us as an interface that - * supports polling, which enables bridge devices in virt setups to - * still use netconsole - * If NAPI is enabled, however, we need to schedule polling for all - * queues unless we are using napi_gro_frags(), which we call in - * process context and not in NAPI context. - */ - struct tun_struct *tun = netdev_priv(dev); - - if (tun->flags & IFF_NAPI) { - struct tun_file *tfile; - int i; - - if (tun_napi_frags_enabled(tun)) - return; - - rcu_read_lock(); - for (i = 0; i < tun->numqueues; i++) { - tfile = rcu_dereference(tun->tfiles[i]); - if (tfile->napi_enabled) - napi_schedule(&tfile->napi); - } - rcu_read_unlock(); - } - return; - } - #endif
static void tun_set_headroom(struct net_device *dev, int new_hr) { @@@ -1243,29 -1204,13 +1206,29 @@@ static int tun_xdp_set(struct net_devic struct netlink_ext_ack *extack) { struct tun_struct *tun = netdev_priv(dev); + struct tun_file *tfile; struct bpf_prog *old_prog; + int i;
old_prog = rtnl_dereference(tun->xdp_prog); rcu_assign_pointer(tun->xdp_prog, prog); if (old_prog) bpf_prog_put(old_prog);
+ for (i = 0; i < tun->numqueues; i++) { + tfile = rtnl_dereference(tun->tfiles[i]); + if (prog) + sock_set_flag(&tfile->sk, SOCK_XDP); + else + sock_reset_flag(&tfile->sk, SOCK_XDP); + } + list_for_each_entry(tfile, &tun->disabled, next) { + if (prog) + sock_set_flag(&tfile->sk, SOCK_XDP); + else + sock_reset_flag(&tfile->sk, SOCK_XDP); + } + return 0; }
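tun_xdp_set() above mirrors the presence of the XDP program onto every queue's socket as SOCK_XDP, covering the disabled-queue list as well, so the datapath can test a cheap socket flag instead of dereferencing tun->xdp_prog. The propagation pattern, reduced to a userspace sketch (the structures are stand-ins for the tun ones):

	#include <stdbool.h>
	#include <stdio.h>

	#define MAX_QUEUES 4

	struct tfile { bool sock_xdp; };

	static void xdp_set(struct tfile *active, int nactive,
			    struct tfile *disabled, int ndisabled, bool prog)
	{
		for (int i = 0; i < nactive; i++)
			active[i].sock_xdp = prog;
		for (int i = 0; i < ndisabled; i++)
			disabled[i].sock_xdp = prog;
	}

	int main(void)
	{
		struct tfile active[MAX_QUEUES] = { 0 }, disabled[1] = { 0 };

		xdp_set(active, MAX_QUEUES, disabled, 1, true);	 /* prog installed */
		printf("active[0]=%d disabled[0]=%d\n",
		       active[0].sock_xdp, disabled[0].sock_xdp);

		xdp_set(active, MAX_QUEUES, disabled, 1, false); /* prog removed */
		printf("active[0]=%d disabled[0]=%d\n",
		       active[0].sock_xdp, disabled[0].sock_xdp);
		return 0;
	}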
@@@ -1301,9 -1246,6 +1264,6 @@@ static const struct net_device_ops tun_ .ndo_start_xmit = tun_net_xmit, .ndo_fix_features = tun_net_fix_features, .ndo_select_queue = tun_select_queue, - #ifdef CONFIG_NET_POLL_CONTROLLER - .ndo_poll_controller = tun_poll_controller, - #endif .ndo_set_rx_headroom = tun_set_headroom, .ndo_get_stats64 = tun_net_get_stats64, }; @@@ -1383,9 -1325,6 +1343,6 @@@ static const struct net_device_ops tap_ .ndo_set_mac_address = eth_mac_addr, .ndo_validate_addr = eth_validate_addr, .ndo_select_queue = tun_select_queue, - #ifdef CONFIG_NET_POLL_CONTROLLER - .ndo_poll_controller = tun_poll_controller, - #endif .ndo_features_check = passthru_features_check, .ndo_set_rx_headroom = tun_set_headroom, .ndo_get_stats64 = tun_net_get_stats64, @@@ -1635,55 -1574,6 +1592,55 @@@ static bool tun_can_build_skb(struct tu return true; }
+static struct sk_buff *__tun_build_skb(struct page_frag *alloc_frag, char *buf, + int buflen, int len, int pad) +{ + struct sk_buff *skb = build_skb(buf, buflen); + + if (!skb) + return ERR_PTR(-ENOMEM); + + skb_reserve(skb, pad); + skb_put(skb, len); + + get_page(alloc_frag->page); + alloc_frag->offset += buflen; + + return skb; +} + +static int tun_xdp_act(struct tun_struct *tun, struct bpf_prog *xdp_prog, + struct xdp_buff *xdp, u32 act) +{ + int err; + + switch (act) { + case XDP_REDIRECT: + err = xdp_do_redirect(tun->dev, xdp, xdp_prog); + if (err) + return err; + break; + case XDP_TX: + err = tun_xdp_tx(tun->dev, xdp); + if (err < 0) + return err; + break; + case XDP_PASS: + break; + default: + bpf_warn_invalid_xdp_action(act); + /* fall through */ + case XDP_ABORTED: + trace_xdp_exception(tun->dev, xdp_prog, act); + /* fall through */ + case XDP_DROP: + this_cpu_inc(tun->pcpu_stats->rx_dropped); + break; + } + + return act; +} + static struct sk_buff *tun_build_skb(struct tun_struct *tun, struct tun_file *tfile, struct iov_iter *from, @@@ -1691,17 -1581,18 +1648,17 @@@ int len, int *skb_xdp) { struct page_frag *alloc_frag = ¤t->task_frag; - struct sk_buff *skb; struct bpf_prog *xdp_prog; int buflen = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); - unsigned int delta = 0; char *buf; size_t copied; - int err, pad = TUN_RX_PAD; + int pad = TUN_RX_PAD; + int err = 0;
rcu_read_lock(); xdp_prog = rcu_dereference(tun->xdp_prog); if (xdp_prog) - pad += TUN_HEADROOM; + pad += XDP_PACKET_HEADROOM; buflen += SKB_DATA_ALIGN(len + pad); rcu_read_unlock();
@@@ -1720,18 -1611,17 +1677,18 @@@ * of xdp_prog above, this should be rare and for simplicity * we do XDP on skb in case the headroom is not enough. */ - if (hdr->gso_type || !xdp_prog) + if (hdr->gso_type || !xdp_prog) { *skb_xdp = 1; - else - *skb_xdp = 0; + return __tun_build_skb(alloc_frag, buf, buflen, len, pad); + } + + *skb_xdp = 0;
local_bh_disable(); rcu_read_lock(); xdp_prog = rcu_dereference(tun->xdp_prog); - if (xdp_prog && !*skb_xdp) { + if (xdp_prog) { struct xdp_buff xdp; - void *orig_data; u32 act;
xdp.data_hard_start = buf; @@@ -1739,33 -1629,66 +1696,33 @@@ xdp_set_data_meta_invalid(&xdp); xdp.data_end = xdp.data + len; xdp.rxq = &tfile->xdp_rxq; - orig_data = xdp.data; - act = bpf_prog_run_xdp(xdp_prog, &xdp);
- switch (act) { - case XDP_REDIRECT: - get_page(alloc_frag->page); - alloc_frag->offset += buflen; - err = xdp_do_redirect(tun->dev, &xdp, xdp_prog); - xdp_do_flush_map(); - if (err) - goto err_redirect; - rcu_read_unlock(); - local_bh_enable(); - return NULL; - case XDP_TX: + act = bpf_prog_run_xdp(xdp_prog, &xdp); + if (act == XDP_REDIRECT || act == XDP_TX) { get_page(alloc_frag->page); alloc_frag->offset += buflen; - if (tun_xdp_tx(tun->dev, &xdp) < 0) - goto err_redirect; - rcu_read_unlock(); - local_bh_enable(); - return NULL; - case XDP_PASS: - delta = orig_data - xdp.data; - len = xdp.data_end - xdp.data; - break; - default: - bpf_warn_invalid_xdp_action(act); - /* fall through */ - case XDP_ABORTED: - trace_xdp_exception(tun->dev, xdp_prog, act); - /* fall through */ - case XDP_DROP: - goto err_xdp; } - } + err = tun_xdp_act(tun, xdp_prog, &xdp, act); + if (err < 0) + goto err_xdp; + if (err == XDP_REDIRECT) + xdp_do_flush_map(); + if (err != XDP_PASS) + goto out;
- skb = build_skb(buf, buflen); - if (!skb) { - rcu_read_unlock(); - local_bh_enable(); - return ERR_PTR(-ENOMEM); + pad = xdp.data - xdp.data_hard_start; + len = xdp.data_end - xdp.data; } - - skb_reserve(skb, pad - delta); - skb_put(skb, len); - get_page(alloc_frag->page); - alloc_frag->offset += buflen; - rcu_read_unlock(); local_bh_enable();
- return skb; + return __tun_build_skb(alloc_frag, buf, buflen, len, pad);
-err_redirect: - put_page(alloc_frag->page); err_xdp: + put_page(alloc_frag->page); +out: rcu_read_unlock(); local_bh_enable(); - this_cpu_inc(tun->pcpu_stats->rx_dropped); return NULL; }
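The rewrite above factors verdict handling into tun_xdp_act(), which returns a negative errno on failure or the action itself otherwise, so both this path and the batched sendmsg path added later can share it. A simplified model of that contract (do_redirect() and do_tx() are hypothetical stubs, not kernel calls):

	#include <stdio.h>

	enum xdp_action { XDP_ABORTED, XDP_DROP, XDP_PASS, XDP_TX, XDP_REDIRECT };

	static int do_redirect(void) { return 0; }
	static int do_tx(void)       { return 0; }

	static int xdp_act(enum xdp_action act)
	{
		int err;

		switch (act) {
		case XDP_REDIRECT:
			err = do_redirect();
			if (err)
				return err;	/* negative errno aborts */
			break;
		case XDP_TX:
			err = do_tx();
			if (err < 0)
				return err;
			break;
		case XDP_PASS:
			break;
		default:		/* unknown verdicts fall through to drop */
		case XDP_ABORTED:
		case XDP_DROP:
			break;
		}
		return act;		/* caller branches on the verdict */
	}

	int main(void)
	{
		printf("PASS -> %d\n", xdp_act(XDP_PASS));
		printf("REDIRECT -> %d\n", xdp_act(XDP_REDIRECT));
		return 0;
	}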
@@@ -2426,133 -2349,18 +2383,133 @@@ static void tun_sock_write_space(struc kill_fasync(&tfile->fasync, SIGIO, POLL_OUT); }
+static int tun_xdp_one(struct tun_struct *tun, + struct tun_file *tfile, + struct xdp_buff *xdp, int *flush) +{ + struct tun_xdp_hdr *hdr = xdp->data_hard_start; + struct virtio_net_hdr *gso = &hdr->gso; + struct tun_pcpu_stats *stats; + struct bpf_prog *xdp_prog; + struct sk_buff *skb = NULL; + u32 rxhash = 0, act; + int buflen = hdr->buflen; + int err = 0; + bool skb_xdp = false; + + xdp_prog = rcu_dereference(tun->xdp_prog); + if (xdp_prog) { + if (gso->gso_type) { + skb_xdp = true; + goto build; + } + xdp_set_data_meta_invalid(xdp); + xdp->rxq = &tfile->xdp_rxq; + + act = bpf_prog_run_xdp(xdp_prog, xdp); + err = tun_xdp_act(tun, xdp_prog, xdp, act); + if (err < 0) { + put_page(virt_to_head_page(xdp->data)); + return err; + } + + switch (err) { + case XDP_REDIRECT: + *flush = true; + /* fall through */ + case XDP_TX: + return 0; + case XDP_PASS: + break; + default: + put_page(virt_to_head_page(xdp->data)); + return 0; + } + } + +build: + skb = build_skb(xdp->data_hard_start, buflen); + if (!skb) { + err = -ENOMEM; + goto out; + } + + skb_reserve(skb, xdp->data - xdp->data_hard_start); + skb_put(skb, xdp->data_end - xdp->data); + + if (virtio_net_hdr_to_skb(skb, gso, tun_is_little_endian(tun))) { + this_cpu_inc(tun->pcpu_stats->rx_frame_errors); + kfree_skb(skb); + err = -EINVAL; + goto out; + } + + skb->protocol = eth_type_trans(skb, tun->dev); + skb_reset_network_header(skb); + skb_probe_transport_header(skb, 0); + + if (skb_xdp) { + err = do_xdp_generic(xdp_prog, skb); + if (err != XDP_PASS) + goto out; + } + + if (!rcu_dereference(tun->steering_prog)) + rxhash = __skb_get_hash_symmetric(skb); + + netif_receive_skb(skb); + + stats = get_cpu_ptr(tun->pcpu_stats); + u64_stats_update_begin(&stats->syncp); + stats->rx_packets++; + stats->rx_bytes += skb->len; + u64_stats_update_end(&stats->syncp); + put_cpu_ptr(stats); + + if (rxhash) + tun_flow_update(tun, rxhash, tfile); + +out: + return err; +} + static int tun_sendmsg(struct socket *sock, struct msghdr *m, size_t total_len) { - int ret; + int ret, i; struct tun_file *tfile = container_of(sock, struct tun_file, socket); struct tun_struct *tun = tun_get(tfile); + struct tun_msg_ctl *ctl = m->msg_control; + struct xdp_buff *xdp;
if (!tun) return -EBADFD;
- ret = tun_get_user(tun, tfile, m->msg_control, &m->msg_iter, + if (ctl && (ctl->type == TUN_MSG_PTR)) { + int n = ctl->num; + int flush = 0; + + local_bh_disable(); + rcu_read_lock(); + + for (i = 0; i < n; i++) { + xdp = &((struct xdp_buff *)ctl->ptr)[i]; + tun_xdp_one(tun, tfile, xdp, &flush); + } + + if (flush) + xdp_do_flush_map(); + + rcu_read_unlock(); + local_bh_enable(); + + ret = total_len; + goto out; + } + + ret = tun_get_user(tun, tfile, ctl ? ctl->ptr : NULL, &m->msg_iter, m->msg_flags & MSG_DONTWAIT, m->msg_flags & MSG_MORE); +out: tun_put(tun); return ret; } diff --combined net/batman-adv/soft-interface.c index 2c7d95727f90,626ddca332db..5db5a0a4c959 --- a/net/batman-adv/soft-interface.c +++ b/net/batman-adv/soft-interface.c @@@ -574,15 -574,20 +574,20 @@@ int batadv_softif_create_vlan(struct ba struct batadv_softif_vlan *vlan; int err;
+ spin_lock_bh(&bat_priv->softif_vlan_list_lock); + vlan = batadv_softif_vlan_get(bat_priv, vid); if (vlan) { batadv_softif_vlan_put(vlan); + spin_unlock_bh(&bat_priv->softif_vlan_list_lock); return -EEXIST; }
vlan = kzalloc(sizeof(*vlan), GFP_ATOMIC); - if (!vlan) + if (!vlan) { + spin_unlock_bh(&bat_priv->softif_vlan_list_lock); return -ENOMEM; + }
vlan->bat_priv = bat_priv; vlan->vid = vid; @@@ -590,17 -595,23 +595,23 @@@
atomic_set(&vlan->ap_isolation, 0);
+ kref_get(&vlan->refcount); + hlist_add_head_rcu(&vlan->list, &bat_priv->softif_vlan_list); + spin_unlock_bh(&bat_priv->softif_vlan_list_lock); + + /* batadv_sysfs_add_vlan cannot be in the spinlock section due to the + * sleeping behavior of the sysfs functions and the fs_reclaim lock + */ err = batadv_sysfs_add_vlan(bat_priv->soft_iface, vlan); if (err) { - kfree(vlan); + /* ref for the function */ + batadv_softif_vlan_put(vlan); + + /* ref for the list */ + batadv_softif_vlan_put(vlan); return err; }
- spin_lock_bh(&bat_priv->softif_vlan_list_lock); - kref_get(&vlan->refcount); - hlist_add_head_rcu(&vlan->list, &bat_priv->softif_vlan_list); - spin_unlock_bh(&bat_priv->softif_vlan_list_lock); - /* add a new TT local entry. This one will be marked with the NOPURGE * flag */ @@@ -833,6 -844,7 +844,6 @@@ static int batadv_softif_init_late(stru atomic_set(&bat_priv->frag_seqno, random_seqno);
bat_priv->primary_if = NULL; - bat_priv->num_ifaces = 0;
batadv_nc_init_bat_priv(bat_priv);
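In the batadv_softif_create_vlan() change above, the existence check, allocation and list insertion all move under softif_vlan_list_lock, so two concurrent adds of the same vid can no longer both pass the -EEXIST check, while the sleeping sysfs registration stays outside the lock. The generic shape of such a fix, sketched with a pthread mutex in place of the spinlock (the find/insert helpers are hypothetical):

	#include <errno.h>
	#include <pthread.h>
	#include <stdio.h>
	#include <stdlib.h>

	static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;

	struct vlan { int vid; struct vlan *next; };
	static struct vlan *vlans;

	static struct vlan *find_vlan(int vid)
	{
		for (struct vlan *v = vlans; v; v = v->next)
			if (v->vid == vid)
				return v;
		return NULL;
	}

	static int create_vlan(int vid)
	{
		struct vlan *v;

		pthread_mutex_lock(&list_lock);
		if (find_vlan(vid)) {
			pthread_mutex_unlock(&list_lock);
			return -EEXIST;	/* lookup and insert are now atomic */
		}
		v = calloc(1, sizeof(*v));
		if (!v) {
			pthread_mutex_unlock(&list_lock);
			return -ENOMEM;
		}
		v->vid = vid;
		v->next = vlans;
		vlans = v;
		pthread_mutex_unlock(&list_lock);

		/* Sleeping work (sysfs registration in the real code) goes
		 * here, outside the lock; on failure the entry must be
		 * unlinked again, which is why the patch drops two references.
		 */
		return 0;
	}

	int main(void)
	{
		int a = create_vlan(5);
		int b = create_vlan(5);

		printf("%d %d\n", a, b);	/* 0 -EEXIST */
		return 0;
	}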
@@@ -1050,7 -1062,6 +1061,7 @@@ static void batadv_softif_init_early(st dev->needs_free_netdev = true; dev->priv_destructor = batadv_softif_free; dev->features |= NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_NETNS_LOCAL; + dev->features |= NETIF_F_LLTX; dev->priv_flags |= IFF_NO_QUEUE;
/* can't call min_mtu, because the needed variables diff --combined net/core/ethtool.c index 9d4e56d97080,234a0ec2e932..96afc55aa61e --- a/net/core/ethtool.c +++ b/net/core/ethtool.c @@@ -539,17 -539,47 +539,17 @@@ struct ethtool_link_usettings } link_modes; };
-/* Internal kernel helper to query a device ethtool_link_settings. - * - * Backward compatibility note: for compatibility with legacy drivers - * that implement only the ethtool_cmd API, this has to work with both - * drivers implementing get_link_ksettings API and drivers - * implementing get_settings API. When drivers implement get_settings - * and report ethtool_cmd deprecated fields - * (transceiver/maxrxpkt/maxtxpkt), these fields are silently ignored - * because the resulting struct ethtool_link_settings does not report them. - */ +/* Internal kernel helper to query a device ethtool_link_settings. */ int __ethtool_get_link_ksettings(struct net_device *dev, struct ethtool_link_ksettings *link_ksettings) { - int err; - struct ethtool_cmd cmd; - ASSERT_RTNL();
- if (dev->ethtool_ops->get_link_ksettings) { - memset(link_ksettings, 0, sizeof(*link_ksettings)); - return dev->ethtool_ops->get_link_ksettings(dev, - link_ksettings); - } - - /* driver doesn't support %ethtool_link_ksettings API. revert to - * legacy %ethtool_cmd API, unless it's not supported either. - * TODO: remove when ethtool_ops::get_settings disappears internally - */ - if (!dev->ethtool_ops->get_settings) + if (!dev->ethtool_ops->get_link_ksettings) return -EOPNOTSUPP;
- memset(&cmd, 0, sizeof(cmd)); - cmd.cmd = ETHTOOL_GSET; - err = dev->ethtool_ops->get_settings(dev, &cmd); - if (err < 0) - return err; - - /* we ignore deprecated fields transceiver/maxrxpkt/maxtxpkt - */ - convert_legacy_settings_to_link_ksettings(link_ksettings, &cmd); - return err; + memset(link_ksettings, 0, sizeof(*link_ksettings)); + return dev->ethtool_ops->get_link_ksettings(dev, link_ksettings); } EXPORT_SYMBOL(__ethtool_get_link_ksettings);
@@@ -605,7 -635,16 +605,7 @@@ store_link_ksettings_for_user(void __us return 0; }
-/* Query device for its ethtool_link_settings. - * - * Backward compatibility note: this function must fail when driver - * does not implement ethtool::get_link_ksettings, even if legacy - * ethtool_ops::get_settings is implemented. This tells new versions - * of ethtool that they should use the legacy API %ETHTOOL_GSET for - * this driver, so that they can correctly access the ethtool_cmd - * deprecated fields (transceiver/maxrxpkt/maxtxpkt), until no driver - * implements ethtool_ops::get_settings anymore. - */ +/* Query device for its ethtool_link_settings. */ static int ethtool_get_link_ksettings(struct net_device *dev, void __user *useraddr) { @@@ -613,6 -652,7 +613,6 @@@ struct ethtool_link_ksettings link_ksettings;
ASSERT_RTNL(); - if (!dev->ethtool_ops->get_link_ksettings) return -EOPNOTSUPP;
@@@ -659,7 -699,16 +659,7 @@@ return store_link_ksettings_for_user(useraddr, &link_ksettings); }
-/* Update device ethtool_link_settings. - * - * Backward compatibility note: this function must fail when driver - * does not implement ethtool::set_link_ksettings, even if legacy - * ethtool_ops::set_settings is implemented. This tells new versions - * of ethtool that they should use the legacy API %ETHTOOL_SSET for - * this driver, so that they can correctly update the ethtool_cmd - * deprecated fields (transceiver/maxrxpkt/maxtxpkt), until no driver - * implements ethtool_ops::get_settings anymore. - */ +/* Update device ethtool_link_settings. */ static int ethtool_set_link_ksettings(struct net_device *dev, void __user *useraddr) { @@@ -697,31 -746,51 +697,31 @@@
/* Query device for its ethtool_cmd settings. * - * Backward compatibility note: for compatibility with legacy ethtool, - * this has to work with both drivers implementing get_link_ksettings - * API and drivers implementing get_settings API. When drivers - * implement get_link_ksettings and report higher link mode bits, a - * kernel warning is logged once (with name of 1st driver/device) to - * recommend user to upgrade ethtool, but the command is successful - * (only the lower link mode bits reported back to user). + * Backward compatibility note: for compatibility with legacy ethtool, this is + * now implemented via get_link_ksettings. When driver reports higher link mode + * bits, a kernel warning is logged once (with name of 1st driver/device) to + * recommend user to upgrade ethtool, but the command is successful (only the + * lower link mode bits reported back to user). Deprecated fields from + * ethtool_cmd (transceiver/maxrxpkt/maxtxpkt) are always set to zero. */ static int ethtool_get_settings(struct net_device *dev, void __user *useraddr) { + struct ethtool_link_ksettings link_ksettings; struct ethtool_cmd cmd; + int err;
ASSERT_RTNL(); + if (!dev->ethtool_ops->get_link_ksettings) + return -EOPNOTSUPP;
- if (dev->ethtool_ops->get_link_ksettings) { - /* First, use link_ksettings API if it is supported */ - int err; - struct ethtool_link_ksettings link_ksettings; - - memset(&link_ksettings, 0, sizeof(link_ksettings)); - err = dev->ethtool_ops->get_link_ksettings(dev, - &link_ksettings); - if (err < 0) - return err; - convert_link_ksettings_to_legacy_settings(&cmd, - &link_ksettings); - - /* send a sensible cmd tag back to user */ - cmd.cmd = ETHTOOL_GSET; - } else { - /* driver doesn't support %ethtool_link_ksettings - * API. revert to legacy %ethtool_cmd API, unless it's - * not supported either. - */ - int err; - - if (!dev->ethtool_ops->get_settings) - return -EOPNOTSUPP; + memset(&link_ksettings, 0, sizeof(link_ksettings)); + err = dev->ethtool_ops->get_link_ksettings(dev, &link_ksettings); + if (err < 0) + return err; + convert_link_ksettings_to_legacy_settings(&cmd, &link_ksettings);
- memset(&cmd, 0, sizeof(cmd)); - cmd.cmd = ETHTOOL_GSET; - err = dev->ethtool_ops->get_settings(dev, &cmd); - if (err < 0) - return err; - } + /* send a sensible cmd tag back to user */ + cmd.cmd = ETHTOOL_GSET;
if (copy_to_user(useraddr, &cmd, sizeof(cmd))) return -EFAULT; @@@ -731,29 -800,48 +731,29 @@@
/* Update device link settings with given ethtool_cmd. * - * Backward compatibility note: for compatibility with legacy ethtool, - * this has to work with both drivers implementing set_link_ksettings - * API and drivers implementing set_settings API. When drivers - * implement set_link_ksettings and user's request updates deprecated - * ethtool_cmd fields (transceiver/maxrxpkt/maxtxpkt), a kernel - * warning is logged once (with name of 1st driver/device) to - * recommend user to upgrade ethtool, and the request is rejected. + * Backward compatibility note: for compatibility with legacy ethtool, this is + * now always implemented via set_link_settings. When user's request updates + * deprecated ethtool_cmd fields (transceiver/maxrxpkt/maxtxpkt), a kernel + * warning is logged once (with name of 1st driver/device) to recommend user to + * upgrade ethtool, and the request is rejected. */ static int ethtool_set_settings(struct net_device *dev, void __user *useraddr) { + struct ethtool_link_ksettings link_ksettings; struct ethtool_cmd cmd;
ASSERT_RTNL();
if (copy_from_user(&cmd, useraddr, sizeof(cmd))) return -EFAULT; - - /* first, try new %ethtool_link_ksettings API. */ - if (dev->ethtool_ops->set_link_ksettings) { - struct ethtool_link_ksettings link_ksettings; - - if (!convert_legacy_settings_to_link_ksettings(&link_ksettings, - &cmd)) - return -EINVAL; - - link_ksettings.base.cmd = ETHTOOL_SLINKSETTINGS; - link_ksettings.base.link_mode_masks_nwords - = __ETHTOOL_LINK_MODE_MASK_NU32; - return dev->ethtool_ops->set_link_ksettings(dev, - &link_ksettings); - } - - /* legacy %ethtool_cmd API */ - - /* TODO: return -EOPNOTSUPP when ethtool_ops::get_settings - * disappears internally - */ - - if (!dev->ethtool_ops->set_settings) + if (!dev->ethtool_ops->set_link_ksettings) return -EOPNOTSUPP;
- return dev->ethtool_ops->set_settings(dev, &cmd); + if (!convert_legacy_settings_to_link_ksettings(&link_ksettings, &cmd)) + return -EINVAL; + link_ksettings.base.link_mode_masks_nwords = + __ETHTOOL_LINK_MODE_MASK_NU32; + return dev->ethtool_ops->set_link_ksettings(dev, &link_ksettings); }
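With ethtool_ops::get_settings/set_settings gone, the legacy ETHTOOL_GSET/SSET ioctls above are serviced purely by converting to and from ethtool_link_ksettings; requests touching the deprecated fields are rejected on the set side and zeroed on the get side. A cut-down model of that bridge (the structs here are reduced stand-ins, not the real UAPI layout):

	#include <stdbool.h>
	#include <stdio.h>

	struct legacy_cmd {		/* ethtool_cmd, reduced */
		unsigned int speed;
		unsigned char duplex;
		unsigned char transceiver;	/* deprecated */
	};

	struct link_ksettings {		/* ethtool_link_ksettings, reduced */
		unsigned int speed;
		unsigned char duplex;
	};

	/* Returns false when the request touches deprecated fields, mirroring
	 * how convert_legacy_settings_to_link_ksettings() rejects them.
	 */
	static bool legacy_to_ksettings(struct link_ksettings *ks,
					const struct legacy_cmd *cmd)
	{
		if (cmd->transceiver)
			return false;
		ks->speed = cmd->speed;
		ks->duplex = cmd->duplex;
		return true;
	}

	int main(void)
	{
		struct legacy_cmd cmd = { .speed = 1000, .duplex = 1 };
		struct link_ksettings ks;

		printf("ok=%d\n", legacy_to_ksettings(&ks, &cmd));
		cmd.transceiver = 1;
		printf("ok=%d (deprecated field set -> rejected)\n",
		       legacy_to_ksettings(&ks, &cmd));
		return 0;
	}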
static noinline_for_stack int ethtool_get_drvinfo(struct net_device *dev, @@@ -2536,6 -2624,7 +2536,7 @@@ int dev_ethtool(struct net *net, struc case ETHTOOL_GPHYSTATS: case ETHTOOL_GTSO: case ETHTOOL_GPERMADDR: + case ETHTOOL_GUFO: case ETHTOOL_GGSO: case ETHTOOL_GGRO: case ETHTOOL_GFLAGS: diff --combined net/ipv6/addrconf.c index bfe3ec7ecb14,c63ccce6425f..a9a317322388 --- a/net/ipv6/addrconf.c +++ b/net/ipv6/addrconf.c @@@ -997,7 -997,6 +997,7 @@@ ipv6_add_addr(struct inet6_dev *idev, s if (addr_type == IPV6_ADDR_ANY || addr_type & IPV6_ADDR_MULTICAST || (!(idev->dev->flags & IFF_LOOPBACK) && + !netif_is_l3_master(idev->dev) && addr_type & IPV6_ADDR_LOOPBACK)) return ERR_PTR(-EADDRNOTAVAIL);
@@@ -4202,7 -4201,6 +4202,6 @@@ static struct inet6_ifaddr *if6_get_fir p++; continue; } - state->offset++; return ifa; }
@@@ -4226,13 -4224,12 +4225,12 @@@ static struct inet6_ifaddr *if6_get_nex return ifa; }
+ state->offset = 0; while (++state->bucket < IN6_ADDR_HSIZE) { - state->offset = 0; hlist_for_each_entry_rcu(ifa, &inet6_addr_lst[state->bucket], addr_lst) { if (!net_eq(dev_net(ifa->idev->dev), net)) continue; - state->offset++; return ifa; } } @@@ -4492,7 -4489,6 +4490,7 @@@ static const struct nla_policy ifa_ipv6 [IFA_CACHEINFO] = { .len = sizeof(struct ifa_cacheinfo) }, [IFA_FLAGS] = { .len = sizeof(u32) }, [IFA_RT_PRIORITY] = { .len = sizeof(u32) }, + [IFA_TARGET_NETNSID] = { .type = NLA_S32 }, };
static int @@@ -4795,32 -4791,19 +4793,32 @@@ static inline int inet6_ifaddr_msgsize( + nla_total_size(4) /* IFA_RT_PRIORITY */; }
+struct inet6_fill_args { + u32 portid; + u32 seq; + int event; + unsigned int flags; + int netnsid; +}; + static int inet6_fill_ifaddr(struct sk_buff *skb, struct inet6_ifaddr *ifa, - u32 portid, u32 seq, int event, unsigned int flags) + struct inet6_fill_args *args) { struct nlmsghdr *nlh; u32 preferred, valid;
- nlh = nlmsg_put(skb, portid, seq, event, sizeof(struct ifaddrmsg), flags); + nlh = nlmsg_put(skb, args->portid, args->seq, args->event, + sizeof(struct ifaddrmsg), args->flags); if (!nlh) return -EMSGSIZE;
put_ifaddrmsg(nlh, ifa->prefix_len, ifa->flags, rt_scope(ifa->scope), ifa->idev->dev->ifindex);
+ if (args->netnsid >= 0 && + nla_put_s32(skb, IFA_TARGET_NETNSID, args->netnsid)) + goto error; + if (!((ifa->flags&IFA_F_PERMANENT) && (ifa->prefered_lft == INFINITY_LIFE_TIME))) { preferred = ifa->prefered_lft; @@@ -4870,7 -4853,7 +4868,7 @@@ error }
static int inet6_fill_ifmcaddr(struct sk_buff *skb, struct ifmcaddr6 *ifmca, - u32 portid, u32 seq, int event, u16 flags) + struct inet6_fill_args *args) { struct nlmsghdr *nlh; u8 scope = RT_SCOPE_UNIVERSE; @@@ -4879,15 -4862,10 +4877,15 @@@ if (ipv6_addr_scope(&ifmca->mca_addr) & IFA_SITE) scope = RT_SCOPE_SITE;
- nlh = nlmsg_put(skb, portid, seq, event, sizeof(struct ifaddrmsg), flags); + nlh = nlmsg_put(skb, args->portid, args->seq, args->event, + sizeof(struct ifaddrmsg), args->flags); if (!nlh) return -EMSGSIZE;
+ if (args->netnsid >= 0 && + nla_put_s32(skb, IFA_TARGET_NETNSID, args->netnsid)) + return -EMSGSIZE; + put_ifaddrmsg(nlh, 128, IFA_F_PERMANENT, scope, ifindex); if (nla_put_in6_addr(skb, IFA_MULTICAST, &ifmca->mca_addr) < 0 || put_cacheinfo(skb, ifmca->mca_cstamp, ifmca->mca_tstamp, @@@ -4901,7 -4879,7 +4899,7 @@@ }
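The inet6_fill_* functions above now take one struct inet6_fill_args instead of repeating the (portid, seq, event, flags) tuple through every call chain, which also gives the new netnsid field a single home. The pattern in miniature (fill_one() is a hypothetical stand-in for the fill functions):

	#include <stdio.h>

	struct fill_args {
		unsigned int portid;
		unsigned int seq;
		int event;
		unsigned int flags;
		int netnsid;	/* -1 = current namespace, else peer to report */
	};

	static int fill_one(const char *what, const struct fill_args *args)
	{
		printf("%s: event=%d seq=%u netnsid=%d\n",
		       what, args->event, args->seq, args->netnsid);
		return 0;
	}

	int main(void)
	{
		struct fill_args args = {
			.portid = 0, .seq = 42, .event = 1,
			.flags = 0, .netnsid = -1,
		};

		fill_one("unicast", &args);
		args.event = 2;
		fill_one("multicast", &args);	/* same tuple, one field changed */
		return 0;
	}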
static int inet6_fill_ifacaddr(struct sk_buff *skb, struct ifacaddr6 *ifaca, - u32 portid, u32 seq, int event, unsigned int flags) + struct inet6_fill_args *args) { struct net_device *dev = fib6_info_nh_dev(ifaca->aca_rt); int ifindex = dev ? dev->ifindex : 1; @@@ -4911,15 -4889,10 +4909,15 @@@ if (ipv6_addr_scope(&ifaca->aca_addr) & IFA_SITE) scope = RT_SCOPE_SITE;
- nlh = nlmsg_put(skb, portid, seq, event, sizeof(struct ifaddrmsg), flags); + nlh = nlmsg_put(skb, args->portid, args->seq, args->event, + sizeof(struct ifaddrmsg), args->flags); if (!nlh) return -EMSGSIZE;
+ if (args->netnsid >= 0 && + nla_put_s32(skb, IFA_TARGET_NETNSID, args->netnsid)) + return -EMSGSIZE; + put_ifaddrmsg(nlh, 128, IFA_F_PERMANENT, scope, ifindex); if (nla_put_in6_addr(skb, IFA_ANYCAST, &ifaca->aca_addr) < 0 || put_cacheinfo(skb, ifaca->aca_cstamp, ifaca->aca_tstamp, @@@ -4941,14 -4914,8 +4939,14 @@@ enum addr_type_t /* called with rcu_read_lock() */ static int in6_dump_addrs(struct inet6_dev *idev, struct sk_buff *skb, struct netlink_callback *cb, enum addr_type_t type, - int s_ip_idx, int *p_ip_idx) + int s_ip_idx, int *p_ip_idx, int netnsid) { + struct inet6_fill_args fillargs = { + .portid = NETLINK_CB(cb->skb).portid, + .seq = cb->nlh->nlmsg_seq, + .flags = NLM_F_MULTI, + .netnsid = netnsid, + }; struct ifmcaddr6 *ifmca; struct ifacaddr6 *ifaca; int err = 1; @@@ -4958,13 -4925,16 +4956,13 @@@ switch (type) { case UNICAST_ADDR: { struct inet6_ifaddr *ifa; + fillargs.event = RTM_NEWADDR;
/* unicast address incl. temp addr */ list_for_each_entry(ifa, &idev->addr_list, if_list) { if (++ip_idx < s_ip_idx) continue; - err = inet6_fill_ifaddr(skb, ifa, - NETLINK_CB(cb->skb).portid, - cb->nlh->nlmsg_seq, - RTM_NEWADDR, - NLM_F_MULTI); + err = inet6_fill_ifaddr(skb, ifa, &fillargs); if (err < 0) break; nl_dump_check_consistent(cb, nlmsg_hdr(skb)); @@@ -4972,26 -4942,31 +4970,26 @@@ break; } case MULTICAST_ADDR: + fillargs.event = RTM_GETMULTICAST; + /* multicast address */ for (ifmca = idev->mc_list; ifmca; ifmca = ifmca->next, ip_idx++) { if (ip_idx < s_ip_idx) continue; - err = inet6_fill_ifmcaddr(skb, ifmca, - NETLINK_CB(cb->skb).portid, - cb->nlh->nlmsg_seq, - RTM_GETMULTICAST, - NLM_F_MULTI); + err = inet6_fill_ifmcaddr(skb, ifmca, &fillargs); if (err < 0) break; } break; case ANYCAST_ADDR: + fillargs.event = RTM_GETANYCAST; /* anycast address */ for (ifaca = idev->ac_list; ifaca; ifaca = ifaca->aca_next, ip_idx++) { if (ip_idx < s_ip_idx) continue; - err = inet6_fill_ifacaddr(skb, ifaca, - NETLINK_CB(cb->skb).portid, - cb->nlh->nlmsg_seq, - RTM_GETANYCAST, - NLM_F_MULTI); + err = inet6_fill_ifacaddr(skb, ifaca, &fillargs); if (err < 0) break; } @@@ -5008,9 -4983,6 +5006,9 @@@ static int inet6_dump_addr(struct sk_bu enum addr_type_t type) { struct net *net = sock_net(skb->sk); + struct nlattr *tb[IFA_MAX+1]; + struct net *tgt_net = net; + int netnsid = -1; int h, s_h; int idx, ip_idx; int s_idx, s_ip_idx; @@@ -5022,22 -4994,11 +5020,22 @@@ s_idx = idx = cb->args[1]; s_ip_idx = ip_idx = cb->args[2];
+ if (nlmsg_parse(cb->nlh, sizeof(struct ifaddrmsg), tb, IFA_MAX, + ifa_ipv6_policy, NULL) >= 0) { + if (tb[IFA_TARGET_NETNSID]) { + netnsid = nla_get_s32(tb[IFA_TARGET_NETNSID]); + + tgt_net = rtnl_get_net_ns_capable(skb->sk, netnsid); + if (IS_ERR(tgt_net)) + return PTR_ERR(tgt_net); + } + } + rcu_read_lock(); - cb->seq = atomic_read(&net->ipv6.dev_addr_genid) ^ net->dev_base_seq; + cb->seq = atomic_read(&tgt_net->ipv6.dev_addr_genid) ^ tgt_net->dev_base_seq; for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) { idx = 0; - head = &net->dev_index_head[h]; + head = &tgt_net->dev_index_head[h]; hlist_for_each_entry_rcu(dev, head, index_hlist) { if (idx < s_idx) goto cont; @@@ -5049,7 -5010,7 +5047,7 @@@ goto cont;
if (in6_dump_addrs(idev, skb, cb, type, - s_ip_idx, &ip_idx) < 0) + s_ip_idx, &ip_idx, netnsid) < 0) goto done; cont: idx++; @@@ -5060,8 -5021,6 +5058,8 @@@ done cb->args[0] = h; cb->args[1] = idx; cb->args[2] = ip_idx; + if (netnsid >= 0) + put_net(tgt_net);
return skb->len; } @@@ -5092,14 -5051,6 +5090,14 @@@ static int inet6_rtm_getaddr(struct sk_ struct netlink_ext_ack *extack) { struct net *net = sock_net(in_skb->sk); + struct inet6_fill_args fillargs = { + .portid = NETLINK_CB(in_skb).portid, + .seq = nlh->nlmsg_seq, + .event = RTM_NEWADDR, + .flags = 0, + .netnsid = -1, + }; + struct net *tgt_net = net; struct ifaddrmsg *ifm; struct nlattr *tb[IFA_MAX+1]; struct in6_addr *addr = NULL, *peer; @@@ -5113,24 -5064,15 +5111,24 @@@ if (err < 0) return err;
+ if (tb[IFA_TARGET_NETNSID]) { + fillargs.netnsid = nla_get_s32(tb[IFA_TARGET_NETNSID]); + + tgt_net = rtnl_get_net_ns_capable(NETLINK_CB(in_skb).sk, + fillargs.netnsid); + if (IS_ERR(tgt_net)) + return PTR_ERR(tgt_net); + } + addr = extract_addr(tb[IFA_ADDRESS], tb[IFA_LOCAL], &peer); if (!addr) return -EINVAL;
ifm = nlmsg_data(nlh); if (ifm->ifa_index) - dev = dev_get_by_index(net, ifm->ifa_index); + dev = dev_get_by_index(tgt_net, ifm->ifa_index);
- ifa = ipv6_get_ifaddr(net, addr, dev, 1); + ifa = ipv6_get_ifaddr(tgt_net, addr, dev, 1); if (!ifa) { err = -EADDRNOTAVAIL; goto errout; @@@ -5142,22 -5084,20 +5140,22 @@@ goto errout_ifa; }
- err = inet6_fill_ifaddr(skb, ifa, NETLINK_CB(in_skb).portid, - nlh->nlmsg_seq, RTM_NEWADDR, 0); + err = inet6_fill_ifaddr(skb, ifa, &fillargs); if (err < 0) { /* -EMSGSIZE implies BUG in inet6_ifaddr_msgsize() */ WARN_ON(err == -EMSGSIZE); kfree_skb(skb); goto errout_ifa; } - err = rtnl_unicast(skb, net, NETLINK_CB(in_skb).portid); + err = rtnl_unicast(skb, tgt_net, NETLINK_CB(in_skb).portid); errout_ifa: in6_ifa_put(ifa); errout: if (dev) dev_put(dev); + if (fillargs.netnsid >= 0) + put_net(tgt_net); + return err; }
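When IFA_TARGET_NETNSID is present, the request above is resolved against the peer namespace, and every exit path must drop the reference taken by rtnl_get_net_ns_capable(). A toy model of that acquire/release discipline (get_ns()/put_ns() are stand-ins for the real helpers):

	#include <stdio.h>

	struct ns { int refs; };
	static struct ns current_ns = { .refs = 1 };

	static struct ns *get_ns(int netnsid)
	{
		current_ns.refs++;	/* pretend the lookup took a reference */
		return &current_ns;
	}

	static void put_ns(struct ns *ns) { ns->refs--; }

	static int getaddr(int netnsid)
	{
		struct ns *tgt = &current_ns;
		int err = 0;

		if (netnsid >= 0)
			tgt = get_ns(netnsid);

		/* ... perform the lookup against tgt, not the caller's net ... */

		if (netnsid >= 0)
			put_ns(tgt);	/* released on every exit path */
		return err;
	}

	int main(void)
	{
		getaddr(-1);
		getaddr(3);
		printf("refs=%d (balanced)\n", current_ns.refs);
		return 0;
	}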
@@@ -5165,20 -5105,13 +5163,20 @@@ static void inet6_ifa_notify(int event { struct sk_buff *skb; struct net *net = dev_net(ifa->idev->dev); + struct inet6_fill_args fillargs = { + .portid = 0, + .seq = 0, + .event = event, + .flags = 0, + .netnsid = -1, + }; int err = -ENOBUFS;
skb = nlmsg_new(inet6_ifaddr_msgsize(), GFP_ATOMIC); if (!skb) goto errout;
- err = inet6_fill_ifaddr(skb, ifa, 0, 0, event, 0); + err = inet6_fill_ifaddr(skb, ifa, &fillargs); if (err < 0) { /* -EMSGSIZE implies BUG in inet6_ifaddr_msgsize() */ WARN_ON(err == -EMSGSIZE); diff --combined net/ipv6/route.c index 938db8ae2316,826b14de7dbb..d28f83e01593 --- a/net/ipv6/route.c +++ b/net/ipv6/route.c @@@ -364,11 -364,14 +364,14 @@@ EXPORT_SYMBOL(ip6_dst_alloc)
static void ip6_dst_destroy(struct dst_entry *dst) { + struct dst_metrics *p = (struct dst_metrics *)DST_METRICS_PTR(dst); struct rt6_info *rt = (struct rt6_info *)dst; struct fib6_info *from; struct inet6_dev *idev;
- dst_destroy_metrics_generic(dst); + if (p != &dst_default_metrics && refcount_dec_and_test(&p->refcnt)) + kfree(p); + rt6_uncached_list_del(rt);
idev = rt->rt6i_idev; @@@ -976,6 -979,10 +979,10 @@@ static void rt6_set_from(struct rt6_inf rt->rt6i_flags &= ~RTF_EXPIRES; rcu_assign_pointer(rt->from, from); dst_init_metrics(&rt->dst, from->fib6_metrics->metrics, true); + if (from->fib6_metrics != &dst_default_metrics) { + rt->dst._metrics |= DST_METRICS_REFCOUNTED; + refcount_inc(&from->fib6_metrics->refcnt); + } }
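The two hunks above pair up: rt6_set_from() takes a reference on the fib entry's metrics block whenever it is not the shared default, and ip6_dst_destroy() frees the block only when the last reference goes away, replacing dst_destroy_metrics_generic(). The lifetime rule in miniature (illustrative; the static default block stands in for dst_default_metrics, which is never freed):

	#include <stdio.h>
	#include <stdlib.h>

	struct metrics { int refcnt; unsigned int mtu; };

	static struct metrics dst_default_metrics = { .refcnt = 1 };

	static struct metrics *share(struct metrics *m)
	{
		if (m != &dst_default_metrics)
			m->refcnt++;	/* rt6_set_from() side */
		return m;
	}

	static void drop(struct metrics *m)
	{
		if (m != &dst_default_metrics && --m->refcnt == 0)
			free(m);	/* ip6_dst_destroy() side */
	}

	int main(void)
	{
		struct metrics *m = malloc(sizeof(*m));

		m->refcnt = 1;
		m->mtu = 1500;

		struct metrics *rt_metrics = share(m); /* cached route shares it */
		drop(m);	    /* fib entry goes away first */
		drop(rt_metrics);   /* last user frees the block */

		drop(share(&dst_default_metrics)); /* default is never freed */
		return 0;
	}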
/* Caller must already hold reference to @ort */ @@@ -993,6 -1000,7 +1000,6 @@@ static void ip6_rt_copy_init(struct rt6 #ifdef CONFIG_IPV6_SUBTREES rt->rt6i_src = ort->fib6_src; #endif - rt->rt6i_prefsrc = ort->fib6_prefsrc; }
static struct fib6_node* fib6_backtrack(struct fib6_node *fn, @@@ -1446,6 -1454,11 +1453,6 @@@ static int rt6_insert_exception(struct if (ort->fib6_src.plen) src_key = &nrt->rt6i_src.addr; #endif - - /* Update rt6i_prefsrc as it could be changed - * in rt6_remove_prefsrc() - */ - nrt->rt6i_prefsrc = ort->fib6_prefsrc; /* rt6_mtu_change() might lower mtu on ort. * Only insert this exception route if its mtu * is less than ort's mtu value. @@@ -1627,6 -1640,25 +1634,6 @@@ static void rt6_update_exception_stamp_ rcu_read_unlock(); }
-static void rt6_exceptions_remove_prefsrc(struct fib6_info *rt) -{ - struct rt6_exception_bucket *bucket; - struct rt6_exception *rt6_ex; - int i; - - bucket = rcu_dereference_protected(rt->rt6i_exception_bucket, - lockdep_is_held(&rt6_exception_lock)); - - if (bucket) { - for (i = 0; i < FIB6_EXCEPTION_BUCKET_SIZE; i++) { - hlist_for_each_entry(rt6_ex, &bucket->chain, hlist) { - rt6_ex->rt6i->rt6i_prefsrc.plen = 0; - } - bucket++; - } - } -} - static bool rt6_mtu_change_route_allowed(struct inet6_dev *idev, struct rt6_info *rt, int mtu) { @@@ -2071,8 -2103,7 +2078,8 @@@ struct dst_entry *ip6_route_output_flag { bool any_src;
- if (rt6_need_strict(&fl6->daddr)) { + if (ipv6_addr_type(&fl6->daddr) & + (IPV6_ADDR_MULTICAST | IPV6_ADDR_LINKLOCAL)) { struct dst_entry *dst;
dst = l3mdev_link_scope_lookup(net, fl6); @@@ -3111,6 -3142,8 +3118,6 @@@ install_route rt->fib6_nh.nh_dev = dev; rt->fib6_table = table;
- cfg->fc_nlinfo.nl_net = dev_net(dev); - if (idev) in6_dev_put(idev);
@@@ -3767,6 -3800,8 +3774,6 @@@ static int fib6_remove_prefsrc(struct f spin_lock_bh(&rt6_exception_lock); /* remove prefsrc entry */ rt->fib6_prefsrc.plen = 0; - /* need to update cache as well */ - rt6_exceptions_remove_prefsrc(rt); spin_unlock_bh(&rt6_exception_lock); } return 0;