The following commit has been merged in the master branch:

commit 765b7590c92d849806e9a27ab3a5a17cfc6a47a9
Merge: 4bc61b0b16955322576265724ab0a0075a30dc84 089cf7f6ecb266b6a4164919a2e69bd2f938374a
Author: David S. Miller <davem@davemloft.net>
Date:   Mon Sep 2 11:20:17 2019 -0700
Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net
r8152 conflicts are the NAPI fixes in 'net' overlapping with some tasklet stuff in net-next
Signed-off-by: David S. Miller <davem@davemloft.net>
diff --combined Documentation/devicetree/bindings/net/dsa/ksz.txt
index 5e8429b6f9ca,113e7ac79aad..95e91e84151c
--- a/Documentation/devicetree/bindings/net/dsa/ksz.txt
+++ b/Documentation/devicetree/bindings/net/dsa/ksz.txt
@@@ -5,9 -5,6 +5,9 @@@ Required properties
  - compatible: For external switch chips, compatible string must be exactly one of the following:
 +  - "microchip,ksz8765"
 +  - "microchip,ksz8794"
 +  - "microchip,ksz8795"
    - "microchip,ksz9477"
    - "microchip,ksz9897"
    - "microchip,ksz9896"
@@@ -15,6 -12,7 +15,7 @@@
    - "microchip,ksz8565"
    - "microchip,ksz9893"
    - "microchip,ksz9563"
+   - "microchip,ksz8563"
Optional properties:
diff --combined MAINTAINERS
index 68b48de8765b,e7a47b5210fd..a081c477d1d1
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@@ -683,7 -683,7 +683,7 @@@ S:	Maintaine
  F:	drivers/crypto/sunxi-ss/
  ALLWINNER VPU DRIVER
- M:	Maxime Ripard <maxime.ripard@bootlin.com>
+ M:	Maxime Ripard <mripard@kernel.org>
  M:	Paul Kocialkowski <paul.kocialkowski@bootlin.com>
  L:	linux-media@vger.kernel.org
  S:	Maintained
@@@ -938,14 -938,6 +938,14 @@@ S:	Supporte
  F:	drivers/mux/adgs1408.c
  F:	Documentation/devicetree/bindings/mux/adi,adgs1408.txt
 +ANALOG DEVICES INC ADIN DRIVER
 +M:	Alexandru Ardelean <alexandru.ardelean@analog.com>
 +L:	netdev@vger.kernel.org
 +W:	http://ez.analog.com/community/linux-device-drivers
 +S:	Supported
 +F:	drivers/net/phy/adin.c
 +F:	Documentation/devicetree/bindings/net/adi,adin.yaml
 +
  ANALOG DEVICES INC ADIS DRIVER LIBRARY
  M:	Alexandru Ardelean <alexandru.ardelean@analog.com>
  S:	Supported
@@@ -1416,7 -1408,7 +1416,7 @@@ S:	Maintaine
  F:	drivers/clk/sunxi/
  ARM/Allwinner sunXi SoC support
- M:	Maxime Ripard <maxime.ripard@bootlin.com>
+ M:	Maxime Ripard <mripard@kernel.org>
  M:	Chen-Yu Tsai <wens@csie.org>
  L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
  S:	Maintained
@@@ -2923,7 -2915,6 +2923,7 @@@ BATMAN ADVANCE
  M:	Marek Lindner <mareklindner@neomailbox.ch>
  M:	Simon Wunderlich <sw@simonwunderlich.de>
  M:	Antonio Quartulli <a@unstable.cc>
 +M:	Sven Eckelmann <sven@narfation.org>
  L:	b.a.t.m.a.n@lists.open-mesh.org (moderated for non-subscribers)
  W:	https://www.open-mesh.org/
  B:	https://www.open-mesh.org/projects/batman-adv/issues
@@@ -3586,7 -3577,7 +3586,7 @@@ F:	Documentation/filesystems/caching/ca
  F:	fs/cachefiles/
  CADENCE MIPI-CSI2 BRIDGES
- M:	Maxime Ripard <maxime.ripard@bootlin.com>
+ M:	Maxime Ripard <mripard@kernel.org>
  L:	linux-media@vger.kernel.org
  S:	Maintained
  F:	Documentation/devicetree/bindings/media/cdns,*.txt
@@@ -3644,12 -3635,9 +3644,12 @@@ S:	Maintaine
  F:	Documentation/devicetree/bindings/net/can/
  F:	drivers/net/can/
  F:	include/linux/can/dev.h
 +F:	include/linux/can/led.h
 +F:	include/linux/can/rx-offload.h
  F:	include/linux/can/platform/
  F:	include/uapi/linux/can/error.h
  F:	include/uapi/linux/can/netlink.h
 +F:	include/uapi/linux/can/vxcan.h
  CAN NETWORK LAYER
  M:	Oliver Hartkopp <socketcan@hartkopp.net>
@@@ -3662,8 -3650,6 +3662,8 @@@ S:	Maintaine
  F:	Documentation/networking/can.rst
  F:	net/can/
  F:	include/linux/can/core.h
 +F:	include/linux/can/skb.h
 +F:	include/net/netns/can.h
  F:	include/uapi/linux/can.h
  F:	include/uapi/linux/can/bcm.h
  F:	include/uapi/linux/can/raw.h
@@@ -5309,7 -5295,7 +5309,7 @@@ F:	include/linux/vga
  DRM DRIVERS AND MISC GPU PATCHES
  M:	Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
- M:	Maxime Ripard <maxime.ripard@bootlin.com>
+ M:	Maxime Ripard <mripard@kernel.org>
  M:	Sean Paul <sean@poorly.run>
  W:	https://01.org/linuxgraphics/gfx-docs/maintainer-tools/drm-misc.html
  S:	Maintained
@@@ -5322,7 -5308,7 +5322,7 @@@ F:	include/uapi/drm/drm
  F:	include/linux/vga*
  DRM DRIVERS FOR ALLWINNER A10
- M:	Maxime Ripard <maxime.ripard@bootlin.com>
+ M:	Maxime Ripard <mripard@kernel.org>
  L:	dri-devel@lists.freedesktop.org
  S:	Supported
  F:	drivers/gpu/drm/sun4i/
@@@ -7470,7 -7456,6 +7470,7 @@@ F:	drivers/hid/hid-hyperv.
  F:	drivers/hv/
  F:	drivers/input/serio/hyperv-keyboard.c
  F:	drivers/pci/controller/pci-hyperv.c
 +F:	drivers/pci/controller/pci-hyperv-intf.c
  F:	drivers/net/hyperv/
  F:	drivers/scsi/storvsc_drv.c
  F:	drivers/uio/uio_hv_generic.c
@@@ -7528,7 -7513,7 +7528,7 @@@ I2C MV64XXX MARVELL AND ALLWINNER DRIVE
  M:	Gregory CLEMENT <gregory.clement@bootlin.com>
  L:	linux-i2c@vger.kernel.org
  S:	Maintained
- F:	Documentation/devicetree/bindings/i2c/i2c-mv64xxx.txt
+ F:	Documentation/devicetree/bindings/i2c/marvell,mv64xxx-i2c.yaml
  F:	drivers/i2c/busses/i2c-mv64xxx.c
  I2C OVER PARALLEL PORT
@@@ -9244,6 -9229,18 +9244,18 @@@ F:	include/linux/nd.
  F:	include/linux/libnvdimm.h
  F:	include/uapi/linux/ndctl.h
+ LICENSES and SPDX stuff
+ M:	Thomas Gleixner <tglx@linutronix.de>
+ M:	Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+ L:	linux-spdx@vger.kernel.org
+ S:	Maintained
+ T:	git git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/spdx.git
+ F:	COPYING
+ F:	Documentation/process/license-rules.rst
+ F:	LICENSES/
+ F:	scripts/spdxcheck-test.sh
+ F:	scripts/spdxcheck.py
+ 
  LIGHTNVM PLATFORM SUPPORT
  M:	Matias Bjorling <mb@lightnvm.io>
  W:	http://github/OpenChannelSSD
@@@ -11169,7 -11166,6 +11181,7 @@@ S:	Maintaine
  W:	https://fedorahosted.org/dropwatch/
  F:	net/core/drop_monitor.c
  F:	include/uapi/linux/net_dropmon.h
 +F:	include/net/drop_monitor.h
  NETWORKING DRIVERS
  M:	"David S. Miller" <davem@davemloft.net>
@@@ -11349,6 -11345,7 +11361,6 @@@ F:	include/net/nfc
  F:	include/uapi/linux/nfc.h
  F:	drivers/nfc/
  F:	include/linux/platform_data/nfcmrvl.h
 -F:	include/linux/platform_data/nxp-nci.h
  F:	Documentation/devicetree/bindings/net/nfc/
  NFS, SUNRPC, AND LOCKD CLIENTS
@@@ -13243,7 -13240,7 +13255,7 @@@ M:	Manish Chopra <manishc@marvell.com
  M:	GR-Linux-NIC-Dev@marvell.com
  L:	netdev@vger.kernel.org
  S:	Supported
 -F:	drivers/net/ethernet/qlogic/qlge/
 +F:	drivers/staging/qlge/
  QM1D1B0004 MEDIA DRIVER
  M:	Akihiro Tsukada <tskd08@gmail.com>

diff --combined drivers/net/dsa/microchip/ksz9477_spi.c
index a226b389e12d,098b01e4ed1a..2e402e4d866f
--- a/drivers/net/dsa/microchip/ksz9477_spi.c
+++ b/drivers/net/dsa/microchip/ksz9477_spi.c
@@@ -13,6 -13,7 +13,6 @@@
  #include <linux/regmap.h>
  #include <linux/spi/spi.h>
-#include "ksz_priv.h" #include "ksz_common.h"
  #define SPI_ADDR_SHIFT	24
@@@ -80,6 -81,7 +80,7 @@@ static const struct of_device_id ksz947
  	{ .compatible = "microchip,ksz9897" },
  	{ .compatible = "microchip,ksz9893" },
  	{ .compatible = "microchip,ksz9563" },
+ 	{ .compatible = "microchip,ksz8563" },
  	{},
  };
  MODULE_DEVICE_TABLE(of, ksz9477_dt_ids);

diff --combined drivers/net/dsa/microchip/ksz_common.h
index c44a8d23d973,72ec250b9540..13d027baaa8b
--- a/drivers/net/dsa/microchip/ksz_common.h
+++ b/drivers/net/dsa/microchip/ksz_common.h
@@@ -7,152 -7,9 +7,152 @@@
  #ifndef __KSZ_COMMON_H
  #define __KSZ_COMMON_H
+#include <linux/etherdevice.h> +#include <linux/kernel.h> +#include <linux/mutex.h> +#include <linux/phy.h> #include <linux/regmap.h> +#include <net/dsa.h> + +struct vlan_table { + u32 table[3]; +}; + +struct ksz_port_mib { + struct mutex cnt_mutex; /* structure access */ + u8 cnt_ptr; + u64 *counters; +}; + +struct ksz_port { + u16 member; + u16 vid_member; + int stp_state; + struct phy_device phydev; + + u32 on:1; /* port is not disabled by hardware */ + u32 phy:1; /* port has a PHY */ + u32 fiber:1; /* port is fiber */ + u32 sgmii:1; /* port is SGMII */ + u32 force:1; + u32 read:1; /* read MIB counters in background */ + u32 freeze:1; /* MIB counter freeze is enabled */ + + struct ksz_port_mib mib; +}; + +struct ksz_device { + struct dsa_switch *ds; + struct ksz_platform_data *pdata; + const char *name; + + struct mutex dev_mutex; /* device access */ + struct mutex stats_mutex; /* status access */ + struct mutex alu_mutex; /* ALU access */ + struct mutex vlan_mutex; /* vlan access */ + const struct ksz_dev_ops *dev_ops; + + struct device *dev; + struct regmap *regmap[3]; + + void *priv; + + struct gpio_desc *reset_gpio; /* Optional reset GPIO */ + + /* chip specific data */ + u32 chip_id; + int num_vlans; + int num_alus; + int num_statics; + int cpu_port; /* port connected to CPU */ + int cpu_ports; /* port bitmap can be cpu port */ + int phy_port_cnt; + int port_cnt; + int reg_mib_cnt; + int mib_cnt; + int mib_port_cnt; + int last_port; /* ports after that not used */ + phy_interface_t interface; + u32 regs_size; + bool phy_errata_9477; + bool synclko_125; + + struct vlan_table *vlan_cache; + + struct ksz_port *ports; + struct timer_list mib_read_timer; + struct work_struct mib_read; + unsigned long mib_read_interval; + u16 br_member; + u16 member; + u16 live_ports; + u16 on_ports; /* ports enabled by DSA */ + u16 rx_ports; + u16 tx_ports; + u16 mirror_rx; + u16 mirror_tx; + u32 features; /* chip specific features */ + u32 overrides; /* chip functions set by user */ + u16 host_mask; + u16 port_mask; +}; + +struct alu_struct { + /* entry 1 */ + u8 is_static:1; + u8 is_src_filter:1; + u8 is_dst_filter:1; + u8 prio_age:3; + u32 _reserv_0_1:23; + u8 mstp:3; + /* entry 2 */ + u8 is_override:1; + u8 is_use_fid:1; + u32 _reserv_1_1:23; + u8 port_forward:7; + /* entry 3 & 4*/ + u32 _reserv_2_1:9; + u8 fid:7; + u8 mac[ETH_ALEN]; +}; + +struct ksz_dev_ops { + u32 (*get_port_addr)(int port, int offset); + void (*cfg_port_member)(struct ksz_device *dev, int port, u8 member); + void (*flush_dyn_mac_table)(struct ksz_device *dev, int port); + void (*phy_setup)(struct ksz_device *dev, int port, + struct phy_device *phy); + void (*port_cleanup)(struct ksz_device *dev, int port); + void (*port_setup)(struct ksz_device *dev, int port, bool cpu_port); + void (*r_phy)(struct ksz_device *dev, u16 phy, u16 reg, u16 *val); + void (*w_phy)(struct ksz_device *dev, u16 phy, u16 reg, u16 val); + int (*r_dyn_mac_table)(struct ksz_device *dev, u16 addr, u8 *mac_addr, + u8 *fid, u8 *src_port, u8 *timestamp, + u16 *entries); + int (*r_sta_mac_table)(struct ksz_device *dev, u16 addr, + struct alu_struct *alu); + void (*w_sta_mac_table)(struct ksz_device *dev, u16 addr, + struct alu_struct *alu); + void (*r_mib_cnt)(struct ksz_device *dev, int port, u16 addr, + u64 *cnt); + void (*r_mib_pkt)(struct ksz_device *dev, int port, u16 addr, + u64 *dropped, u64 *cnt); + void (*freeze_mib)(struct ksz_device *dev, int port, bool freeze); + void (*port_init_cnt)(struct ksz_device *dev, int port); + int (*shutdown)(struct 
ksz_device *dev); + int (*detect)(struct ksz_device *dev); + int (*init)(struct ksz_device *dev); + void (*exit)(struct ksz_device *dev); +}; + +struct ksz_device *ksz_switch_alloc(struct device *base, void *priv); +int ksz_switch_register(struct ksz_device *dev, + const struct ksz_dev_ops *ops); +void ksz_switch_remove(struct ksz_device *dev); + +int ksz8795_switch_register(struct ksz_device *dev); +int ksz9477_switch_register(struct ksz_device *dev);
-void ksz_port_cleanup(struct ksz_device *dev, int port); void ksz_update_port_member(struct ksz_device *dev, int port); void ksz_init_mib_timer(struct ksz_device *dev);
@@@ -211,22 -68,6 +211,22 @@@ static inline int ksz_read32(struct ksz return ret; }
+static inline int ksz_read64(struct ksz_device *dev, u32 reg, u64 *val) +{ + u32 value[2]; + int ret; + + ret = regmap_bulk_read(dev->regmap[2], reg, value, 2); + if (!ret) { + /* Ick! ToDo: Add 64bit R/W to regmap on 32bit systems */ + value[0] = swab32(value[0]); + value[1] = swab32(value[1]); + *val = swab64((u64)*value); + } + + return ret; +} + static inline int ksz_write8(struct ksz_device *dev, u32 reg, u8 value) { return regmap_write(dev->regmap[0], reg, value); @@@ -242,18 -83,6 +242,18 @@@ static inline int ksz_write32(struct ks return regmap_write(dev->regmap[2], reg, value); }
+static inline int ksz_write64(struct ksz_device *dev, u32 reg, u64 value) +{ + u32 val[2]; + + /* Ick! ToDo: Add 64bit R/W to regmap on 32bit systems */ + value = swab64(value); + val[0] = swab32(value & 0xffffffffULL); + val[1] = swab32(value >> 32ULL); + + return regmap_bulk_write(dev->regmap[2], reg, val, 2); +} + static inline void ksz_pread8(struct ksz_device *dev, int port, int offset, u8 *data) { @@@ -299,6 -128,7 +299,7 @@@ static inline void ksz_pwrite32(struct
#define KSZ_REGMAP_ENTRY(width, swp, regbits, regpad, regalign) \ { \ + .name = #width, \ .val_bits = (width), \ .reg_stride = (width) / 8, \ .reg_bits = (regbits) + (regalign), \ diff --combined drivers/net/ethernet/broadcom/genet/bcmgenet.c index 1586316eb6f1,b22196880d6d..12cb77ef1081 --- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c +++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c @@@ -1124,6 -1124,7 +1124,7 @@@ static const struct ethtool_ops bcmgene .set_coalesce = bcmgenet_set_coalesce, .get_link_ksettings = bcmgenet_get_link_ksettings, .set_link_ksettings = bcmgenet_set_link_ksettings, + .get_ts_info = ethtool_op_get_ts_info, };
/* Power down the unimac, based on mode. */ @@@ -2515,14 -2516,19 +2516,14 @@@ static int bcmgenet_dma_teardown(struc static void bcmgenet_fini_dma(struct bcmgenet_priv *priv) { struct netdev_queue *txq; - struct sk_buff *skb; - struct enet_cb *cb; int i;
bcmgenet_fini_rx_napi(priv); bcmgenet_fini_tx_napi(priv);
- for (i = 0; i < priv->num_tx_bds; i++) { - cb = priv->tx_cbs + i; - skb = bcmgenet_free_tx_cb(&priv->pdev->dev, cb); - if (skb) - dev_kfree_skb(skb); - } + for (i = 0; i < priv->num_tx_bds; i++) + dev_kfree_skb(bcmgenet_free_tx_cb(&priv->pdev->dev, + priv->tx_cbs + i));
for (i = 0; i < priv->hw_params->tx_queues; i++) { txq = netdev_get_tx_queue(priv->dev, priv->tx_rings[i].queue); @@@ -3432,6 -3438,7 +3433,6 @@@ static int bcmgenet_probe(struct platfo struct bcmgenet_priv *priv; struct net_device *dev; const void *macaddr; - struct resource *r; unsigned int i; int err = -EIO; const char *phy_mode_str; @@@ -3471,7 -3478,8 +3472,7 @@@ macaddr = pd->mac_address; }
- r = platform_get_resource(pdev, IORESOURCE_MEM, 0); - priv->base = devm_ioremap_resource(&pdev->dev, r); + priv->base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(priv->base)) { err = PTR_ERR(priv->base); goto err; diff --combined drivers/net/ethernet/ibm/ibmvnic.c index 07efa2b40003,fa4bb940665c..4f83f97ffe8b --- a/drivers/net/ethernet/ibm/ibmvnic.c +++ b/drivers/net/ethernet/ibm/ibmvnic.c @@@ -1485,7 -1485,7 +1485,7 @@@ static netdev_tx_t ibmvnic_xmit(struct
memcpy(dst + cur, page_address(skb_frag_page(frag)) + - frag->page_offset, skb_frag_size(frag)); + skb_frag_off(frag), skb_frag_size(frag)); cur += skb_frag_size(frag); } } else { @@@ -1983,6 -1983,10 +1983,10 @@@ static void __ibmvnic_reset(struct work
rwi = get_next_rwi(adapter); while (rwi) { + if (adapter->state == VNIC_REMOVING || + adapter->state == VNIC_REMOVED) + goto out; + if (adapter->force_reset_recovery) { adapter->force_reset_recovery = false; rc = do_hard_reset(adapter, rwi, reset_state); @@@ -2007,7 -2011,7 +2011,7 @@@ netdev_dbg(adapter->netdev, "Reset failed\n"); free_all_rwi(adapter); } - + out: adapter->resetting = false; if (we_lock_rtnl) rtnl_unlock(); diff --combined drivers/net/ethernet/marvell/sky2.c index c2e00bb587cd,e0363870f3a5..5f56ee83e3b1 --- a/drivers/net/ethernet/marvell/sky2.c +++ b/drivers/net/ethernet/marvell/sky2.c @@@ -4931,6 -4931,13 +4931,13 @@@ static const struct dmi_system_id msi_b DMI_MATCH(DMI_BOARD_NAME, "P6T"), }, }, + { + .ident = "ASUS P6X", + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."), + DMI_MATCH(DMI_BOARD_NAME, "P6X"), + }, + }, {} };
@@@ -5167,7 -5174,8 +5174,7 @@@ static void sky2_remove(struct pci_dev
static int sky2_suspend(struct device *dev) { - struct pci_dev *pdev = to_pci_dev(dev); - struct sky2_hw *hw = pci_get_drvdata(pdev); + struct sky2_hw *hw = dev_get_drvdata(dev); int i;
if (!hw) diff --combined drivers/net/ethernet/netronome/nfp/flower/offload.c index b0708460e342,457bdc60f3ee..987ae221f6be --- a/drivers/net/ethernet/netronome/nfp/flower/offload.c +++ b/drivers/net/ethernet/netronome/nfp/flower/offload.c @@@ -61,11 -61,6 +61,11 @@@ NFP_FLOWER_LAYER_IPV4 | \ NFP_FLOWER_LAYER_IPV6)
+#define NFP_FLOWER_PRE_TUN_RULE_FIELDS \ + (NFP_FLOWER_LAYER_PORT | \ + NFP_FLOWER_LAYER_MAC | \ + NFP_FLOWER_LAYER_IPV4) + struct nfp_flower_merge_check { union { struct { @@@ -494,7 -489,6 +494,7 @@@ nfp_flower_allocate_new(struct nfp_fl_k flow_pay->meta.flags = 0; INIT_LIST_HEAD(&flow_pay->linked_flows); flow_pay->in_hw = false; + flow_pay->pre_tun_rule.dev = NULL;
return flow_pay;
@@@ -738,62 -732,28 +738,62 @@@ nfp_flower_copy_pre_actions(char *act_d return act_off; }
-static int nfp_fl_verify_post_tun_acts(char *acts, int len) +static int +nfp_fl_verify_post_tun_acts(char *acts, int len, struct nfp_fl_push_vlan **vlan) { struct nfp_fl_act_head *a; unsigned int act_off = 0;
while (act_off < len) { a = (struct nfp_fl_act_head *)&acts[act_off]; - if (a->jump_id != NFP_FL_ACTION_OPCODE_OUTPUT) + + if (a->jump_id == NFP_FL_ACTION_OPCODE_PUSH_VLAN && !act_off) + *vlan = (struct nfp_fl_push_vlan *)a; + else if (a->jump_id != NFP_FL_ACTION_OPCODE_OUTPUT) return -EOPNOTSUPP;
act_off += a->len_lw << NFP_FL_LW_SIZ; }
+ /* Ensure any VLAN push also has an egress action. */ + if (*vlan && act_off <= sizeof(struct nfp_fl_push_vlan)) + return -EOPNOTSUPP; + return 0; }
static int +nfp_fl_push_vlan_after_tun(char *acts, int len, struct nfp_fl_push_vlan *vlan) +{ + struct nfp_fl_set_ipv4_tun *tun; + struct nfp_fl_act_head *a; + unsigned int act_off = 0; + + while (act_off < len) { + a = (struct nfp_fl_act_head *)&acts[act_off]; + + if (a->jump_id == NFP_FL_ACTION_OPCODE_SET_IPV4_TUNNEL) { + tun = (struct nfp_fl_set_ipv4_tun *)a; + tun->outer_vlan_tpid = vlan->vlan_tpid; + tun->outer_vlan_tci = vlan->vlan_tci; + + return 0; + } + + act_off += a->len_lw << NFP_FL_LW_SIZ; + } + + /* Return error if no tunnel action is found. */ + return -EOPNOTSUPP; +} + +static int nfp_flower_merge_action(struct nfp_fl_payload *sub_flow1, struct nfp_fl_payload *sub_flow2, struct nfp_fl_payload *merge_flow) { unsigned int sub1_act_len, sub2_act_len, pre_off1, pre_off2; + struct nfp_fl_push_vlan *post_tun_push_vlan = NULL; bool tunnel_act = false; char *merge_act; int err; @@@ -830,36 -790,18 +830,36 @@@ sub2_act_len -= pre_off2;
/* FW does a tunnel push when egressing, therefore, if sub_flow 1 pushes - * a tunnel, sub_flow 2 can only have output actions for a valid merge. + * a tunnel, there are restrictions on what sub_flow 2 actions lead to a + * valid merge. */ if (tunnel_act) { char *post_tun_acts = &sub_flow2->action_data[pre_off2];
- err = nfp_fl_verify_post_tun_acts(post_tun_acts, sub2_act_len); + err = nfp_fl_verify_post_tun_acts(post_tun_acts, sub2_act_len, + &post_tun_push_vlan); if (err) return err; + + if (post_tun_push_vlan) { + pre_off2 += sizeof(*post_tun_push_vlan); + sub2_act_len -= sizeof(*post_tun_push_vlan); + } }
/* Copy remaining actions from sub_flows 1 and 2. */ memcpy(merge_act, sub_flow1->action_data + pre_off1, sub1_act_len); + + if (post_tun_push_vlan) { + /* Update tunnel action in merge to include VLAN push. */ + err = nfp_fl_push_vlan_after_tun(merge_act, sub1_act_len, + post_tun_push_vlan); + if (err) + return err; + + merge_flow->meta.act_len -= sizeof(*post_tun_push_vlan); + } + merge_act += sub1_act_len; memcpy(merge_act, sub_flow2->action_data + pre_off2, sub2_act_len);
@@@ -1003,106 -945,6 +1003,106 @@@ err_destroy_merge_flow }
/** + * nfp_flower_validate_pre_tun_rule() + * @app: Pointer to the APP handle + * @flow: Pointer to NFP flow representation of rule + * @extack: Netlink extended ACK report + * + * Verifies the flow as a pre-tunnel rule. + * + * Return: negative value on error, 0 if verified. + */ +static int +nfp_flower_validate_pre_tun_rule(struct nfp_app *app, + struct nfp_fl_payload *flow, + struct netlink_ext_ack *extack) +{ + struct nfp_flower_meta_tci *meta_tci; + struct nfp_flower_mac_mpls *mac; + struct nfp_fl_act_head *act; + u8 *mask = flow->mask_data; + bool vlan = false; + int act_offset; + u8 key_layer; + + meta_tci = (struct nfp_flower_meta_tci *)flow->unmasked_data; + if (meta_tci->tci & cpu_to_be16(NFP_FLOWER_MASK_VLAN_PRESENT)) { + u16 vlan_tci = be16_to_cpu(meta_tci->tci); + + vlan_tci &= ~NFP_FLOWER_MASK_VLAN_PRESENT; + flow->pre_tun_rule.vlan_tci = cpu_to_be16(vlan_tci); + vlan = true; + } else { + flow->pre_tun_rule.vlan_tci = cpu_to_be16(0xffff); + } + + key_layer = meta_tci->nfp_flow_key_layer; + if (key_layer & ~NFP_FLOWER_PRE_TUN_RULE_FIELDS) { + NL_SET_ERR_MSG_MOD(extack, "unsupported pre-tunnel rule: too many match fields"); + return -EOPNOTSUPP; + } + + if (!(key_layer & NFP_FLOWER_LAYER_MAC)) { + NL_SET_ERR_MSG_MOD(extack, "unsupported pre-tunnel rule: MAC fields match required"); + return -EOPNOTSUPP; + } + + /* Skip fields known to exist. */ + mask += sizeof(struct nfp_flower_meta_tci); + mask += sizeof(struct nfp_flower_in_port); + + /* Ensure destination MAC address is fully matched. */ + mac = (struct nfp_flower_mac_mpls *)mask; + if (!is_broadcast_ether_addr(&mac->mac_dst[0])) { + NL_SET_ERR_MSG_MOD(extack, "unsupported pre-tunnel rule: dest MAC field must not be masked"); + return -EOPNOTSUPP; + } + + if (key_layer & NFP_FLOWER_LAYER_IPV4) { + int ip_flags = offsetof(struct nfp_flower_ipv4, ip_ext.flags); + int ip_proto = offsetof(struct nfp_flower_ipv4, ip_ext.proto); + int i; + + mask += sizeof(struct nfp_flower_mac_mpls); + + /* Ensure proto and flags are the only IP layer fields. */ + for (i = 0; i < sizeof(struct nfp_flower_ipv4); i++) + if (mask[i] && i != ip_flags && i != ip_proto) { + NL_SET_ERR_MSG_MOD(extack, "unsupported pre-tunnel rule: only flags and proto can be matched in ip header"); + return -EOPNOTSUPP; + } + } + + /* Action must be a single egress or pop_vlan and egress. */ + act_offset = 0; + act = (struct nfp_fl_act_head *)&flow->action_data[act_offset]; + if (vlan) { + if (act->jump_id != NFP_FL_ACTION_OPCODE_POP_VLAN) { + NL_SET_ERR_MSG_MOD(extack, "unsupported pre-tunnel rule: match on VLAN must have VLAN pop as first action"); + return -EOPNOTSUPP; + } + + act_offset += act->len_lw << NFP_FL_LW_SIZ; + act = (struct nfp_fl_act_head *)&flow->action_data[act_offset]; + } + + if (act->jump_id != NFP_FL_ACTION_OPCODE_OUTPUT) { + NL_SET_ERR_MSG_MOD(extack, "unsupported pre-tunnel rule: non egress action detected where egress was expected"); + return -EOPNOTSUPP; + } + + act_offset += act->len_lw << NFP_FL_LW_SIZ; + + /* Ensure there are no more actions after egress. */ + if (act_offset != flow->meta.act_len) { + NL_SET_ERR_MSG_MOD(extack, "unsupported pre-tunnel rule: egress is not the last action"); + return -EOPNOTSUPP; + } + + return 0; +} + +/** * nfp_flower_add_offload() - Adds a new flow to hardware. * @app: Pointer to the APP handle * @netdev: netdev structure. @@@ -1152,12 -994,6 +1152,12 @@@ nfp_flower_add_offload(struct nfp_app * if (err) goto err_destroy_flow;
+ if (flow_pay->pre_tun_rule.dev) { + err = nfp_flower_validate_pre_tun_rule(app, flow_pay, extack); + if (err) + goto err_destroy_flow; + } + err = nfp_compile_flow_metadata(app, flow, flow_pay, netdev, extack); if (err) goto err_destroy_flow; @@@ -1170,11 -1006,8 +1170,11 @@@ goto err_release_metadata; }
- err = nfp_flower_xmit_flow(app, flow_pay, - NFP_FLOWER_CMSG_TYPE_FLOW_ADD); + if (flow_pay->pre_tun_rule.dev) + err = nfp_flower_xmit_pre_tun_flow(app, flow_pay); + else + err = nfp_flower_xmit_flow(app, flow_pay, + NFP_FLOWER_CMSG_TYPE_FLOW_ADD); if (err) goto err_remove_rhash;
@@@ -1316,11 -1149,8 +1316,11 @@@ nfp_flower_del_offload(struct nfp_app * goto err_free_merge_flow; }
- err = nfp_flower_xmit_flow(app, nfp_flow, - NFP_FLOWER_CMSG_TYPE_FLOW_DEL); + if (nfp_flow->pre_tun_rule.dev) + err = nfp_flower_xmit_pre_tun_del_flow(app, nfp_flow); + else + err = nfp_flower_xmit_flow(app, nfp_flow, + NFP_FLOWER_CMSG_TYPE_FLOW_DEL); /* Fall through on error. */
err_free_merge_flow: @@@ -1579,9 -1409,10 +1579,10 @@@ nfp_flower_setup_indr_tc_block(struct n struct nfp_flower_priv *priv = app->priv; struct flow_block_cb *block_cb;
- if (f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS && - !(f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS && - nfp_flower_internal_port_can_offload(app, netdev))) + if ((f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS && + !nfp_flower_internal_port_can_offload(app, netdev)) || + (f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS && + nfp_flower_internal_port_can_offload(app, netdev))) return -EOPNOTSUPP;
switch (f->command) { @@@ -1656,17 -1487,16 +1657,17 @@@ int nfp_flower_reg_indir_block_handler( return NOTIFY_OK;
if (event == NETDEV_REGISTER) { - err = __tc_indr_block_cb_register(netdev, app, - nfp_flower_indr_setup_tc_cb, - app); + err = __flow_indr_block_cb_register(netdev, app, + nfp_flower_indr_setup_tc_cb, + app); if (err) nfp_flower_cmsg_warn(app, "Indirect block reg failed - %s\n", netdev->name); } else if (event == NETDEV_UNREGISTER) { - __tc_indr_block_cb_unregister(netdev, - nfp_flower_indr_setup_tc_cb, app); + __flow_indr_block_cb_unregister(netdev, + nfp_flower_indr_setup_tc_cb, + app); }
return NOTIFY_OK; diff --combined drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c index def8c198b016,f0ee982eb1b5..2600ce476d6b --- a/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c +++ b/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c @@@ -15,24 -15,6 +15,24 @@@
#define NFP_FL_MAX_ROUTES 32
+#define NFP_TUN_PRE_TUN_RULE_LIMIT 32 +#define NFP_TUN_PRE_TUN_RULE_DEL 0x1 +#define NFP_TUN_PRE_TUN_IDX_BIT 0x8 + +/** + * struct nfp_tun_pre_run_rule - rule matched before decap + * @flags: options for the rule offset + * @port_idx: index of destination MAC address for the rule + * @vlan_tci: VLAN info associated with MAC + * @host_ctx_id: stats context of rule to update + */ +struct nfp_tun_pre_tun_rule { + __be32 flags; + __be16 port_idx; + __be16 vlan_tci; + __be32 host_ctx_id; +}; + /** * struct nfp_tun_active_tuns - periodic message of active tunnels * @seq: sequence number of the message @@@ -142,12 -124,11 +142,12 @@@ enum nfp_flower_mac_offload_cmd
/** * struct nfp_tun_offloaded_mac - hashtable entry for an offloaded MAC - * @ht_node: Hashtable entry - * @addr: Offloaded MAC address - * @index: Offloaded index for given MAC address - * @ref_count: Number of devs using this MAC address - * @repr_list: List of reprs sharing this MAC address + * @ht_node: Hashtable entry + * @addr: Offloaded MAC address + * @index: Offloaded index for given MAC address + * @ref_count: Number of devs using this MAC address + * @repr_list: List of reprs sharing this MAC address + * @bridge_count: Number of bridge/internal devs with MAC */ struct nfp_tun_offloaded_mac { struct rhash_head ht_node; @@@ -155,7 -136,6 +155,7 @@@ u16 index; int ref_count; struct list_head repr_list; + int bridge_count; };
static const struct rhashtable_params offloaded_macs_params = { @@@ -348,13 -328,13 +348,13 @@@ nfp_tun_neigh_event_handler(struct noti
flow.daddr = *(__be32 *)n->primary_key;
- /* Only concerned with route changes for representors. */ - if (!nfp_netdev_is_nfp_repr(n->dev)) - return NOTIFY_DONE; - app_priv = container_of(nb, struct nfp_flower_priv, tun.neigh_nb); app = app_priv->app;
+ if (!nfp_netdev_is_nfp_repr(n->dev) && + !nfp_flower_internal_port_can_offload(app, n->dev)) + return NOTIFY_DONE; + /* Only concerned with changes to routes already added to NFP. */ if (!nfp_tun_has_route(app, flow.daddr)) return NOTIFY_DONE; @@@ -576,8 -556,6 +576,8 @@@ nfp_tunnel_offloaded_macs_inc_ref_and_l list_del(&repr_priv->mac_list);
list_add_tail(&repr_priv->mac_list, &entry->repr_list); + } else if (nfp_flower_is_supported_bridge(netdev)) { + entry->bridge_count++; }
entry->ref_count++; @@@ -594,35 -572,20 +594,35 @@@ nfp_tunnel_add_shared_mac(struct nfp_ap
entry = nfp_tunnel_lookup_offloaded_macs(app, netdev->dev_addr); if (entry && nfp_tunnel_is_mac_idx_global(entry->index)) { - nfp_tunnel_offloaded_macs_inc_ref_and_link(entry, netdev, mod); - return 0; + if (entry->bridge_count || + !nfp_flower_is_supported_bridge(netdev)) { + nfp_tunnel_offloaded_macs_inc_ref_and_link(entry, + netdev, mod); + return 0; + } + + /* MAC is global but matches need to go to pre_tun table. */ + nfp_mac_idx = entry->index | NFP_TUN_PRE_TUN_IDX_BIT; }
- /* Assign a global index if non-repr or MAC address is now shared. */ - if (entry || !port) { - ida_idx = ida_simple_get(&priv->tun.mac_off_ids, 0, - NFP_MAX_MAC_INDEX, GFP_KERNEL); - if (ida_idx < 0) - return ida_idx; + if (!nfp_mac_idx) { + /* Assign a global index if non-repr or MAC is now shared. */ + if (entry || !port) { + ida_idx = ida_simple_get(&priv->tun.mac_off_ids, 0, + NFP_MAX_MAC_INDEX, GFP_KERNEL); + if (ida_idx < 0) + return ida_idx;
- nfp_mac_idx = nfp_tunnel_get_global_mac_idx_from_ida(ida_idx); - } else { - nfp_mac_idx = nfp_tunnel_get_mac_idx_from_phy_port_id(port); + nfp_mac_idx = + nfp_tunnel_get_global_mac_idx_from_ida(ida_idx); + + if (nfp_flower_is_supported_bridge(netdev)) + nfp_mac_idx |= NFP_TUN_PRE_TUN_IDX_BIT; + + } else { + nfp_mac_idx = + nfp_tunnel_get_mac_idx_from_phy_port_id(port); + } }
if (!entry) { @@@ -691,25 -654,6 +691,25 @@@ nfp_tunnel_del_shared_mac(struct nfp_ap list_del(&repr_priv->mac_list); }
+ if (nfp_flower_is_supported_bridge(netdev)) { + entry->bridge_count--; + + if (!entry->bridge_count && entry->ref_count) { + u16 nfp_mac_idx; + + nfp_mac_idx = entry->index & ~NFP_TUN_PRE_TUN_IDX_BIT; + if (__nfp_tunnel_offload_mac(app, mac, nfp_mac_idx, + false)) { + nfp_flower_cmsg_warn(app, "MAC offload index revert failed on %s.\n", + netdev_name(netdev)); + return 0; + } + + entry->index = nfp_mac_idx; + return 0; + } + } + /* If MAC is now used by 1 repr set the offloaded MAC index to port. */ if (entry->ref_count == 1 && list_is_singular(&entry->repr_list)) { u16 nfp_mac_idx; @@@ -769,9 -713,6 +769,9 @@@ nfp_tunnel_offload_mac(struct nfp_app * return 0;
repr_priv = repr->app_priv; + if (repr_priv->on_bridge) + return 0; + mac_offloaded = &repr_priv->mac_offloaded; off_mac = &repr_priv->offloaded_mac_addr[0]; port = nfp_repr_get_port_id(netdev); @@@ -887,119 -828,10 +887,119 @@@ int nfp_tunnel_mac_event_handler(struc if (err) nfp_flower_cmsg_warn(app, "Failed to offload MAC change on %s.\n", netdev_name(netdev)); + } else if (event == NETDEV_CHANGEUPPER) { + /* If a repr is attached to a bridge then tunnel packets + * entering the physical port are directed through the bridge + * datapath and cannot be directly detunneled. Therefore, + * associated offloaded MACs and indexes should not be used + * by fw for detunneling. + */ + struct netdev_notifier_changeupper_info *info = ptr; + struct net_device *upper = info->upper_dev; + struct nfp_flower_repr_priv *repr_priv; + struct nfp_repr *repr; + + if (!nfp_netdev_is_nfp_repr(netdev) || + !nfp_flower_is_supported_bridge(upper)) + return NOTIFY_OK; + + repr = netdev_priv(netdev); + if (repr->app != app) + return NOTIFY_OK; + + repr_priv = repr->app_priv; + + if (info->linking) { + if (nfp_tunnel_offload_mac(app, netdev, + NFP_TUNNEL_MAC_OFFLOAD_DEL)) + nfp_flower_cmsg_warn(app, "Failed to delete offloaded MAC on %s.\n", + netdev_name(netdev)); + repr_priv->on_bridge = true; + } else { + repr_priv->on_bridge = false; + + if (!(netdev->flags & IFF_UP)) + return NOTIFY_OK; + + if (nfp_tunnel_offload_mac(app, netdev, + NFP_TUNNEL_MAC_OFFLOAD_ADD)) + nfp_flower_cmsg_warn(app, "Failed to offload MAC on %s.\n", + netdev_name(netdev)); + } } return NOTIFY_OK; }
+int nfp_flower_xmit_pre_tun_flow(struct nfp_app *app, + struct nfp_fl_payload *flow) +{ + struct nfp_flower_priv *app_priv = app->priv; + struct nfp_tun_offloaded_mac *mac_entry; + struct nfp_tun_pre_tun_rule payload; + struct net_device *internal_dev; + int err; + + if (app_priv->pre_tun_rule_cnt == NFP_TUN_PRE_TUN_RULE_LIMIT) + return -ENOSPC; + + memset(&payload, 0, sizeof(struct nfp_tun_pre_tun_rule)); + + internal_dev = flow->pre_tun_rule.dev; + payload.vlan_tci = flow->pre_tun_rule.vlan_tci; + payload.host_ctx_id = flow->meta.host_ctx_id; + + /* Lookup MAC index for the pre-tunnel rule egress device. + * Note that because the device is always an internal port, it will + * have a constant global index so does not need to be tracked. + */ + mac_entry = nfp_tunnel_lookup_offloaded_macs(app, + internal_dev->dev_addr); + if (!mac_entry) + return -ENOENT; + + payload.port_idx = cpu_to_be16(mac_entry->index); + + /* Copy mac id and vlan to flow - dev may not exist at delete time. */ + flow->pre_tun_rule.vlan_tci = payload.vlan_tci; + flow->pre_tun_rule.port_idx = payload.port_idx; + + err = nfp_flower_xmit_tun_conf(app, NFP_FLOWER_CMSG_TYPE_PRE_TUN_RULE, + sizeof(struct nfp_tun_pre_tun_rule), + (unsigned char *)&payload, GFP_KERNEL); + if (err) + return err; + + app_priv->pre_tun_rule_cnt++; + + return 0; +} + +int nfp_flower_xmit_pre_tun_del_flow(struct nfp_app *app, + struct nfp_fl_payload *flow) +{ + struct nfp_flower_priv *app_priv = app->priv; + struct nfp_tun_pre_tun_rule payload; + u32 tmp_flags = 0; + int err; + + memset(&payload, 0, sizeof(struct nfp_tun_pre_tun_rule)); + + tmp_flags |= NFP_TUN_PRE_TUN_RULE_DEL; + payload.flags = cpu_to_be32(tmp_flags); + payload.vlan_tci = flow->pre_tun_rule.vlan_tci; + payload.port_idx = flow->pre_tun_rule.port_idx; + + err = nfp_flower_xmit_tun_conf(app, NFP_FLOWER_CMSG_TYPE_PRE_TUN_RULE, + sizeof(struct nfp_tun_pre_tun_rule), + (unsigned char *)&payload, GFP_KERNEL); + if (err) + return err; + + app_priv->pre_tun_rule_cnt--; + + return 0; +} + int nfp_tunnel_config_start(struct nfp_app *app) { struct nfp_flower_priv *priv = app->priv; diff --combined drivers/net/phy/phy.c index 0acd5b49f450,6b0f89369b46..35d29a823af8 --- a/drivers/net/phy/phy.c +++ b/drivers/net/phy/phy.c @@@ -507,7 -507,7 +507,7 @@@ static int phy_config_aneg(struct phy_d * allowed to call genphy_config_aneg() */ if (phydev->is_c45 && !(phydev->c45_ids.devices_in_package & BIT(0))) - return -EOPNOTSUPP; + return genphy_c45_config_aneg(phydev);
return genphy_config_aneg(phydev); } @@@ -608,21 -608,38 +608,21 @@@ static int phy_poll_aneg_done(struct ph */ int phy_speed_down(struct phy_device *phydev, bool sync) { - __ETHTOOL_DECLARE_LINK_MODE_MASK(adv_old); - __ETHTOOL_DECLARE_LINK_MODE_MASK(adv); + __ETHTOOL_DECLARE_LINK_MODE_MASK(adv_tmp); int ret;
if (phydev->autoneg != AUTONEG_ENABLE) return 0;
- linkmode_copy(adv_old, phydev->advertising); - linkmode_copy(adv, phydev->lp_advertising); - linkmode_and(adv, adv, phydev->supported); - - if (linkmode_test_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, adv) || - linkmode_test_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, adv)) { - linkmode_clear_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT, - phydev->advertising); - linkmode_clear_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT, - phydev->advertising); - linkmode_clear_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT, - phydev->advertising); - linkmode_clear_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, - phydev->advertising); - } else if (linkmode_test_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT, - adv) || - linkmode_test_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT, - adv)) { - linkmode_clear_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT, - phydev->advertising); - linkmode_clear_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, - phydev->advertising); - } + linkmode_copy(adv_tmp, phydev->advertising); + + ret = phy_speed_down_core(phydev); + if (ret) + return ret;
- if (linkmode_equal(phydev->advertising, adv_old)) + linkmode_copy(phydev->adv_old, adv_tmp); + + if (linkmode_equal(phydev->advertising, adv_tmp)) return 0;
ret = phy_config_aneg(phydev); @@@ -641,19 -658,30 +641,19 @@@ EXPORT_SYMBOL_GPL(phy_speed_down) */ int phy_speed_up(struct phy_device *phydev) { - __ETHTOOL_DECLARE_LINK_MODE_MASK(all_speeds) = { 0, }; - __ETHTOOL_DECLARE_LINK_MODE_MASK(not_speeds); - __ETHTOOL_DECLARE_LINK_MODE_MASK(supported); - __ETHTOOL_DECLARE_LINK_MODE_MASK(adv_old); - __ETHTOOL_DECLARE_LINK_MODE_MASK(speeds); - - linkmode_copy(adv_old, phydev->advertising); + __ETHTOOL_DECLARE_LINK_MODE_MASK(adv_tmp);
if (phydev->autoneg != AUTONEG_ENABLE) return 0;
- linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, all_speeds); - linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, all_speeds); - linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT, all_speeds); - linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT, all_speeds); - linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT, all_speeds); - linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, all_speeds); + if (linkmode_empty(phydev->adv_old)) + return 0;
- linkmode_andnot(not_speeds, adv_old, all_speeds); - linkmode_copy(supported, phydev->supported); - linkmode_and(speeds, supported, all_speeds); - linkmode_or(phydev->advertising, not_speeds, speeds); + linkmode_copy(adv_tmp, phydev->advertising); + linkmode_copy(phydev->advertising, phydev->adv_old); + linkmode_zero(phydev->adv_old);
- if (linkmode_equal(phydev->advertising, adv_old)) + if (linkmode_equal(phydev->advertising, adv_tmp)) return 0;
return phy_config_aneg(phydev); @@@ -911,8 -939,8 +911,8 @@@ void phy_state_machine(struct work_stru if (phydev->link) { phydev->link = 0; phy_link_down(phydev, true); - do_suspend = true; } + do_suspend = true; break; }
diff --combined drivers/net/usb/r8152.c index c6fa0c17c13d,04137ac373b0..778d27d1fb15 --- a/drivers/net/usb/r8152.c +++ b/drivers/net/usb/r8152.c @@@ -22,11 -22,10 +22,11 @@@ #include <linux/mdio.h> #include <linux/usb/cdc.h> #include <linux/suspend.h> +#include <linux/atomic.h> #include <linux/acpi.h>
/* Information for net-next */ -#define NETNEXT_VERSION "09" +#define NETNEXT_VERSION "10"
/* Information for net */ #define NET_VERSION "10" @@@ -584,9 -583,6 +584,9 @@@ enum rtl_register_content #define TX_ALIGN 4 #define RX_ALIGN 8
+#define RTL8152_RX_MAX_PENDING 4096 +#define RTL8152_RXFG_HEADSZ 256 + #define INTR_LINK 0x0004
#define RTL8152_REQT_READ 0xc0 @@@ -619,7 -615,7 +619,7 @@@ enum rtl8152_flags RTL8152_LINK_CHG, SELECTIVE_SUSPEND, PHY_RESET, - SCHEDULE_NAPI, + SCHEDULE_TASKLET, GREEN_ETHERNET, DELL_TB_RX_AGG_BUG, }; @@@ -698,11 -694,11 +698,11 @@@ struct tx_desc struct r8152;
struct rx_agg { - struct list_head list; + struct list_head list, info_list; struct urb *urb; struct r8152 *context; + struct page *page; void *buffer; - void *head; };
struct tx_agg { @@@ -723,7 -719,7 +723,7 @@@ struct r8152 struct net_device *netdev; struct urb *intr_urb; struct tx_agg tx_info[RTL8152_MAX_TX]; - struct rx_agg rx_info[RTL8152_MAX_RX]; + struct list_head rx_info, rx_used; struct list_head rx_done, tx_free; struct sk_buff_head tx_queue, rx_queue; spinlock_t rx_lock, tx_lock; @@@ -733,7 -729,6 +733,7 @@@ #ifdef CONFIG_PM_SLEEP struct notifier_block pm_notifier; #endif + struct tasklet_struct tx_tl;
struct rtl_ops { void (*init)(struct r8152 *); @@@ -749,21 -744,13 +749,21 @@@ void (*autosuspend_en)(struct r8152 *tp, bool enable); } rtl_ops;
+ atomic_t rx_count; + + bool eee_en; int intr_interval; u32 saved_wolopts; u32 msg_enable; u32 tx_qlen; u32 coalesce; + u32 rx_buf_sz; + u32 rx_copybreak; + u32 rx_pending; + u16 ocp_base; u16 speed; + u16 eee_adv; u8 *intr_buff; u8 version; u8 duplex; @@@ -1407,7 -1394,7 +1407,7 @@@ static void write_bulk_callback(struct return;
if (!skb_queue_empty(&tp->tx_queue)) - napi_schedule(&tp->napi); + tasklet_schedule(&tp->tx_tl); }
static void intr_callback(struct urb *urb) @@@ -1483,72 -1470,18 +1483,72 @@@ static inline void *tx_agg_align(void * return (void *)ALIGN((uintptr_t)data, TX_ALIGN); }
+static void free_rx_agg(struct r8152 *tp, struct rx_agg *agg) +{ + list_del(&agg->info_list); + + usb_free_urb(agg->urb); + put_page(agg->page); + kfree(agg); + + atomic_dec(&tp->rx_count); +} + +static struct rx_agg *alloc_rx_agg(struct r8152 *tp, gfp_t mflags) +{ + struct net_device *netdev = tp->netdev; + int node = netdev->dev.parent ? dev_to_node(netdev->dev.parent) : -1; + unsigned int order = get_order(tp->rx_buf_sz); + struct rx_agg *rx_agg; + unsigned long flags; + + rx_agg = kmalloc_node(sizeof(*rx_agg), mflags, node); + if (!rx_agg) + return NULL; + + rx_agg->page = alloc_pages(mflags | __GFP_COMP, order); + if (!rx_agg->page) + goto free_rx; + + rx_agg->buffer = page_address(rx_agg->page); + + rx_agg->urb = usb_alloc_urb(0, mflags); + if (!rx_agg->urb) + goto free_buf; + + rx_agg->context = tp; + + INIT_LIST_HEAD(&rx_agg->list); + INIT_LIST_HEAD(&rx_agg->info_list); + spin_lock_irqsave(&tp->rx_lock, flags); + list_add_tail(&rx_agg->info_list, &tp->rx_info); + spin_unlock_irqrestore(&tp->rx_lock, flags); + + atomic_inc(&tp->rx_count); + + return rx_agg; + +free_buf: + __free_pages(rx_agg->page, order); +free_rx: + kfree(rx_agg); + return NULL; +} + static void free_all_mem(struct r8152 *tp) { + struct rx_agg *agg, *agg_next; + unsigned long flags; int i;
- for (i = 0; i < RTL8152_MAX_RX; i++) { - usb_free_urb(tp->rx_info[i].urb); - tp->rx_info[i].urb = NULL; + spin_lock_irqsave(&tp->rx_lock, flags);
- kfree(tp->rx_info[i].buffer); - tp->rx_info[i].buffer = NULL; - tp->rx_info[i].head = NULL; - } + list_for_each_entry_safe(agg, agg_next, &tp->rx_info, info_list) + free_rx_agg(tp, agg); + + spin_unlock_irqrestore(&tp->rx_lock, flags); + + WARN_ON(atomic_read(&tp->rx_count));
for (i = 0; i < RTL8152_MAX_TX; i++) { usb_free_urb(tp->tx_info[i].urb); @@@ -1572,28 -1505,46 +1572,28 @@@ static int alloc_all_mem(struct r8152 * struct usb_interface *intf = tp->intf; struct usb_host_interface *alt = intf->cur_altsetting; struct usb_host_endpoint *ep_intr = alt->endpoint + 2; - struct urb *urb; int node, i; - u8 *buf;
node = netdev->dev.parent ? dev_to_node(netdev->dev.parent) : -1;
spin_lock_init(&tp->rx_lock); spin_lock_init(&tp->tx_lock); + INIT_LIST_HEAD(&tp->rx_info); INIT_LIST_HEAD(&tp->tx_free); INIT_LIST_HEAD(&tp->rx_done); skb_queue_head_init(&tp->tx_queue); skb_queue_head_init(&tp->rx_queue); + atomic_set(&tp->rx_count, 0);
for (i = 0; i < RTL8152_MAX_RX; i++) { - buf = kmalloc_node(agg_buf_sz, GFP_KERNEL, node); - if (!buf) + if (!alloc_rx_agg(tp, GFP_KERNEL)) goto err1; - - if (buf != rx_agg_align(buf)) { - kfree(buf); - buf = kmalloc_node(agg_buf_sz + RX_ALIGN, GFP_KERNEL, - node); - if (!buf) - goto err1; - } - - urb = usb_alloc_urb(0, GFP_KERNEL); - if (!urb) { - kfree(buf); - goto err1; - } - - INIT_LIST_HEAD(&tp->rx_info[i].list); - tp->rx_info[i].context = tp; - tp->rx_info[i].urb = urb; - tp->rx_info[i].buffer = buf; - tp->rx_info[i].head = rx_agg_align(buf); }
for (i = 0; i < RTL8152_MAX_TX; i++) { + struct urb *urb; + u8 *buf; + buf = kmalloc_node(agg_buf_sz, GFP_KERNEL, node); if (!buf) goto err1; @@@ -1959,46 -1910,6 +1959,46 @@@ return_result return checksum; }
+static inline bool rx_count_exceed(struct r8152 *tp) +{ + return atomic_read(&tp->rx_count) > RTL8152_MAX_RX; +} + +static inline int agg_offset(struct rx_agg *agg, void *addr) +{ + return (int)(addr - agg->buffer); +} + +static struct rx_agg *rtl_get_free_rx(struct r8152 *tp, gfp_t mflags) +{ + struct rx_agg *agg, *agg_next, *agg_free = NULL; + unsigned long flags; + + spin_lock_irqsave(&tp->rx_lock, flags); + + list_for_each_entry_safe(agg, agg_next, &tp->rx_used, list) { + if (page_count(agg->page) == 1) { + if (!agg_free) { + list_del_init(&agg->list); + agg_free = agg; + continue; + } + if (rx_count_exceed(tp)) { + list_del_init(&agg->list); + free_rx_agg(tp, agg); + } + break; + } + } + + spin_unlock_irqrestore(&tp->rx_lock, flags); + + if (!agg_free && atomic_read(&tp->rx_count) < tp->rx_pending) + agg_free = alloc_rx_agg(tp, mflags); + + return agg_free; +} + static int rx_bottom(struct r8152 *tp, int budget) { unsigned long flags; @@@ -2034,7 -1945,7 +2034,7 @@@
list_for_each_safe(cursor, next, &rx_queue) { struct rx_desc *rx_desc; - struct rx_agg *agg; + struct rx_agg *agg, *agg_free; int len_used = 0; struct urb *urb; u8 *rx_data; @@@ -2046,16 -1957,14 +2046,16 @@@ if (urb->actual_length < ETH_ZLEN) goto submit;
- rx_desc = agg->head; - rx_data = agg->head; + agg_free = rtl_get_free_rx(tp, GFP_ATOMIC); + + rx_desc = agg->buffer; + rx_data = agg->buffer; len_used += sizeof(struct rx_desc);
while (urb->actual_length > len_used) { struct net_device *netdev = tp->netdev; struct net_device_stats *stats = &netdev->stats; - unsigned int pkt_len; + unsigned int pkt_len, rx_frag_head_sz; struct sk_buff *skb;
/* limite the skb numbers for rx_queue */ @@@ -2073,37 -1982,22 +2073,37 @@@ pkt_len -= ETH_FCS_LEN; rx_data += sizeof(struct rx_desc);
- skb = napi_alloc_skb(napi, pkt_len); + if (!agg_free || tp->rx_copybreak > pkt_len) + rx_frag_head_sz = pkt_len; + else + rx_frag_head_sz = tp->rx_copybreak; + + skb = napi_alloc_skb(napi, rx_frag_head_sz); if (!skb) { stats->rx_dropped++; goto find_next_rx; }
skb->ip_summed = r8152_rx_csum(tp, rx_desc); - memcpy(skb->data, rx_data, pkt_len); - skb_put(skb, pkt_len); + memcpy(skb->data, rx_data, rx_frag_head_sz); + skb_put(skb, rx_frag_head_sz); + pkt_len -= rx_frag_head_sz; + rx_data += rx_frag_head_sz; + if (pkt_len) { + skb_add_rx_frag(skb, 0, agg->page, + agg_offset(agg, rx_data), + pkt_len, + SKB_DATA_ALIGN(pkt_len)); + get_page(agg->page); + } + skb->protocol = eth_type_trans(skb, netdev); rtl_rx_vlan_tag(rx_desc, skb); if (work_done < budget) { - napi_gro_receive(napi, skb); work_done++; stats->rx_packets++; - stats->rx_bytes += pkt_len; + stats->rx_bytes += skb->len; + napi_gro_receive(napi, skb); } else { __skb_queue_tail(&tp->rx_queue, skb); } @@@ -2111,24 -2005,10 +2111,24 @@@ find_next_rx: rx_data = rx_agg_align(rx_data + pkt_len + ETH_FCS_LEN); rx_desc = (struct rx_desc *)rx_data; - len_used = (int)(rx_data - (u8 *)agg->head); + len_used = agg_offset(agg, rx_data); len_used += sizeof(struct rx_desc); }
+ WARN_ON(!agg_free && page_count(agg->page) > 1); + + if (agg_free) { + spin_lock_irqsave(&tp->rx_lock, flags); + if (page_count(agg->page) == 1) { + list_add(&agg_free->list, &tp->rx_used); + } else { + list_add_tail(&agg->list, &tp->rx_used); + agg = agg_free; + urb = agg->urb; + } + spin_unlock_irqrestore(&tp->rx_lock, flags); + } + submit: if (!ret) { ret = r8152_submit_rx(tp, agg, GFP_ATOMIC); @@@ -2185,12 -2065,8 +2185,12 @@@ static void tx_bottom(struct r8152 *tp } while (res == 0); }
-static void bottom_half(struct r8152 *tp) +static void bottom_half(unsigned long data) { + struct r8152 *tp; + + tp = (struct r8152 *)data; + if (test_bit(RTL8152_UNPLUG, &tp->flags)) return;
@@@ -2202,7 -2078,7 +2202,7 @@@ if (!netif_carrier_ok(tp->netdev)) return;
- clear_bit(SCHEDULE_NAPI, &tp->flags); + clear_bit(SCHEDULE_TASKLET, &tp->flags);
tx_bottom(tp); } @@@ -2213,12 -2089,16 +2213,12 @@@ static int r8152_poll(struct napi_struc int work_done;
work_done = rx_bottom(tp, budget); - bottom_half(tp);
if (work_done < budget) { if (!napi_complete_done(napi, work_done)) goto out; if (!list_empty(&tp->rx_done)) napi_schedule(napi); - else if (!skb_queue_empty(&tp->tx_queue) && - !list_empty(&tp->tx_free)) - napi_schedule(napi); }
out: @@@ -2236,7 -2116,7 +2236,7 @@@ int r8152_submit_rx(struct r8152 *tp, s return 0;
usb_fill_bulk_urb(agg->urb, tp->udev, usb_rcvbulkpipe(tp->udev, 1), - agg->head, agg_buf_sz, + agg->buffer, tp->rx_buf_sz, (usb_complete_t)read_bulk_callback, agg);
ret = usb_submit_urb(agg->urb, mem_flags); @@@ -2372,11 -2252,11 +2372,11 @@@ static netdev_tx_t rtl8152_start_xmit(s
if (!list_empty(&tp->tx_free)) { if (test_bit(SELECTIVE_SUSPEND, &tp->flags)) { - set_bit(SCHEDULE_NAPI, &tp->flags); + set_bit(SCHEDULE_TASKLET, &tp->flags); schedule_delayed_work(&tp->schedule, 0); } else { usb_mark_last_busy(tp->udev); - napi_schedule(&tp->napi); + tasklet_schedule(&tp->tx_tl); } } else if (skb_queue_len(&tp->tx_queue) > tp->tx_qlen) { netif_stop_queue(netdev); @@@ -2453,80 -2333,44 +2453,80 @@@ static void rxdy_gated_en(struct r8152
static int rtl_start_rx(struct r8152 *tp) { - int i, ret = 0; + struct rx_agg *agg, *agg_next; + struct list_head tmp_list; + unsigned long flags; + int ret = 0, i = 0;
- INIT_LIST_HEAD(&tp->rx_done); - for (i = 0; i < RTL8152_MAX_RX; i++) { - INIT_LIST_HEAD(&tp->rx_info[i].list); - ret = r8152_submit_rx(tp, &tp->rx_info[i], GFP_KERNEL); - if (ret) - break; - } + INIT_LIST_HEAD(&tmp_list);
- if (ret && ++i < RTL8152_MAX_RX) { - struct list_head rx_queue; - unsigned long flags; + spin_lock_irqsave(&tp->rx_lock, flags);
- INIT_LIST_HEAD(&rx_queue); + INIT_LIST_HEAD(&tp->rx_done); + INIT_LIST_HEAD(&tp->rx_used);
- do { - struct rx_agg *agg = &tp->rx_info[i++]; - struct urb *urb = agg->urb; + list_splice_init(&tp->rx_info, &tmp_list);
- urb->actual_length = 0; - list_add_tail(&agg->list, &rx_queue); - } while (i < RTL8152_MAX_RX); + spin_unlock_irqrestore(&tp->rx_lock, flags);
- spin_lock_irqsave(&tp->rx_lock, flags); - list_splice_tail(&rx_queue, &tp->rx_done); - spin_unlock_irqrestore(&tp->rx_lock, flags); + list_for_each_entry_safe(agg, agg_next, &tmp_list, info_list) { + INIT_LIST_HEAD(&agg->list); + + /* Only RTL8152_MAX_RX rx_agg need to be submitted. */ + if (++i > RTL8152_MAX_RX) { + spin_lock_irqsave(&tp->rx_lock, flags); + list_add_tail(&agg->list, &tp->rx_used); + spin_unlock_irqrestore(&tp->rx_lock, flags); + } else if (unlikely(ret < 0)) { + spin_lock_irqsave(&tp->rx_lock, flags); + list_add_tail(&agg->list, &tp->rx_done); + spin_unlock_irqrestore(&tp->rx_lock, flags); + } else { + ret = r8152_submit_rx(tp, agg, GFP_KERNEL); + } }
+ spin_lock_irqsave(&tp->rx_lock, flags); + WARN_ON(!list_empty(&tp->rx_info)); + list_splice(&tmp_list, &tp->rx_info); + spin_unlock_irqrestore(&tp->rx_lock, flags); + return ret; }
static int rtl_stop_rx(struct r8152 *tp) { - int i; + struct rx_agg *agg, *agg_next; + struct list_head tmp_list; + unsigned long flags; + + INIT_LIST_HEAD(&tmp_list); + + /* The usb_kill_urb() couldn't be used in atomic. + * Therefore, move the list of rx_info to a tmp one. + * Then, list_for_each_entry_safe could be used without + * spin lock. + */ + + spin_lock_irqsave(&tp->rx_lock, flags); + list_splice_init(&tp->rx_info, &tmp_list); + spin_unlock_irqrestore(&tp->rx_lock, flags); + + list_for_each_entry_safe(agg, agg_next, &tmp_list, info_list) { + /* At least RTL8152_MAX_RX rx_agg have the page_count being + * equal to 1, so the other ones could be freed safely. + */ + if (page_count(agg->page) > 1) + free_rx_agg(tp, agg); + else + usb_kill_urb(agg->urb); + }
- for (i = 0; i < RTL8152_MAX_RX; i++) - usb_kill_urb(tp->rx_info[i].urb); + /* Move back the list of temp to the rx_info */ + spin_lock_irqsave(&tp->rx_lock, flags); + WARN_ON(!list_empty(&tp->rx_info)); + list_splice(&tmp_list, &tp->rx_info); + spin_unlock_irqrestore(&tp->rx_lock, flags);
while (!skb_queue_empty(&tp->rx_queue)) dev_kfree_skb(__skb_dequeue(&tp->rx_queue)); @@@ -2606,7 -2450,7 +2606,7 @@@ static void r8153_set_rx_early_timeout(
static void r8153_set_rx_early_size(struct r8152 *tp) { - u32 ocp_data = agg_buf_sz - rx_reserved_size(tp->netdev->mtu); + u32 ocp_data = tp->rx_buf_sz - rx_reserved_size(tp->netdev->mtu);
switch (tp->version) { case RTL_VER_03: @@@ -3205,76 -3049,10 +3205,76 @@@ static void r8152_eee_en(struct r8152 * ocp_reg_write(tp, OCP_EEE_CONFIG3, config3); }
-static void r8152b_enable_eee(struct r8152 *tp) +static void r8153_eee_en(struct r8152 *tp, bool enable) { - r8152_eee_en(tp, true); - r8152_mmd_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, MDIO_EEE_100TX); + u32 ocp_data; + u16 config; + + ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_EEE_CR); + config = ocp_reg_read(tp, OCP_EEE_CFG); + + if (enable) { + ocp_data |= EEE_RX_EN | EEE_TX_EN; + config |= EEE10_EN; + } else { + ocp_data &= ~(EEE_RX_EN | EEE_TX_EN); + config &= ~EEE10_EN; + } + + ocp_write_word(tp, MCU_TYPE_PLA, PLA_EEE_CR, ocp_data); + ocp_reg_write(tp, OCP_EEE_CFG, config); +} + +static void r8153b_eee_en(struct r8152 *tp, bool enable) +{ + r8153_eee_en(tp, enable); + + if (enable) + r8153b_ups_flags_w1w0(tp, UPS_FLAGS_EN_EEE, 0); + else + r8153b_ups_flags_w1w0(tp, 0, UPS_FLAGS_EN_EEE); +} + +static void rtl_eee_enable(struct r8152 *tp, bool enable) +{ + switch (tp->version) { + case RTL_VER_01: + case RTL_VER_02: + case RTL_VER_07: + if (enable) { + r8152_eee_en(tp, true); + r8152_mmd_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, + tp->eee_adv); + } else { + r8152_eee_en(tp, false); + r8152_mmd_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, 0); + } + break; + case RTL_VER_03: + case RTL_VER_04: + case RTL_VER_05: + case RTL_VER_06: + if (enable) { + r8153_eee_en(tp, true); + ocp_reg_write(tp, OCP_EEE_ADV, tp->eee_adv); + } else { + r8153_eee_en(tp, false); + ocp_reg_write(tp, OCP_EEE_ADV, 0); + } + break; + case RTL_VER_08: + case RTL_VER_09: + if (enable) { + r8153b_eee_en(tp, true); + ocp_reg_write(tp, OCP_EEE_ADV, tp->eee_adv); + } else { + r8153b_eee_en(tp, false); + ocp_reg_write(tp, OCP_EEE_ADV, 0); + } + break; + default: + break; + } }
static void r8152b_enable_fc(struct r8152 *tp) @@@ -3295,7 -3073,7 +3295,7 @@@ static void rtl8152_disable(struct r815
static void r8152b_hw_phy_cfg(struct r8152 *tp) { - r8152b_enable_eee(tp); + rtl_eee_enable(tp, tp->eee_en); r8152_aldps_en(tp, true); r8152b_enable_fc(tp);
@@@ -3489,6 -3267,36 +3489,6 @@@ static void r8153b_aldps_en(struct r815 r8153b_ups_flags_w1w0(tp, 0, UPS_FLAGS_EN_ALDPS); }
-static void r8153_eee_en(struct r8152 *tp, bool enable) -{ - u32 ocp_data; - u16 config; - - ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_EEE_CR); - config = ocp_reg_read(tp, OCP_EEE_CFG); - - if (enable) { - ocp_data |= EEE_RX_EN | EEE_TX_EN; - config |= EEE10_EN; - } else { - ocp_data &= ~(EEE_RX_EN | EEE_TX_EN); - config &= ~EEE10_EN; - } - - ocp_write_word(tp, MCU_TYPE_PLA, PLA_EEE_CR, ocp_data); - ocp_reg_write(tp, OCP_EEE_CFG, config); -} - -static void r8153b_eee_en(struct r8152 *tp, bool enable) -{ - r8153_eee_en(tp, enable); - - if (enable) - r8153b_ups_flags_w1w0(tp, UPS_FLAGS_EN_EEE, 0); - else - r8153b_ups_flags_w1w0(tp, 0, UPS_FLAGS_EN_EEE); -} - static void r8153b_enable_fc(struct r8152 *tp) { r8152b_enable_fc(tp); @@@ -3504,7 -3312,8 +3504,7 @@@ static void r8153_hw_phy_cfg(struct r81 r8153_aldps_en(tp, false);
/* disable EEE before updating the PHY parameters */ - r8153_eee_en(tp, false); - ocp_reg_write(tp, OCP_EEE_ADV, 0); + rtl_eee_enable(tp, false);
if (tp->version == RTL_VER_03) { data = ocp_reg_read(tp, OCP_EEE_CFG); @@@ -3535,8 -3344,8 +3535,8 @@@ sram_write(tp, SRAM_10M_AMP1, 0x00af); sram_write(tp, SRAM_10M_AMP2, 0x0208);
- r8153_eee_en(tp, true); - ocp_reg_write(tp, OCP_EEE_ADV, MDIO_EEE_1000T | MDIO_EEE_100TX); + if (tp->eee_en) + rtl_eee_enable(tp, true);
r8153_aldps_en(tp, true); r8152b_enable_fc(tp); @@@ -3576,7 -3385,8 +3576,7 @@@ static void r8153b_hw_phy_cfg(struct r8 r8153b_aldps_en(tp, false);
/* disable EEE before updating the PHY parameters */ - r8153b_eee_en(tp, false); - ocp_reg_write(tp, OCP_EEE_ADV, 0); + rtl_eee_enable(tp, false);
r8153b_green_en(tp, test_bit(GREEN_ETHERNET, &tp->flags));
@@@ -3638,8 -3448,8 +3638,8 @@@
r8153b_ups_flags_w1w0(tp, ups_flags, 0);
- r8153b_eee_en(tp, true); - ocp_reg_write(tp, OCP_EEE_ADV, MDIO_EEE_1000T | MDIO_EEE_100TX); + if (tp->eee_en) + rtl_eee_enable(tp, true);
r8153b_aldps_en(tp, true); r8153b_enable_fc(tp); @@@ -4060,11 -3870,9 +4060,11 @@@ static void set_carrier(struct r8152 *t } else { if (netif_carrier_ok(netdev)) { netif_carrier_off(netdev); + tasklet_disable(&tp->tx_tl); napi_disable(napi); tp->rtl_ops.disable(tp); napi_enable(napi); + tasklet_enable(&tp->tx_tl); netif_info(tp, link, netdev, "carrier off\n"); } } @@@ -4097,10 -3905,10 +4097,10 @@@ static void rtl_work_func_t(struct work if (test_and_clear_bit(RTL8152_SET_RX_MODE, &tp->flags)) _rtl8152_set_rx_mode(tp->netdev);
- /* don't schedule napi before linking */ - if (test_and_clear_bit(SCHEDULE_NAPI, &tp->flags) && + /* don't schedule tasket before linking */ + if (test_and_clear_bit(SCHEDULE_TASKLET, &tp->flags) && netif_carrier_ok(tp->netdev)) - napi_schedule(&tp->napi); + tasklet_schedule(&tp->tx_tl);
mutex_unlock(&tp->control);
@@@ -4186,7 -3994,6 +4186,7 @@@ static int rtl8152_open(struct net_devi goto out_unlock; } napi_enable(&tp->napi); + tasklet_enable(&tp->tx_tl);
mutex_unlock(&tp->control);
@@@ -4214,9 -4021,7 +4214,8 @@@ static int rtl8152_close(struct net_dev #ifdef CONFIG_PM_SLEEP unregister_pm_notifier(&tp->pm_notifier); #endif + tasklet_disable(&tp->tx_tl); - if (!test_bit(RTL8152_UNPLUG, &tp->flags)) - napi_disable(&tp->napi); + napi_disable(&tp->napi); clear_bit(WORK_ENABLE, &tp->flags); usb_kill_urb(tp->intr_urb); cancel_delayed_work_sync(&tp->schedule); @@@ -4484,7 -4289,6 +4483,7 @@@ static int rtl8152_pre_reset(struct usb return 0;
netif_stop_queue(netdev); + tasklet_disable(&tp->tx_tl); napi_disable(&tp->napi); clear_bit(WORK_ENABLE, &tp->flags); usb_kill_urb(tp->intr_urb); @@@ -4528,7 -4332,6 +4527,7 @@@ static int rtl8152_post_reset(struct us }
napi_enable(&tp->napi); + tasklet_enable(&tp->tx_tl); netif_wake_queue(netdev); usb_submit_urb(tp->intr_urb, GFP_KERNEL);
@@@ -4682,12 -4485,10 +4681,12 @@@ static int rtl8152_system_suspend(struc
clear_bit(WORK_ENABLE, &tp->flags); usb_kill_urb(tp->intr_urb); + tasklet_disable(&tp->tx_tl); napi_disable(napi); cancel_delayed_work_sync(&tp->schedule); tp->rtl_ops.down(tp); napi_enable(napi); + tasklet_enable(&tp->tx_tl); }
return 0; @@@ -4930,7 -4731,7 +4929,7 @@@ static void rtl8152_get_strings(struct
static int r8152_get_eee(struct r8152 *tp, struct ethtool_eee *eee) { - u32 ocp_data, lp, adv, supported = 0; + u32 lp, adv, supported = 0; u16 val;
val = r8152_mmd_read(tp, MDIO_MMD_PCS, MDIO_PCS_EEE_ABLE); @@@ -4942,10 -4743,13 +4941,10 @@@ val = r8152_mmd_read(tp, MDIO_MMD_AN, MDIO_AN_EEE_LPABLE); lp = mmd_eee_adv_to_ethtool_adv_t(val);
- ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_EEE_CR); - ocp_data &= EEE_RX_EN | EEE_TX_EN; - - eee->eee_enabled = !!ocp_data; + eee->eee_enabled = tp->eee_en; eee->eee_active = !!(supported & adv & lp); eee->supported = supported; - eee->advertised = adv; + eee->advertised = tp->eee_adv; eee->lp_advertised = lp;
return 0; @@@ -4955,17 -4759,19 +4954,17 @@@ static int r8152_set_eee(struct r8152 * { u16 val = ethtool_adv_to_mmd_eee_adv_t(eee->advertised);
- r8152_eee_en(tp, eee->eee_enabled); + tp->eee_en = eee->eee_enabled; + tp->eee_adv = val;
- if (!eee->eee_enabled) - val = 0; - - r8152_mmd_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val); + rtl_eee_enable(tp, tp->eee_en);
return 0; }
static int r8153_get_eee(struct r8152 *tp, struct ethtool_eee *eee) { - u32 ocp_data, lp, adv, supported = 0; + u32 lp, adv, supported = 0; u16 val;
val = ocp_reg_read(tp, OCP_EEE_ABLE); @@@ -4977,15 -4783,46 +4976,15 @@@ val = ocp_reg_read(tp, OCP_EEE_LPABLE); lp = mmd_eee_adv_to_ethtool_adv_t(val);
- ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_EEE_CR); - ocp_data &= EEE_RX_EN | EEE_TX_EN; - - eee->eee_enabled = !!ocp_data; + eee->eee_enabled = tp->eee_en; eee->eee_active = !!(supported & adv & lp); eee->supported = supported; - eee->advertised = adv; + eee->advertised = tp->eee_adv; eee->lp_advertised = lp;
return 0; }
-static int r8153_set_eee(struct r8152 *tp, struct ethtool_eee *eee) -{ - u16 val = ethtool_adv_to_mmd_eee_adv_t(eee->advertised); - - r8153_eee_en(tp, eee->eee_enabled); - - if (!eee->eee_enabled) - val = 0; - - ocp_reg_write(tp, OCP_EEE_ADV, val); - - return 0; -} - -static int r8153b_set_eee(struct r8152 *tp, struct ethtool_eee *eee) -{ - u16 val = ethtool_adv_to_mmd_eee_adv_t(eee->advertised); - - r8153b_eee_en(tp, eee->eee_enabled); - - if (!eee->eee_enabled) - val = 0; - - ocp_reg_write(tp, OCP_EEE_ADV, val); - - return 0; -} - static int rtl_ethtool_get_eee(struct net_device *net, struct ethtool_eee *edata) { @@@ -5119,77 -4956,6 +5118,77 @@@ static int rtl8152_set_coalesce(struct return ret; }
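The chip-specific r8153_set_eee() and r8153b_set_eee() callbacks removed above are replaced by the shared r8152_set_eee(), which only caches the requested state in tp->eee_en and tp->eee_adv before handing it to rtl_eee_enable(). rtl_eee_enable() itself lies outside these hunks; purely as an illustration (reusing the register writes from the removed r8153_eee_en() above, not claiming to be the driver's actual helper), the consolidated apply step amounts to something like:

	/* Illustrative sketch only; not the driver's rtl_eee_enable().
	 * Flip the MAC/PHY EEE enable bits and program the advertisement
	 * from the cached tp->eee_adv (or clear it) in one place.
	 */
	static void example_eee_apply(struct r8152 *tp, bool enable)
	{
		u32 ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_EEE_CR);
		u16 config = ocp_reg_read(tp, OCP_EEE_CFG);

		if (enable) {
			ocp_data |= EEE_RX_EN | EEE_TX_EN;
			config |= EEE10_EN;
		} else {
			ocp_data &= ~(EEE_RX_EN | EEE_TX_EN);
			config &= ~EEE10_EN;
		}

		ocp_write_word(tp, MCU_TYPE_PLA, PLA_EEE_CR, ocp_data);
		ocp_reg_write(tp, OCP_EEE_CFG, config);
		ocp_reg_write(tp, OCP_EEE_ADV, enable ? tp->eee_adv : 0);
	}

With one such apply helper, the ethtool path and the hw_phy_cfg paths (which disable EEE before reprogramming the PHY and re-enable it afterwards only when tp->eee_en is set) stay consistent with a single cached source of truth.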
+static int rtl8152_get_tunable(struct net_device *netdev, + const struct ethtool_tunable *tunable, void *d) +{ + struct r8152 *tp = netdev_priv(netdev); + + switch (tunable->id) { + case ETHTOOL_RX_COPYBREAK: + *(u32 *)d = tp->rx_copybreak; + break; + default: + return -EOPNOTSUPP; + } + + return 0; +} + +static int rtl8152_set_tunable(struct net_device *netdev, + const struct ethtool_tunable *tunable, + const void *d) +{ + struct r8152 *tp = netdev_priv(netdev); + u32 val; + + switch (tunable->id) { + case ETHTOOL_RX_COPYBREAK: + val = *(u32 *)d; + if (val < ETH_ZLEN) { + netif_err(tp, rx_err, netdev, + "Invalid rx copy break value\n"); + return -EINVAL; + } + + if (tp->rx_copybreak != val) { + napi_disable(&tp->napi); + tp->rx_copybreak = val; + napi_enable(&tp->napi); + } + break; + default: + return -EOPNOTSUPP; + } + + return 0; +} + +static void rtl8152_get_ringparam(struct net_device *netdev, + struct ethtool_ringparam *ring) +{ + struct r8152 *tp = netdev_priv(netdev); + + ring->rx_max_pending = RTL8152_RX_MAX_PENDING; + ring->rx_pending = tp->rx_pending; +} + +static int rtl8152_set_ringparam(struct net_device *netdev, + struct ethtool_ringparam *ring) +{ + struct r8152 *tp = netdev_priv(netdev); + + if (ring->rx_pending < (RTL8152_MAX_RX * 2)) + return -EINVAL; + + if (tp->rx_pending != ring->rx_pending) { + napi_disable(&tp->napi); + tp->rx_pending = ring->rx_pending; + napi_enable(&tp->napi); + } + + return 0; +} + static const struct ethtool_ops ops = { .get_drvinfo = rtl8152_get_drvinfo, .get_link = ethtool_op_get_link, @@@ -5207,10 -4973,6 +5206,10 @@@ .set_eee = rtl_ethtool_set_eee, .get_link_ksettings = rtl8152_get_link_ksettings, .set_link_ksettings = rtl8152_set_link_ksettings, + .get_tunable = rtl8152_get_tunable, + .set_tunable = rtl8152_set_tunable, + .get_ringparam = rtl8152_get_ringparam, + .set_ringparam = rtl8152_set_ringparam, };
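The new tunable and ringparam hooks are reachable through stock ethtool; assuming the device shows up as eth0 and the installed ethtool knows these subcommands, something like "ethtool --set-tunable eth0 rx-copybreak 1024" adjusts the copybreak threshold (validated against ETH_ZLEN above), and "ethtool -G eth0 rx 200" resizes the receive ring within the lower bound enforced by rtl8152_set_ringparam(); the interface name and numbers here are only examples.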
static int rtl8152_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd) @@@ -5355,9 -5117,6 +5354,9 @@@ static int rtl_ops_init(struct r8152 *t ops->in_nway = rtl8152_in_nway; ops->hw_phy_cfg = r8152b_hw_phy_cfg; ops->autosuspend_en = rtl_runtime_suspend_enable; + tp->rx_buf_sz = 16 * 1024; + tp->eee_en = true; + tp->eee_adv = MDIO_EEE_100TX; break;
case RTL_VER_03: @@@ -5371,13 -5130,10 +5370,13 @@@ ops->down = rtl8153_down; ops->unload = rtl8153_unload; ops->eee_get = r8153_get_eee; - ops->eee_set = r8153_set_eee; + ops->eee_set = r8152_set_eee; ops->in_nway = rtl8153_in_nway; ops->hw_phy_cfg = r8153_hw_phy_cfg; ops->autosuspend_en = rtl8153_runtime_enable; + tp->rx_buf_sz = 32 * 1024; + tp->eee_en = true; + tp->eee_adv = MDIO_EEE_1000T | MDIO_EEE_100TX; break;
case RTL_VER_08: @@@ -5389,13 -5145,10 +5388,13 @@@ ops->down = rtl8153b_down; ops->unload = rtl8153b_unload; ops->eee_get = r8153_get_eee; - ops->eee_set = r8153b_set_eee; + ops->eee_set = r8152_set_eee; ops->in_nway = rtl8153_in_nway; ops->hw_phy_cfg = r8153b_hw_phy_cfg; ops->autosuspend_en = rtl8153b_runtime_enable; + tp->rx_buf_sz = 32 * 1024; + tp->eee_en = true; + tp->eee_adv = MDIO_EEE_1000T | MDIO_EEE_100TX; break;
default: @@@ -5517,8 -5270,6 +5516,8 @@@ static int rtl8152_probe(struct usb_int mutex_init(&tp->control); INIT_DELAYED_WORK(&tp->schedule, rtl_work_func_t); INIT_DELAYED_WORK(&tp->hw_phy_work, rtl_hw_phy_work_func_t); + tasklet_init(&tp->tx_tl, bottom_half, (unsigned long)tp); + tasklet_disable(&tp->tx_tl);
netdev->netdev_ops = &rtl8152_netdev_ops; netdev->watchdog_timeo = RTL8152_TX_TIMEOUT; @@@ -5572,9 -5323,6 +5571,9 @@@ tp->speed = tp->mii.supports_gmii ? SPEED_1000 : SPEED_100; tp->duplex = DUPLEX_FULL;
+ tp->rx_copybreak = RTL8152_RXFG_HEADSZ; + tp->rx_pending = 10 * RTL8152_MAX_RX; + intf->needs_remote_wakeup = 1;
tp->rtl_ops.init(tp); @@@ -5604,8 -5352,6 +5603,7 @@@ return 0;
out1: - netif_napi_del(&tp->napi); + tasklet_kill(&tp->tx_tl); usb_set_intfdata(intf, NULL); out: free_netdev(netdev); @@@ -5620,9 -5366,7 +5618,8 @@@ static void rtl8152_disconnect(struct u if (tp) { rtl_set_unplug(tp);
- netif_napi_del(&tp->napi); unregister_netdev(tp->netdev); + tasklet_kill(&tp->tx_tl); cancel_delayed_work_sync(&tp->hw_phy_work); tp->rtl_ops.unload(tp); free_netdev(tp->netdev); diff --combined include/linux/phy.h index d26779f1fb6b,2fb9c8ffaf10..a7ecbe0e55aa --- a/include/linux/phy.h +++ b/include/linux/phy.h @@@ -403,8 -403,6 +403,8 @@@ struct phy_device __ETHTOOL_DECLARE_LINK_MODE_MASK(supported); __ETHTOOL_DECLARE_LINK_MODE_MASK(advertising); __ETHTOOL_DECLARE_LINK_MODE_MASK(lp_advertising); + /* used with phy_speed_down */ + __ETHTOOL_DECLARE_LINK_MODE_MASK(adv_old);
/* Energy efficient ethernet modes which should be prohibited */ u32 eee_broken_modes; @@@ -667,7 -665,6 +667,7 @@@ size_t phy_speeds(unsigned int *speeds unsigned long *mask); void of_set_phy_supported(struct phy_device *phydev); void of_set_phy_eee_broken(struct phy_device *phydev); +int phy_speed_down_core(struct phy_device *phydev);
/** * phy_is_started - Convenience function to check whether PHY is started @@@ -987,8 -984,6 +987,8 @@@ int phy_select_page(struct phy_device * int phy_restore_page(struct phy_device *phydev, int oldpage, int ret); int phy_read_paged(struct phy_device *phydev, int page, u32 regnum); int phy_write_paged(struct phy_device *phydev, int page, u32 regnum, u16 val); +int phy_modify_paged_changed(struct phy_device *phydev, int page, u32 regnum, + u16 mask, u16 set); int phy_modify_paged(struct phy_device *phydev, int page, u32 regnum, u16 mask, u16 set);
@@@ -1069,11 -1064,12 +1069,11 @@@ void phy_attached_print(struct phy_devi void phy_attached_info(struct phy_device *phydev);
/* Clause 22 PHY */ -int genphy_config_init(struct phy_device *phydev); int genphy_read_abilities(struct phy_device *phydev); int genphy_setup_forced(struct phy_device *phydev); int genphy_restart_aneg(struct phy_device *phydev); int genphy_config_eee_advert(struct phy_device *phydev); -int genphy_config_aneg(struct phy_device *phydev); +int __genphy_config_aneg(struct phy_device *phydev, bool changed); int genphy_aneg_done(struct phy_device *phydev); int genphy_update_link(struct phy_device *phydev); int genphy_read_status(struct phy_device *phydev); @@@ -1081,12 -1077,6 +1081,12 @@@ int genphy_suspend(struct phy_device *p int genphy_resume(struct phy_device *phydev); int genphy_loopback(struct phy_device *phydev, bool enable); int genphy_soft_reset(struct phy_device *phydev); + +static inline int genphy_config_aneg(struct phy_device *phydev) +{ + return __genphy_config_aneg(phydev, false); +} + static inline int genphy_no_soft_reset(struct phy_device *phydev) { return 0; @@@ -1117,6 -1107,7 +1117,7 @@@ int genphy_c45_an_disable_aneg(struct p int genphy_c45_read_mdix(struct phy_device *phydev); int genphy_c45_pma_read_abilities(struct phy_device *phydev); int genphy_c45_read_status(struct phy_device *phydev); + int genphy_c45_config_aneg(struct phy_device *phydev);
/* The gen10g_* functions are the old Clause 45 stub */ int gen10g_config_aneg(struct phy_device *phydev); diff --combined net/batman-adv/bat_v_ogm.c index 319249f0f85f,bc06e3cdfa84..dc4f7430cb5a --- a/net/batman-adv/bat_v_ogm.c +++ b/net/batman-adv/bat_v_ogm.c @@@ -17,14 -17,12 +17,14 @@@ #include <linux/kernel.h> #include <linux/kref.h> #include <linux/list.h> +#include <linux/lockdep.h> #include <linux/netdevice.h> #include <linux/random.h> #include <linux/rculist.h> #include <linux/rcupdate.h> #include <linux/skbuff.h> #include <linux/slab.h> +#include <linux/spinlock.h> #include <linux/stddef.h> #include <linux/string.h> #include <linux/types.h> @@@ -79,20 -77,6 +79,20 @@@ struct batadv_orig_node *batadv_v_ogm_o }
/** + * batadv_v_ogm_start_queue_timer() - restart the OGM aggregation timer + * @hard_iface: the interface to use to send the OGM + */ +static void batadv_v_ogm_start_queue_timer(struct batadv_hard_iface *hard_iface) +{ + unsigned int msecs = BATADV_MAX_AGGREGATION_MS * 1000; + + /* msecs * [0.9, 1.1] */ + msecs += prandom_u32() % (msecs / 5) - (msecs / 10); + queue_delayed_work(batadv_event_workqueue, &hard_iface->bat_v.aggr_wq, + msecs_to_jiffies(msecs / 1000)); +} + +/** * batadv_v_ogm_start_timer() - restart the OGM sending timer * @bat_priv: the bat priv with all the soft interface information */ @@@ -132,130 -116,6 +132,130 @@@ static void batadv_v_ogm_send_to_if(str }
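For a quick sanity check of the jitter above (assuming BATADV_MAX_AGGREGATION_MS is 100, its usual value in batman-adv's main.h): msecs starts at 100 * 1000 = 100000, prandom_u32() % (msecs / 5) yields a value in 0..19999, and subtracting msecs / 10 = 10000 shifts that to -10000..9999, so after the final division by 1000 the aggregation timer is rearmed somewhere in the 90..109 ms window, i.e. the nominal interval scaled by roughly [0.9, 1.1) as the comment says.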
/** + * batadv_v_ogm_len() - OGMv2 packet length + * @skb: the OGM to check + * + * Return: Length of the given OGMv2 packet, including tvlv length, excluding + * ethernet header length. + */ +static unsigned int batadv_v_ogm_len(struct sk_buff *skb) +{ + struct batadv_ogm2_packet *ogm_packet; + + ogm_packet = (struct batadv_ogm2_packet *)skb->data; + return BATADV_OGM2_HLEN + ntohs(ogm_packet->tvlv_len); +} + +/** + * batadv_v_ogm_queue_left() - check if given OGM still fits aggregation queue + * @skb: the OGM to check + * @hard_iface: the interface to use to send the OGM + * + * Caller needs to hold the hard_iface->bat_v.aggr_list_lock. + * + * Return: True, if the given OGMv2 packet still fits, false otherwise. + */ +static bool batadv_v_ogm_queue_left(struct sk_buff *skb, + struct batadv_hard_iface *hard_iface) +{ + unsigned int max = min_t(unsigned int, hard_iface->net_dev->mtu, + BATADV_MAX_AGGREGATION_BYTES); + unsigned int ogm_len = batadv_v_ogm_len(skb); + + lockdep_assert_held(&hard_iface->bat_v.aggr_list_lock); + + return hard_iface->bat_v.aggr_len + ogm_len <= max; +} + +/** + * batadv_v_ogm_aggr_list_free - free all elements in an aggregation queue + * @hard_iface: the interface holding the aggregation queue + * + * Empties the OGMv2 aggregation queue and frees all the skbs it contained. + * + * Caller needs to hold the hard_iface->bat_v.aggr_list_lock. + */ +static void batadv_v_ogm_aggr_list_free(struct batadv_hard_iface *hard_iface) +{ + struct sk_buff *skb; + + lockdep_assert_held(&hard_iface->bat_v.aggr_list_lock); + + while ((skb = skb_dequeue(&hard_iface->bat_v.aggr_list))) + kfree_skb(skb); + + hard_iface->bat_v.aggr_len = 0; +} + +/** + * batadv_v_ogm_aggr_send() - flush & send aggregation queue + * @hard_iface: the interface with the aggregation queue to flush + * + * Aggregates all OGMv2 packets currently in the aggregation queue into a + * single OGMv2 packet and transmits this aggregate. + * + * The aggregation queue is empty after this call. + * + * Caller needs to hold the hard_iface->bat_v.aggr_list_lock. 
+ */ +static void batadv_v_ogm_aggr_send(struct batadv_hard_iface *hard_iface) +{ + unsigned int aggr_len = hard_iface->bat_v.aggr_len; + struct sk_buff *skb_aggr; + unsigned int ogm_len; + struct sk_buff *skb; + + lockdep_assert_held(&hard_iface->bat_v.aggr_list_lock); + + if (!aggr_len) + return; + + skb_aggr = dev_alloc_skb(aggr_len + ETH_HLEN + NET_IP_ALIGN); + if (!skb_aggr) { + batadv_v_ogm_aggr_list_free(hard_iface); + return; + } + + skb_reserve(skb_aggr, ETH_HLEN + NET_IP_ALIGN); + skb_reset_network_header(skb_aggr); + + while ((skb = skb_dequeue(&hard_iface->bat_v.aggr_list))) { + hard_iface->bat_v.aggr_len -= batadv_v_ogm_len(skb); + + ogm_len = batadv_v_ogm_len(skb); + skb_put_data(skb_aggr, skb->data, ogm_len); + + consume_skb(skb); + } + + batadv_v_ogm_send_to_if(skb_aggr, hard_iface); +} + +/** + * batadv_v_ogm_queue_on_if() - queue a batman ogm on a given interface + * @skb: the OGM to queue + * @hard_iface: the interface to queue the OGM on + */ +static void batadv_v_ogm_queue_on_if(struct sk_buff *skb, + struct batadv_hard_iface *hard_iface) +{ + struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface); + + if (!atomic_read(&bat_priv->aggregated_ogms)) { + batadv_v_ogm_send_to_if(skb, hard_iface); + return; + } + + spin_lock_bh(&hard_iface->bat_v.aggr_list_lock); + if (!batadv_v_ogm_queue_left(skb, hard_iface)) + batadv_v_ogm_aggr_send(hard_iface); + + hard_iface->bat_v.aggr_len += batadv_v_ogm_len(skb); + skb_queue_tail(&hard_iface->bat_v.aggr_list, skb); + spin_unlock_bh(&hard_iface->bat_v.aggr_list_lock); +} + +/** * batadv_v_ogm_send() - periodic worker broadcasting the own OGM * @work: work queue item */ @@@ -350,7 -210,7 +350,7 @@@ static void batadv_v_ogm_send(struct wo break; }
- batadv_v_ogm_send_to_if(skb_tmp, hard_iface); + batadv_v_ogm_queue_on_if(skb_tmp, hard_iface); batadv_hardif_put(hard_iface); } rcu_read_unlock(); @@@ -364,27 -224,6 +364,27 @@@ out }
/** + * batadv_v_ogm_aggr_work() - OGM queue periodic task per interface + * @work: work queue item + * + * Emits aggregated OGM message in regular intervals. + */ +void batadv_v_ogm_aggr_work(struct work_struct *work) +{ + struct batadv_hard_iface_bat_v *batv; + struct batadv_hard_iface *hard_iface; + + batv = container_of(work, struct batadv_hard_iface_bat_v, aggr_wq.work); + hard_iface = container_of(batv, struct batadv_hard_iface, bat_v); + + spin_lock_bh(&hard_iface->bat_v.aggr_list_lock); + batadv_v_ogm_aggr_send(hard_iface); + spin_unlock_bh(&hard_iface->bat_v.aggr_list_lock); + + batadv_v_ogm_start_queue_timer(hard_iface); +} + +/** * batadv_v_ogm_iface_enable() - prepare an interface for B.A.T.M.A.N. V * @hard_iface: the interface to prepare * @@@ -396,26 -235,12 +396,26 @@@ int batadv_v_ogm_iface_enable(struct ba { struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
+ batadv_v_ogm_start_queue_timer(hard_iface); batadv_v_ogm_start_timer(bat_priv);
return 0; }
/** + * batadv_v_ogm_iface_disable() - release OGM interface private resources + * @hard_iface: interface for which the resources have to be released + */ +void batadv_v_ogm_iface_disable(struct batadv_hard_iface *hard_iface) +{ + cancel_delayed_work_sync(&hard_iface->bat_v.aggr_wq); + + spin_lock_bh(&hard_iface->bat_v.aggr_list_lock); + batadv_v_ogm_aggr_list_free(hard_iface); + spin_unlock_bh(&hard_iface->bat_v.aggr_list_lock); +} + +/** * batadv_v_ogm_primary_iface_set() - set a new primary interface * @primary_iface: the new primary interface */ @@@ -557,7 -382,7 +557,7 @@@ static void batadv_v_ogm_forward(struc if_outgoing->net_dev->name, ntohl(ogm_forward->throughput), ogm_forward->ttl, if_incoming->net_dev->name);
- batadv_v_ogm_send_to_if(skb, if_outgoing); + batadv_v_ogm_queue_on_if(skb, if_outgoing);
out: if (orig_ifinfo) @@@ -806,17 -631,23 +806,23 @@@ batadv_v_ogm_process_per_outif(struct b * batadv_v_ogm_aggr_packet() - checks if there is another OGM aggregated * @buff_pos: current position in the skb * @packet_len: total length of the skb - * @tvlv_len: tvlv length of the previously considered OGM + * @ogm2_packet: potential OGM2 in buffer * * Return: true if there is enough space for another OGM, false otherwise. */ - static bool batadv_v_ogm_aggr_packet(int buff_pos, int packet_len, - __be16 tvlv_len) + static bool + batadv_v_ogm_aggr_packet(int buff_pos, int packet_len, + const struct batadv_ogm2_packet *ogm2_packet) { int next_buff_pos = 0;
- next_buff_pos += buff_pos + BATADV_OGM2_HLEN; - next_buff_pos += ntohs(tvlv_len); + /* check if there is enough space for the header */ + next_buff_pos += buff_pos + sizeof(*ogm2_packet); + if (next_buff_pos > packet_len) + return false; + + /* check if there is enough space for the optional TVLV */ + next_buff_pos += ntohs(ogm2_packet->tvlv_len);
return (next_buff_pos <= packet_len) && (next_buff_pos <= BATADV_MAX_AGGREGATION_BYTES); @@@ -993,7 -824,7 +999,7 @@@ int batadv_v_ogm_packet_recv(struct sk_ ogm_packet = (struct batadv_ogm2_packet *)skb->data;
while (batadv_v_ogm_aggr_packet(ogm_offset, skb_headlen(skb), - ogm_packet->tvlv_len)) { + ogm_packet)) { batadv_v_ogm_process(skb, ogm_offset, if_incoming);
ogm_offset += BATADV_OGM2_HLEN; diff --combined net/dsa/tag_8021q.c index e44e6275b0a1,67a1bc635a7b..9c1cc2482b68 --- a/net/dsa/tag_8021q.c +++ b/net/dsa/tag_8021q.c @@@ -28,6 -28,7 +28,7 @@@ * * RSV - VID[9]: * To be used for further expansion of SWITCH_ID or for other purposes. + * Must be transmitted as zero and ignored on receive. * * SWITCH_ID - VID[8:6]: * Index of switch within DSA tree. Must be between 0 and @@@ -35,6 -36,7 +36,7 @@@ * * RSV - VID[5:4]: * To be used for further expansion of PORT or for other purposes. + * Must be transmitted as zero and ignored on receive. * * PORT - VID[3:0]: * Index of switch port. Must be between 0 and DSA_MAX_PORTS - 1. @@@ -91,79 -93,6 +93,79 @@@ int dsa_8021q_rx_source_port(u16 vid } EXPORT_SYMBOL_GPL(dsa_8021q_rx_source_port);
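For reference, the layout documented at the top of this file puts the switch index in VID bits 8:6 and the port in bits 3:0, with the reserved bits now explicitly transmitted as zero and ignored on receive. A hypothetical helper pair following that layout (illustrative only; the real encoding helpers such as dsa_8021q_rx_source_port() live in this file) could look like:

	/* Hypothetical packing/unpacking for the documented field layout. */
	static inline u16 example_8021q_fields(unsigned int switch_id,
					       unsigned int port)
	{
		return ((switch_id & 0x7) << 6) |	/* SWITCH_ID - VID[8:6] */
		       (port & 0xf);			/* PORT - VID[3:0] */
	}

	static inline unsigned int example_8021q_source_port(u16 vid)
	{
		return vid & 0xf;			/* PORT - VID[3:0] */
	}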
+static int dsa_8021q_restore_pvid(struct dsa_switch *ds, int port) +{ + struct bridge_vlan_info vinfo; + struct net_device *slave; + u16 pvid; + int err; + + if (!dsa_is_user_port(ds, port)) + return 0; + + slave = ds->ports[port].slave; + + err = br_vlan_get_pvid(slave, &pvid); + if (err < 0) + /* There is no pvid on the bridge for this port, which is + * perfectly valid. Nothing to restore, bye-bye! + */ + return 0; + + err = br_vlan_get_info(slave, pvid, &vinfo); + if (err < 0) { + dev_err(ds->dev, "Couldn't determine PVID attributes\n"); + return err; + } + + return dsa_port_vid_add(&ds->ports[port], pvid, vinfo.flags); +} + +/* If @enabled is true, installs @vid with @flags into the switch port's HW + * filter. + * If @enabled is false, deletes @vid (ignores @flags) from the port. Had the + * user explicitly configured this @vid through the bridge core, then the @vid + * is installed again, but this time with the flags from the bridge layer. + */ +static int dsa_8021q_vid_apply(struct dsa_switch *ds, int port, u16 vid, + u16 flags, bool enabled) +{ + struct dsa_port *dp = &ds->ports[port]; + struct bridge_vlan_info vinfo; + int err; + + if (enabled) + return dsa_port_vid_add(dp, vid, flags); + + err = dsa_port_vid_del(dp, vid); + if (err < 0) + return err; + + /* Nothing to restore from the bridge for a non-user port. + * The CPU port VLANs are restored implicitly with the user ports, + * similar to how the bridge does in dsa_slave_vlan_add and + * dsa_slave_vlan_del. + */ + if (!dsa_is_user_port(ds, port)) + return 0; + + err = br_vlan_get_info(dp->slave, vid, &vinfo); + /* Couldn't determine bridge attributes for this vid, + * it means the bridge had not configured it. + */ + if (err < 0) + return 0; + + /* Restore the VID from the bridge */ + err = dsa_port_vid_add(dp, vid, vinfo.flags); + if (err < 0) + return err; + + vinfo.flags &= ~BRIDGE_VLAN_INFO_PVID; + + return dsa_port_vid_add(dp->cpu_dp, vid, vinfo.flags); +} + /* RX VLAN tagging (left) and TX VLAN tagging (right) setup shown for a single * front-panel switch port (here swp0). * @@@ -219,6 -148,8 +221,6 @@@ int dsa_port_setup_8021q_tagging(struct dsa_switch *ds, int port, bool enabled) { int upstream = dsa_upstream_port(ds, port); - struct dsa_port *dp = &ds->ports[port]; - struct dsa_port *upstream_dp = &ds->ports[upstream]; u16 rx_vid = dsa_8021q_rx_vid(ds, port); u16 tx_vid = dsa_8021q_tx_vid(ds, port); int i, err; @@@ -235,6 -166,7 +237,6 @@@ * restrictions, so there are no concerns about leaking traffic. */ for (i = 0; i < ds->num_ports; i++) { - struct dsa_port *other_dp = &ds->ports[i]; u16 flags;
if (i == upstream) @@@ -247,7 -179,10 +249,7 @@@ /* The RX VID is a regular VLAN on all others */ flags = BRIDGE_VLAN_INFO_UNTAGGED;
- if (enabled) - err = dsa_port_vid_add(other_dp, rx_vid, flags); - else - err = dsa_port_vid_del(other_dp, rx_vid); + err = dsa_8021q_vid_apply(ds, i, rx_vid, flags, enabled); if (err) { dev_err(ds->dev, "Failed to apply RX VID %d to port %d: %d\n", rx_vid, port, err); @@@ -258,7 -193,10 +260,7 @@@ /* CPU port needs to see this port's RX VID * as tagged egress. */ - if (enabled) - err = dsa_port_vid_add(upstream_dp, rx_vid, 0); - else - err = dsa_port_vid_del(upstream_dp, rx_vid); + err = dsa_8021q_vid_apply(ds, upstream, rx_vid, 0, enabled); if (err) { dev_err(ds->dev, "Failed to apply RX VID %d to port %d: %d\n", rx_vid, port, err); @@@ -266,24 -204,26 +268,24 @@@ }
/* Finally apply the TX VID on this port and on the CPU port */ - if (enabled) - err = dsa_port_vid_add(dp, tx_vid, BRIDGE_VLAN_INFO_UNTAGGED); - else - err = dsa_port_vid_del(dp, tx_vid); + err = dsa_8021q_vid_apply(ds, port, tx_vid, BRIDGE_VLAN_INFO_UNTAGGED, + enabled); if (err) { dev_err(ds->dev, "Failed to apply TX VID %d on port %d: %d\n", tx_vid, port, err); return err; } - if (enabled) - err = dsa_port_vid_add(upstream_dp, tx_vid, 0); - else - err = dsa_port_vid_del(upstream_dp, tx_vid); + err = dsa_8021q_vid_apply(ds, upstream, tx_vid, 0, enabled); if (err) { dev_err(ds->dev, "Failed to apply TX VID %d on port %d: %d\n", tx_vid, upstream, err); return err; }
- return 0; + if (!enabled) + err = dsa_8021q_restore_pvid(ds, port); + + return err; } EXPORT_SYMBOL_GPL(dsa_port_setup_8021q_tagging);
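As a concrete scenario for the restore logic above: dsa_8021q_restore_pvid() re-reads the port's PVID and its flags from the bridge via br_vlan_get_pvid()/br_vlan_get_info() and programs it back with dsa_port_vid_add(), so a PVID the user had configured through the bridge layer, for instance with iproute2's "bridge vlan add dev swp0 vid 100 pvid untagged" (swp0 and 100 being arbitrary examples), remains in effect after tag_8021q tagging is turned off, which is why dsa_port_setup_8021q_tagging() now finishes the !enabled path with that call.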
diff --combined net/ipv4/tcp.c index 051ef10374f6,61082065b26a..94df48bcecc2 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c @@@ -935,6 -935,22 +935,22 @@@ static int tcp_send_mss(struct sock *sk return mss_now; }
+ /* In some cases, both sendpage() and sendmsg() could have added + * an skb to the write queue, but failed adding payload on it. + * We need to remove it to consume less memory, but more + * importantly be able to generate EPOLLOUT for Edge Trigger epoll() + * users. + */ + static void tcp_remove_empty_skb(struct sock *sk, struct sk_buff *skb) + { + if (skb && !skb->len) { + tcp_unlink_write_queue(skb, sk); + if (tcp_write_queue_empty(sk)) + tcp_chrono_stop(sk, TCP_CHRONO_BUSY); + sk_wmem_free_skb(sk, skb); + } + } + ssize_t do_tcp_sendpages(struct sock *sk, struct page *page, int offset, size_t size, int flags) { @@@ -1064,6 -1080,7 +1080,7 @@@ out return copied;
do_error: + tcp_remove_empty_skb(sk, tcp_write_queue_tail(sk)); if (copied) goto out; out_err: @@@ -1165,7 -1182,7 +1182,7 @@@ int tcp_sendmsg_locked(struct sock *sk struct sockcm_cookie sockc; int flags, err, copied = 0; int mss_now = 0, size_goal, copied_syn = 0; - bool process_backlog = false; + int process_backlog = 0; bool zc = false; long timeo;
@@@ -1257,10 -1274,9 +1274,10 @@@ new_segment if (!sk_stream_memory_free(sk)) goto wait_for_sndbuf;
- if (process_backlog && sk_flush_backlog(sk)) { - process_backlog = false; - goto restart; + if (unlikely(process_backlog >= 16)) { + process_backlog = 0; + if (sk_flush_backlog(sk)) + goto restart; } first_skb = tcp_rtx_and_write_queues_empty(sk); skb = sk_stream_alloc_skb(sk, 0, sk->sk_allocation, @@@ -1268,7 -1284,7 +1285,7 @@@ if (!skb) goto wait_for_memory;
- process_backlog = true; + process_backlog++; skb->ip_summed = CHECKSUM_PARTIAL;
skb_entail(sk, skb); @@@ -1389,18 -1405,11 +1406,11 @@@ out_nopush sock_zerocopy_put(uarg); return copied + copied_syn;
+ do_error: + skb = tcp_write_queue_tail(sk); do_fault: - if (!skb->len) { - tcp_unlink_write_queue(skb, sk); - /* It is the one place in all of TCP, except connection - * reset, where we can be unlinking the send_head. - */ - if (tcp_write_queue_empty(sk)) - tcp_chrono_stop(sk, TCP_CHRONO_BUSY); - sk_wmem_free_skb(sk, skb); - } + tcp_remove_empty_skb(sk, skb);
- do_error: if (copied + copied_syn) goto out; out_err: @@@ -1780,21 -1789,19 +1790,21 @@@ static int tcp_zerocopy_receive(struct break; frags = skb_shinfo(skb)->frags; while (offset) { - if (frags->size > offset) + if (skb_frag_size(frags) > offset) goto out; - offset -= frags->size; + offset -= skb_frag_size(frags); frags++; } } - if (frags->size != PAGE_SIZE || frags->page_offset) { + if (skb_frag_size(frags) != PAGE_SIZE || skb_frag_off(frags)) { int remaining = zc->recv_skip_hint; + int size = skb_frag_size(frags);
- while (remaining && (frags->size != PAGE_SIZE || - frags->page_offset)) { - remaining -= frags->size; + while (remaining && (size != PAGE_SIZE || + skb_frag_off(frags))) { + remaining -= size; frags++; + size = skb_frag_size(frags); } zc->recv_skip_hint -= remaining; break; @@@ -3787,8 -3794,8 +3797,8 @@@ int tcp_md5_hash_skb_data(struct tcp_md return 1;
for (i = 0; i < shi->nr_frags; ++i) { - const struct skb_frag_struct *f = &shi->frags[i]; - unsigned int offset = f->page_offset; + const skb_frag_t *f = &shi->frags[i]; + unsigned int offset = skb_frag_off(f); struct page *page = skb_frag_page(f) + (offset >> PAGE_SHIFT);
sg_set_page(&sg, page, skb_frag_size(f), diff --combined net/ipv4/tcp_output.c index 5c46bc4c7e8d,8a645f304e6c..42abc9bd687a --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c @@@ -1403,7 -1403,7 +1403,7 @@@ static int __pskb_trim_head(struct sk_b } else { shinfo->frags[k] = shinfo->frags[i]; if (eat) { - shinfo->frags[k].page_offset += eat; + skb_frag_off_add(&shinfo->frags[k], eat); skb_frag_size_sub(&shinfo->frags[k], eat); eat = 0; } @@@ -2053,7 -2053,7 +2053,7 @@@ static bool tcp_can_coalesce_send_queue if (len <= skb->len) break;
- if (unlikely(TCP_SKB_CB(skb)->eor)) + if (unlikely(TCP_SKB_CB(skb)->eor) || tcp_has_tx_tstamp(skb)) return false;
len -= skb->len; @@@ -2170,6 -2170,7 +2170,7 @@@ static int tcp_mtu_probe(struct sock *s * we need to propagate it to the new skb. */ TCP_SKB_CB(nskb)->eor = TCP_SKB_CB(skb)->eor; + tcp_skb_collapse_tstamp(nskb, skb); tcp_unlink_write_queue(skb, sk); sk_wmem_free_skb(sk, skb); } else { diff --combined net/netfilter/nf_conntrack_ftp.c index c57d2348c505,8d96738b7dfd..9eca90414bb7 --- a/net/netfilter/nf_conntrack_ftp.c +++ b/net/netfilter/nf_conntrack_ftp.c @@@ -162,7 -162,7 +162,7 @@@ static int try_rfc959(const char *data if (length == 0) return 0;
- cmd->u3.ip = htonl((array[0] << 24) | (array[1] << 16) | + cmd->u3.ip = htonl((array[0] << 24) | (array[1] << 16) | (array[2] << 8) | array[3]); cmd->u.tcp.port = htons((array[4] << 8) | array[5]); return length; @@@ -322,7 -322,7 +322,7 @@@ static int find_pattern(const char *dat i++; }
- pr_debug("Skipped up to `%c'!\n", skip); + pr_debug("Skipped up to 0x%hhx delimiter!\n", skip);
*numoff = i; *numlen = getnum(data + i, dlen - i, cmd, term, numoff); diff --combined net/netfilter/nf_conntrack_standalone.c index d97f4ea47cf3,0006503d2da9..88d4127df863 --- a/net/netfilter/nf_conntrack_standalone.c +++ b/net/netfilter/nf_conntrack_standalone.c @@@ -511,6 -511,8 +511,6 @@@ static void nf_conntrack_standalone_fin /* Log invalid packets of a given protocol */ static int log_invalid_proto_min __read_mostly; static int log_invalid_proto_max __read_mostly = 255; -static int zero; -static int one = 1;
/* size the user *wants to set */ static unsigned int nf_conntrack_htable_size_user __read_mostly; @@@ -627,8 -629,8 +627,8 @@@ static struct ctl_table nf_ct_sysctl_ta .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec_minmax, - .extra1 = &zero, - .extra2 = &one, + .extra1 = SYSCTL_ZERO, + .extra2 = SYSCTL_ONE, }, [NF_SYSCTL_CT_LOG_INVALID] = { .procname = "nf_conntrack_log_invalid", @@@ -652,8 -654,8 +652,8 @@@ .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec_minmax, - .extra1 = &zero, - .extra2 = &one, + .extra1 = SYSCTL_ZERO, + .extra2 = SYSCTL_ONE, }, [NF_SYSCTL_CT_HELPER] = { .procname = "nf_conntrack_helper", @@@ -661,8 -663,8 +661,8 @@@ .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec_minmax, - .extra1 = &zero, - .extra2 = &one, + .extra1 = SYSCTL_ZERO, + .extra2 = SYSCTL_ONE, }, #ifdef CONFIG_NF_CONNTRACK_EVENTS [NF_SYSCTL_CT_EVENTS] = { @@@ -671,8 -673,8 +671,8 @@@ .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec_minmax, - .extra1 = &zero, - .extra2 = &one, + .extra1 = SYSCTL_ZERO, + .extra2 = SYSCTL_ONE, }, #endif #ifdef CONFIG_NF_CONNTRACK_TIMESTAMP @@@ -682,8 -684,8 +682,8 @@@ .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec_minmax, - .extra1 = &zero, - .extra2 = &one, + .extra1 = SYSCTL_ZERO, + .extra2 = SYSCTL_ONE, }, #endif [NF_SYSCTL_CT_PROTO_TIMEOUT_GENERIC] = { @@@ -757,16 -759,16 +757,16 @@@ .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec_minmax, - .extra1 = &zero, - .extra2 = &one, + .extra1 = SYSCTL_ZERO, + .extra2 = SYSCTL_ONE, }, [NF_SYSCTL_CT_PROTO_TCP_LIBERAL] = { .procname = "nf_conntrack_tcp_be_liberal", .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec_minmax, - .extra1 = &zero, - .extra2 = &one, + .extra1 = SYSCTL_ZERO, + .extra2 = SYSCTL_ONE, }, [NF_SYSCTL_CT_PROTO_TCP_MAX_RETRANS] = { .procname = "nf_conntrack_tcp_max_retrans", @@@ -902,8 -904,8 +902,8 @@@ .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec_minmax, - .extra1 = &zero, - .extra2 = &one, + .extra1 = SYSCTL_ZERO, + .extra2 = SYSCTL_ONE, }, #endif #ifdef CONFIG_NF_CT_PROTO_GRE @@@ -1035,9 -1037,14 +1035,14 @@@ static int nf_conntrack_standalone_init table[NF_SYSCTL_CT_COUNT].data = &net->ct.count; table[NF_SYSCTL_CT_CHECKSUM].data = &net->ct.sysctl_checksum; table[NF_SYSCTL_CT_LOG_INVALID].data = &net->ct.sysctl_log_invalid; + table[NF_SYSCTL_CT_ACCT].data = &net->ct.sysctl_acct; + table[NF_SYSCTL_CT_HELPER].data = &net->ct.sysctl_auto_assign_helper; #ifdef CONFIG_NF_CONNTRACK_EVENTS table[NF_SYSCTL_CT_EVENTS].data = &net->ct.sysctl_events; #endif + #ifdef CONFIG_NF_CONNTRACK_TIMESTAMP + table[NF_SYSCTL_CT_TIMESTAMP].data = &net->ct.sysctl_tstamp; + #endif table[NF_SYSCTL_CT_PROTO_TIMEOUT_GENERIC].data = &nf_generic_pernet(net)->timeout; table[NF_SYSCTL_CT_PROTO_TIMEOUT_ICMP].data = &nf_icmp_pernet(net)->timeout; table[NF_SYSCTL_CT_PROTO_TIMEOUT_ICMPV6].data = &nf_icmpv6_pernet(net)->timeout; diff --combined net/rds/recv.c index 7e451c82595b,a42ba7fa06d5..c8404971d5ab --- a/net/rds/recv.c +++ b/net/rds/recv.c @@@ -1,5 -1,5 +1,5 @@@ /* - * Copyright (c) 2006, 2018 Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2006, 2019 Oracle and/or its affiliates. All rights reserved. * * This software is available to you under a choice of one of two * licenses. 
You may choose to be licensed under the terms of the GNU @@@ -47,8 -47,8 +47,8 @@@ void rds_inc_init(struct rds_incoming * INIT_LIST_HEAD(&inc->i_item); inc->i_conn = conn; inc->i_saddr = *saddr; - inc->i_rdma_cookie = 0; - inc->i_rx_tstamp = ktime_set(0, 0); + inc->i_usercopy.rdma_cookie = 0; + inc->i_usercopy.rx_tstamp = ktime_set(0, 0);
memset(inc->i_rx_lat_trace, 0, sizeof(inc->i_rx_lat_trace)); } @@@ -62,8 -62,8 +62,8 @@@ void rds_inc_path_init(struct rds_incom inc->i_conn = cp->cp_conn; inc->i_conn_path = cp; inc->i_saddr = *saddr; - inc->i_rdma_cookie = 0; - inc->i_rx_tstamp = ktime_set(0, 0); + inc->i_usercopy.rdma_cookie = 0; + inc->i_usercopy.rx_tstamp = ktime_set(0, 0); } EXPORT_SYMBOL_GPL(rds_inc_path_init);
@@@ -186,7 -186,7 +186,7 @@@ static void rds_recv_incoming_exthdrs(s case RDS_EXTHDR_RDMA_DEST: /* We ignore the size for now. We could stash it * somewhere and use it for error checking. */ - inc->i_rdma_cookie = rds_rdma_make_cookie( + inc->i_usercopy.rdma_cookie = rds_rdma_make_cookie( be32_to_cpu(buffer.rdma_dest.h_rdma_rkey), be32_to_cpu(buffer.rdma_dest.h_rdma_offset));
@@@ -380,7 -380,7 +380,7 @@@ void rds_recv_incoming(struct rds_conne be32_to_cpu(inc->i_hdr.h_len), inc->i_hdr.h_dport); if (sock_flag(sk, SOCK_RCVTSTAMP)) - inc->i_rx_tstamp = ktime_get_real(); + inc->i_usercopy.rx_tstamp = ktime_get_real(); rds_inc_addref(inc); inc->i_rx_lat_trace[RDS_MSG_RX_END] = local_clock(); list_add_tail(&inc->i_item, &rs->rs_recv_queue); @@@ -540,18 -540,16 +540,18 @@@ static int rds_cmsg_recv(struct rds_inc { int ret = 0;
- if (inc->i_rdma_cookie) { + if (inc->i_usercopy.rdma_cookie) { ret = put_cmsg(msg, SOL_RDS, RDS_CMSG_RDMA_DEST, - sizeof(inc->i_rdma_cookie), &inc->i_rdma_cookie); + sizeof(inc->i_usercopy.rdma_cookie), + &inc->i_usercopy.rdma_cookie); if (ret) goto out; }
- if ((inc->i_rx_tstamp != 0) && + if ((inc->i_usercopy.rx_tstamp != 0) && sock_flag(rds_rs_to_sk(rs), SOCK_RCVTSTAMP)) { - struct __kernel_old_timeval tv = ns_to_kernel_old_timeval(inc->i_rx_tstamp); + struct __kernel_old_timeval tv = + ns_to_kernel_old_timeval(inc->i_usercopy.rx_tstamp);
if (!sock_flag(rds_rs_to_sk(rs), SOCK_TSTAMP_NEW)) { ret = put_cmsg(msg, SOL_SOCKET, SO_TIMESTAMP_OLD, @@@ -813,6 -811,7 +813,7 @@@ void rds6_inc_info_copy(struct rds_inco
minfo6.seq = be64_to_cpu(inc->i_hdr.h_sequence); minfo6.len = be32_to_cpu(inc->i_hdr.h_len); + minfo6.tos = inc->i_conn->c_tos;
if (flip) { minfo6.laddr = *daddr; @@@ -826,6 -825,8 +827,8 @@@ minfo6.fport = inc->i_hdr.h_dport; }
+ minfo6.flags = 0; + rds_info_copy(iter, &minfo6, sizeof(minfo6)); } #endif diff --combined net/rxrpc/ar-internal.h index fa5b030acaa8,8051dfdcf26d..1091bf35a199 --- a/net/rxrpc/ar-internal.h +++ b/net/rxrpc/ar-internal.h @@@ -185,11 -185,17 +185,17 @@@ struct rxrpc_host_header * - max 48 bytes (struct sk_buff::cb) */ struct rxrpc_skb_priv { - union { - u8 nr_jumbo; /* Number of jumbo subpackets */ - }; + atomic_t nr_ring_pins; /* Number of rxtx ring pins */ + u8 nr_subpackets; /* Number of subpackets */ + u8 rx_flags; /* Received packet flags */ + #define RXRPC_SKB_INCL_LAST 0x01 /* - Includes last packet */ + #define RXRPC_SKB_TX_BUFFER 0x02 /* - Is transmit buffer */ union { int remain; /* amount of space remaining for next write */ + + /* List of requested ACKs on subpackets */ + unsigned long rx_req_ack[(RXRPC_MAX_NR_JUMBO + BITS_PER_LONG - 1) / + BITS_PER_LONG]; };
struct rxrpc_host_header hdr; /* RxRPC packet header from this packet */ @@@ -226,9 -232,6 +232,9 @@@ struct rxrpc_security int (*verify_packet)(struct rxrpc_call *, struct sk_buff *, unsigned int, unsigned int, rxrpc_seq_t, u16);
+ /* Free crypto request on a call */ + void (*free_call_crypto)(struct rxrpc_call *); + /* Locate the data in a received packet that has been verified. */ void (*locate_data)(struct rxrpc_call *, struct sk_buff *, unsigned int *, unsigned int *); @@@ -561,7 -564,6 +567,7 @@@ struct rxrpc_call unsigned long expect_term_by; /* When we expect call termination by */ u32 next_rx_timo; /* Timeout for next Rx packet (jif) */ u32 next_req_timo; /* Timeout for next Rx request packet (jif) */ + struct skcipher_request *cipher_req; /* Packet cipher request buffer */ struct timer_list timer; /* Combined event timer */ struct work_struct processor; /* Event processor */ rxrpc_notify_rx_t notify_rx; /* kernel service Rx notification function */ @@@ -617,8 -619,7 +623,7 @@@ #define RXRPC_TX_ANNO_LAST 0x04 #define RXRPC_TX_ANNO_RESENT 0x08
- #define RXRPC_RX_ANNO_JUMBO 0x3f /* Jumbo subpacket number + 1 if not zero */ - #define RXRPC_RX_ANNO_JLAST 0x40 /* Set if last element of a jumbo packet */ + #define RXRPC_RX_ANNO_SUBPACKET 0x3f /* Subpacket number in jumbogram */ #define RXRPC_RX_ANNO_VERIFIED 0x80 /* Set if verified and decrypted */ rxrpc_seq_t tx_hard_ack; /* Dead slot in buffer; the first transmitted but * not hard-ACK'd packet follows this. @@@ -909,6 -910,7 +914,7 @@@ void rxrpc_disconnect_client_call(struc void rxrpc_put_client_conn(struct rxrpc_connection *); void rxrpc_discard_expired_client_conns(struct work_struct *); void rxrpc_destroy_all_client_connections(struct rxrpc_net *); + void rxrpc_clean_up_local_conns(struct rxrpc_local *);
/* * conn_event.c @@@ -1109,6 -1111,7 +1115,7 @@@ void rxrpc_kernel_data_consumed(struct void rxrpc_packet_destructor(struct sk_buff *); void rxrpc_new_skb(struct sk_buff *, enum rxrpc_skb_trace); void rxrpc_see_skb(struct sk_buff *, enum rxrpc_skb_trace); + void rxrpc_eaten_skb(struct sk_buff *, enum rxrpc_skb_trace); void rxrpc_get_skb(struct sk_buff *, enum rxrpc_skb_trace); void rxrpc_free_skb(struct sk_buff *, enum rxrpc_skb_trace); void rxrpc_purge_queue(struct sk_buff_head *); diff --combined net/rxrpc/call_object.c index 60cbc81dc461,014548c259ce..32d8dc677142 --- a/net/rxrpc/call_object.c +++ b/net/rxrpc/call_object.c @@@ -422,6 -422,19 +422,19 @@@ void rxrpc_get_call(struct rxrpc_call * }
/* + * Clean up the RxTx skb ring. + */ + static void rxrpc_cleanup_ring(struct rxrpc_call *call) + { + int i; + + for (i = 0; i < RXRPC_RXTX_BUFF_SIZE; i++) { + rxrpc_free_skb(call->rxtx_buffer[i], rxrpc_skb_cleaned); + call->rxtx_buffer[i] = NULL; + } + } + + /* * Detach a call from its owning socket. */ void rxrpc_release_call(struct rxrpc_sock *rx, struct rxrpc_call *call) @@@ -429,7 -442,6 +442,6 @@@ const void *here = __builtin_return_address(0); struct rxrpc_connection *conn = call->conn; bool put = false; - int i;
_enter("{%d,%d}", call->debug_id, atomic_read(&call->usage));
@@@ -476,18 -488,10 +488,12 @@@
_debug("RELEASE CALL %p (%d CONN %p)", call, call->debug_id, conn);
- if (conn) + if (conn) { rxrpc_disconnect_call(call); + conn->security->free_call_crypto(call); + }
- for (i = 0; i < RXRPC_RXTX_BUFF_SIZE; i++) { - rxrpc_free_skb(call->rxtx_buffer[i], - (call->tx_phase ? rxrpc_skb_tx_cleaned : - rxrpc_skb_rx_cleaned)); - call->rxtx_buffer[i] = NULL; - } - + rxrpc_cleanup_ring(call); _leave(""); }
@@@ -570,8 -574,6 +576,6 @@@ static void rxrpc_rcu_destroy_call(stru */ void rxrpc_cleanup_call(struct rxrpc_call *call) { - int i; - _net("DESTROY CALL %d", call->debug_id);
memset(&call->sock_node, 0xcd, sizeof(call->sock_node)); @@@ -582,13 -584,8 +586,8 @@@ ASSERT(test_bit(RXRPC_CALL_RELEASED, &call->flags)); ASSERTCMP(call->conn, ==, NULL);
- /* Clean up the Rx/Tx buffer */ - for (i = 0; i < RXRPC_RXTX_BUFF_SIZE; i++) - rxrpc_free_skb(call->rxtx_buffer[i], - (call->tx_phase ? rxrpc_skb_tx_cleaned : - rxrpc_skb_rx_cleaned)); - - rxrpc_free_skb(call->tx_pending, rxrpc_skb_tx_cleaned); + rxrpc_cleanup_ring(call); + rxrpc_free_skb(call->tx_pending, rxrpc_skb_cleaned);
call_rcu(&call->rcu, rxrpc_rcu_destroy_call); } diff --combined net/rxrpc/rxkad.c index dbb109da1835,c60c520fde7c..8d8aa3c230b5 --- a/net/rxrpc/rxkad.c +++ b/net/rxrpc/rxkad.c @@@ -43,7 -43,6 +43,7 @@@ struct rxkad_level2_hdr * packets */ static struct crypto_sync_skcipher *rxkad_ci; +static struct skcipher_request *rxkad_ci_req; static DEFINE_MUTEX(rxkad_ci_mutex);
/* @@@ -100,8 -99,8 +100,8 @@@ error */ static int rxkad_prime_packet_security(struct rxrpc_connection *conn) { + struct skcipher_request *req; struct rxrpc_key_token *token; - SYNC_SKCIPHER_REQUEST_ON_STACK(req, conn->cipher); struct scatterlist sg; struct rxrpc_crypt iv; __be32 *tmpbuf; @@@ -116,12 -115,6 +116,12 @@@ if (!tmpbuf) return -ENOMEM;
+ req = skcipher_request_alloc(&conn->cipher->base, GFP_NOFS); + if (!req) { + kfree(tmpbuf); + return -ENOMEM; + } + token = conn->params.key->payload.data[0]; memcpy(&iv, token->kad->session_key, sizeof(iv));
@@@ -135,7 -128,7 +135,7 @@@ skcipher_request_set_callback(req, 0, NULL, NULL); skcipher_request_set_crypt(req, &sg, &sg, tmpsize, iv.x); crypto_skcipher_encrypt(req); - skcipher_request_zero(req); + skcipher_request_free(req);
memcpy(&conn->csum_iv, tmpbuf + 2, sizeof(conn->csum_iv)); kfree(tmpbuf); @@@ -144,35 -137,6 +144,35 @@@ }
/* + * Allocate and prepare the crypto request on a call. For any particular call, + * this is called serially for the packets, so no lock should be necessary. + */ +static struct skcipher_request *rxkad_get_call_crypto(struct rxrpc_call *call) +{ + struct crypto_skcipher *tfm = &call->conn->cipher->base; + struct skcipher_request *cipher_req = call->cipher_req; + + if (!cipher_req) { + cipher_req = skcipher_request_alloc(tfm, GFP_NOFS); + if (!cipher_req) + return NULL; + call->cipher_req = cipher_req; + } + + return cipher_req; +} + +/* + * Clean up the crypto on a call. + */ +static void rxkad_free_call_crypto(struct rxrpc_call *call) +{ + if (call->cipher_req) + skcipher_request_free(call->cipher_req); + call->cipher_req = NULL; +} + +/* * partially encrypt a packet (level 1 security) */ static int rxkad_secure_packet_auth(const struct rxrpc_call *call, @@@ -223,10 -187,8 +223,8 @@@ static int rxkad_secure_packet_encrypt( struct rxrpc_skb_priv *sp; struct rxrpc_crypt iv; struct scatterlist sg[16]; - struct sk_buff *trailer; unsigned int len; u16 check; - int nsg; int err;
sp = rxrpc_skb(skb); @@@ -250,15 -212,14 +248,14 @@@ crypto_skcipher_encrypt(req);
/* we want to encrypt the skbuff in-place */ - nsg = skb_cow_data(skb, 0, &trailer); - err = -ENOMEM; - if (nsg < 0 || nsg > 16) + err = -EMSGSIZE; + if (skb_shinfo(skb)->nr_frags > 16) goto out;
len = data_size + call->conn->size_align - 1; len &= ~(call->conn->size_align - 1);
- sg_init_table(sg, nsg); + sg_init_table(sg, ARRAY_SIZE(sg)); err = skb_to_sgvec(skb, sg, 0, len); if (unlikely(err < 0)) goto out; @@@ -282,7 -243,7 +279,7 @@@ static int rxkad_secure_packet(struct r void *sechdr) { struct rxrpc_skb_priv *sp; - SYNC_SKCIPHER_REQUEST_ON_STACK(req, call->conn->cipher); + struct skcipher_request *req; struct rxrpc_crypt iv; struct scatterlist sg; u32 x, y; @@@ -301,10 -262,6 +298,10 @@@ if (ret < 0) return ret;
+ req = rxkad_get_call_crypto(call); + if (!req) + return -ENOMEM; + /* continue encrypting from where we left off */ memcpy(&iv, call->conn->csum_iv.x, sizeof(iv));
@@@ -359,11 -316,10 +356,10 @@@ static int rxkad_verify_packet_1(struc struct rxkad_level1_hdr sechdr; struct rxrpc_crypt iv; struct scatterlist sg[16]; - struct sk_buff *trailer; bool aborted; u32 data_size, buf; u16 check; - int nsg, ret; + int ret;
_enter("");
@@@ -376,11 -332,7 +372,7 @@@ /* Decrypt the skbuff in-place. TODO: We really want to decrypt * directly into the target buffer. */ - nsg = skb_cow_data(skb, 0, &trailer); - if (nsg < 0 || nsg > 16) - goto nomem; - - sg_init_table(sg, nsg); + sg_init_table(sg, ARRAY_SIZE(sg)); ret = skb_to_sgvec(skb, sg, offset, 8); if (unlikely(ret < 0)) return ret; @@@ -428,10 -380,6 +420,6 @@@ protocol_error if (aborted) rxrpc_send_abort_packet(call); return -EPROTO; - - nomem: - _leave(" = -ENOMEM"); - return -ENOMEM; }
/* @@@ -446,7 -394,6 +434,6 @@@ static int rxkad_verify_packet_2(struc struct rxkad_level2_hdr sechdr; struct rxrpc_crypt iv; struct scatterlist _sg[4], *sg; - struct sk_buff *trailer; bool aborted; u32 data_size, buf; u16 check; @@@ -463,12 -410,11 +450,11 @@@ /* Decrypt the skbuff in-place. TODO: We really want to decrypt * directly into the target buffer. */ - nsg = skb_cow_data(skb, 0, &trailer); - if (nsg < 0) - goto nomem; - sg = _sg; - if (unlikely(nsg > 4)) { + nsg = skb_shinfo(skb)->nr_frags; + if (nsg <= 4) { + nsg = 4; + } else { sg = kmalloc_array(nsg, sizeof(*sg), GFP_NOIO); if (!sg) goto nomem; @@@ -542,7 -488,7 +528,7 @@@ static int rxkad_verify_packet(struct r unsigned int offset, unsigned int len, rxrpc_seq_t seq, u16 expected_cksum) { - SYNC_SKCIPHER_REQUEST_ON_STACK(req, call->conn->cipher); + struct skcipher_request *req; struct rxrpc_crypt iv; struct scatterlist sg; bool aborted; @@@ -555,10 -501,6 +541,10 @@@ if (!call->conn->cipher) return 0;
+ req = rxkad_get_call_crypto(call); + if (!req) + return -ENOMEM; + /* continue encrypting from where we left off */ memcpy(&iv, call->conn->csum_iv.x, sizeof(iv));
@@@ -791,18 -733,14 +777,18 @@@ static void rxkad_calc_response_checksu /* * encrypt the response packet */ -static void rxkad_encrypt_response(struct rxrpc_connection *conn, - struct rxkad_response *resp, - const struct rxkad_key *s2) +static int rxkad_encrypt_response(struct rxrpc_connection *conn, + struct rxkad_response *resp, + const struct rxkad_key *s2) { - SYNC_SKCIPHER_REQUEST_ON_STACK(req, conn->cipher); + struct skcipher_request *req; struct rxrpc_crypt iv; struct scatterlist sg[1];
+ req = skcipher_request_alloc(&conn->cipher->base, GFP_NOFS); + if (!req) + return -ENOMEM; + /* continue encrypting from where we left off */ memcpy(&iv, s2->session_key, sizeof(iv));
@@@ -812,8 -750,7 +798,8 @@@ skcipher_request_set_callback(req, 0, NULL, NULL); skcipher_request_set_crypt(req, sg, sg, sizeof(resp->encrypted), iv.x); crypto_skcipher_encrypt(req); - skcipher_request_zero(req); + skcipher_request_free(req); + return 0; }
/* @@@ -888,9 -825,8 +874,9 @@@ static int rxkad_respond_to_challenge(s
/* calculate the response checksum and then do the encryption */ rxkad_calc_response_checksum(resp); - rxkad_encrypt_response(conn, resp, token->kad); - ret = rxkad_send_response(conn, &sp->hdr, resp, token->kad); + ret = rxkad_encrypt_response(conn, resp, token->kad); + if (ret == 0) + ret = rxkad_send_response(conn, &sp->hdr, resp, token->kad); kfree(resp); return ret;
@@@ -1067,16 -1003,18 +1053,16 @@@ static void rxkad_decrypt_response(stru struct rxkad_response *resp, const struct rxrpc_crypt *session_key) { - SYNC_SKCIPHER_REQUEST_ON_STACK(req, rxkad_ci); + struct skcipher_request *req = rxkad_ci_req; struct scatterlist sg[1]; struct rxrpc_crypt iv;
_enter(",,%08x%08x", ntohl(session_key->n[0]), ntohl(session_key->n[1]));
- ASSERT(rxkad_ci != NULL); - mutex_lock(&rxkad_ci_mutex); if (crypto_sync_skcipher_setkey(rxkad_ci, session_key->x, - sizeof(*session_key)) < 0) + sizeof(*session_key)) < 0) BUG();
memcpy(&iv, session_key, sizeof(iv)); @@@ -1270,26 -1208,10 +1256,26 @@@ static void rxkad_clear(struct rxrpc_co */ static int rxkad_init(void) { + struct crypto_sync_skcipher *tfm; + struct skcipher_request *req; + /* pin the cipher we need so that the crypto layer doesn't invoke * keventd to go get it */ - rxkad_ci = crypto_alloc_sync_skcipher("pcbc(fcrypt)", 0, 0); - return PTR_ERR_OR_ZERO(rxkad_ci); + tfm = crypto_alloc_sync_skcipher("pcbc(fcrypt)", 0, 0); + if (IS_ERR(tfm)) + return PTR_ERR(tfm); + + req = skcipher_request_alloc(&tfm->base, GFP_KERNEL); + if (!req) + goto nomem_tfm; + + rxkad_ci_req = req; + rxkad_ci = tfm; + return 0; + +nomem_tfm: + crypto_free_sync_skcipher(tfm); + return -ENOMEM; }
/* @@@ -1297,8 -1219,8 +1283,8 @@@ */ static void rxkad_exit(void) { - if (rxkad_ci) - crypto_free_sync_skcipher(rxkad_ci); + crypto_free_sync_skcipher(rxkad_ci); + skcipher_request_free(rxkad_ci_req); }
/* @@@ -1313,7 -1235,6 +1299,7 @@@ const struct rxrpc_security rxkad = .prime_packet_security = rxkad_prime_packet_security, .secure_packet = rxkad_secure_packet, .verify_packet = rxkad_verify_packet, + .free_call_crypto = rxkad_free_call_crypto, .locate_data = rxkad_locate_data, .issue_challenge = rxkad_issue_challenge, .respond_to_challenge = rxkad_respond_to_challenge, diff --combined net/sched/act_vlan.c index 216b75709875,287a30bf8930..08aaf719a70f --- a/net/sched/act_vlan.c +++ b/net/sched/act_vlan.c @@@ -301,19 -301,6 +301,19 @@@ static int tcf_vlan_walker(struct net * return tcf_generic_walker(tn, skb, cb, type, ops, extack); }
+static void tcf_vlan_stats_update(struct tc_action *a, u64 bytes, u32 packets, + u64 lastuse, bool hw) +{ + struct tcf_vlan *v = to_vlan(a); + struct tcf_t *tm = &v->tcf_tm; + + _bstats_cpu_update(this_cpu_ptr(a->cpu_bstats), bytes, packets); + if (hw) + _bstats_cpu_update(this_cpu_ptr(a->cpu_bstats_hw), + bytes, packets); + tm->lastuse = max_t(u64, tm->lastuse, lastuse); +} + static int tcf_vlan_search(struct net *net, struct tc_action **a, u32 index) { struct tc_action_net *tn = net_generic(net, vlan_net_id); @@@ -338,7 -325,6 +338,7 @@@ static struct tc_action_ops act_vlan_op .init = tcf_vlan_init, .cleanup = tcf_vlan_cleanup, .walk = tcf_vlan_walker, + .stats_update = tcf_vlan_stats_update, .get_fill_size = tcf_vlan_get_fill_size, .lookup = tcf_vlan_search, .size = sizeof(struct tcf_vlan), @@@ -348,7 -334,7 +348,7 @@@ static __net_init int vlan_init_net(str { struct tc_action_net *tn = net_generic(net, vlan_net_id);
- return tc_action_net_init(tn, &act_vlan_ops); + return tc_action_net_init(net, tn, &act_vlan_ops); }
static void __net_exit vlan_exit_net(struct list_head *net_list) diff --combined net/sched/sch_taprio.c index 540bde009ea5,8d8bc2ec5cd6..84b863e2bdbd --- a/net/sched/sch_taprio.c +++ b/net/sched/sch_taprio.c @@@ -477,11 -477,6 +477,6 @@@ static struct sk_buff *taprio_dequeue(s u32 gate_mask; int i;
- if (atomic64_read(&q->picos_per_byte) == -1) { - WARN_ONCE(1, "taprio: dequeue() called with unknown picos per byte."); - return NULL; - } - rcu_read_lock(); entry = rcu_dereference(q->current_entry); /* if there's no entry, it means that the schedule didn't @@@ -677,6 -672,10 +672,6 @@@ static const struct nla_policy entry_po [TCA_TAPRIO_SCHED_ENTRY_INTERVAL] = { .type = NLA_U32 }, };
-static const struct nla_policy entry_list_policy[TCA_TAPRIO_SCHED_MAX + 1] = { - [TCA_TAPRIO_SCHED_ENTRY] = { .type = NLA_NESTED }, -}; - static const struct nla_policy taprio_policy[TCA_TAPRIO_ATTR_MAX + 1] = { [TCA_TAPRIO_ATTR_PRIOMAP] = { .len = sizeof(struct tc_mqprio_qopt) @@@ -954,12 -953,20 +949,20 @@@ static void taprio_set_picos_per_byte(s struct taprio_sched *q) { struct ethtool_link_ksettings ecmd; - int picos_per_byte = -1; + int speed = SPEED_10; + int picos_per_byte; + int err;
- if (!__ethtool_get_link_ksettings(dev, &ecmd) && - ecmd.base.speed != SPEED_UNKNOWN) - picos_per_byte = div64_s64(NSEC_PER_SEC * 1000LL * 8, - ecmd.base.speed * 1000 * 1000); + err = __ethtool_get_link_ksettings(dev, &ecmd); + if (err < 0) + goto skip; + + if (ecmd.base.speed != SPEED_UNKNOWN) + speed = ecmd.base.speed; + + skip: + picos_per_byte = div64_s64(NSEC_PER_SEC * 1000LL * 8, + speed * 1000 * 1000);
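Worked through for two link speeds (ecmd.base.speed is in Mb/s): the numerator NSEC_PER_SEC * 1000 * 8 is 8 * 10^12, so at 1000 Mb/s the division yields 8 * 10^12 / 10^9 = 8000 picoseconds per byte (8 ns/byte), and with the new SPEED_10 fallback it yields 8 * 10^12 / 10^7 = 800000 ps/byte. Either way q->picos_per_byte now holds a usable value, which is why the unknown-speed WARN_ONCE in taprio_dequeue() above could be dropped.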
atomic64_set(&q->picos_per_byte, picos_per_byte); netdev_dbg(dev, "taprio: set %s's picos_per_byte to: %lld, linkspeed: %d\n", @@@ -1245,6 -1252,10 +1248,10 @@@ static int taprio_init(struct Qdisc *sc */ q->clockid = -1;
+ spin_lock(&taprio_list_lock); + list_add(&q->taprio_list, &taprio_list); + spin_unlock(&taprio_list_lock); + if (sch->parent != TC_H_ROOT) return -EOPNOTSUPP;
@@@ -1262,10 -1273,6 +1269,6 @@@ if (!opt) return -EINVAL;
- spin_lock(&taprio_list_lock); - list_add(&q->taprio_list, &taprio_list); - spin_unlock(&taprio_list_lock); - for (i = 0; i < dev->num_tx_queues; i++) { struct netdev_queue *dev_queue; struct Qdisc *qdisc;