The following commit has been merged in the master branch:

commit 9def0655dea2b63a651b04a15509b05252520200
Merge: b089b369dfc9ad81f3cc88351b0664d8e865c444 ef3f6c256f0b4711a3ef1489797b95820be5ab01
Author: Stephen Rothwell <sfr@canb.auug.org.au>
Date:   Tue Mar 6 09:53:19 2018 +1100

    Merge remote-tracking branch 'net-next/master'
diff --combined MAINTAINERS
index 232335d064b6,e0b39004edc0..834d8e2bc69f
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@@ -868,17 -868,6 +868,17 @@@ X:	drivers/iio/*/adjd
  F:	drivers/staging/iio/*/ad*
  F:	drivers/staging/iio/trigger/iio-trig-bfin-timer.c
 +ANDES ARCHITECTURE
 +M:	Greentime Hu <green.hu@gmail.com>
 +M:	Vincent Chen <deanbo422@gmail.com>
 +T:	git https://github.com/andestech/linux.git
 +S:	Supported
 +F:	arch/nds32/
 +F:	Documentation/devicetree/bindings/interrupt-controller/andestech,ativic32.txt
 +F:	Documentation/devicetree/bindings/nds32/
 +K:	nds32
 +N:	nds32
 +
  ANDROID CONFIG FRAGMENTS
  M:	Rob Herring <robh@kernel.org>
  S:	Supported
@@@ -1163,7 -1152,7 +1163,7 @@@ S:	Maintaine
  F:	drivers/clk/sunxi/
  ARM/Allwinner sunXi SoC support
 -M:	Maxime Ripard <maxime.ripard@free-electrons.com>
 +M:	Maxime Ripard <maxime.ripard@bootlin.com>
  M:	Chen-Yu Tsai <wens@csie.org>
  L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
  S:	Maintained
@@@ -1247,6 -1236,27 +1247,6 @@@ M:	Boris Brezillon <boris.brezillon@fre
  S:	Maintained
  F:	drivers/clk/at91
 -ARM/ATMEL AT91RM9200, AT91SAM9 AND SAMA5 SOC SUPPORT
 -M:	Nicolas Ferre <nicolas.ferre@microchip.com>
 -M:	Alexandre Belloni <alexandre.belloni@free-electrons.com>
 -L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 -W:	http://www.linux4sam.org
 -T:	git git://git.kernel.org/pub/scm/linux/kernel/git/nferre/linux-at91.git
 -S:	Supported
 -N:	at91
 -N:	atmel
 -F:	arch/arm/mach-at91/
 -F:	include/soc/at91/
 -F:	arch/arm/boot/dts/at91*.dts
 -F:	arch/arm/boot/dts/at91*.dtsi
 -F:	arch/arm/boot/dts/sama*.dts
 -F:	arch/arm/boot/dts/sama*.dtsi
 -F:	arch/arm/include/debug/at91.S
 -F:	drivers/memory/atmel*
 -F:	drivers/watchdog/sama5d4_wdt.c
 -X:	drivers/input/touchscreen/atmel_mxt_ts.c
 -X:	drivers/net/wireless/atmel/
 -
  ARM/CALXEDA HIGHBANK ARCHITECTURE
  M:	Rob Herring <robh@kernel.org>
  L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
@@@ -1580,7 -1590,7 +1580,7 @@@ ARM/Marvell Dove/MV78xx0/Orion SOC supp
  M:	Jason Cooper <jason@lakedaemon.net>
  M:	Andrew Lunn <andrew@lunn.ch>
  M:	Sebastian Hesselbarth <sebastian.hesselbarth@gmail.com>
 -M:	Gregory Clement <gregory.clement@free-electrons.com>
 +M:	Gregory Clement <gregory.clement@bootlin.com>
  L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
  S:	Maintained
  F:	Documentation/devicetree/bindings/soc/dove/
@@@ -1594,7 -1604,7 +1594,7 @@@ F:	arch/arm/boot/dts/orion5x
  ARM/Marvell Kirkwood and Armada 370, 375, 38x, 39x, XP, 3700, 7K/8K SOC support
  M:	Jason Cooper <jason@lakedaemon.net>
  M:	Andrew Lunn <andrew@lunn.ch>
 -M:	Gregory Clement <gregory.clement@free-electrons.com>
 +M:	Gregory Clement <gregory.clement@bootlin.com>
  M:	Sebastian Hesselbarth <sebastian.hesselbarth@gmail.com>
  L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
  S:	Maintained
@@@ -1646,27 -1656,6 +1646,27 @@@ L:	linux-arm-kernel@lists.infradead.or
  F:	arch/arm/mach-ks8695/
  S:	Odd Fixes
 +ARM/Microchip (AT91) SoC support
 +M:	Nicolas Ferre <nicolas.ferre@microchip.com>
 +M:	Alexandre Belloni <alexandre.belloni@bootlin.com>
 +L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 +W:	http://www.linux4sam.org
 +T:	git git://git.kernel.org/pub/scm/linux/kernel/git/nferre/linux-at91.git
 +S:	Supported
 +N:	at91
 +N:	atmel
 +F:	arch/arm/mach-at91/
 +F:	include/soc/at91/
 +F:	arch/arm/boot/dts/at91*.dts
 +F:	arch/arm/boot/dts/at91*.dtsi
 +F:	arch/arm/boot/dts/sama*.dts
 +F:	arch/arm/boot/dts/sama*.dtsi
 +F:	arch/arm/include/debug/at91.S
 +F:	drivers/memory/atmel*
 +F:	drivers/watchdog/sama5d4_wdt.c
 +X:	drivers/input/touchscreen/atmel_mxt_ts.c
 +X:	drivers/net/wireless/atmel/
 +
  ARM/MIOA701 MACHINE SUPPORT
  M:	Robert Jarzmik <robert.jarzmik@free.fr>
  L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
@@@ -1874,6 -1863,7 +1874,6 @@@ Q:	https://patchwork.kernel.org/project
  S:	Maintained
  F:	arch/arm/boot/dts/s3c*
  F:	arch/arm/boot/dts/s5p*
 -F:	arch/arm/boot/dts/samsung*
  F:	arch/arm/boot/dts/exynos*
  F:	arch/arm64/boot/dts/exynos/
  F:	arch/arm/plat-samsung/
@@@ -2009,10 -1999,8 +2009,10 @@@ M:	Maxime Coquelin <mcoquelin.stm32@gma
  M:	Alexandre Torgue <alexandre.torgue@st.com>
  L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
  S:	Maintained
 -T:	git git://git.kernel.org/pub/scm/linux/kernel/git/mcoquelin/stm32.git
 +T:	git git://git.kernel.org/pub/scm/linux/kernel/git/atorgue/stm32.git stm32-next
  N:	stm32
 +F:	arch/arm/boot/dts/stm32*
 +F:	arch/arm/mach-stm32/
  F:	drivers/clocksource/armv7m_systick.c
  ARM/TANGO ARCHITECTURE
@@@ -4636,7 -4624,7 +4636,7 @@@ F:	include/uapi/drm/drm
  F:	include/linux/vga*
  DRM DRIVERS FOR ALLWINNER A10
 -M:	Maxime Ripard <maxime.ripard@free-electrons.com>
 +M:	Maxime Ripard <maxime.ripard@bootlin.com>
  L:	dri-devel@lists.freedesktop.org
  S:	Supported
  F:	drivers/gpu/drm/sun4i/
@@@ -7612,10 -7600,8 +7612,10 @@@ F:	mm/kasan
  F:	scripts/Makefile.kasan
  KCONFIG
 +M:	Masahiro Yamada <yamada.masahiro@socionext.com>
 +T:	git git://git.kernel.org/pub/scm/linux/kernel/git/masahiroy/linux-kbuild.git kconfig
  L:	linux-kbuild@vger.kernel.org
 -S:	Orphan
 +S:	Maintained
  F:	Documentation/kbuild/kconfig-language.txt
  F:	scripts/kconfig/
@@@ -8606,6 -8592,15 +8606,15 @@@ S:	Maintaine
  F:	Documentation/ABI/testing/sysfs-bus-iio-potentiometer-mcp4531
  F:	drivers/iio/potentiometer/mcp4531.c
+ MCR20A IEEE-802.15.4 RADIO DRIVER
+ M:	Xue Liu <liuxuenetmail@gmail.com>
+ L:	linux-wpan@vger.kernel.org
+ W:	https://github.com/xueliu/mcr20a-linux
+ S:	Maintained
+ F:	drivers/net/ieee802154/mcr20a.c
+ F:	drivers/net/ieee802154/mcr20a.h
+ F:	Documentation/devicetree/bindings/net/ieee802154/mcr20a.txt
+ 
  MEASUREMENT COMPUTING CIO-DAC IIO DRIVER
  M:	William Breathitt Gray <vilhelm.gray@gmail.com>
  L:	linux-iio@vger.kernel.org
@@@ -8699,16 -8694,6 +8708,16 @@@ T:	git git://linuxtv.org/media_tree.gi
  S:	Supported
  F:	drivers/media/pci/netup_unidvb/*
 +MEDIA DRIVERS FOR RENESAS - CEU
 +M:	Jacopo Mondi <jacopo@jmondi.org>
 +L:	linux-media@vger.kernel.org
 +L:	linux-renesas-soc@vger.kernel.org
 +T:	git git://linuxtv.org/media_tree.git
 +S:	Supported
 +F:	Documentation/devicetree/bindings/media/renesas,ceu.txt
 +F:	drivers/media/platform/renesas-ceu.c
 +F:	include/media/drv-intf/renesas-ceu.h
 +
  MEDIA DRIVERS FOR RENESAS - DRIF
  M:	Ramesh Shanmugasundaram <ramesh.shanmugasundaram@bp.renesas.com>
  L:	linux-media@vger.kernel.org
@@@ -9104,6 -9089,20 +9113,6 @@@ F:	drivers/media/platform/meson/ao-cec.
  F:	Documentation/devicetree/bindings/media/meson-ao-cec.txt
  T:	git git://linuxtv.org/media_tree.git
 -METAG ARCHITECTURE
 -M:	James Hogan <jhogan@kernel.org>
 -L:	linux-metag@vger.kernel.org
 -T:	git git://git.kernel.org/pub/scm/linux/kernel/git/jhogan/metag.git
 -S:	Odd Fixes
 -F:	arch/metag/
 -F:	Documentation/metag/
 -F:	Documentation/devicetree/bindings/metag/
 -F:	Documentation/devicetree/bindings/interrupt-controller/img,*
 -F:	drivers/clocksource/metag_generic.c
 -F:	drivers/irqchip/irq-metag.c
 -F:	drivers/irqchip/irq-metag-ext.c
 -F:	drivers/tty/metag_da.c
 -
  MICROBLAZE ARCHITECTURE
  M:	Michal Simek <monstr@monstr.eu>
  W:	http://www.monstr.eu/fdt/
@@@ -10183,13 -10182,6 +10192,13 @@@ T:	git git://linuxtv.org/media_tree.gi
  S:	Maintained
  F:	drivers/media/i2c/ov13858.c
 +OMNIVISION OV2685 SENSOR DRIVER
 +M:	Shunqian Zheng <zhengsq@rock-chips.com>
 +L:	linux-media@vger.kernel.org
 +T:	git git://linuxtv.org/media_tree.git
 +S:	Maintained
 +F:	drivers/media/i2c/ov2685.c
 +
  OMNIVISION OV5640 SENSOR DRIVER
  M:	Steve Longerbeam <slongerbeam@gmail.com>
  L:	linux-media@vger.kernel.org
@@@ -10204,13 -10196,6 +10213,13 @@@ T:	git git://linuxtv.org/media_tree.gi
  S:	Maintained
  F:	drivers/media/i2c/ov5647.c
 +OMNIVISION OV5695 SENSOR DRIVER
 +M:	Shunqian Zheng <zhengsq@rock-chips.com>
 +L:	linux-media@vger.kernel.org
 +T:	git git://linuxtv.org/media_tree.git
 +S:	Maintained
 +F:	drivers/media/i2c/ov5695.c
 +
  OMNIVISION OV7670 SENSOR DRIVER
  M:	Jonathan Corbet <corbet@lwn.net>
  L:	linux-media@vger.kernel.org
@@@ -10219,14 -10204,6 +10228,14 @@@ S:	Maintaine
  F:	drivers/media/i2c/ov7670.c
  F:	Documentation/devicetree/bindings/media/i2c/ov7670.txt
 +OMNIVISION OV772x SENSOR DRIVER
 +M:	Jacopo Mondi <jacopo@jmondi.org>
 +L:	linux-media@vger.kernel.org
 +T:	git git://linuxtv.org/media_tree.git
 +S:	Odd fixes
 +F:	drivers/media/i2c/ov772x.c
 +F:	include/media/i2c/ov772x.h
 +
  OMNIVISION OV7740 SENSOR DRIVER
  M:	Wenyou Yang <wenyou.yang@microchip.com>
  L:	linux-media@vger.kernel.org
@@@ -10235,16 -10212,6 +10244,16 @@@ S:	Maintaine
  F:	drivers/media/i2c/ov7740.c
  F:	Documentation/devicetree/bindings/media/i2c/ov7740.txt
 +OMNIVISION OV9650 SENSOR DRIVER
 +M:	Sakari Ailus <sakari.ailus@linux.intel.com>
 +R:	Akinobu Mita <akinobu.mita@gmail.com>
 +R:	Sylwester Nawrocki <s.nawrocki@samsung.com>
 +L:	linux-media@vger.kernel.org
 +T:	git git://linuxtv.org/media_tree.git
 +S:	Maintained
 +F:	drivers/media/i2c/ov9650.c
 +F:	Documentation/devicetree/bindings/media/i2c/ov9650.txt
 +
  ONENAND FLASH DRIVER
  M:	Kyungmin Park <kyungmin.park@samsung.com>
  L:	linux-mtd@lists.infradead.org
@@@ -10968,17 -10935,6 +10977,17 @@@ L:	linux-gpio@vger.kernel.or
  S:	Supported
  F:	drivers/pinctrl/pinctrl-at91-pio4.*
 +PIN CONTROLLER - FREESCALE
 +M:	Dong Aisheng <aisheng.dong@nxp.com>
 +M:	Fabio Estevam <festevam@gmail.com>
 +M:	Shawn Guo <shawnguo@kernel.org>
 +M:	Stefan Agner <stefan@agner.ch>
 +R:	Pengutronix Kernel Team <kernel@pengutronix.de>
 +L:	linux-gpio@vger.kernel.org
 +S:	Maintained
 +F:	drivers/pinctrl/freescale/
 +F:	Documentation/devicetree/bindings/pinctrl/fsl,*
 +
  PIN CONTROLLER - INTEL
  M:	Mika Westerberg <mika.westerberg@linux.intel.com>
  M:	Heikki Krogerus <heikki.krogerus@linux.intel.com>
@@@ -12243,7 -12199,6 +12252,7 @@@ M:	Tomasz Figa <tomasz.figa@gmail.com
  M:	Chanwoo Choi <cw00.choi@samsung.com>
  S:	Supported
  L:	linux-samsung-soc@vger.kernel.org (moderated for non-subscribers)
 +T:	git git://git.kernel.org/pub/scm/linux/kernel/git/snawrocki/clk.git
  F:	drivers/clk/samsung/
  F:	include/dt-bindings/clock/exynos*.h
  F:	Documentation/devicetree/bindings/clock/exynos*.txt
@@@ -13558,14 -13513,6 +13567,14 @@@ T:	git git://linuxtv.org/mkrufky/tuners
  S:	Maintained
  F:	drivers/media/tuners/tda18271*
 +TDA1997x MEDIA DRIVER
 +M:	Tim Harvey <tharvey@gateworks.com>
 +L:	linux-media@vger.kernel.org
 +W:	https://linuxtv.org
 +Q:	http://patchwork.linuxtv.org/project/linux-media/list/
 +S:	Maintained
 +F:	drivers/media/i2c/tda1997x.*
 +
  TDA827x MEDIA DRIVER
  M:	Michael Krufky <mkrufky@linuxtv.org>
  L:	linux-media@vger.kernel.org
@@@ -13647,12 -13594,6 +13656,12 @@@ L:	linux-media@vger.kernel.or
  S:	Maintained
  F:	drivers/media/rc/ttusbir.c
 +TECHWELL TW9910 VIDEO DECODER
 +L:	linux-media@vger.kernel.org
 +S:	Orphan
 +F:	drivers/media/i2c/tw9910.c
 +F:	include/media/i2c/tw9910.h
 +
  TEE SUBSYSTEM
  M:	Jens Wiklander <jens.wiklander@linaro.org>
  S:	Maintained

diff --combined arch/m68k/mac/config.c
index 36086cceb537,c73eb8209555..0c3275aa0197
--- a/arch/m68k/mac/config.c
+++ b/arch/m68k/mac/config.c
@@@ -26,7 -26,6 +26,7 @@@
  #include <linux/platform_device.h>
  #include <linux/adb.h>
  #include <linux/cuda.h>
 +#include <linux/pmu.h>
  #include <linux/rtc.h>
  #include <asm/setup.h>
@@@ -700,7 -699,7 +700,7 @@@ static struct mac_model mac_data_table[
  		.name		= "PowerBook 190",
  		.adb_type	= MAC_ADB_PB2,
  		.via_type	= MAC_VIA_QUADRA,
- 		.scsi_type	= MAC_SCSI_LATE,
+ 		.scsi_type	= MAC_SCSI_OLD,
  		.ide_type	= MAC_IDE_BABOON,
  		.scc_type	= MAC_SCC_QUADRA,
  		.floppy_type	= MAC_FLOPPY_SWIM_ADDR2,
@@@ -891,9 -890,6 +891,9 @@@ static void __init mac_identify(void
  #ifdef CONFIG_ADB_CUDA
  	find_via_cuda();
  #endif
 +#ifdef CONFIG_ADB_PMU68K
 +	find_via_pmu();
 +#endif
  }
  static void __init mac_report_hardware(void)
@@@ -1065,7 -1061,9 +1065,7 @@@ int __init mac_platform_init(void
  					mac_scsi_old_rsrc, ARRAY_SIZE(mac_scsi_old_rsrc));
  		break;
  	case MAC_SCSI_LATE:
- 		/* PDMA logic in 68040 PowerBooks is somehow different to
- 		 * '030 models. It's probably more like Quadras (see mac_esp).
- 		 */
+ 		/* XXX PDMA support for PowerBook 500 series needs testing */
  		platform_device_register_simple("mac_scsi", 0,
  			mac_scsi_late_rsrc, ARRAY_SIZE(mac_scsi_late_rsrc));
  		break;
@@@ -1090,6 -1088,10 +1090,10 @@@
  	    macintosh_config->expansion_type == MAC_EXP_PDS_COMM)
  		platform_device_register_simple("macsonic", -1, NULL, 0);
+ 	if (macintosh_config->expansion_type == MAC_EXP_PDS ||
+ 	    macintosh_config->expansion_type == MAC_EXP_PDS_COMM)
+ 		platform_device_register_simple("mac89x0", -1, NULL, 0);
+ 
  	if (macintosh_config->ether_type == MAC_ETHER_MACE)
  		platform_device_register_simple("macmace", -1, NULL, 0);
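The mac89x0 and macsonic hunks above hang extra devices off the Mac platform init path. For reference, the registration pattern they rely on looks like this in isolation (a minimal sketch; the function name and init-call wiring are illustrative, not part of the patch):

#include <linux/err.h>
#include <linux/platform_device.h>

/* Register a named platform device with no resources; a matching
 * platform_driver elsewhere binds by name ("mac89x0" as in the hunk
 * above). id = -1 means "the only instance of this device".
 */
static int __init example_register_board_devices(void)
{
	struct platform_device *pdev;

	pdev = platform_device_register_simple("mac89x0", -1, NULL, 0);
	if (IS_ERR(pdev))
		return PTR_ERR(pdev);

	return 0;
}

The probe logic then lives entirely in the driver that registered a platform_driver for that name, which is why config.c can stay ignorant of the Ethernet chip's details.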
diff --combined drivers/bluetooth/btusb.c
index 60bf04b8f103,c8e9ae6b99e1..fa4ce83893bb
--- a/drivers/bluetooth/btusb.c
+++ b/drivers/bluetooth/btusb.c
@@@ -21,7 -21,6 +21,7 @@@
   *
   */
 +#include <linux/dmi.h>
  #include <linux/module.h>
  #include <linux/usb.h>
  #include <linux/usb/quirks.h>
@@@ -340,6 -339,7 +340,7 @@@ static const struct usb_device_id black
  	/* Intel Bluetooth devices */
  	{ USB_DEVICE(0x8087, 0x0025), .driver_info = BTUSB_INTEL_NEW },
+ 	{ USB_DEVICE(0x8087, 0x0026), .driver_info = BTUSB_INTEL_NEW },
  	{ USB_DEVICE(0x8087, 0x07da), .driver_info = BTUSB_CSR },
  	{ USB_DEVICE(0x8087, 0x07dc), .driver_info = BTUSB_INTEL },
  	{ USB_DEVICE(0x8087, 0x0a2a), .driver_info = BTUSB_INTEL },
@@@ -374,27 -374,15 +375,30 @@@
  	{ USB_DEVICE(0x13d3, 0x3461), .driver_info = BTUSB_REALTEK },
  	{ USB_DEVICE(0x13d3, 0x3462), .driver_info = BTUSB_REALTEK },
+ 	/* Additional Realtek 8822BE Bluetooth devices */
+ 	{ USB_DEVICE(0x0b05, 0x185c), .driver_info = BTUSB_REALTEK },
+ 
  	/* Silicon Wave based devices */
  	{ USB_DEVICE(0x0c10, 0x0000), .driver_info = BTUSB_SWAVE },
  	{ }	/* Terminating entry */
  };
 +/* The Bluetooth USB module build into some devices needs to be reset on resume,
 + * this is a problem with the platform (likely shutting off all power) not with
 + * the module itself. So we use a DMI list to match known broken platforms.
 + */
 +static const struct dmi_system_id btusb_needs_reset_resume_table[] = {
 +	{
 +		/* Lenovo Yoga 920 (QCA Rome device 0cf3:e300) */
 +		.matches = {
 +			DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
 +			DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo YOGA 920"),
 +		},
 +	},
 +	{}
 +};
 +
  #define BTUSB_MAX_ISOC_FRAMES	10
  #define BTUSB_INTR_RUNNING	0
@@@ -2073,6 -2061,8 +2077,8 @@@ static int btusb_setup_intel_new(struc
  	case 0x0c:	/* WsP */
  	case 0x11:	/* JfP */
  	case 0x12:	/* ThP */
+ 	case 0x13:	/* HrP */
+ 	case 0x14:	/* QnJ, IcP */
  		break;
  	default:
  		BT_ERR("%s: Unsupported Intel hardware variant (%u)",
@@@ -2165,6 -2155,8 +2171,8 @@@
  		break;
  	case 0x11:	/* JfP */
  	case 0x12:	/* ThP */
+ 	case 0x13:	/* HrP */
+ 	case 0x14:	/* QnJ, IcP */
  		snprintf(fwname, sizeof(fwname), "intel/ibt-%u-%u-%u.sfi",
  			 le16_to_cpu(ver.hw_variant),
  			 le16_to_cpu(ver.hw_revision),
@@@ -2196,6 -2188,8 +2204,8 @@@
  		break;
  	case 0x11:	/* JfP */
  	case 0x12:	/* ThP */
+ 	case 0x13:	/* HrP */
+ 	case 0x14:	/* QnJ, IcP */
  		snprintf(fwname, sizeof(fwname), "intel/ibt-%u-%u-%u.ddc",
  			 le16_to_cpu(ver.hw_variant),
  			 le16_to_cpu(ver.hw_revision),
@@@ -2961,9 -2955,6 +2971,9 @@@ static int btusb_probe(struct usb_inter
  	hdev->send   = btusb_send_frame;
  	hdev->notify = btusb_notify;
 +	if (dmi_check_system(btusb_needs_reset_resume_table))
 +		interface_to_usbdev(intf)->quirks |= USB_QUIRK_RESET_RESUME;
 +
  #ifdef CONFIG_PM
  	err = btusb_config_oob_wake(hdev);
  	if (err)
@@@ -3050,6 -3041,12 +3060,6 @@@
  	if (id->driver_info & BTUSB_QCA_ROME) {
  		data->setup_on_usb = btusb_setup_qca;
  		hdev->set_bdaddr = btusb_set_bdaddr_ath3012;
 -
 -		/* QCA Rome devices lose their updated firmware over suspend,
 -		 * but the USB hub doesn't notice any status change.
 -		 * explicitly request a device reset on resume.
 -		 */
 -		interface_to_usbdev(intf)->quirks |= USB_QUIRK_RESET_RESUME;
  	}
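Read together, the btusb hunks trade the unconditional QCA Rome USB_QUIRK_RESET_RESUME for a DMI-keyed one: the module loses power on the affected platforms over suspend, so the reset is now requested only on boards known to be broken. Reduced to its essentials, the pattern is (table contents copied from the hunk above; the helper function name is illustrative):

#include <linux/dmi.h>
#include <linux/usb.h>
#include <linux/usb/quirks.h>

static const struct dmi_system_id example_reset_resume_table[] = {
	{
		/* Lenovo Yoga 920 (QCA Rome device 0cf3:e300) */
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
			DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo YOGA 920"),
		},
	},
	{}	/* terminating entry */
};

static void example_apply_quirk(struct usb_interface *intf)
{
	/* dmi_check_system() returns how many table entries matched */
	if (dmi_check_system(example_reset_resume_table))
		interface_to_usbdev(intf)->quirks |= USB_QUIRK_RESET_RESUME;
}

With this shape, each newly discovered broken platform becomes a one-line table addition rather than a driver-wide behaviour change.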
#ifdef CONFIG_BT_HCIBTUSB_RTL diff --combined drivers/infiniband/hw/mlx5/cq.c index b5cfdaa9c7c8,c4c7b82f4ac1..0ddce3114162 --- a/drivers/infiniband/hw/mlx5/cq.c +++ b/drivers/infiniband/hw/mlx5/cq.c @@@ -64,14 -64,9 +64,9 @@@ static void mlx5_ib_cq_event(struct mlx } }
- static void *get_cqe_from_buf(struct mlx5_ib_cq_buf *buf, int n, int size) - { - return mlx5_buf_offset(&buf->buf, n * size); - } - static void *get_cqe(struct mlx5_ib_cq *cq, int n) { - return get_cqe_from_buf(&cq->buf, n, cq->mcq.cqe_sz); + return mlx5_frag_buf_get_wqe(&cq->buf.fbc, n); }
static u8 sw_ownership_bit(int n, int nent) @@@ -226,6 -221,7 +221,6 @@@ static void handle_responder(struct ib_ wc->ex.invalidate_rkey = be32_to_cpu(cqe->imm_inval_pkey); break; } - wc->slid = be16_to_cpu(cqe->slid); wc->src_qp = be32_to_cpu(cqe->flags_rqpn) & 0xffffff; wc->dlid_path_bits = cqe->ml_path; g = (be32_to_cpu(cqe->flags_rqpn) >> 28) & 3; @@@ -240,12 -236,10 +235,12 @@@ }
if (ll != IB_LINK_LAYER_ETHERNET) { + wc->slid = be16_to_cpu(cqe->slid); wc->sl = (be32_to_cpu(cqe->flags_rqpn) >> 24) & 0xf; return; }
+ wc->slid = 0; vlan_present = cqe->l4_l3_hdr_type & 0x1; roce_packet_type = (be32_to_cpu(cqe->flags_rqpn) >> 24) & 0x3; if (vlan_present) { @@@ -404,7 -398,7 +399,7 @@@ static void handle_atomics(struct mlx5_
static void free_cq_buf(struct mlx5_ib_dev *dev, struct mlx5_ib_cq_buf *buf) { - mlx5_buf_free(dev->mdev, &buf->buf); + mlx5_frag_buf_free(dev->mdev, &buf->fbc.frag_buf); }
static void get_sig_err_item(struct mlx5_sig_err_cqe *cqe, @@@ -725,12 -719,25 +720,25 @@@ int mlx5_ib_arm_cq(struct ib_cq *ibcq, return ret; }
- static int alloc_cq_buf(struct mlx5_ib_dev *dev, struct mlx5_ib_cq_buf *buf, - int nent, int cqe_size) + static int alloc_cq_frag_buf(struct mlx5_ib_dev *dev, + struct mlx5_ib_cq_buf *buf, + int nent, + int cqe_size) { + struct mlx5_frag_buf_ctrl *c = &buf->fbc; + struct mlx5_frag_buf *frag_buf = &c->frag_buf; + u32 cqc_buff[MLX5_ST_SZ_DW(cqc)] = {0}; int err;
- err = mlx5_buf_alloc(dev->mdev, nent * cqe_size, &buf->buf); + MLX5_SET(cqc, cqc_buff, log_cq_size, ilog2(cqe_size)); + MLX5_SET(cqc, cqc_buff, cqe_sz, (cqe_size == 128) ? 1 : 0); + + mlx5_core_init_cq_frag_buf(&buf->fbc, cqc_buff); + + err = mlx5_frag_buf_alloc_node(dev->mdev, + nent * cqe_size, + frag_buf, + dev->mdev->priv.numa_node); if (err) return err;
@@@ -863,14 -870,15 +871,15 @@@ static void destroy_cq_user(struct mlx5 ib_umem_release(cq->buf.umem); }
- static void init_cq_buf(struct mlx5_ib_cq *cq, struct mlx5_ib_cq_buf *buf) + static void init_cq_frag_buf(struct mlx5_ib_cq *cq, + struct mlx5_ib_cq_buf *buf) { int i; void *cqe; struct mlx5_cqe64 *cqe64;
for (i = 0; i < buf->nent; i++) { - cqe = get_cqe_from_buf(buf, i, buf->cqe_size); + cqe = get_cqe(cq, i); cqe64 = buf->cqe_size == 64 ? cqe : cqe + 64; cqe64->op_own = MLX5_CQE_INVALID << 4; } @@@ -892,14 -900,15 +901,15 @@@ static int create_cq_kernel(struct mlx5 cq->mcq.arm_db = cq->db.db + 1; cq->mcq.cqe_sz = cqe_size;
- err = alloc_cq_buf(dev, &cq->buf, entries, cqe_size); + err = alloc_cq_frag_buf(dev, &cq->buf, entries, cqe_size); if (err) goto err_db;
- init_cq_buf(cq, &cq->buf); + init_cq_frag_buf(cq, &cq->buf);
*inlen = MLX5_ST_SZ_BYTES(create_cq_in) + - MLX5_FLD_SZ_BYTES(create_cq_in, pas[0]) * cq->buf.buf.npages; + MLX5_FLD_SZ_BYTES(create_cq_in, pas[0]) * + cq->buf.fbc.frag_buf.npages; *cqb = kvzalloc(*inlen, GFP_KERNEL); if (!*cqb) { err = -ENOMEM; @@@ -907,11 -916,12 +917,12 @@@ }
pas = (__be64 *)MLX5_ADDR_OF(create_cq_in, *cqb, pas); - mlx5_fill_page_array(&cq->buf.buf, pas); + mlx5_fill_page_frag_array(&cq->buf.fbc.frag_buf, pas);
cqc = MLX5_ADDR_OF(create_cq_in, *cqb, cq_context); MLX5_SET(cqc, cqc, log_page_size, - cq->buf.buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT); + cq->buf.fbc.frag_buf.page_shift - + MLX5_ADAPTER_PAGE_SHIFT);
*index = dev->mdev->priv.uar->index;
@@@ -1208,11 -1218,11 +1219,11 @@@ static int resize_kernel(struct mlx5_ib if (!cq->resize_buf) return -ENOMEM;
- err = alloc_cq_buf(dev, cq->resize_buf, entries, cqe_size); + err = alloc_cq_frag_buf(dev, cq->resize_buf, entries, cqe_size); if (err) goto ex;
- init_cq_buf(cq, cq->resize_buf); + init_cq_frag_buf(cq, cq->resize_buf);
return 0;
@@@ -1257,9 -1267,8 +1268,8 @@@ static int copy_resize_cqes(struct mlx5 }
while ((scqe64->op_own >> 4) != MLX5_CQE_RESIZE_CQ) { - dcqe = get_cqe_from_buf(cq->resize_buf, - (i + 1) & (cq->resize_buf->nent), - dsize); + dcqe = mlx5_frag_buf_get_wqe(&cq->resize_buf->fbc, + (i + 1) & cq->resize_buf->nent); dcqe64 = dsize == 64 ? dcqe : dcqe + 64; sw_own = sw_ownership_bit(i + 1, cq->resize_buf->nent); memcpy(dcqe, scqe, dsize); @@@ -1325,8 -1334,11 +1335,11 @@@ int mlx5_ib_resize_cq(struct ib_cq *ibc cqe_size = 64; err = resize_kernel(dev, cq, entries, cqe_size); if (!err) { - npas = cq->resize_buf->buf.npages; - page_shift = cq->resize_buf->buf.page_shift; + struct mlx5_frag_buf_ctrl *c; + + c = &cq->resize_buf->fbc; + npas = c->frag_buf.npages; + page_shift = c->frag_buf.page_shift; } }
@@@ -1347,7 -1359,8 +1360,8 @@@ mlx5_ib_populate_pas(dev, cq->resize_umem, page_shift, pas, 0); else - mlx5_fill_page_array(&cq->resize_buf->buf, pas); + mlx5_fill_page_frag_array(&cq->resize_buf->fbc.frag_buf, + pas);
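The cq.c hunks above convert the CQ buffer from one contiguous allocation (mlx5_buf_alloc()/mlx5_buf_offset()) to a fragmented buffer indexed through a control block (mlx5_frag_buf_alloc_node()/mlx5_frag_buf_get_wqe()), so large CQs no longer require high-order allocations. Stripped of the mlx5 types, the indexing idea is just this (illustrative names, not mlx5 API; it assumes the entry size divides the fragment size so no entry straddles a fragment boundary):

#include <stddef.h>
#include <stdint.h>

struct frag_buf {
	void   **frags;		/* fragment base pointers */
	size_t   frag_size;	/* bytes per fragment, e.g. one page */
	size_t   entry_size;	/* bytes per queue entry */
};

/* Locate entry n by splitting the linear byte offset into a
 * fragment index and an offset within that fragment.
 */
static void *frag_buf_get_entry(const struct frag_buf *fb, size_t n)
{
	size_t off = n * fb->entry_size;

	return (uint8_t *)fb->frags[off / fb->frag_size] + off % fb->frag_size;
}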
MLX5_SET(modify_cq_in, in, modify_field_select_resize_field_select.resize_field_select.resize_field_select, diff --combined drivers/infiniband/hw/mlx5/main.c index bab38c6647d7,ee55d7d64554..d04a3bd625e4 --- a/drivers/infiniband/hw/mlx5/main.c +++ b/drivers/infiniband/hw/mlx5/main.c @@@ -57,6 -57,7 +57,7 @@@ #include <linux/in.h> #include <linux/etherdevice.h> #include "mlx5_ib.h" + #include "ib_rep.h" #include "cmd.h"
#define DRIVER_NAME "mlx5_ib" @@@ -130,7 -131,7 +131,7 @@@ static int get_port_state(struct ib_dev int ret;
memset(&attr, 0, sizeof(attr)); - ret = mlx5_ib_query_port(ibdev, port_num, &attr); + ret = ibdev->query_port(ibdev, port_num, &attr); if (!ret) *state = attr.state; return ret; @@@ -154,10 -155,19 +155,19 @@@ static int mlx5_netdev_event(struct not case NETDEV_REGISTER: case NETDEV_UNREGISTER: write_lock(&roce->netdev_lock); - - if (ndev->dev.parent == &mdev->pdev->dev) - roce->netdev = (event == NETDEV_UNREGISTER) ? + if (ibdev->rep) { + struct mlx5_eswitch *esw = ibdev->mdev->priv.eswitch; + struct net_device *rep_ndev; + + rep_ndev = mlx5_ib_get_rep_netdev(esw, + ibdev->rep->vport); + if (rep_ndev == ndev) + roce->netdev = (event == NETDEV_UNREGISTER) ? NULL : ndev; + } else if (ndev->dev.parent == &ibdev->mdev->pdev->dev) { + roce->netdev = (event == NETDEV_UNREGISTER) ? + NULL : ndev; + } write_unlock(&roce->netdev_lock); break;
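This hunk makes mlx5_netdev_event() representor-aware: instead of matching any net_device whose parent is the PCI function, it asks the eswitch for the representor's netdev and tracks only that one. The surrounding plumbing follows the standard netdevice-notifier shape (a sketch; the matching logic in the comments is the part the patch actually changes):

#include <linux/netdevice.h>
#include <linux/notifier.h>

static int example_netdev_event(struct notifier_block *nb,
				unsigned long event, void *ptr)
{
	struct net_device *ndev = netdev_notifier_info_to_dev(ptr);

	switch (event) {
	case NETDEV_REGISTER:
		/* record ndev if it belongs to our function/vport */
		break;
	case NETDEV_UNREGISTER:
		/* drop our reference if ndev is the tracked device */
		break;
	}

	return NOTIFY_DONE;
}

static struct notifier_block example_nb = {
	.notifier_call = example_netdev_event,
};

/* registered once via register_netdevice_notifier(&example_nb) */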
@@@ -1268,6 -1278,22 +1278,22 @@@ int mlx5_ib_query_port(struct ib_devic return ret; }
+ static int mlx5_ib_rep_query_port(struct ib_device *ibdev, u8 port, + struct ib_port_attr *props) + { + int ret; + + /* Only link layer == ethernet is valid for representors */ + ret = mlx5_query_port_roce(ibdev, port, props); + if (ret || !props) + return ret; + + /* We don't support GIDS */ + props->gid_tbl_len = 0; + + return ret; + } + static int mlx5_ib_query_gid(struct ib_device *ibdev, u8 port, int index, union ib_gid *gid) { @@@ -2631,7 -2657,7 +2657,7 @@@ static int mlx5_ib_destroy_flow(struct ibflow); struct mlx5_ib_flow_handler *iter, *tmp;
- mutex_lock(&dev->flow_db.lock); + mutex_lock(&dev->flow_db->lock);
list_for_each_entry_safe(iter, tmp, &handler->list, list) { mlx5_del_flow_rules(iter->rule); @@@ -2642,7 -2668,7 +2668,7 @@@
mlx5_del_flow_rules(handler->rule); put_flow_table(dev, handler->prio, true); - mutex_unlock(&dev->flow_db.lock); + mutex_unlock(&dev->flow_db->lock);
kfree(handler);
@@@ -2691,7 -2717,7 +2717,7 @@@ static struct mlx5_ib_flow_prio *get_fl MLX5_FLOW_NAMESPACE_BYPASS); num_entries = MLX5_FS_MAX_ENTRIES; num_groups = MLX5_FS_MAX_TYPES; - prio = &dev->flow_db.prios[priority]; + prio = &dev->flow_db->prios[priority]; } else if (flow_attr->type == IB_FLOW_ATTR_ALL_DEFAULT || flow_attr->type == IB_FLOW_ATTR_MC_DEFAULT) { ns = mlx5_get_flow_namespace(dev->mdev, @@@ -2699,7 -2725,7 +2725,7 @@@ build_leftovers_ft_param(&priority, &num_entries, &num_groups); - prio = &dev->flow_db.prios[MLX5_IB_FLOW_LEFTOVERS_PRIO]; + prio = &dev->flow_db->prios[MLX5_IB_FLOW_LEFTOVERS_PRIO]; } else if (flow_attr->type == IB_FLOW_ATTR_SNIFFER) { if (!MLX5_CAP_FLOWTABLE(dev->mdev, allow_sniffer_and_nic_rx_shared_tir)) @@@ -2709,7 -2735,7 +2735,7 @@@ MLX5_FLOW_NAMESPACE_SNIFFER_RX : MLX5_FLOW_NAMESPACE_SNIFFER_TX);
- prio = &dev->flow_db.sniffer[ft_type]; + prio = &dev->flow_db->sniffer[ft_type]; priority = 0; num_entries = 1; num_groups = 1; @@@ -2802,6 -2828,18 +2828,18 @@@ static struct mlx5_ib_flow_handler *_cr if (!flow_is_multicast_only(flow_attr)) set_underlay_qp(dev, spec, underlay_qpn);
+ if (dev->rep) { + void *misc; + + misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, + misc_parameters); + MLX5_SET(fte_match_set_misc, misc, source_port, + dev->rep->vport); + misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, + misc_parameters); + MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port); + } + spec->match_criteria_enable = get_match_criteria_enable(spec->match_criteria); if (is_drop) { flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP; @@@ -2999,7 -3037,7 +3037,7 @@@ static struct ib_flow *mlx5_ib_create_f if (!dst) return ERR_PTR(-ENOMEM);
- mutex_lock(&dev->flow_db.lock); + mutex_lock(&dev->flow_db->lock);
ft_prio = get_flow_table(dev, flow_attr, MLX5_IB_FT_RX); if (IS_ERR(ft_prio)) { @@@ -3048,7 -3086,7 +3086,7 @@@ goto destroy_ft; }
- mutex_unlock(&dev->flow_db.lock); + mutex_unlock(&dev->flow_db->lock); kfree(dst);
return &handler->ibflow; @@@ -3058,7 -3096,7 +3096,7 @@@ destroy_ft if (ft_prio_tx) put_flow_table(dev, ft_prio_tx, false); unlock: - mutex_unlock(&dev->flow_db.lock); + mutex_unlock(&dev->flow_db->lock); kfree(dst); kfree(handler); return ERR_PTR(err); @@@ -3263,7 -3301,7 +3301,7 @@@ static void mlx5_ib_handle_event(struc struct mlx5_ib_dev *ibdev; struct ib_event ibev; bool fatal = false; - u8 port = 0; + u8 port = (u8)work->param;
if (mlx5_core_is_mp_slave(work->dev)) { ibdev = mlx5_ib_get_ibdev_from_mpi(work->context); @@@ -3283,6 -3321,8 +3321,6 @@@ case MLX5_DEV_EVENT_PORT_UP: case MLX5_DEV_EVENT_PORT_DOWN: case MLX5_DEV_EVENT_PORT_INITIALIZED: - port = (u8)work->param; - /* In RoCE, port up/down events are handled in * mlx5_netdev_event(). */ @@@ -3296,19 -3336,24 +3334,19 @@@
case MLX5_DEV_EVENT_LID_CHANGE: ibev.event = IB_EVENT_LID_CHANGE; - port = (u8)work->param; break;
case MLX5_DEV_EVENT_PKEY_CHANGE: ibev.event = IB_EVENT_PKEY_CHANGE; - port = (u8)work->param; - schedule_work(&ibdev->devr.ports[port - 1].pkey_change_work); break;
case MLX5_DEV_EVENT_GUID_CHANGE: ibev.event = IB_EVENT_GID_CHANGE; - port = (u8)work->param; break;
case MLX5_DEV_EVENT_CLIENT_REREG: ibev.event = IB_EVENT_CLIENT_REREGISTER; - port = (u8)work->param; break; case MLX5_DEV_EVENT_DELAY_DROP_TIMEOUT: schedule_work(&ibdev->delay_drop.delay_drop_work); @@@ -3320,7 -3365,7 +3358,7 @@@ ibev.device = &ibdev->ib_dev; ibev.element.port_num = port;
- if (port < 1 || port > ibdev->num_ports) { + if (!rdma_is_port_valid(&ibdev->ib_dev, port)) { mlx5_ib_warn(ibdev, "warning: event on port %d\n", port); goto out; } @@@ -3765,6 -3810,25 +3803,25 @@@ static int mlx5_port_immutable(struct i return 0; }
+ static int mlx5_port_rep_immutable(struct ib_device *ibdev, u8 port_num, + struct ib_port_immutable *immutable) + { + struct ib_port_attr attr; + int err; + + immutable->core_cap_flags = RDMA_CORE_PORT_RAW_PACKET; + + err = ib_query_port(ibdev, port_num, &attr); + if (err) + return err; + + immutable->pkey_tbl_len = attr.pkey_tbl_len; + immutable->gid_tbl_len = attr.gid_tbl_len; + immutable->core_cap_flags = RDMA_CORE_PORT_RAW_PACKET; + + return 0; + } + static void get_dev_fw_str(struct ib_device *ibdev, char *str) { struct mlx5_ib_dev *dev = @@@ -3795,7 -3859,7 +3852,7 @@@ static int mlx5_eth_lag_init(struct mlx goto err_destroy_vport_lag; }
- dev->flow_db.lag_demux_ft = ft; + dev->flow_db->lag_demux_ft = ft; return 0;
err_destroy_vport_lag: @@@ -3807,9 -3871,9 +3864,9 @@@ static void mlx5_eth_lag_cleanup(struc { struct mlx5_core_dev *mdev = dev->mdev;
- if (dev->flow_db.lag_demux_ft) { - mlx5_destroy_flow_table(dev->flow_db.lag_demux_ft); - dev->flow_db.lag_demux_ft = NULL; + if (dev->flow_db->lag_demux_ft) { + mlx5_destroy_flow_table(dev->flow_db->lag_demux_ft); + dev->flow_db->lag_demux_ft = NULL;
mlx5_cmd_destroy_vport_lag(mdev); } @@@ -3841,14 -3905,10 +3898,10 @@@ static int mlx5_enable_eth(struct mlx5_ { int err;
- err = mlx5_add_netdev_notifier(dev, port_num); - if (err) - return err; - if (MLX5_CAP_GEN(dev->mdev, roce)) { err = mlx5_nic_vport_enable_roce(dev->mdev); if (err) - goto err_unregister_netdevice_notifier; + return err; }
err = mlx5_eth_lag_init(dev); @@@ -3861,8 -3921,6 +3914,6 @@@ err_disable_roce if (MLX5_CAP_GEN(dev->mdev, roce)) mlx5_nic_vport_disable_roce(dev->mdev);
- err_unregister_netdevice_notifier: - mlx5_remove_netdev_notifier(dev, port_num); return err; }
@@@ -4496,7 -4554,7 +4547,7 @@@ static void mlx5_ib_cleanup_multiport_m mlx5_nic_vport_disable_roce(dev->mdev); }
- static void mlx5_ib_stage_init_cleanup(struct mlx5_ib_dev *dev) + void mlx5_ib_stage_init_cleanup(struct mlx5_ib_dev *dev) { mlx5_ib_cleanup_multiport_master(dev); #ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING @@@ -4505,7 -4563,7 +4556,7 @@@ kfree(dev->port); }
- static int mlx5_ib_stage_init_init(struct mlx5_ib_dev *dev) + int mlx5_ib_stage_init_init(struct mlx5_ib_dev *dev) { struct mlx5_core_dev *mdev = dev->mdev; const char *name; @@@ -4557,7 -4615,6 +4608,6 @@@ dev->mdev->priv.eq_table.num_comp_vectors; dev->ib_dev.dev.parent = &mdev->pdev->dev;
- mutex_init(&dev->flow_db.lock); mutex_init(&dev->cap_mask_mutex); INIT_LIST_HEAD(&dev->qp_list); spin_lock_init(&dev->reset_flow_resource_lock); @@@ -4578,7 -4635,38 +4628,38 @@@ err_free_port return -ENOMEM; }
- static int mlx5_ib_stage_caps_init(struct mlx5_ib_dev *dev) + static int mlx5_ib_stage_flow_db_init(struct mlx5_ib_dev *dev) + { + dev->flow_db = kzalloc(sizeof(*dev->flow_db), GFP_KERNEL); + + if (!dev->flow_db) + return -ENOMEM; + + mutex_init(&dev->flow_db->lock); + + return 0; + } + + int mlx5_ib_stage_rep_flow_db_init(struct mlx5_ib_dev *dev) + { + struct mlx5_ib_dev *nic_dev; + + nic_dev = mlx5_ib_get_uplink_ibdev(dev->mdev->priv.eswitch); + + if (!nic_dev) + return -EINVAL; + + dev->flow_db = nic_dev->flow_db; + + return 0; + } + + static void mlx5_ib_stage_flow_db_cleanup(struct mlx5_ib_dev *dev) + { + kfree(dev->flow_db); + } + + int mlx5_ib_stage_caps_init(struct mlx5_ib_dev *dev) { struct mlx5_core_dev *mdev = dev->mdev; int err; @@@ -4619,7 -4707,6 +4700,6 @@@ (1ull << IB_USER_VERBS_EX_CMD_MODIFY_CQ);
dev->ib_dev.query_device = mlx5_ib_query_device; - dev->ib_dev.query_port = mlx5_ib_query_port; dev->ib_dev.get_link_layer = mlx5_ib_port_link_layer; dev->ib_dev.query_gid = mlx5_ib_query_gid; dev->ib_dev.add_gid = mlx5_ib_add_gid; @@@ -4662,7 -4749,6 +4742,6 @@@ dev->ib_dev.alloc_mr = mlx5_ib_alloc_mr; dev->ib_dev.map_mr_sg = mlx5_ib_map_mr_sg; dev->ib_dev.check_mr_status = mlx5_ib_check_mr_status; - dev->ib_dev.get_port_immutable = mlx5_port_immutable; dev->ib_dev.get_dev_fw_str = get_dev_fw_str; dev->ib_dev.get_vector_affinity = mlx5_ib_get_vector_affinity; if (MLX5_CAP_GEN(mdev, ipoib_enhanced_offloads)) @@@ -4713,6 -4799,80 +4792,80 @@@ return 0; }
+ static int mlx5_ib_stage_non_default_cb(struct mlx5_ib_dev *dev) + { + dev->ib_dev.get_port_immutable = mlx5_port_immutable; + dev->ib_dev.query_port = mlx5_ib_query_port; + + return 0; + } + + int mlx5_ib_stage_rep_non_default_cb(struct mlx5_ib_dev *dev) + { + dev->ib_dev.get_port_immutable = mlx5_port_rep_immutable; + dev->ib_dev.query_port = mlx5_ib_rep_query_port; + + return 0; + } + + static int mlx5_ib_stage_common_roce_init(struct mlx5_ib_dev *dev, + u8 port_num) + { + int i; + + for (i = 0; i < dev->num_ports; i++) { + dev->roce[i].dev = dev; + dev->roce[i].native_port_num = i + 1; + dev->roce[i].last_port_state = IB_PORT_DOWN; + } + + dev->ib_dev.get_netdev = mlx5_ib_get_netdev; + dev->ib_dev.create_wq = mlx5_ib_create_wq; + dev->ib_dev.modify_wq = mlx5_ib_modify_wq; + dev->ib_dev.destroy_wq = mlx5_ib_destroy_wq; + dev->ib_dev.create_rwq_ind_table = mlx5_ib_create_rwq_ind_table; + dev->ib_dev.destroy_rwq_ind_table = mlx5_ib_destroy_rwq_ind_table; + + dev->ib_dev.uverbs_ex_cmd_mask |= + (1ull << IB_USER_VERBS_EX_CMD_CREATE_WQ) | + (1ull << IB_USER_VERBS_EX_CMD_MODIFY_WQ) | + (1ull << IB_USER_VERBS_EX_CMD_DESTROY_WQ) | + (1ull << IB_USER_VERBS_EX_CMD_CREATE_RWQ_IND_TBL) | + (1ull << IB_USER_VERBS_EX_CMD_DESTROY_RWQ_IND_TBL); + + return mlx5_add_netdev_notifier(dev, port_num); + } + + static void mlx5_ib_stage_common_roce_cleanup(struct mlx5_ib_dev *dev) + { + u8 port_num = mlx5_core_native_port_num(dev->mdev) - 1; + + mlx5_remove_netdev_notifier(dev, port_num); + } + + int mlx5_ib_stage_rep_roce_init(struct mlx5_ib_dev *dev) + { + struct mlx5_core_dev *mdev = dev->mdev; + enum rdma_link_layer ll; + int port_type_cap; + int err = 0; + u8 port_num; + + port_num = mlx5_core_native_port_num(dev->mdev) - 1; + port_type_cap = MLX5_CAP_GEN(mdev, port_type); + ll = mlx5_port_type_cap_to_rdma_ll(port_type_cap); + + if (ll == IB_LINK_LAYER_ETHERNET) + err = mlx5_ib_stage_common_roce_init(dev, port_num); + + return err; + } + + void mlx5_ib_stage_rep_roce_cleanup(struct mlx5_ib_dev *dev) + { + mlx5_ib_stage_common_roce_cleanup(dev); + } + static int mlx5_ib_stage_roce_init(struct mlx5_ib_dev *dev) { struct mlx5_core_dev *mdev = dev->mdev; @@@ -4720,37 -4880,26 +4873,26 @@@ int port_type_cap; u8 port_num; int err; - int i;
port_num = mlx5_core_native_port_num(dev->mdev) - 1; port_type_cap = MLX5_CAP_GEN(mdev, port_type); ll = mlx5_port_type_cap_to_rdma_ll(port_type_cap);
if (ll == IB_LINK_LAYER_ETHERNET) { - for (i = 0; i < dev->num_ports; i++) { - dev->roce[i].dev = dev; - dev->roce[i].native_port_num = i + 1; - dev->roce[i].last_port_state = IB_PORT_DOWN; - } + err = mlx5_ib_stage_common_roce_init(dev, port_num); + if (err) + return err;
- dev->ib_dev.get_netdev = mlx5_ib_get_netdev; - dev->ib_dev.create_wq = mlx5_ib_create_wq; - dev->ib_dev.modify_wq = mlx5_ib_modify_wq; - dev->ib_dev.destroy_wq = mlx5_ib_destroy_wq; - dev->ib_dev.create_rwq_ind_table = mlx5_ib_create_rwq_ind_table; - dev->ib_dev.destroy_rwq_ind_table = mlx5_ib_destroy_rwq_ind_table; - dev->ib_dev.uverbs_ex_cmd_mask |= - (1ull << IB_USER_VERBS_EX_CMD_CREATE_WQ) | - (1ull << IB_USER_VERBS_EX_CMD_MODIFY_WQ) | - (1ull << IB_USER_VERBS_EX_CMD_DESTROY_WQ) | - (1ull << IB_USER_VERBS_EX_CMD_CREATE_RWQ_IND_TBL) | - (1ull << IB_USER_VERBS_EX_CMD_DESTROY_RWQ_IND_TBL); err = mlx5_enable_eth(dev, port_num); if (err) - return err; + goto cleanup; }
return 0; + cleanup: + mlx5_ib_stage_common_roce_cleanup(dev); + + return err; }
static void mlx5_ib_stage_roce_cleanup(struct mlx5_ib_dev *dev) @@@ -4766,16 -4915,16 +4908,16 @@@
if (ll == IB_LINK_LAYER_ETHERNET) { mlx5_disable_eth(dev); - mlx5_remove_netdev_notifier(dev, port_num); + mlx5_ib_stage_common_roce_cleanup(dev); } }
- static int mlx5_ib_stage_dev_res_init(struct mlx5_ib_dev *dev) + int mlx5_ib_stage_dev_res_init(struct mlx5_ib_dev *dev) { return create_dev_resources(&dev->devr); }
- static void mlx5_ib_stage_dev_res_cleanup(struct mlx5_ib_dev *dev) + void mlx5_ib_stage_dev_res_cleanup(struct mlx5_ib_dev *dev) { destroy_dev_resources(&dev->devr); } @@@ -4787,7 -4936,7 +4929,7 @@@ static int mlx5_ib_stage_odp_init(struc return mlx5_ib_odp_init_one(dev); }
- static int mlx5_ib_stage_counters_init(struct mlx5_ib_dev *dev) + int mlx5_ib_stage_counters_init(struct mlx5_ib_dev *dev) { if (MLX5_CAP_GEN(dev->mdev, max_qp_cnt)) { dev->ib_dev.get_hw_stats = mlx5_ib_get_hw_stats; @@@ -4799,7 -4948,7 +4941,7 @@@ return 0; }
- static void mlx5_ib_stage_counters_cleanup(struct mlx5_ib_dev *dev) + void mlx5_ib_stage_counters_cleanup(struct mlx5_ib_dev *dev) { if (MLX5_CAP_GEN(dev->mdev, max_qp_cnt)) mlx5_ib_dealloc_counters(dev); @@@ -4830,7 -4979,7 +4972,7 @@@ static void mlx5_ib_stage_uar_cleanup(s mlx5_put_uars_page(dev->mdev, dev->mdev->priv.uar); }
- static int mlx5_ib_stage_bfrag_init(struct mlx5_ib_dev *dev) + int mlx5_ib_stage_bfrag_init(struct mlx5_ib_dev *dev) { int err;
@@@ -4845,28 -4994,28 +4987,28 @@@ return err; }
- static void mlx5_ib_stage_bfrag_cleanup(struct mlx5_ib_dev *dev) + void mlx5_ib_stage_bfrag_cleanup(struct mlx5_ib_dev *dev) { mlx5_free_bfreg(dev->mdev, &dev->fp_bfreg); mlx5_free_bfreg(dev->mdev, &dev->bfreg); }
- static int mlx5_ib_stage_ib_reg_init(struct mlx5_ib_dev *dev) + int mlx5_ib_stage_ib_reg_init(struct mlx5_ib_dev *dev) { return ib_register_device(&dev->ib_dev, NULL); }
- static void mlx5_ib_stage_ib_reg_cleanup(struct mlx5_ib_dev *dev) + void mlx5_ib_stage_ib_reg_cleanup(struct mlx5_ib_dev *dev) { ib_unregister_device(&dev->ib_dev); }
- static int mlx5_ib_stage_umr_res_init(struct mlx5_ib_dev *dev) + int mlx5_ib_stage_umr_res_init(struct mlx5_ib_dev *dev) { return create_umr_res(dev); }
- static void mlx5_ib_stage_umr_res_cleanup(struct mlx5_ib_dev *dev) + void mlx5_ib_stage_umr_res_cleanup(struct mlx5_ib_dev *dev) { destroy_umrc_res(dev); } @@@ -4883,7 -5032,7 +5025,7 @@@ static void mlx5_ib_stage_delay_drop_cl cancel_delay_drop(dev); }
- static int mlx5_ib_stage_class_attr_init(struct mlx5_ib_dev *dev) + int mlx5_ib_stage_class_attr_init(struct mlx5_ib_dev *dev) { int err; int i; @@@ -4898,9 -5047,21 +5040,21 @@@ return 0; }
- static void __mlx5_ib_remove(struct mlx5_ib_dev *dev, - const struct mlx5_ib_profile *profile, - int stage) + static int mlx5_ib_stage_rep_reg_init(struct mlx5_ib_dev *dev) + { + mlx5_ib_register_vport_reps(dev); + + return 0; + } + + static void mlx5_ib_stage_rep_reg_cleanup(struct mlx5_ib_dev *dev) + { + mlx5_ib_unregister_vport_reps(dev); + } + + void __mlx5_ib_remove(struct mlx5_ib_dev *dev, + const struct mlx5_ib_profile *profile, + int stage) { /* Number of stages to cleanup */ while (stage) { @@@ -4914,23 -5075,14 +5068,14 @@@
static void *mlx5_ib_add_slave_port(struct mlx5_core_dev *mdev, u8 port_num);
- static void *__mlx5_ib_add(struct mlx5_core_dev *mdev, - const struct mlx5_ib_profile *profile) + void *__mlx5_ib_add(struct mlx5_ib_dev *dev, + const struct mlx5_ib_profile *profile) { - struct mlx5_ib_dev *dev; int err; int i;
printk_once(KERN_INFO "%s", mlx5_version);
- dev = (struct mlx5_ib_dev *)ib_alloc_device(sizeof(*dev)); - if (!dev) - return NULL; - - dev->mdev = mdev; - dev->num_ports = max(MLX5_CAP_GEN(mdev, num_ports), - MLX5_CAP_GEN(mdev, num_vhca_ports)); - for (i = 0; i < MLX5_IB_STAGE_MAX; i++) { if (profile->stage[i].init) { err = profile->stage[i].init(dev); @@@ -4954,9 -5106,15 +5099,15 @@@ static const struct mlx5_ib_profile pf_ STAGE_CREATE(MLX5_IB_STAGE_INIT, mlx5_ib_stage_init_init, mlx5_ib_stage_init_cleanup), + STAGE_CREATE(MLX5_IB_STAGE_FLOW_DB, + mlx5_ib_stage_flow_db_init, + mlx5_ib_stage_flow_db_cleanup), STAGE_CREATE(MLX5_IB_STAGE_CAPS, mlx5_ib_stage_caps_init, NULL), + STAGE_CREATE(MLX5_IB_STAGE_NON_DEFAULT_CB, + mlx5_ib_stage_non_default_cb, + NULL), STAGE_CREATE(MLX5_IB_STAGE_ROCE, mlx5_ib_stage_roce_init, mlx5_ib_stage_roce_cleanup), @@@ -4992,6 -5150,48 +5143,48 @@@ NULL), };
+ static const struct mlx5_ib_profile nic_rep_profile = { + STAGE_CREATE(MLX5_IB_STAGE_INIT, + mlx5_ib_stage_init_init, + mlx5_ib_stage_init_cleanup), + STAGE_CREATE(MLX5_IB_STAGE_FLOW_DB, + mlx5_ib_stage_flow_db_init, + mlx5_ib_stage_flow_db_cleanup), + STAGE_CREATE(MLX5_IB_STAGE_CAPS, + mlx5_ib_stage_caps_init, + NULL), + STAGE_CREATE(MLX5_IB_STAGE_NON_DEFAULT_CB, + mlx5_ib_stage_rep_non_default_cb, + NULL), + STAGE_CREATE(MLX5_IB_STAGE_ROCE, + mlx5_ib_stage_rep_roce_init, + mlx5_ib_stage_rep_roce_cleanup), + STAGE_CREATE(MLX5_IB_STAGE_DEVICE_RESOURCES, + mlx5_ib_stage_dev_res_init, + mlx5_ib_stage_dev_res_cleanup), + STAGE_CREATE(MLX5_IB_STAGE_COUNTERS, + mlx5_ib_stage_counters_init, + mlx5_ib_stage_counters_cleanup), + STAGE_CREATE(MLX5_IB_STAGE_UAR, + mlx5_ib_stage_uar_init, + mlx5_ib_stage_uar_cleanup), + STAGE_CREATE(MLX5_IB_STAGE_BFREG, + mlx5_ib_stage_bfrag_init, + mlx5_ib_stage_bfrag_cleanup), + STAGE_CREATE(MLX5_IB_STAGE_IB_REG, + mlx5_ib_stage_ib_reg_init, + mlx5_ib_stage_ib_reg_cleanup), + STAGE_CREATE(MLX5_IB_STAGE_UMR_RESOURCES, + mlx5_ib_stage_umr_res_init, + mlx5_ib_stage_umr_res_cleanup), + STAGE_CREATE(MLX5_IB_STAGE_CLASS_ATTR, + mlx5_ib_stage_class_attr_init, + NULL), + STAGE_CREATE(MLX5_IB_STAGE_REP_REG, + mlx5_ib_stage_rep_reg_init, + mlx5_ib_stage_rep_reg_cleanup), + }; + static void *mlx5_ib_add_slave_port(struct mlx5_core_dev *mdev, u8 port_num) { struct mlx5_ib_multiport_info *mpi; @@@ -5037,8 -5237,11 +5230,11 @@@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev) { enum rdma_link_layer ll; + struct mlx5_ib_dev *dev; int port_type_cap;
+ printk_once(KERN_INFO "%s", mlx5_version); + port_type_cap = MLX5_CAP_GEN(mdev, port_type); ll = mlx5_port_type_cap_to_rdma_ll(port_type_cap);
@@@ -5048,7 -5251,22 +5244,22 @@@ return mlx5_ib_add_slave_port(mdev, port_num); }
- return __mlx5_ib_add(mdev, &pf_profile); + dev = (struct mlx5_ib_dev *)ib_alloc_device(sizeof(*dev)); + if (!dev) + return NULL; + + dev->mdev = mdev; + dev->num_ports = max(MLX5_CAP_GEN(mdev, num_ports), + MLX5_CAP_GEN(mdev, num_vhca_ports)); + + if (MLX5_VPORT_MANAGER(mdev) && + mlx5_ib_eswitch_mode(mdev->priv.eswitch) == SRIOV_OFFLOADS) { + dev->rep = mlx5_ib_vport_rep(mdev->priv.eswitch, 0); + + return __mlx5_ib_add(dev, &nic_rep_profile); + } + + return __mlx5_ib_add(dev, &pf_profile); }
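mlx5_ib_add() now allocates the IB device itself and selects a profile up front: nic_rep_profile when the eswitch runs in SRIOV_OFFLOADS (switchdev) mode, pf_profile otherwise. The profile machinery behind __mlx5_ib_add()/__mlx5_ib_remove() is an ordered table of init/cleanup pairs that is unwound in reverse on failure or removal; in generic form (illustrative names, not the mlx5 API):

struct stage {
	int  (*init)(void *ctx);
	void (*cleanup)(void *ctx);
};

/* Run init hooks in order; on failure, clean up only the stages that
 * completed, last first. Removal reuses the same reverse walk.
 */
static int run_profile(const struct stage *stages, int nstages, void *ctx)
{
	int i, err;

	for (i = 0; i < nstages; i++) {
		if (stages[i].init) {
			err = stages[i].init(ctx);
			if (err)
				goto unwind;
		}
	}

	return 0;

unwind:
	while (i--)
		if (stages[i].cleanup)
			stages[i].cleanup(ctx);

	return err;
}

Slicing the bring-up this finely is what lets the representor profile reuse most of the PF stages while swapping only the RoCE, callback, and registration stages.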
static void mlx5_ib_remove(struct mlx5_core_dev *mdev, void *context) diff --combined drivers/infiniband/hw/mlx5/mr.c index 1961c6a45437,a5fad3e87ff7..05c6905c82d2 --- a/drivers/infiniband/hw/mlx5/mr.c +++ b/drivers/infiniband/hw/mlx5/mr.c @@@ -587,7 -587,7 +587,7 @@@ static void clean_keys(struct mlx5_ib_d
static void mlx5_mr_cache_debugfs_cleanup(struct mlx5_ib_dev *dev) { - if (!mlx5_debugfs_root) + if (!mlx5_debugfs_root || dev->rep) return;
debugfs_remove_recursive(dev->cache.root); @@@ -600,7 -600,7 +600,7 @@@ static int mlx5_mr_cache_debugfs_init(s struct mlx5_cache_ent *ent; int i;
- if (!mlx5_debugfs_root) + if (!mlx5_debugfs_root || dev->rep) return 0;
cache->root = debugfs_create_dir("mr_cache", dev->mdev->priv.dbg_root); @@@ -690,6 -690,7 +690,7 @@@ int mlx5_mr_cache_init(struct mlx5_ib_d MLX5_IB_UMR_OCTOWORD; ent->access_mode = MLX5_MKC_ACCESS_MODE_MTT; if ((dev->mdev->profile->mask & MLX5_PROF_MASK_MR_CACHE) && + !dev->rep && mlx5_core_is_pf(dev->mdev)) ent->limit = dev->mdev->profile->mr_cache[i].limit; else @@@ -1816,6 -1817,7 +1817,6 @@@ mlx5_ib_sg_to_klms(struct mlx5_ib_mr *m
mr->ibmr.iova = sg_dma_address(sg) + sg_offset; mr->ibmr.length = 0; - mr->ndescs = sg_nents;
for_each_sg(sgl, sg, sg_nents, i) { if (unlikely(i >= mr->max_descs)) @@@ -1827,7 -1829,6 +1828,7 @@@
sg_offset = 0; } + mr->ndescs = i;
if (sg_offset_p) *sg_offset_p = sg_offset; diff --combined drivers/infiniband/hw/mlx5/qp.c index e8d7eaf0670c,5663530ea5fd..6e019230ab1a --- a/drivers/infiniband/hw/mlx5/qp.c +++ b/drivers/infiniband/hw/mlx5/qp.c @@@ -36,6 -36,7 +36,7 @@@ #include <rdma/ib_user_verbs.h> #include <linux/mlx5/fs.h> #include "mlx5_ib.h" + #include "ib_rep.h"
/* not supported currently */ static int wq_signature; @@@ -1082,6 -1083,13 +1083,13 @@@ static void destroy_raw_packet_qp_tis(s mlx5_core_destroy_tis(dev->mdev, sq->tisn); }
+ static void destroy_flow_rule_vport_sq(struct mlx5_ib_dev *dev, + struct mlx5_ib_sq *sq) + { + if (sq->flow_rule) + mlx5_del_flow_rules(sq->flow_rule); + } + static int create_raw_packet_qp_sq(struct mlx5_ib_dev *dev, struct mlx5_ib_sq *sq, void *qpin, struct ib_pd *pd) @@@ -1145,8 -1153,15 +1153,15 @@@ if (err) goto err_umem;
+ err = create_flow_rule_vport_sq(dev, sq); + if (err) + goto err_flow; + return 0;
+ err_flow: + mlx5_core_destroy_sq_tracked(dev->mdev, &sq->base.mqp); + err_umem: ib_umem_release(sq->ubuffer.umem); sq->ubuffer.umem = NULL; @@@ -1157,6 -1172,7 +1172,7 @@@ static void destroy_raw_packet_qp_sq(struct mlx5_ib_dev *dev, struct mlx5_ib_sq *sq) { + destroy_flow_rule_vport_sq(dev, sq); mlx5_core_destroy_sq_tracked(dev->mdev, &sq->base.mqp); ib_umem_release(sq->ubuffer.umem); } @@@ -1263,6 -1279,10 +1279,10 @@@ static int create_raw_packet_qp_tir(str if (tunnel_offload_en) MLX5_SET(tirc, tirc, tunneled_offload_en, 1);
+ if (dev->rep) + MLX5_SET(tirc, tirc, self_lb_block, + MLX5_TIRC_SELF_LB_BLOCK_BLOCK_UNICAST_); + err = mlx5_core_create_tir(dev->mdev, in, inlen, &rq->tirn);
kvfree(in); @@@ -1554,6 -1574,10 +1574,10 @@@ static int create_rss_raw_qp_tir(struc MLX5_SET(rx_hash_field_select, hfso, selected_fields, selected_fields);
create_tir: + if (dev->rep) + MLX5_SET(tirc, tirc, self_lb_block, + MLX5_TIRC_SELF_LB_BLOCK_BLOCK_UNICAST_); + err = mlx5_core_create_tir(dev->mdev, in, inlen, &qp->rss_qp.tirn);
if (err) @@@ -1584,7 -1608,6 +1608,7 @@@ static int create_qp_common(struct mlx5 u32 uidx = MLX5_IB_DEFAULT_UIDX; struct mlx5_ib_create_qp ucmd; struct mlx5_ib_qp_base *base; + int mlx5_st; void *qpc; u32 *in; int err; @@@ -1593,10 -1616,6 +1617,10 @@@ spin_lock_init(&qp->sq.lock); spin_lock_init(&qp->rq.lock);
+ mlx5_st = to_mlx5_st(init_attr->qp_type); + if (mlx5_st < 0) + return -EINVAL; + if (init_attr->rwq_ind_tbl) { if (!udata) return -ENOSYS; @@@ -1758,7 -1777,7 +1782,7 @@@
qpc = MLX5_ADDR_OF(create_qp_in, in, qpc);
- MLX5_SET(qpc, qpc, st, to_mlx5_st(init_attr->qp_type)); + MLX5_SET(qpc, qpc, st, mlx5_st); MLX5_SET(qpc, qpc, pm_state, MLX5_QP_PM_MIGRATED);
if (init_attr->qp_type != MLX5_IB_QPT_REG_UMR) diff --combined drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index 9fc063af233c,b032091022a8..85369423452d --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@@ -1888,14 -1888,6 +1888,14 @@@ static void ixgbe_dma_sync_frag(struct ixgbe_rx_pg_size(rx_ring), DMA_FROM_DEVICE, IXGBE_RX_DMA_ATTR); + } else if (ring_uses_build_skb(rx_ring)) { + unsigned long offset = (unsigned long)(skb->data) & ~PAGE_MASK; + + dma_sync_single_range_for_cpu(rx_ring->dev, + IXGBE_CB(skb)->dma, + offset, + skb_headlen(skb), + DMA_FROM_DEVICE); } else { struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
@@@ -7711,7 -7703,8 +7711,8 @@@ static void ixgbe_service_task(struct w
if (test_bit(__IXGBE_PTP_RUNNING, &adapter->state)) { ixgbe_ptp_overflow_check(adapter); - ixgbe_ptp_rx_hang(adapter); + if (adapter->flags & IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER) + ixgbe_ptp_rx_hang(adapter); ixgbe_ptp_tx_hang(adapter); }
diff --combined drivers/net/ethernet/mellanox/mlxsw/spectrum.c index c7e941aecc2a,7c6204f701ae..7884e8a2de35 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c @@@ -1,6 -1,6 +1,6 @@@ /* * drivers/net/ethernet/mellanox/mlxsw/spectrum.c - * Copyright (c) 2015-2017 Mellanox Technologies. All rights reserved. + * Copyright (c) 2015-2018 Mellanox Technologies. All rights reserved. * Copyright (c) 2015-2017 Jiri Pirko jiri@mellanox.com * Copyright (c) 2015 Ido Schimmel idosch@mellanox.com * Copyright (c) 2015 Elad Raz eladr@mellanox.com @@@ -71,6 -71,7 +71,7 @@@ #include "spectrum_cnt.h" #include "spectrum_dpipe.h" #include "spectrum_acl_flex_actions.h" + #include "spectrum_span.h" #include "../mlxfw/mlxfw.h"
#define MLXSW_FWREV_MAJOR 13 @@@ -487,327 -488,6 +488,6 @@@ static int mlxsw_sp_base_mac_get(struc return 0; }
- static int mlxsw_sp_span_init(struct mlxsw_sp *mlxsw_sp) - { - int i; - - if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_SPAN)) - return -EIO; - - mlxsw_sp->span.entries_count = MLXSW_CORE_RES_GET(mlxsw_sp->core, - MAX_SPAN); - mlxsw_sp->span.entries = kcalloc(mlxsw_sp->span.entries_count, - sizeof(struct mlxsw_sp_span_entry), - GFP_KERNEL); - if (!mlxsw_sp->span.entries) - return -ENOMEM; - - for (i = 0; i < mlxsw_sp->span.entries_count; i++) - INIT_LIST_HEAD(&mlxsw_sp->span.entries[i].bound_ports_list); - - return 0; - } - - static void mlxsw_sp_span_fini(struct mlxsw_sp *mlxsw_sp) - { - int i; - - for (i = 0; i < mlxsw_sp->span.entries_count; i++) { - struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span.entries[i]; - - WARN_ON_ONCE(!list_empty(&curr->bound_ports_list)); - } - kfree(mlxsw_sp->span.entries); - } - - static struct mlxsw_sp_span_entry * - mlxsw_sp_span_entry_create(struct mlxsw_sp_port *port) - { - struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp; - struct mlxsw_sp_span_entry *span_entry; - char mpat_pl[MLXSW_REG_MPAT_LEN]; - u8 local_port = port->local_port; - int index; - int i; - int err; - - /* find a free entry to use */ - index = -1; - for (i = 0; i < mlxsw_sp->span.entries_count; i++) { - if (!mlxsw_sp->span.entries[i].used) { - index = i; - span_entry = &mlxsw_sp->span.entries[i]; - break; - } - } - if (index < 0) - return NULL; - - /* create a new port analayzer entry for local_port */ - mlxsw_reg_mpat_pack(mpat_pl, index, local_port, true); - err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpat), mpat_pl); - if (err) - return NULL; - - span_entry->used = true; - span_entry->id = index; - span_entry->ref_count = 1; - span_entry->local_port = local_port; - return span_entry; - } - - static void mlxsw_sp_span_entry_destroy(struct mlxsw_sp *mlxsw_sp, - struct mlxsw_sp_span_entry *span_entry) - { - u8 local_port = span_entry->local_port; - char mpat_pl[MLXSW_REG_MPAT_LEN]; - int pa_id = span_entry->id; - - mlxsw_reg_mpat_pack(mpat_pl, pa_id, local_port, false); - mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpat), mpat_pl); - span_entry->used = false; - } - - struct mlxsw_sp_span_entry * - mlxsw_sp_span_entry_find(struct mlxsw_sp *mlxsw_sp, u8 local_port) - { - int i; - - for (i = 0; i < mlxsw_sp->span.entries_count; i++) { - struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span.entries[i]; - - if (curr->used && curr->local_port == local_port) - return curr; - } - return NULL; - } - - static struct mlxsw_sp_span_entry - *mlxsw_sp_span_entry_get(struct mlxsw_sp_port *port) - { - struct mlxsw_sp_span_entry *span_entry; - - span_entry = mlxsw_sp_span_entry_find(port->mlxsw_sp, - port->local_port); - if (span_entry) { - /* Already exists, just take a reference */ - span_entry->ref_count++; - return span_entry; - } - - return mlxsw_sp_span_entry_create(port); - } - - static int mlxsw_sp_span_entry_put(struct mlxsw_sp *mlxsw_sp, - struct mlxsw_sp_span_entry *span_entry) - { - WARN_ON(!span_entry->ref_count); - if (--span_entry->ref_count == 0) - mlxsw_sp_span_entry_destroy(mlxsw_sp, span_entry); - return 0; - } - - static bool mlxsw_sp_span_is_egress_mirror(struct mlxsw_sp_port *port) - { - struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp; - struct mlxsw_sp_span_inspected_port *p; - int i; - - for (i = 0; i < mlxsw_sp->span.entries_count; i++) { - struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span.entries[i]; - - list_for_each_entry(p, &curr->bound_ports_list, list) - if (p->local_port == port->local_port && - p->type == MLXSW_SP_SPAN_EGRESS) - return true; - } - - return false; - } - - static 
int mlxsw_sp_span_mtu_to_buffsize(const struct mlxsw_sp *mlxsw_sp, - int mtu) - { - return mlxsw_sp_bytes_cells(mlxsw_sp, mtu * 5 / 2) + 1; - } - - static int mlxsw_sp_span_port_mtu_update(struct mlxsw_sp_port *port, u16 mtu) - { - struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp; - char sbib_pl[MLXSW_REG_SBIB_LEN]; - int err; - - /* If port is egress mirrored, the shared buffer size should be - * updated according to the mtu value - */ - if (mlxsw_sp_span_is_egress_mirror(port)) { - u32 buffsize = mlxsw_sp_span_mtu_to_buffsize(mlxsw_sp, mtu); - - mlxsw_reg_sbib_pack(sbib_pl, port->local_port, buffsize); - err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl); - if (err) { - netdev_err(port->dev, "Could not update shared buffer for mirroring\n"); - return err; - } - } - - return 0; - } - - static struct mlxsw_sp_span_inspected_port * - mlxsw_sp_span_entry_bound_port_find(struct mlxsw_sp_port *port, - struct mlxsw_sp_span_entry *span_entry) - { - struct mlxsw_sp_span_inspected_port *p; - - list_for_each_entry(p, &span_entry->bound_ports_list, list) - if (port->local_port == p->local_port) - return p; - return NULL; - } - - static int - mlxsw_sp_span_inspected_port_bind(struct mlxsw_sp_port *port, - struct mlxsw_sp_span_entry *span_entry, - enum mlxsw_sp_span_type type, - bool bind) - { - struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp; - char mpar_pl[MLXSW_REG_MPAR_LEN]; - int pa_id = span_entry->id; - - /* bind the port to the SPAN entry */ - mlxsw_reg_mpar_pack(mpar_pl, port->local_port, - (enum mlxsw_reg_mpar_i_e) type, bind, pa_id); - return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpar), mpar_pl); - } - - static int - mlxsw_sp_span_inspected_port_add(struct mlxsw_sp_port *port, - struct mlxsw_sp_span_entry *span_entry, - enum mlxsw_sp_span_type type, - bool bind) - { - struct mlxsw_sp_span_inspected_port *inspected_port; - struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp; - char sbib_pl[MLXSW_REG_SBIB_LEN]; - int err; - - /* if it is an egress SPAN, bind a shared buffer to it */ - if (type == MLXSW_SP_SPAN_EGRESS) { - u32 buffsize = mlxsw_sp_span_mtu_to_buffsize(mlxsw_sp, - port->dev->mtu); - - mlxsw_reg_sbib_pack(sbib_pl, port->local_port, buffsize); - err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl); - if (err) { - netdev_err(port->dev, "Could not create shared buffer for mirroring\n"); - return err; - } - } - - if (bind) { - err = mlxsw_sp_span_inspected_port_bind(port, span_entry, type, - true); - if (err) - goto err_port_bind; - } - - inspected_port = kzalloc(sizeof(*inspected_port), GFP_KERNEL); - if (!inspected_port) { - err = -ENOMEM; - goto err_inspected_port_alloc; - } - inspected_port->local_port = port->local_port; - inspected_port->type = type; - list_add_tail(&inspected_port->list, &span_entry->bound_ports_list); - - return 0; - - err_inspected_port_alloc: - if (bind) - mlxsw_sp_span_inspected_port_bind(port, span_entry, type, - false); - err_port_bind: - if (type == MLXSW_SP_SPAN_EGRESS) { - mlxsw_reg_sbib_pack(sbib_pl, port->local_port, 0); - mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl); - } - return err; - } - - static void - mlxsw_sp_span_inspected_port_del(struct mlxsw_sp_port *port, - struct mlxsw_sp_span_entry *span_entry, - enum mlxsw_sp_span_type type, - bool bind) - { - struct mlxsw_sp_span_inspected_port *inspected_port; - struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp; - char sbib_pl[MLXSW_REG_SBIB_LEN]; - - inspected_port = mlxsw_sp_span_entry_bound_port_find(port, span_entry); - if (!inspected_port) - return; - - if (bind) - 
mlxsw_sp_span_inspected_port_bind(port, span_entry, type, - false); - /* remove the SBIB buffer if it was egress SPAN */ - if (type == MLXSW_SP_SPAN_EGRESS) { - mlxsw_reg_sbib_pack(sbib_pl, port->local_port, 0); - mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl); - } - - mlxsw_sp_span_entry_put(mlxsw_sp, span_entry); - - list_del(&inspected_port->list); - kfree(inspected_port); - } - - int mlxsw_sp_span_mirror_add(struct mlxsw_sp_port *from, - struct mlxsw_sp_port *to, - enum mlxsw_sp_span_type type, bool bind) - { - struct mlxsw_sp *mlxsw_sp = from->mlxsw_sp; - struct mlxsw_sp_span_entry *span_entry; - int err; - - span_entry = mlxsw_sp_span_entry_get(to); - if (!span_entry) - return -ENOENT; - - netdev_dbg(from->dev, "Adding inspected port to SPAN entry %d\n", - span_entry->id); - - err = mlxsw_sp_span_inspected_port_add(from, span_entry, type, bind); - if (err) - goto err_port_bind; - - return 0; - - err_port_bind: - mlxsw_sp_span_entry_put(mlxsw_sp, span_entry); - return err; - } - - void mlxsw_sp_span_mirror_del(struct mlxsw_sp_port *from, u8 destination_port, - enum mlxsw_sp_span_type type, bool bind) - { - struct mlxsw_sp_span_entry *span_entry; - - span_entry = mlxsw_sp_span_entry_find(from->mlxsw_sp, - destination_port); - if (!span_entry) { - netdev_err(from->dev, "no span entry found\n"); - return; - } - - netdev_dbg(from->dev, "removing inspected port from SPAN entry %d\n", - span_entry->id); - mlxsw_sp_span_inspected_port_del(from, span_entry, type, bind); - } - static int mlxsw_sp_port_sample_set(struct mlxsw_sp_port *mlxsw_sp_port, bool enable, u32 rate) { @@@ -1360,6 -1040,16 +1040,16 @@@ mlxsw_sp_port_get_hw_xstats(struct net_ xstats->tail_drop[i] = mlxsw_reg_ppcnt_tc_no_buffer_discard_uc_get(ppcnt_pl); } + + for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { + err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_PRIO_CNT, + i, ppcnt_pl); + if (err) + continue; + + xstats->tx_packets[i] = mlxsw_reg_ppcnt_tx_frames_get(ppcnt_pl); + xstats->tx_bytes[i] = mlxsw_reg_ppcnt_tx_octets_get(ppcnt_pl); + } }
static void update_stats_cache(struct work_struct *work) @@@ -1459,7 -1149,6 +1149,7 @@@ mlxsw_sp_port_vlan_create(struct mlxsw_ }
mlxsw_sp_port_vlan->mlxsw_sp_port = mlxsw_sp_port; + mlxsw_sp_port_vlan->ref_count = 1; mlxsw_sp_port_vlan->vid = vid; list_add(&mlxsw_sp_port_vlan->list, &mlxsw_sp_port->vlans_list);
@@@ -1487,10 -1176,8 +1177,10 @@@ mlxsw_sp_port_vlan_get(struct mlxsw_sp_ struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid); - if (mlxsw_sp_port_vlan) + if (mlxsw_sp_port_vlan) { + mlxsw_sp_port_vlan->ref_count++; return mlxsw_sp_port_vlan; + }
return mlxsw_sp_port_vlan_create(mlxsw_sp_port, vid); } @@@ -1499,9 -1186,6 +1189,9 @@@ void mlxsw_sp_port_vlan_put(struct mlxs { struct mlxsw_sp_fid *fid = mlxsw_sp_port_vlan->fid;
+ if (--mlxsw_sp_port_vlan->ref_count != 0) + return; + if (mlxsw_sp_port_vlan->bridge_port) mlxsw_sp_port_vlan_bridge_leave(mlxsw_sp_port_vlan); else if (fid) @@@ -1584,7 -1268,6 +1274,6 @@@ mlxsw_sp_port_add_cls_matchall_mirror(s bool ingress) { enum mlxsw_sp_span_type span_type; - struct mlxsw_sp_port *to_port; struct net_device *to_dev;
to_dev = tcf_mirred_dev(a); @@@ -1593,17 -1276,10 +1282,10 @@@ return -EINVAL; }
- if (!mlxsw_sp_port_dev_check(to_dev)) { - netdev_err(mlxsw_sp_port->dev, "Cannot mirror to a non-spectrum port"); - return -EOPNOTSUPP; - } - to_port = netdev_priv(to_dev); - - mirror->to_local_port = to_port->local_port; mirror->ingress = ingress; span_type = ingress ? MLXSW_SP_SPAN_INGRESS : MLXSW_SP_SPAN_EGRESS; - return mlxsw_sp_span_mirror_add(mlxsw_sp_port, to_port, span_type, - true); + return mlxsw_sp_span_mirror_add(mlxsw_sp_port, to_dev, span_type, + true, &mirror->span_id); }
static void @@@ -1614,7 -1290,7 +1296,7 @@@ mlxsw_sp_port_del_cls_matchall_mirror(s
span_type = mirror->ingress ? MLXSW_SP_SPAN_INGRESS : MLXSW_SP_SPAN_EGRESS; - mlxsw_sp_span_mirror_del(mlxsw_sp_port, mirror->to_local_port, + mlxsw_sp_span_mirror_del(mlxsw_sp_port, mirror->span_id, span_type, true); }
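Editor's note: stepping back to the mlxsw_sp_port_vlan hunks above the mirror changes, they add plain reference counting: _get() reuses an existing entry and bumps ref_count, _create() starts it at 1, and _put() only tears the entry down when the count drops to zero. A freestanding model of the idiom (all names invented; the count is non-atomic on the assumption that, as in the driver, callers are serialized by a common lock):

    #include <stdlib.h>

    struct vlan_entry {
            unsigned int ref_count;
            unsigned short vid;
    };

    /* return the existing entry for this slot or create one; either way
     * the caller owns one reference and must balance it with vlan_put() */
    static struct vlan_entry *vlan_get(struct vlan_entry **slot,
                                       unsigned short vid)
    {
            if (*slot) {
                    (*slot)->ref_count++;
                    return *slot;
            }
            *slot = calloc(1, sizeof(**slot));
            if (!*slot)
                    return NULL;
            (*slot)->ref_count = 1;
            (*slot)->vid = vid;
            return *slot;
    }

    static void vlan_put(struct vlan_entry **slot)
    {
            if (--(*slot)->ref_count != 0)
                    return;                 /* other users remain */
            free(*slot);                    /* last reference: destroy */
            *slot = NULL;
    }

    int main(void)
    {
            struct vlan_entry *slot = NULL;

            vlan_get(&slot, 1);             /* created, ref_count == 1 */
            vlan_get(&slot, 1);             /* reused,  ref_count == 2 */
            vlan_put(&slot);                /* entry survives */
            vlan_put(&slot);                /* entry freed */
            return 0;
    }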
@@@ -4001,14 -3677,24 +3683,24 @@@ static int mlxsw_sp_init(struct mlxsw_c goto err_afa_init; }
+ err = mlxsw_sp_span_init(mlxsw_sp); + if (err) { + dev_err(mlxsw_sp->bus_info->dev, "Failed to init span system\n"); + goto err_span_init; + } + + /* Initialize router after SPAN is initialized, so that the FIB and + * neighbor event handlers can issue SPAN respin. + */ err = mlxsw_sp_router_init(mlxsw_sp); if (err) { dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize router\n"); goto err_router_init; }
- /* Initialize netdevice notifier after router is initialized, so that - * the event handler can use router structures. + /* Initialize netdevice notifier after router and SPAN is initialized, + * so that the event handler can use router structures and call SPAN + * respin. */ mlxsw_sp->netdevice_nb.notifier_call = mlxsw_sp_netdevice_event; err = register_netdevice_notifier(&mlxsw_sp->netdevice_nb); @@@ -4017,12 -3703,6 +3709,6 @@@ goto err_netdev_notifier; }
- err = mlxsw_sp_span_init(mlxsw_sp); - if (err) { - dev_err(mlxsw_sp->bus_info->dev, "Failed to init span system\n"); - goto err_span_init; - } - err = mlxsw_sp_acl_init(mlxsw_sp); if (err) { dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize ACL\n"); @@@ -4048,12 -3728,12 +3734,12 @@@ err_ports_create err_dpipe_init: mlxsw_sp_acl_fini(mlxsw_sp); err_acl_init: - mlxsw_sp_span_fini(mlxsw_sp); - err_span_init: unregister_netdevice_notifier(&mlxsw_sp->netdevice_nb); err_netdev_notifier: mlxsw_sp_router_fini(mlxsw_sp); err_router_init: + mlxsw_sp_span_fini(mlxsw_sp); + err_span_init: mlxsw_sp_afa_fini(mlxsw_sp); err_afa_init: mlxsw_sp_counter_pool_fini(mlxsw_sp); @@@ -4079,9 -3759,9 +3765,9 @@@ static void mlxsw_sp_fini(struct mlxsw_ mlxsw_sp_ports_remove(mlxsw_sp); mlxsw_sp_dpipe_fini(mlxsw_sp); mlxsw_sp_acl_fini(mlxsw_sp); - mlxsw_sp_span_fini(mlxsw_sp); unregister_netdevice_notifier(&mlxsw_sp->netdevice_nb); mlxsw_sp_router_fini(mlxsw_sp); + mlxsw_sp_span_fini(mlxsw_sp); mlxsw_sp_afa_fini(mlxsw_sp); mlxsw_sp_counter_pool_fini(mlxsw_sp); mlxsw_sp_switchdev_fini(mlxsw_sp); @@@ -4124,70 -3804,6 +3810,6 @@@ static const struct mlxsw_config_profil .resource_query_enable = 1, };
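Editor's note: the init/fini reshuffle above is the classic goto-unwind ladder kept symmetric: SPAN now comes up before the router (so FIB and neighbour handlers can trigger a SPAN respin), and both the error path and mlxsw_sp_fini() are reordered to remain the exact mirror of the init sequence. The pattern in isolation, with placeholder subsystems:

    static int span_init(void)      { return 0; }
    static void span_fini(void)     { }
    static int router_init(void)    { return 0; }
    static void router_fini(void)   { }
    static int notifier_init(void)  { return 0; }
    static void notifier_fini(void) { }

    static int core_init(void)
    {
            int err;

            err = span_init();      /* first: router events may use SPAN */
            if (err)
                    goto err_span_init;
            err = router_init();
            if (err)
                    goto err_router_init;
            err = notifier_init();  /* last: uses both router and SPAN */
            if (err)
                    goto err_notifier_init;
            return 0;

    err_notifier_init:
            router_fini();
    err_router_init:
            span_fini();
    err_span_init:
            return err;
    }

    static void core_fini(void)
    {
            notifier_fini();        /* exact reverse of core_init() */
            router_fini();
            span_fini();
    }

    int main(void)
    {
            int err = core_init();

            if (!err)
                    core_fini();
            return err;
    }

Keeping the three orders in lockstep is what the hunks above restore; letting the unwind drift out of sync with init order is a standard source of use-after-free on partially torn-down state.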
- static bool - mlxsw_sp_resource_kvd_granularity_validate(struct netlink_ext_ack *extack, - u64 size) - { - const struct mlxsw_config_profile *profile; - - profile = &mlxsw_sp_config_profile; - if (size % profile->kvd_hash_granularity) { - NL_SET_ERR_MSG_MOD(extack, "resource set with wrong granularity"); - return false; - } - return true; - } - - static int - mlxsw_sp_resource_kvd_size_validate(struct devlink *devlink, u64 size, - struct netlink_ext_ack *extack) - { - NL_SET_ERR_MSG_MOD(extack, "kvd size cannot be changed"); - return -EINVAL; - } - - static int - mlxsw_sp_resource_kvd_linear_size_validate(struct devlink *devlink, u64 size, - struct netlink_ext_ack *extack) - { - if (!mlxsw_sp_resource_kvd_granularity_validate(extack, size)) - return -EINVAL; - - return 0; - } - - static int - mlxsw_sp_resource_kvd_hash_single_size_validate(struct devlink *devlink, u64 size, - struct netlink_ext_ack *extack) - { - struct mlxsw_core *mlxsw_core = devlink_priv(devlink); - - if (!mlxsw_sp_resource_kvd_granularity_validate(extack, size)) - return -EINVAL; - - if (size < MLXSW_CORE_RES_GET(mlxsw_core, KVD_SINGLE_MIN_SIZE)) { - NL_SET_ERR_MSG_MOD(extack, "hash single size is smaller than minimum"); - return -EINVAL; - } - return 0; - } - - static int - mlxsw_sp_resource_kvd_hash_double_size_validate(struct devlink *devlink, u64 size, - struct netlink_ext_ack *extack) - { - struct mlxsw_core *mlxsw_core = devlink_priv(devlink); - - if (!mlxsw_sp_resource_kvd_granularity_validate(extack, size)) - return -EINVAL; - - if (size < MLXSW_CORE_RES_GET(mlxsw_core, KVD_DOUBLE_MIN_SIZE)) { - NL_SET_ERR_MSG_MOD(extack, "hash double size is smaller than minimum"); - return -EINVAL; - } - return 0; - } - static u64 mlxsw_sp_resource_kvd_linear_occ_get(struct devlink *devlink) { struct mlxsw_core *mlxsw_core = devlink_priv(devlink); @@@ -4196,29 -3812,17 +3818,16 @@@ return mlxsw_sp_kvdl_occ_get(mlxsw_sp); }
- static struct devlink_resource_ops mlxsw_sp_resource_kvd_ops = { - .size_validate = mlxsw_sp_resource_kvd_size_validate, - }; - static struct devlink_resource_ops mlxsw_sp_resource_kvd_linear_ops = { - .size_validate = mlxsw_sp_resource_kvd_linear_size_validate, .occ_get = mlxsw_sp_resource_kvd_linear_occ_get, };
- static struct devlink_resource_ops mlxsw_sp_resource_kvd_hash_single_ops = { - .size_validate = mlxsw_sp_resource_kvd_hash_single_size_validate, - }; - - static struct devlink_resource_ops mlxsw_sp_resource_kvd_hash_double_ops = { - .size_validate = mlxsw_sp_resource_kvd_hash_double_size_validate, - }; -static struct devlink_resource_size_params mlxsw_sp_kvd_size_params; -static struct devlink_resource_size_params mlxsw_sp_linear_size_params; -static struct devlink_resource_size_params mlxsw_sp_hash_single_size_params; -static struct devlink_resource_size_params mlxsw_sp_hash_double_size_params; -- static void -mlxsw_sp_resource_size_params_prepare(struct mlxsw_core *mlxsw_core) +mlxsw_sp_resource_size_params_prepare(struct mlxsw_core *mlxsw_core, + struct devlink_resource_size_params *kvd_size_params, + struct devlink_resource_size_params *linear_size_params, + struct devlink_resource_size_params *hash_double_size_params, + struct devlink_resource_size_params *hash_single_size_params) { u32 single_size_min = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SINGLE_MIN_SIZE); @@@ -4227,35 -3831,37 +3836,35 @@@ u32 kvd_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE); u32 linear_size_min = 0;
- /* KVD top resource */ - mlxsw_sp_kvd_size_params.size_min = kvd_size; - mlxsw_sp_kvd_size_params.size_max = kvd_size; - mlxsw_sp_kvd_size_params.size_granularity = MLXSW_SP_KVD_GRANULARITY; - mlxsw_sp_kvd_size_params.unit = DEVLINK_RESOURCE_UNIT_ENTRY; - - /* Linear part init */ - mlxsw_sp_linear_size_params.size_min = linear_size_min; - mlxsw_sp_linear_size_params.size_max = kvd_size - single_size_min - - double_size_min; - mlxsw_sp_linear_size_params.size_granularity = MLXSW_SP_KVD_GRANULARITY; - mlxsw_sp_linear_size_params.unit = DEVLINK_RESOURCE_UNIT_ENTRY; - - /* Hash double part init */ - mlxsw_sp_hash_double_size_params.size_min = double_size_min; - mlxsw_sp_hash_double_size_params.size_max = kvd_size - single_size_min - - linear_size_min; - mlxsw_sp_hash_double_size_params.size_granularity = MLXSW_SP_KVD_GRANULARITY; - mlxsw_sp_hash_double_size_params.unit = DEVLINK_RESOURCE_UNIT_ENTRY; - - /* Hash single part init */ - mlxsw_sp_hash_single_size_params.size_min = single_size_min; - mlxsw_sp_hash_single_size_params.size_max = kvd_size - double_size_min - - linear_size_min; - mlxsw_sp_hash_single_size_params.size_granularity = MLXSW_SP_KVD_GRANULARITY; - mlxsw_sp_hash_single_size_params.unit = DEVLINK_RESOURCE_UNIT_ENTRY; + devlink_resource_size_params_init(kvd_size_params, kvd_size, kvd_size, + MLXSW_SP_KVD_GRANULARITY, + DEVLINK_RESOURCE_UNIT_ENTRY); + devlink_resource_size_params_init(linear_size_params, linear_size_min, + kvd_size - single_size_min - + double_size_min, + MLXSW_SP_KVD_GRANULARITY, + DEVLINK_RESOURCE_UNIT_ENTRY); + devlink_resource_size_params_init(hash_double_size_params, + double_size_min, + kvd_size - single_size_min - + linear_size_min, + MLXSW_SP_KVD_GRANULARITY, + DEVLINK_RESOURCE_UNIT_ENTRY); + devlink_resource_size_params_init(hash_single_size_params, + single_size_min, + kvd_size - double_size_min - + linear_size_min, + MLXSW_SP_KVD_GRANULARITY, + DEVLINK_RESOURCE_UNIT_ENTRY); }
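Editor's note: replacing the four file-scope devlink_resource_size_params blobs with on-stack structs works because, per the devlink.h hunk further down in this diff, struct devlink_resource now embeds size_params by value and devlink_resource_register() takes it const, so devlink copies the caller's struct and the stack copy may die right after registration. A reduced model of that ownership change (all names invented):

    struct size_params {
            unsigned long long size_min;
            unsigned long long size_max;
            unsigned long long size_granularity;
    };

    struct resource {
            struct size_params size_params; /* embedded copy, not a pointer */
    };

    static void size_params_init(struct size_params *p,
                                 unsigned long long min,
                                 unsigned long long max,
                                 unsigned long long gran)
    {
            p->size_min = min;
            p->size_max = max;
            p->size_granularity = gran;
    }

    static void resource_register(struct resource *res,
                                  const struct size_params *p)
    {
            res->size_params = *p;  /* safe even if *p was on the caller's stack */
    }

    int main(void)
    {
            struct size_params params;      /* stack lifetime, as in the driver */
            struct resource res;

            size_params_init(&params, 0, 1ULL << 20, 128);
            resource_register(&res, &params);
            return 0;
    }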
static int mlxsw_sp_resources_register(struct mlxsw_core *mlxsw_core) { struct devlink *devlink = priv_to_devlink(mlxsw_core); + struct devlink_resource_size_params hash_single_size_params; + struct devlink_resource_size_params hash_double_size_params; + struct devlink_resource_size_params linear_size_params; + struct devlink_resource_size_params kvd_size_params; u32 kvd_size, single_size, double_size, linear_size; const struct mlxsw_config_profile *profile; int err; @@@ -4264,18 -3870,14 +3873,18 @@@ if (!MLXSW_CORE_RES_VALID(mlxsw_core, KVD_SIZE)) return -EIO;
- mlxsw_sp_resource_size_params_prepare(mlxsw_core); + mlxsw_sp_resource_size_params_prepare(mlxsw_core, &kvd_size_params, + &linear_size_params, + &hash_double_size_params, + &hash_single_size_params); + kvd_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE); err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD, true, kvd_size, MLXSW_SP_RESOURCE_KVD, DEVLINK_RESOURCE_ID_PARENT_TOP, - &mlxsw_sp_kvd_size_params, + &kvd_size_params, - &mlxsw_sp_resource_kvd_ops); + NULL); if (err) return err;
@@@ -4284,11 -3886,15 +3893,15 @@@ false, linear_size, MLXSW_SP_RESOURCE_KVD_LINEAR, MLXSW_SP_RESOURCE_KVD, - &mlxsw_sp_linear_size_params, + &linear_size_params, &mlxsw_sp_resource_kvd_linear_ops); if (err) return err;
+ err = mlxsw_sp_kvdl_resources_register(devlink); + if (err) + return err; + double_size = kvd_size - linear_size; double_size *= profile->kvd_hash_double_parts; double_size /= profile->kvd_hash_double_parts + @@@ -4298,8 -3904,8 +3911,8 @@@ false, double_size, MLXSW_SP_RESOURCE_KVD_HASH_DOUBLE, MLXSW_SP_RESOURCE_KVD, - &mlxsw_sp_hash_double_size_params, + &hash_double_size_params, - &mlxsw_sp_resource_kvd_hash_double_ops); + NULL); if (err) return err;
@@@ -4308,8 -3914,8 +3921,8 @@@ false, single_size, MLXSW_SP_RESOURCE_KVD_HASH_SINGLE, MLXSW_SP_RESOURCE_KVD, - &mlxsw_sp_hash_single_size_params, + &hash_single_size_params, - &mlxsw_sp_resource_kvd_hash_single_ops); + NULL); if (err) return err;
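Editor's note: with the per-driver size_validate callbacks gone (validation moved into devlink core, judging by the NULL ops passed here) and the kvdl sub-resources registered, the whole KVD tree becomes visible to userspace. Assuming the devlink utility of the same era, inspecting and resizing it would look roughly like this; the PCI address and size are invented for illustration, and a resize only takes effect after a reload:

    $ devlink resource show pci/0000:03:00.0
    $ devlink resource set pci/0000:03:00.0 path /kvd/linear size 98304
    $ devlink dev reload pci/0000:03:00.0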
@@@ -4563,13 -4169,11 +4176,11 @@@ mlxsw_sp_master_lag_check(struct mlxsw_ u16 lag_id;
if (mlxsw_sp_lag_index_get(mlxsw_sp, lag_dev, &lag_id) != 0) { - NL_SET_ERR_MSG(extack, - "spectrum: Exceeded number of supported LAG devices"); + NL_SET_ERR_MSG_MOD(extack, "Exceeded number of supported LAG devices"); return false; } if (lag_upper_info->tx_type != NETDEV_LAG_TX_TYPE_HASH) { - NL_SET_ERR_MSG(extack, - "spectrum: LAG device using unsupported Tx type"); + NL_SET_ERR_MSG_MOD(extack, "LAG device using unsupported Tx type"); return false; } return true; @@@ -4811,8 -4415,7 +4422,7 @@@ static int mlxsw_sp_netdevice_port_uppe !netif_is_lag_master(upper_dev) && !netif_is_bridge_master(upper_dev) && !netif_is_ovs_master(upper_dev)) { - NL_SET_ERR_MSG(extack, - "spectrum: Unknown upper device type"); + NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type"); return -EINVAL; } if (!info->linking) @@@ -4821,8 -4424,7 +4431,7 @@@ (!netif_is_bridge_master(upper_dev) || !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp, upper_dev))) { - NL_SET_ERR_MSG(extack, - "spectrum: Enslaving a port to a device that already has an upper device is not supported"); + NL_SET_ERR_MSG_MOD(extack, "Enslaving a port to a device that already has an upper device is not supported"); return -EINVAL; } if (netif_is_lag_master(upper_dev) && @@@ -4830,24 -4432,20 +4439,20 @@@ info->upper_info, extack)) return -EINVAL; if (netif_is_lag_master(upper_dev) && vlan_uses_dev(dev)) { - NL_SET_ERR_MSG(extack, - "spectrum: Master device is a LAG master and this device has a VLAN"); + NL_SET_ERR_MSG_MOD(extack, "Master device is a LAG master and this device has a VLAN"); return -EINVAL; } if (netif_is_lag_port(dev) && is_vlan_dev(upper_dev) && !netif_is_lag_master(vlan_dev_real_dev(upper_dev))) { - NL_SET_ERR_MSG(extack, - "spectrum: Can not put a VLAN on a LAG port"); + NL_SET_ERR_MSG_MOD(extack, "Can not put a VLAN on a LAG port"); return -EINVAL; } if (netif_is_ovs_master(upper_dev) && vlan_uses_dev(dev)) { - NL_SET_ERR_MSG(extack, - "spectrum: Master device is an OVS master and this device has a VLAN"); + NL_SET_ERR_MSG_MOD(extack, "Master device is an OVS master and this device has a VLAN"); return -EINVAL; } if (netif_is_ovs_port(dev) && is_vlan_dev(upper_dev)) { - NL_SET_ERR_MSG(extack, - "spectrum: Can not put a VLAN on an OVS port"); + NL_SET_ERR_MSG_MOD(extack, "Can not put a VLAN on an OVS port"); return -EINVAL; } break; @@@ -4960,7 -4558,7 +4565,7 @@@ static int mlxsw_sp_netdevice_port_vlan case NETDEV_PRECHANGEUPPER: upper_dev = info->upper_dev; if (!netif_is_bridge_master(upper_dev)) { - NL_SET_ERR_MSG(extack, "spectrum: VLAN devices only support bridge and VRF uppers"); + NL_SET_ERR_MSG_MOD(extack, "VLAN devices only support bridge and VRF uppers"); return -EINVAL; } if (!info->linking) @@@ -4969,7 -4567,7 +4574,7 @@@ (!netif_is_bridge_master(upper_dev) || !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp, upper_dev))) { - NL_SET_ERR_MSG(extack, "spectrum: Enslaving a port to a device that already has an upper device is not supported"); + NL_SET_ERR_MSG_MOD(extack, "Enslaving a port to a device that already has an upper device is not supported"); return -EINVAL; } break; @@@ -5047,10 -4645,18 +4652,18 @@@ static int mlxsw_sp_netdevice_event(str unsigned long event, void *ptr) { struct net_device *dev = netdev_notifier_info_to_dev(ptr); + struct mlxsw_sp_span_entry *span_entry; struct mlxsw_sp *mlxsw_sp; int err = 0;
mlxsw_sp = container_of(nb, struct mlxsw_sp, netdevice_nb); + if (event == NETDEV_UNREGISTER) { + span_entry = mlxsw_sp_span_entry_find_by_port(mlxsw_sp, dev); + if (span_entry) + mlxsw_sp_span_entry_invalidate(mlxsw_sp, span_entry); + } + mlxsw_sp_span_respin(mlxsw_sp); + if (mlxsw_sp_netdev_is_ipip_ol(mlxsw_sp, dev)) err = mlxsw_sp_netdevice_ipip_ol_event(mlxsw_sp, dev, event, ptr); diff --combined drivers/net/ethernet/mellanox/mlxsw/spectrum.h index 4ec1ca3c96c8,d5e711d8ad71..92194a9b2caf --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h @@@ -70,16 -70,23 +70,23 @@@ #define MLXSW_SP_RESOURCE_NAME_KVD_LINEAR "linear" #define MLXSW_SP_RESOURCE_NAME_KVD_HASH_SINGLE "hash_single" #define MLXSW_SP_RESOURCE_NAME_KVD_HASH_DOUBLE "hash_double" + #define MLXSW_SP_RESOURCE_NAME_KVD_LINEAR_SINGLES "singles" + #define MLXSW_SP_RESOURCE_NAME_KVD_LINEAR_CHUNKS "chunks" + #define MLXSW_SP_RESOURCE_NAME_KVD_LINEAR_LARGE_CHUNKS "large_chunks"
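Editor's note: the netdevice event handler at the top of this hunk now does two things on every notification: on NETDEV_UNREGISTER it invalidates any SPAN entry that mirrors to the disappearing device, and it then calls mlxsw_sp_span_respin() unconditionally so entries can re-resolve against the new state. A condensed standalone model (the entry table and all names are invented):

    #include <stddef.h>

    struct span_entry {
            const void *to_dev;     /* mirror target; may be unregistered */
            int active;             /* offloaded in hardware right now */
    };

    #define N_SPAN 8
    static struct span_entry entries[N_SPAN];

    static void span_entry_invalidate(struct span_entry *e)
    {
            e->to_dev = NULL;       /* drop the target before it is freed */
            e->active = 0;
    }

    /* walk all entries and bring hardware state in line with them */
    static void span_respin(void)
    {
            int i;

            for (i = 0; i < N_SPAN; i++)
                    entries[i].active = entries[i].to_dev != NULL;
    }

    enum { NETDEV_UNREGISTER = 1 };

    static int netdevice_event(unsigned long event, const void *dev)
    {
            int i;

            if (event == NETDEV_UNREGISTER)
                    for (i = 0; i < N_SPAN; i++)
                            if (entries[i].to_dev == dev)
                                    span_entry_invalidate(&entries[i]);
            span_respin();          /* runs on every event, as in the driver */
            return 0;
    }

    int main(void)
    {
            entries[0].to_dev = &entries;   /* pretend device */
            span_respin();
            netdevice_event(NETDEV_UNREGISTER, &entries);
            return entries[0].active;       /* 0: invalidated */
    }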
enum mlxsw_sp_resource_id { MLXSW_SP_RESOURCE_KVD, MLXSW_SP_RESOURCE_KVD_LINEAR, MLXSW_SP_RESOURCE_KVD_HASH_SINGLE, MLXSW_SP_RESOURCE_KVD_HASH_DOUBLE, + MLXSW_SP_RESOURCE_KVD_LINEAR_SINGLE, + MLXSW_SP_RESOURCE_KVD_LINEAR_CHUNKS, + MLXSW_SP_RESOURCE_KVD_LINEAR_LARGE_CHUNKS, };
struct mlxsw_sp_port; struct mlxsw_sp_rif; + struct mlxsw_sp_span_entry;
struct mlxsw_sp_upper { struct net_device *dev; @@@ -111,32 -118,13 +118,13 @@@ struct mlxsw_sp_mid unsigned long *ports_in_mid; /* bits array */ };
- enum mlxsw_sp_span_type { - MLXSW_SP_SPAN_EGRESS, - MLXSW_SP_SPAN_INGRESS - }; - - struct mlxsw_sp_span_inspected_port { - struct list_head list; - enum mlxsw_sp_span_type type; - u8 local_port; - }; - - struct mlxsw_sp_span_entry { - u8 local_port; - bool used; - struct list_head bound_ports_list; - int ref_count; - int id; - }; - enum mlxsw_sp_port_mall_action_type { MLXSW_SP_PORT_MALL_MIRROR, MLXSW_SP_PORT_MALL_SAMPLE, };
struct mlxsw_sp_port_mall_mirror_tc_entry { - u8 to_local_port; + int span_id; bool ingress; };
@@@ -211,7 -199,6 +199,7 @@@ struct mlxsw_sp_port_vlan struct list_head list; struct mlxsw_sp_port *mlxsw_sp_port; struct mlxsw_sp_fid *fid; + unsigned int ref_count; u16 vid; struct mlxsw_sp_bridge_port *bridge_port; struct list_head bridge_vlan_node; @@@ -223,6 -210,8 +211,8 @@@ struct mlxsw_sp_port_xstats u64 wred_drop[TC_MAX_QUEUE]; u64 tail_drop[TC_MAX_QUEUE]; u64 backlog[TC_MAX_QUEUE]; + u64 tx_bytes[IEEE_8021QAZ_MAX_TCS]; + u64 tx_packets[IEEE_8021QAZ_MAX_TCS]; };
struct mlxsw_sp_port { @@@ -260,6 -249,7 +250,7 @@@ struct mlxsw_sp_port_sample *sample; struct list_head vlans_list; struct mlxsw_sp_qdisc *root_qdisc; + struct mlxsw_sp_qdisc *tclass_qdiscs; unsigned acl_rule_count; struct mlxsw_sp_acl_block *ing_acl_block; struct mlxsw_sp_acl_block *eg_acl_block; @@@ -397,16 -387,6 +388,6 @@@ struct mlxsw_sp_port *mlxsw_sp_port_dev struct mlxsw_sp_port *mlxsw_sp_port_lower_dev_hold(struct net_device *dev); void mlxsw_sp_port_dev_put(struct mlxsw_sp_port *mlxsw_sp_port); struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find_rcu(struct net_device *dev); - int mlxsw_sp_span_mirror_add(struct mlxsw_sp_port *from, - struct mlxsw_sp_port *to, - enum mlxsw_sp_span_type type, - bool bind); - void mlxsw_sp_span_mirror_del(struct mlxsw_sp_port *from, - u8 destination_port, - enum mlxsw_sp_span_type type, - bool bind); - struct mlxsw_sp_span_entry * - mlxsw_sp_span_entry_find(struct mlxsw_sp *mlxsw_sp, u8 local_port);
/* spectrum_dcb.c */ #ifdef CONFIG_MLXSW_SPECTRUM_DCB @@@ -462,6 -442,7 +443,7 @@@ int mlxsw_sp_kvdl_alloc_size_query(stru unsigned int entry_count, unsigned int *p_alloc_size); u64 mlxsw_sp_kvdl_occ_get(const struct mlxsw_sp *mlxsw_sp); + int mlxsw_sp_kvdl_resources_register(struct devlink *devlink);
struct mlxsw_sp_acl_rule_info { unsigned int priority; diff --combined drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c index 161bcdc012f0,917663adf925..c11c9a635866 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c @@@ -1203,7 -1203,6 +1203,7 @@@ static int __mlxsw_sp_port_fdb_uc_op(st bool dynamic) { char *sfd_pl; + u8 num_rec; int err;
sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL); @@@ -1213,16 -1212,9 +1213,16 @@@ mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0); mlxsw_reg_sfd_uc_pack(sfd_pl, 0, mlxsw_sp_sfd_rec_policy(dynamic), mac, fid, action, local_port); + num_rec = mlxsw_reg_sfd_num_rec_get(sfd_pl); err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl); - kfree(sfd_pl); + if (err) + goto out;
+ if (num_rec != mlxsw_reg_sfd_num_rec_get(sfd_pl)) + err = -EBUSY; + +out: + kfree(sfd_pl); return err; }
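Editor's note: all three SFD hunks in this file add the same guard. The record count is snapshotted from the payload before the register write, and since the device writes back the number of records it actually consumed, a mismatch afterwards means the add only partially succeeded and is reported as -EBUSY rather than silently claiming success. That is my reading of the pattern; reduced to its skeleton:

    #include <errno.h>

    struct sfd_payload {
            unsigned char num_rec;  /* requested in, consumed out */
            /* ... records ... */
    };

    /* stand-in for the register write: the device may take fewer records
     * than requested, e.g. when the FDB is full */
    static int device_write(struct sfd_payload *pl)
    {
            if (pl->num_rec > 1)
                    pl->num_rec = 1;
            return 0;
    }

    static int fdb_op(struct sfd_payload *pl)
    {
            unsigned char num_rec = pl->num_rec;    /* snapshot before write */
            int err = device_write(pl);

            if (err)
                    return err;
            if (num_rec != pl->num_rec)             /* partial success */
                    return -EBUSY;
            return 0;
    }

    int main(void)
    {
            struct sfd_payload pl = { .num_rec = 2 };

            return fdb_op(&pl) == -EBUSY ? 0 : 1;   /* partial add detected */
    }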
@@@ -1247,7 -1239,6 +1247,7 @@@ static int mlxsw_sp_port_fdb_uc_lag_op( bool adding, bool dynamic) { char *sfd_pl; + u8 num_rec; int err;
sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL); @@@ -1258,16 -1249,9 +1258,16 @@@ mlxsw_reg_sfd_uc_lag_pack(sfd_pl, 0, mlxsw_sp_sfd_rec_policy(dynamic), mac, fid, MLXSW_REG_SFD_REC_ACTION_NOP, lag_vid, lag_id); + num_rec = mlxsw_reg_sfd_num_rec_get(sfd_pl); err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl); - kfree(sfd_pl); + if (err) + goto out;
+ if (num_rec != mlxsw_reg_sfd_num_rec_get(sfd_pl)) + err = -EBUSY; + +out: + kfree(sfd_pl); return err; }
@@@ -1312,7 -1296,6 +1312,7 @@@ static int mlxsw_sp_port_mdb_op(struct u16 fid, u16 mid_idx, bool adding) { char *sfd_pl; + u8 num_rec; int err;
sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL); @@@ -1322,15 -1305,7 +1322,15 @@@ mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0); mlxsw_reg_sfd_mc_pack(sfd_pl, 0, addr, fid, MLXSW_REG_SFD_REC_ACTION_NOP, mid_idx); + num_rec = mlxsw_reg_sfd_num_rec_get(sfd_pl); err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl); + if (err) + goto out; + + if (num_rec != mlxsw_reg_sfd_num_rec_get(sfd_pl)) + err = -EBUSY; + +out: kfree(sfd_pl); return err; } @@@ -1844,7 -1819,7 +1844,7 @@@ mlxsw_sp_bridge_8021q_port_join(struct struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
if (is_vlan_dev(bridge_port->dev)) { - NL_SET_ERR_MSG(extack, "spectrum: Can not enslave a VLAN device to a VLAN-aware bridge"); + NL_SET_ERR_MSG_MOD(extack, "Can not enslave a VLAN device to a VLAN-aware bridge"); return -EINVAL; }
@@@ -1907,20 -1882,16 +1907,16 @@@ mlxsw_sp_bridge_8021d_port_join(struct struct netlink_ext_ack *extack) { struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan; + struct net_device *dev = bridge_port->dev; u16 vid;
- if (!is_vlan_dev(bridge_port->dev)) { - NL_SET_ERR_MSG(extack, "spectrum: Only VLAN devices can be enslaved to a VLAN-unaware bridge"); - return -EINVAL; - } - vid = vlan_dev_vlan_id(bridge_port->dev); - + vid = is_vlan_dev(dev) ? vlan_dev_vlan_id(dev) : 1; mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid); if (WARN_ON(!mlxsw_sp_port_vlan)) return -EINVAL;
if (mlxsw_sp_port_is_br_member(mlxsw_sp_port, bridge_device->dev)) { - NL_SET_ERR_MSG(extack, "spectrum: Can not bridge VLAN uppers of the same port"); + NL_SET_ERR_MSG_MOD(extack, "Can not bridge VLAN uppers of the same port"); return -EINVAL; }
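Editor's note: both the 8021d join and leave paths above stop insisting on a VLAN upper: a plain (non-VLAN) port enslaved to a VLAN-unaware bridge now falls back to VID 1, which I take to be the port's default PVID. The whole change boils down to one expression:

    /* untagged bridge membership maps to the port's default VLAN */
    unsigned short bridge_port_vid(int is_vlan, unsigned short vlan_id)
    {
            return is_vlan ? vlan_id : 1;
    }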
@@@ -1937,8 -1908,10 +1933,10 @@@ mlxsw_sp_bridge_8021d_port_leave(struc struct mlxsw_sp_port *mlxsw_sp_port) { struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan; - u16 vid = vlan_dev_vlan_id(bridge_port->dev); + struct net_device *dev = bridge_port->dev; + u16 vid;
+ vid = is_vlan_dev(dev) ? vlan_dev_vlan_id(dev) : 1; mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid); if (WARN_ON(!mlxsw_sp_port_vlan)) return; diff --combined drivers/net/ethernet/renesas/sh_eth.c index 14c839bb09e7,d3e1bc05ca9c..3557fe3f2bb5 --- a/drivers/net/ethernet/renesas/sh_eth.c +++ b/drivers/net/ethernet/renesas/sh_eth.c @@@ -123,8 -123,8 +123,8 @@@ static const u16 sh_eth_offset_gigabit[ [TSU_FWSL0] = 0x0030, [TSU_FWSL1] = 0x0034, [TSU_FWSLC] = 0x0038, - [TSU_QTAG0] = 0x0040, - [TSU_QTAG1] = 0x0044, + [TSU_QTAGM0] = 0x0040, + [TSU_QTAGM1] = 0x0044, [TSU_FWSR] = 0x0050, [TSU_FWINMK] = 0x0054, [TSU_ADQT0] = 0x0048, @@@ -439,17 -439,6 +439,17 @@@ static void sh_eth_modify(struct net_de enum_index); }
+static void sh_eth_tsu_write(struct sh_eth_private *mdp, u32 data, + int enum_index) +{ + iowrite32(data, mdp->tsu_addr + mdp->reg_offset[enum_index]); +} + +static u32 sh_eth_tsu_read(struct sh_eth_private *mdp, int enum_index) +{ + return ioread32(mdp->tsu_addr + mdp->reg_offset[enum_index]); +} + static bool sh_eth_is_gether(struct sh_eth_private *mdp) { return mdp->reg_offset == sh_eth_offset_gigabit; @@@ -763,6 -752,7 +763,7 @@@ static struct sh_eth_cpu_data sh7757_da .rpadir = 1, .rpadir_value = 2 << 16, .rtrate = 1, + .dual_port = 1, };
#define SH_GIGA_ETH_BASE 0xfee00000UL @@@ -841,6 -831,7 +842,7 @@@ static struct sh_eth_cpu_data sh7757_da .no_trimd = 1, .no_ade = 1, .tsu = 1, + .dual_port = 1, };
/* SH7734 */ @@@ -911,6 -902,7 +913,7 @@@ static struct sh_eth_cpu_data sh7763_da .tsu = 1, .irq_flags = IRQF_SHARED, .magic = 1, + .dual_port = 1, };
static struct sh_eth_cpu_data sh7619_data = { @@@ -943,6 -935,7 +946,7 @@@ static struct sh_eth_cpu_data sh771x_da EESIPR_RRFIP | EESIPR_RTLFIP | EESIPR_RTSFIP | EESIPR_PREIP | EESIPR_CERFIP, .tsu = 1, + .dual_port = 1, };
static void sh_eth_set_default_cpu_data(struct sh_eth_cpu_data *cd) @@@ -972,20 -965,16 +976,16 @@@
static int sh_eth_check_reset(struct net_device *ndev) { - int ret = 0; - int cnt = 100; + int cnt;
- while (cnt > 0) { + for (cnt = 100; cnt > 0; cnt--) { if (!(sh_eth_read(ndev, EDMR) & EDMR_SRST_GETHER)) - break; + return 0; mdelay(1); - cnt--; - } - if (cnt <= 0) { - netdev_err(ndev, "Device reset failed\n"); - ret = -ETIMEDOUT; } - return ret; + + netdev_err(ndev, "Device reset failed\n"); + return -ETIMEDOUT; }
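Editor's note: sh_eth_check_reset() above is rewritten from a while loop plus a trailing count check into a for loop that returns success the moment the self-clearing reset bit drops, which deletes the ret variable and makes the timeout path unconditional at the bottom. The same shape in isolation, with the register read and delay stubbed out (the mask value is illustrative):

    #include <errno.h>
    #include <stdio.h>

    #define EDMR_SRST 0x03                  /* illustrative soft-reset mask */

    static unsigned int fake_edmr;          /* stands in for the register */
    static unsigned int read_edmr(void) { return fake_edmr; }
    static void mdelay(unsigned int ms) { (void)ms; }

    static int check_reset(void)
    {
            int cnt;

            for (cnt = 100; cnt > 0; cnt--) {
                    if (!(read_edmr() & EDMR_SRST))
                            return 0;       /* reset completed */
                    mdelay(1);
            }

            fprintf(stderr, "device reset failed\n");
            return -ETIMEDOUT;              /* ~100 ms without the bit clearing */
    }

    int main(void)
    {
            return check_reset() ? 1 : 0;
    }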
static int sh_eth_reset(struct net_device *ndev) @@@ -2112,8 -2101,6 +2112,6 @@@ static size_t __sh_eth_get_regs(struct add_tsu_reg(TSU_FWSL0); add_tsu_reg(TSU_FWSL1); add_tsu_reg(TSU_FWSLC); - add_tsu_reg(TSU_QTAG0); - add_tsu_reg(TSU_QTAG1); add_tsu_reg(TSU_QTAGM0); add_tsu_reg(TSU_QTAGM1); add_tsu_reg(TSU_FWSR); @@@ -2932,7 -2919,7 +2930,7 @@@ static int sh_eth_vlan_rx_kill_vid(stru /* SuperH's TSU register init function */ static void sh_eth_tsu_init(struct sh_eth_private *mdp) { - if (sh_eth_is_rz_fast_ether(mdp)) { + if (!mdp->cd->dual_port) { sh_eth_tsu_write(mdp, 0, TSU_TEN); /* Disable all CAM entry */ sh_eth_tsu_write(mdp, TSU_FWSLC_POSTENU | TSU_FWSLC_POSTENL, TSU_FWSLC); /* Enable POST registers */ @@@ -2949,13 -2936,8 +2947,8 @@@ sh_eth_tsu_write(mdp, 0, TSU_FWSL0); sh_eth_tsu_write(mdp, 0, TSU_FWSL1); sh_eth_tsu_write(mdp, TSU_FWSLC_POSTENU | TSU_FWSLC_POSTENL, TSU_FWSLC); - if (sh_eth_is_gether(mdp)) { - sh_eth_tsu_write(mdp, 0, TSU_QTAG0); /* Disable QTAG(0->1) */ - sh_eth_tsu_write(mdp, 0, TSU_QTAG1); /* Disable QTAG(1->0) */ - } else { - sh_eth_tsu_write(mdp, 0, TSU_QTAGM0); /* Disable QTAG(0->1) */ - sh_eth_tsu_write(mdp, 0, TSU_QTAGM1); /* Disable QTAG(1->0) */ - } + sh_eth_tsu_write(mdp, 0, TSU_QTAGM0); /* Disable QTAG(0->1) */ + sh_eth_tsu_write(mdp, 0, TSU_QTAGM1); /* Disable QTAG(1->0) */ sh_eth_tsu_write(mdp, 0, TSU_FWSR); /* all interrupt status clear */ sh_eth_tsu_write(mdp, 0, TSU_FWINMK); /* Disable all interrupt */ sh_eth_tsu_write(mdp, 0, TSU_TEN); /* Disable all CAM entry */ diff --combined drivers/net/ethernet/renesas/sh_eth.h index e5fe70134690,5bbaf9e56e92..21047d58a93f --- a/drivers/net/ethernet/renesas/sh_eth.h +++ b/drivers/net/ethernet/renesas/sh_eth.h @@@ -118,8 -118,8 +118,8 @@@ enum TSU_FWSL0, TSU_FWSL1, TSU_FWSLC, - TSU_QTAG0, - TSU_QTAG1, + TSU_QTAG0, /* Same as TSU_QTAGM0 */ + TSU_QTAG1, /* Same as TSU_QTAGM1 */ TSU_QTAGM0, TSU_QTAGM1, TSU_FWSR, @@@ -509,6 -509,7 +509,7 @@@ struct sh_eth_cpu_data unsigned rmiimode:1; /* EtherC has RMIIMODE register */ unsigned rtrate:1; /* EtherC has RTRATE register */ unsigned magic:1; /* EtherC has ECMR.MPDE and ECSR.MPD */ + unsigned dual_port:1; /* Dual EtherC/E-DMAC */ };
struct sh_eth_private { @@@ -567,4 -568,15 +568,4 @@@ static inline void *sh_eth_tsu_get_offs return mdp->tsu_addr + mdp->reg_offset[enum_index]; }
-static inline void sh_eth_tsu_write(struct sh_eth_private *mdp, u32 data, - int enum_index) -{ - iowrite32(data, mdp->tsu_addr + mdp->reg_offset[enum_index]); -} - -static inline u32 sh_eth_tsu_read(struct sh_eth_private *mdp, int enum_index) -{ - return ioread32(mdp->tsu_addr + mdp->reg_offset[enum_index]); -} - #endif /* #ifndef __SH_ETH_H__ */ diff --combined drivers/net/ppp/ppp_generic.c index fa2a9bdd1866,a393c1dff7dc..7dc2f34e7229 --- a/drivers/net/ppp/ppp_generic.c +++ b/drivers/net/ppp/ppp_generic.c @@@ -971,6 -971,7 +971,7 @@@ static struct pernet_operations ppp_net .exit = ppp_exit_net, .id = &ppp_net_id, .size = sizeof(struct ppp_net), + .async = true, };
static int ppp_unit_register(struct ppp *ppp, int unit, bool ifname_is_set) @@@ -3161,15 -3162,6 +3162,15 @@@ ppp_connect_channel(struct channel *pch goto outl;
ppp_lock(ppp); + spin_lock_bh(&pch->downl); + if (!pch->chan) { + /* Don't connect unregistered channels */ + spin_unlock_bh(&pch->downl); + ppp_unlock(ppp); + ret = -ENOTCONN; + goto outl; + } + spin_unlock_bh(&pch->downl); if (pch->file.hdrlen > ppp->file.hdrlen) ppp->file.hdrlen = pch->file.hdrlen; hdrlen = pch->file.hdrlen + 2; /* for protocol bytes */ diff --combined drivers/net/tun.c index 7433bb2e4451,d531954512c7..475088f947bb --- a/drivers/net/tun.c +++ b/drivers/net/tun.c @@@ -78,6 -78,7 +78,7 @@@ #include <linux/mutex.h>
#include <linux/uaccess.h> + #include <linux/proc_fs.h>
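Editor's note: before moving on through tun.c, the ppp_generic hunk just above deserves a remark. ppp_connect_channel() now re-checks pch->chan under the channel's downl lock and bails out with -ENOTCONN, closing a race where a channel could be attached to a unit after ppp_unregister_channel() had already cleared the pointer. The check-under-lock pattern in user-space form, with a pthread mutex standing in for the kernel spinlock:

    #include <errno.h>
    #include <pthread.h>
    #include <stddef.h>

    struct channel {
            pthread_mutex_t downl;
            void *chan;             /* NULL once the channel unregisters */
    };

    static int connect_channel(struct channel *pch)
    {
            int ret = 0;

            pthread_mutex_lock(&pch->downl);
            if (!pch->chan)         /* don't connect unregistered channels */
                    ret = -ENOTCONN;
            pthread_mutex_unlock(&pch->downl);
            return ret;             /* 0: safe to link into the unit */
    }

    int main(void)
    {
            struct channel pch = { PTHREAD_MUTEX_INITIALIZER, NULL };

            return connect_channel(&pch) == -ENOTCONN ? 0 : 1;
    }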
/* Uncomment to enable debugging */ /* #define TUN_DEBUG 1 */ @@@ -181,6 -182,7 +182,6 @@@ struct tun_file struct tun_struct *detached; struct ptr_ring tx_ring; struct xdp_rxq_info xdp_rxq; - int xdp_pending_pkts; };
struct tun_flow_entry { @@@ -1642,7 -1644,6 +1643,7 @@@ static struct sk_buff *tun_build_skb(st else *skb_xdp = 0;
+ preempt_disable(); rcu_read_lock(); xdp_prog = rcu_dereference(tun->xdp_prog); if (xdp_prog && !*skb_xdp) { @@@ -1662,12 -1663,11 +1663,12 @@@ case XDP_REDIRECT: get_page(alloc_frag->page); alloc_frag->offset += buflen; - ++tfile->xdp_pending_pkts; err = xdp_do_redirect(tun->dev, &xdp, xdp_prog); + xdp_do_flush_map(); if (err) goto err_redirect; rcu_read_unlock(); + preempt_enable(); return NULL; case XDP_TX: xdp_xmit = true; @@@ -1689,7 -1689,6 +1690,7 @@@ skb = build_skb(buf, buflen); if (!skb) { rcu_read_unlock(); + preempt_enable(); return ERR_PTR(-ENOMEM); }
@@@ -1702,12 -1701,10 +1703,12 @@@ skb->dev = tun->dev; generic_xdp_tx(skb, xdp_prog); rcu_read_unlock(); + preempt_enable(); return NULL; }
rcu_read_unlock(); + preempt_enable();
return skb;
@@@ -1715,7 -1712,6 +1716,7 @@@ err_redirect put_page(alloc_frag->page); err_xdp: rcu_read_unlock(); + preempt_enable(); this_cpu_inc(tun->pcpu_stats->rx_dropped); return NULL; } @@@ -1989,6 -1985,11 +1990,6 @@@ static ssize_t tun_chr_write_iter(struc result = tun_get_user(tun, tfile, NULL, from, file->f_flags & O_NONBLOCK, false);
- if (tfile->xdp_pending_pkts) { - tfile->xdp_pending_pkts = 0; - xdp_do_flush_map(); - } - tun_put(tun); return result; } @@@ -2286,11 -2287,67 +2287,67 @@@ static int tun_validate(struct nlattr * return -EINVAL; }
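Editor's note: the tun_build_skb() changes drop the driver-private xdp_pending_pkts batching and instead call xdp_do_flush_map() immediately after xdp_do_redirect(), with preempt_disable()/preempt_enable() now bracketing the whole XDP pass. My understanding of the invariant: redirect state is per-CPU, so enqueue and flush must happen on the same CPU, which batching across syscalls could not guarantee. A minimal model of that invariant:

    /* model only: per-CPU redirect state must be drained before the
     * code can migrate to another CPU */
    #define QLEN 16
    static int percpu_queue[QLEN];
    static int percpu_queued;

    static void do_redirect(int pkt)
    {
            if (percpu_queued < QLEN)
                    percpu_queue[percpu_queued++] = pkt; /* per-CPU enqueue */
    }

    static void flush_map(void)
    {
            percpu_queued = 0;      /* hand everything to the target device */
    }

    static void handle_frame(int pkt)
    {
            /* kernel: preempt_disable() pins us to this CPU */
            do_redirect(pkt);
            flush_map();            /* drain before the pin is released */
            /* kernel: preempt_enable() */
    }

    int main(void)
    {
            handle_frame(1);
            return percpu_queued;   /* 0: everything was flushed */
    }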
+ static size_t tun_get_size(const struct net_device *dev) + { + BUILD_BUG_ON(sizeof(u32) != sizeof(uid_t)); + BUILD_BUG_ON(sizeof(u32) != sizeof(gid_t)); + + return nla_total_size(sizeof(uid_t)) + /* OWNER */ + nla_total_size(sizeof(gid_t)) + /* GROUP */ + nla_total_size(sizeof(u8)) + /* TYPE */ + nla_total_size(sizeof(u8)) + /* PI */ + nla_total_size(sizeof(u8)) + /* VNET_HDR */ + nla_total_size(sizeof(u8)) + /* PERSIST */ + nla_total_size(sizeof(u8)) + /* MULTI_QUEUE */ + nla_total_size(sizeof(u32)) + /* NUM_QUEUES */ + nla_total_size(sizeof(u32)) + /* NUM_DISABLED_QUEUES */ + 0; + } + + static int tun_fill_info(struct sk_buff *skb, const struct net_device *dev) + { + struct tun_struct *tun = netdev_priv(dev); + + if (nla_put_u8(skb, IFLA_TUN_TYPE, tun->flags & TUN_TYPE_MASK)) + goto nla_put_failure; + if (uid_valid(tun->owner) && + nla_put_u32(skb, IFLA_TUN_OWNER, + from_kuid_munged(current_user_ns(), tun->owner))) + goto nla_put_failure; + if (gid_valid(tun->group) && + nla_put_u32(skb, IFLA_TUN_GROUP, + from_kgid_munged(current_user_ns(), tun->group))) + goto nla_put_failure; + if (nla_put_u8(skb, IFLA_TUN_PI, !(tun->flags & IFF_NO_PI))) + goto nla_put_failure; + if (nla_put_u8(skb, IFLA_TUN_VNET_HDR, !!(tun->flags & IFF_VNET_HDR))) + goto nla_put_failure; + if (nla_put_u8(skb, IFLA_TUN_PERSIST, !!(tun->flags & IFF_PERSIST))) + goto nla_put_failure; + if (nla_put_u8(skb, IFLA_TUN_MULTI_QUEUE, + !!(tun->flags & IFF_MULTI_QUEUE))) + goto nla_put_failure; + if (tun->flags & IFF_MULTI_QUEUE) { + if (nla_put_u32(skb, IFLA_TUN_NUM_QUEUES, tun->numqueues)) + goto nla_put_failure; + if (nla_put_u32(skb, IFLA_TUN_NUM_DISABLED_QUEUES, + tun->numdisabled)) + goto nla_put_failure; + } + + return 0; + + nla_put_failure: + return -EMSGSIZE; + } + static struct rtnl_link_ops tun_link_ops __read_mostly = { .kind = DRV_NAME, .priv_size = sizeof(struct tun_struct), .setup = tun_setup, .validate = tun_validate, + .get_size = tun_get_size, + .fill_info = tun_fill_info, };
static void tun_sock_write_space(struct sock *sk) @@@ -2325,6 -2382,13 +2382,6 @@@ static int tun_sendmsg(struct socket *s ret = tun_get_user(tun, tfile, m->msg_control, &m->msg_iter, m->msg_flags & MSG_DONTWAIT, m->msg_flags & MSG_MORE); - - if (tfile->xdp_pending_pkts >= NAPI_POLL_WEIGHT || - !(m->msg_flags & MSG_MORE)) { - tfile->xdp_pending_pkts = 0; - xdp_do_flush_map(); - } - tun_put(tun); return ret; } @@@ -2782,6 -2846,7 +2839,7 @@@ static long __tun_chr_ioctl(struct fil struct tun_struct *tun; void __user* argp = (void __user*)arg; struct ifreq ifr; + struct net *net; kuid_t owner; kgid_t group; int sndbuf; @@@ -2790,7 -2855,8 +2848,8 @@@ int le; int ret;
- if (cmd == TUNSETIFF || cmd == TUNSETQUEUE || _IOC_TYPE(cmd) == SOCK_IOC_TYPE) { + if (cmd == TUNSETIFF || cmd == TUNSETQUEUE || + (_IOC_TYPE(cmd) == SOCK_IOC_TYPE && cmd != SIOCGSKNS)) { if (copy_from_user(&ifr, argp, ifreq_len)) return -EFAULT; } else { @@@ -2810,6 -2876,7 +2869,7 @@@ rtnl_lock();
tun = tun_get(tfile); + net = sock_net(&tfile->sk); if (cmd == TUNSETIFF) { ret = -EEXIST; if (tun) @@@ -2817,7 -2884,7 +2877,7 @@@
ifr.ifr_name[IFNAMSIZ-1] = '\0';
- ret = tun_set_iff(sock_net(&tfile->sk), file, &ifr); + ret = tun_set_iff(net, file, &ifr);
if (ret) goto unlock; @@@ -2839,6 -2906,14 +2899,14 @@@ tfile->ifindex = ifindex; goto unlock; } + if (cmd == SIOCGSKNS) { + ret = -EPERM; + if (!ns_capable(net->user_ns, CAP_NET_ADMIN)) + goto unlock; + + ret = open_related_ns(&net->ns, get_net_ns); + goto unlock; + }
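Editor's note: the new SIOCGSKNS branch makes a tun fd answer the "which network namespace do you live in" ioctl that sockets already support, returning a namespace fd (gated on CAP_NET_ADMIN in that namespace). A user-space sketch; it assumes the SIOCGSKNS definition from <linux/sockios.h> and skips the TUNSETIFF setup a real program would do first:

    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <linux/sockios.h>      /* SIOCGSKNS */

    int main(void)
    {
            int tun_fd = open("/dev/net/tun", O_RDWR);

            if (tun_fd < 0)
                    return 1;
            /* ... ioctl(tun_fd, TUNSETIFF, &ifr) would normally go here ... */
            int ns_fd = ioctl(tun_fd, SIOCGSKNS);
            if (ns_fd < 0) {
                    perror("SIOCGSKNS");
                    return 1;
            }
            printf("netns fd: %d\n", ns_fd); /* usable with setns(2) */
            return 0;
    }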
ret = -EBADFD; if (!tun) @@@ -3156,6 -3231,7 +3224,6 @@@ static int tun_chr_open(struct inode *i sock_set_flag(&tfile->sk, SOCK_ZEROCOPY);
memset(&tfile->tx_ring, 0, sizeof(tfile->tx_ring)); - tfile->xdp_pending_pkts = 0;
return 0; } diff --combined fs/lockd/svc.c index 346ed161756d,2dee4e03ff1c..3a4f1a1f48e4 --- a/fs/lockd/svc.c +++ b/fs/lockd/svc.c @@@ -57,8 -57,8 +57,8 @@@ static struct task_struct *nlmsvc_task static struct svc_rqst *nlmsvc_rqst; unsigned long nlmsvc_timeout;
-atomic_t nlm_ntf_refcnt = ATOMIC_INIT(0); -DECLARE_WAIT_QUEUE_HEAD(nlm_ntf_wq); +static atomic_t nlm_ntf_refcnt = ATOMIC_INIT(0); +static DECLARE_WAIT_QUEUE_HEAD(nlm_ntf_wq);
unsigned int lockd_net_id;
@@@ -709,6 -709,7 +709,7 @@@ static struct pernet_operations lockd_n .exit = lockd_exit_net, .id = &lockd_net_id, .size = sizeof(struct lockd_net), + .async = true, };
diff --combined include/linux/phy.h index d7069539f351,6e38c699b753..5a9b1753fdc5 --- a/include/linux/phy.h +++ b/include/linux/phy.h @@@ -924,7 -924,6 +924,7 @@@ void phy_device_remove(struct phy_devic int phy_init_hw(struct phy_device *phydev); int phy_suspend(struct phy_device *phydev); int phy_resume(struct phy_device *phydev); +int __phy_resume(struct phy_device *phydev); int phy_loopback(struct phy_device *phydev, bool enable); struct phy_device *phy_attach(struct net_device *dev, const char *bus_id, phy_interface_t interface); @@@ -995,6 -994,14 +995,14 @@@ int genphy_c45_pma_setup_forced(struct int genphy_c45_an_disable_aneg(struct phy_device *phydev); int genphy_c45_read_mdix(struct phy_device *phydev);
+ /* The gen10g_* functions are the old Clause 45 stub */ + int gen10g_config_aneg(struct phy_device *phydev); + int gen10g_read_status(struct phy_device *phydev); + int gen10g_no_soft_reset(struct phy_device *phydev); + int gen10g_config_init(struct phy_device *phydev); + int gen10g_suspend(struct phy_device *phydev); + int gen10g_resume(struct phy_device *phydev); + static inline int phy_read_status(struct phy_device *phydev) { if (!phydev->drv) diff --combined include/linux/skbuff.h index 99df17109e1b,9bc1750ca3d3..47082f54ec1f --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@@ -466,6 -466,9 +466,9 @@@ struct ubuf_info
#define skb_uarg(SKB) ((struct ubuf_info *)(skb_shinfo(SKB)->destructor_arg))
+ int mm_account_pinned_pages(struct mmpin *mmp, size_t size); + void mm_unaccount_pinned_pages(struct mmpin *mmp); + struct ubuf_info *sock_zerocopy_alloc(struct sock *sk, size_t size); struct ubuf_info *sock_zerocopy_realloc(struct sock *sk, size_t size, struct ubuf_info *uarg); @@@ -3285,7 -3288,8 +3288,7 @@@ int skb_zerocopy(struct sk_buff *to, st void skb_split(struct sk_buff *skb, struct sk_buff *skb1, const u32 len); int skb_shift(struct sk_buff *tgt, struct sk_buff *skb, int shiftlen); void skb_scrub_packet(struct sk_buff *skb, bool xnet); -unsigned int skb_gso_transport_seglen(const struct sk_buff *skb); -bool skb_gso_validate_mtu(const struct sk_buff *skb, unsigned int mtu); +bool skb_gso_validate_network_len(const struct sk_buff *skb, unsigned int mtu); bool skb_gso_validate_mac_len(const struct sk_buff *skb, unsigned int len); struct sk_buff *skb_segment(struct sk_buff *skb, netdev_features_t features); struct sk_buff *skb_vlan_untag(struct sk_buff *skb); @@@ -4037,12 -4041,6 +4040,12 @@@ static inline bool skb_is_gso_v6(const return skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6; }
+/* Note: Should be called only if skb_is_gso(skb) is true */ +static inline bool skb_is_gso_sctp(const struct sk_buff *skb) +{ + return skb_shinfo(skb)->gso_type & SKB_GSO_SCTP; +} + static inline void skb_gso_reset(struct sk_buff *skb) { skb_shinfo(skb)->gso_size = 0; @@@ -4050,22 -4048,6 +4053,22 @@@ skb_shinfo(skb)->gso_type = 0; }
+static inline void skb_increase_gso_size(struct skb_shared_info *shinfo, + u16 increment) +{ + if (WARN_ON_ONCE(shinfo->gso_size == GSO_BY_FRAGS)) + return; + shinfo->gso_size += increment; +} + +static inline void skb_decrease_gso_size(struct skb_shared_info *shinfo, + u16 decrement) +{ + if (WARN_ON_ONCE(shinfo->gso_size == GSO_BY_FRAGS)) + return; + shinfo->gso_size -= decrement; +} + void __skb_warn_lro_forwarding(const struct sk_buff *skb);
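Editor's note: the new skb_increase_gso_size()/skb_decrease_gso_size() helpers refuse to touch gso_size when it holds GSO_BY_FRAGS, the sentinel that (as the SCTP helper just above suggests) means "segment boundaries come from the frag list, not from a fixed size". The guard on its own, with GSO_BY_FRAGS assumed to be the kernel's 0xFFFF:

    #include <stdio.h>

    #define GSO_BY_FRAGS 0xFFFF     /* sentinel, must never be adjusted */

    struct shared_info {
            unsigned short gso_size;
    };

    static void increase_gso_size(struct shared_info *shinfo,
                                  unsigned short increment)
    {
            if (shinfo->gso_size == GSO_BY_FRAGS) {
                    fprintf(stderr, "GSO_BY_FRAGS left untouched\n");
                    return;         /* WARN_ON_ONCE() in the kernel version */
            }
            shinfo->gso_size += increment;
    }

    int main(void)
    {
            struct shared_info si = { .gso_size = GSO_BY_FRAGS };

            increase_gso_size(&si, 100);
            return si.gso_size == GSO_BY_FRAGS ? 0 : 1;
    }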
static inline bool skb_warn_if_lro(const struct sk_buff *skb) @@@ -4125,6 -4107,38 +4128,6 @@@ static inline bool skb_head_is_locked(c return !skb->head_frag || skb_cloned(skb); }
-/** - * skb_gso_network_seglen - Return length of individual segments of a gso packet - * - * @skb: GSO skb - * - * skb_gso_network_seglen is used to determine the real size of the - * individual segments, including Layer3 (IP, IPv6) and L4 headers (TCP/UDP). - * - * The MAC/L2 header is not accounted for. - */ -static inline unsigned int skb_gso_network_seglen(const struct sk_buff *skb) -{ - unsigned int hdr_len = skb_transport_header(skb) - - skb_network_header(skb); - return hdr_len + skb_gso_transport_seglen(skb); -} - -/** - * skb_gso_mac_seglen - Return length of individual segments of a gso packet - * - * @skb: GSO skb - * - * skb_gso_mac_seglen is used to determine the real size of the - * individual segments, including MAC/L2, Layer3 (IP, IPv6) and L4 - * headers (TCP/UDP). - */ -static inline unsigned int skb_gso_mac_seglen(const struct sk_buff *skb) -{ - unsigned int hdr_len = skb_transport_header(skb) - skb_mac_header(skb); - return hdr_len + skb_gso_transport_seglen(skb); -} - /* Local Checksum Offload. * Compute outer checksum based on the assumption that the * inner checksum will be offloaded later. diff --combined include/net/devlink.h index 4de35ed12bcc,8d1c3f276dea..c83125ad20ff --- a/include/net/devlink.h +++ b/include/net/devlink.h @@@ -234,13 -234,9 +234,9 @@@ struct devlink_dpipe_headers /** * struct devlink_resource_ops - resource ops * @occ_get: get the occupied size - * @size_validate: validate the size of the resource before update, reload - * is needed for changes to take place */ struct devlink_resource_ops { u64 (*occ_get)(struct devlink *devlink); - int (*size_validate)(struct devlink *devlink, u64 size, - struct netlink_ext_ack *extack); };
/** @@@ -257,18 -253,6 +253,18 @@@ struct devlink_resource_size_params enum devlink_resource_unit unit; };
+static inline void +devlink_resource_size_params_init(struct devlink_resource_size_params *size_params, + u64 size_min, u64 size_max, + u64 size_granularity, + enum devlink_resource_unit unit) +{ + size_params->size_min = size_min; + size_params->size_max = size_max; + size_params->size_granularity = size_granularity; + size_params->unit = unit; +} + /** * struct devlink_resource - devlink resource * @name: name of the resource @@@ -290,7 -274,7 +286,7 @@@ struct devlink_resource u64 size_new; bool size_valid; struct devlink_resource *parent; - struct devlink_resource_size_params *size_params; + struct devlink_resource_size_params size_params; struct list_head list; struct list_head resource_list; const struct devlink_resource_ops *resource_ops; @@@ -414,7 -398,7 +410,7 @@@ int devlink_resource_register(struct de u64 resource_size, u64 resource_id, u64 parent_resource_id, - struct devlink_resource_size_params *size_params, + const struct devlink_resource_size_params *size_params, const struct devlink_resource_ops *resource_ops); void devlink_resources_unregister(struct devlink *devlink, struct devlink_resource *resource); @@@ -568,7 -552,7 +564,7 @@@ devlink_resource_register(struct devlin u64 resource_size, u64 resource_id, u64 parent_resource_id, - struct devlink_resource_size_params *size_params, + const struct devlink_resource_size_params *size_params, const struct devlink_resource_ops *resource_ops) { return 0; diff --combined include/net/ip6_route.h index ac0866bb9e93,ce2abc0ff102..0084013d6bed --- a/include/net/ip6_route.h +++ b/include/net/ip6_route.h @@@ -75,7 -75,8 +75,8 @@@ static inline bool rt6_qualify_for_ecmp void ip6_route_input(struct sk_buff *skb); struct dst_entry *ip6_route_input_lookup(struct net *net, struct net_device *dev, - struct flowi6 *fl6, int flags); + struct flowi6 *fl6, + const struct sk_buff *skb, int flags);
struct dst_entry *ip6_route_output_flags(struct net *net, const struct sock *sk, struct flowi6 *fl6, int flags); @@@ -88,9 -89,10 +89,10 @@@ static inline struct dst_entry *ip6_rou }
struct dst_entry *ip6_route_lookup(struct net *net, struct flowi6 *fl6, - int flags); + const struct sk_buff *skb, int flags); struct rt6_info *ip6_pol_route(struct net *net, struct fib6_table *table, - int ifindex, struct flowi6 *fl6, int flags); + int ifindex, struct flowi6 *fl6, + const struct sk_buff *skb, int flags);
void ip6_route_init_special_entries(void); int ip6_route_init(void); @@@ -126,8 -128,10 +128,10 @@@ static inline int ip6_route_get_saddr(s }
struct rt6_info *rt6_lookup(struct net *net, const struct in6_addr *daddr, - const struct in6_addr *saddr, int oif, int flags); - u32 rt6_multipath_hash(const struct flowi6 *fl6, const struct sk_buff *skb); + const struct in6_addr *saddr, int oif, + const struct sk_buff *skb, int flags); + u32 rt6_multipath_hash(const struct net *net, const struct flowi6 *fl6, + const struct sk_buff *skb, struct flow_keys *hkeys);
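Editor's note: rt6_lookup(), ip6_pol_route() and rt6_multipath_hash() all gain skb/flow_keys parameters here; the point, as I read the signature changes, is that multipath nexthop selection can hash actual packet fields (for example, ICMP errors that must follow the flow they refer to) instead of only the flowi. A toy model of key-based path selection; the hash function is illustrative, not the kernel's:

    #include <stdint.h>
    #include <stdio.h>

    struct flow_keys {              /* drastically reduced */
            uint32_t src, dst;
            uint16_t sport, dport;
    };

    static uint32_t flow_hash(const struct flow_keys *k)
    {
            uint32_t h = k->src ^ (k->dst * 2654435761u); /* any stable mix */

            return h ^ (((uint32_t)k->sport << 16) | k->dport);
    }

    /* identical keys always select the identical nexthop */
    static unsigned int select_path(const struct flow_keys *k,
                                    unsigned int npaths)
    {
            return flow_hash(k) % npaths;
    }

    int main(void)
    {
            struct flow_keys k = { 0x0a000001, 0x0a000002, 12345, 443 };

            printf("path %u of 4\n", select_path(&k, 4));
            return 0;
    }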
struct dst_entry *icmp6_dst_alloc(struct net_device *dev, struct flowi6 *fl6);
@@@ -179,9 -183,6 +183,9 @@@ void rt6_disable_ip(struct net_device * void rt6_sync_down_dev(struct net_device *dev, unsigned long event); void rt6_multipath_rebalance(struct rt6_info *rt);
+void rt6_uncached_list_add(struct rt6_info *rt); +void rt6_uncached_list_del(struct rt6_info *rt); + static inline const struct rt6_info *skb_rt6_info(const struct sk_buff *skb) { const struct dst_entry *dst = skb_dst(skb); @@@ -269,4 -270,5 +273,5 @@@ static inline bool rt6_duplicate_nextho ipv6_addr_equal(&a->rt6i_gateway, &b->rt6i_gateway) && !lwtunnel_cmp_encap(a->dst.lwtstate, b->dst.lwtstate); } + #endif diff --combined include/net/route.h index 40b870d58f38,158833ea7988..ccf52f0958bb --- a/include/net/route.h +++ b/include/net/route.h @@@ -65,8 -65,6 +65,6 @@@ struct rtable /* Miscellaneous cached information */ u32 rt_pmtu;
- u32 rt_table_id; - struct list_head rt_uncached; struct uncached_list *rt_uncached_list; }; @@@ -227,9 -225,6 +225,9 @@@ struct in_ifaddr void fib_add_ifaddr(struct in_ifaddr *); void fib_del_ifaddr(struct in_ifaddr *, struct in_ifaddr *);
+void rt_add_uncached_list(struct rtable *rt); +void rt_del_uncached_list(struct rtable *rt); + static inline void ip_rt_put(struct rtable *rt) { /* dst_release() accepts a NULL parameter. diff --combined kernel/bpf/verifier.c index c6eff108aa99,3c74b163eaeb..eb79a34359c0 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@@ -508,10 -508,6 +508,6 @@@ err static const int caller_saved[CALLER_SAVED_REGS] = { BPF_REG_0, BPF_REG_1, BPF_REG_2, BPF_REG_3, BPF_REG_4, BPF_REG_5 }; - #define CALLEE_SAVED_REGS 5 - static const int callee_saved[CALLEE_SAVED_REGS] = { - BPF_REG_6, BPF_REG_7, BPF_REG_8, BPF_REG_9 - };
static void __mark_reg_not_init(struct bpf_reg_state *reg);
@@@ -1356,13 -1352,6 +1352,13 @@@ static bool is_ctx_reg(struct bpf_verif return reg->type == PTR_TO_CTX; }
+static bool is_pkt_reg(struct bpf_verifier_env *env, int regno) +{ + const struct bpf_reg_state *reg = cur_regs(env) + regno; + + return type_is_pkt_pointer(reg->type); +} + static int check_pkt_ptr_alignment(struct bpf_verifier_env *env, const struct bpf_reg_state *reg, int off, int size, bool strict) @@@ -1423,10 -1412,10 +1419,10 @@@ static int check_generic_ptr_alignment( }
static int check_ptr_alignment(struct bpf_verifier_env *env, - const struct bpf_reg_state *reg, - int off, int size) + const struct bpf_reg_state *reg, int off, + int size, bool strict_alignment_once) { - bool strict = env->strict_alignment; + bool strict = env->strict_alignment || strict_alignment_once; const char *pointer_desc = "";
switch (reg->type) { @@@ -1583,9 -1572,9 +1579,9 @@@ static void coerce_reg_to_size(struct b * if t==write && value_regno==-1, some unknown value is stored into memory * if t==read && value_regno==-1, don't care what we read from memory */ -static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regno, int off, - int bpf_size, enum bpf_access_type t, - int value_regno) +static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regno, + int off, int bpf_size, enum bpf_access_type t, + int value_regno, bool strict_alignment_once) { struct bpf_reg_state *regs = cur_regs(env); struct bpf_reg_state *reg = regs + regno; @@@ -1597,7 -1586,7 +1593,7 @@@ return size;
/* alignment checks will add in reg->off themselves */ - err = check_ptr_alignment(env, reg, off, size); + err = check_ptr_alignment(env, reg, off, size, strict_alignment_once); if (err) return err;
@@@ -1742,23 -1731,21 +1738,23 @@@ static int check_xadd(struct bpf_verifi return -EACCES; }
- if (is_ctx_reg(env, insn->dst_reg)) { - verbose(env, "BPF_XADD stores into R%d context is not allowed\n", - insn->dst_reg); + if (is_ctx_reg(env, insn->dst_reg) || + is_pkt_reg(env, insn->dst_reg)) { + verbose(env, "BPF_XADD stores into R%d %s is not allowed\n", + insn->dst_reg, is_ctx_reg(env, insn->dst_reg) ? + "context" : "packet"); return -EACCES; }
/* check whether atomic_add can read the memory */ err = check_mem_access(env, insn_idx, insn->dst_reg, insn->off, - BPF_SIZE(insn->code), BPF_READ, -1); + BPF_SIZE(insn->code), BPF_READ, -1, true); if (err) return err;
/* check whether atomic_add can write into the same memory */ return check_mem_access(env, insn_idx, insn->dst_reg, insn->off, - BPF_SIZE(insn->code), BPF_WRITE, -1); + BPF_SIZE(insn->code), BPF_WRITE, -1, true); }
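Editor's note: check_xadd() above gains two restrictions: BPF_XADD into packet memory is now rejected alongside context memory, and both the read and the write check pass strict_alignment_once = true, forcing natural alignment for the atomic even on architectures where the verifier is otherwise lenient. A compact model of the "strict once" flag:

    #include <errno.h>
    #include <stdbool.h>

    static bool strict_alignment;   /* env->strict_alignment analogue */

    static int check_alignment(long addr, int size, bool strict_once)
    {
            bool strict = strict_alignment || strict_once;

            if (strict && (addr % size))
                    return -EACCES; /* misaligned access rejected */
            return 0;
    }

    /* an atomic add touches the same memory twice, strictly both times */
    static int check_xadd(long addr, int size)
    {
            int err = check_alignment(addr, size, true);    /* read side */

            if (err)
                    return err;
            return check_alignment(addr, size, true);       /* write side */
    }

    int main(void)
    {
            return check_xadd(0x1003, 4) == -EACCES ? 0 : 1; /* misaligned */
    }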
/* when register 'regno' is passed into function that will read 'access_size' @@@ -2397,8 -2384,7 +2393,8 @@@ static int check_helper_call(struct bpf * is inferred from register state. */ for (i = 0; i < meta.access_size; i++) { - err = check_mem_access(env, insn_idx, meta.regno, i, BPF_B, BPF_WRITE, -1); + err = check_mem_access(env, insn_idx, meta.regno, i, BPF_B, + BPF_WRITE, -1, false); if (err) return err; } @@@ -4642,7 -4628,7 +4638,7 @@@ static int do_check(struct bpf_verifier */ err = check_mem_access(env, insn_idx, insn->src_reg, insn->off, BPF_SIZE(insn->code), BPF_READ, - insn->dst_reg); + insn->dst_reg, false); if (err) return err;
@@@ -4694,7 -4680,7 +4690,7 @@@ /* check that memory (dst_reg + off) is writeable */ err = check_mem_access(env, insn_idx, insn->dst_reg, insn->off, BPF_SIZE(insn->code), BPF_WRITE, - insn->src_reg); + insn->src_reg, false); if (err) return err;
@@@ -4729,7 -4715,7 +4725,7 @@@ /* check that memory (dst_reg + off) is writeable */ err = check_mem_access(env, insn_idx, insn->dst_reg, insn->off, BPF_SIZE(insn->code), BPF_WRITE, - -1); + -1, false); if (err) return err;
diff --combined net/batman-adv/bat_iv_ogm.c index 99abeadf416e,e21aa147607b..be09a9883825 --- a/net/batman-adv/bat_iv_ogm.c +++ b/net/batman-adv/bat_iv_ogm.c @@@ -1,5 -1,5 +1,5 @@@ // SPDX-License-Identifier: GPL-2.0 - /* Copyright (C) 2007-2017 B.A.T.M.A.N. contributors: + /* Copyright (C) 2007-2018 B.A.T.M.A.N. contributors: * * Marek Lindner, Simon Wunderlich * @@@ -157,7 -157,7 +157,7 @@@ static void batadv_iv_ogm_orig_free(str * Return: 0 on success, a negative error code otherwise. */ static int batadv_iv_ogm_orig_add_if(struct batadv_orig_node *orig_node, - int max_if_num) + unsigned int max_if_num) { void *data_ptr; size_t old_size; @@@ -201,8 -201,7 +201,8 @@@ unlock */ static void batadv_iv_ogm_drop_bcast_own_entry(struct batadv_orig_node *orig_node, - int max_if_num, int del_if_num) + unsigned int max_if_num, + unsigned int del_if_num) { size_t chunk_size; size_t if_offset; @@@ -240,8 -239,7 +240,8 @@@ */ static void batadv_iv_ogm_drop_bcast_own_sum_entry(struct batadv_orig_node *orig_node, - int max_if_num, int del_if_num) + unsigned int max_if_num, + unsigned int del_if_num) { size_t if_offset; void *data_ptr; @@@ -278,8 -276,7 +278,8 @@@ * Return: 0 on success, a negative error code otherwise. */ static int batadv_iv_ogm_orig_del_if(struct batadv_orig_node *orig_node, - int max_if_num, int del_if_num) + unsigned int max_if_num, + unsigned int del_if_num) { spin_lock_bh(&orig_node->bat_iv.ogm_cnt_lock);
@@@ -314,8 -311,7 +314,8 @@@ static struct batadv_orig_node batadv_iv_ogm_orig_get(struct batadv_priv *bat_priv, const u8 *addr) { struct batadv_orig_node *orig_node; - int size, hash_added; + int hash_added; + size_t size;
orig_node = batadv_orig_hash_find(bat_priv, addr); if (orig_node) @@@ -897,7 -893,7 +897,7 @@@ batadv_iv_ogm_slide_own_bcast_window(st u32 i; size_t word_index; u8 *w; - int if_num; + unsigned int if_num;
for (i = 0; i < hash->size; i++) { head = &hash->table[i]; @@@ -1027,7 -1023,7 +1027,7 @@@ batadv_iv_ogm_orig_update(struct batadv struct batadv_neigh_node *tmp_neigh_node = NULL; struct batadv_neigh_node *router = NULL; struct batadv_orig_node *orig_node_tmp; - int if_num; + unsigned int if_num; u8 sum_orig, sum_neigh; u8 *neigh_addr; u8 tq_avg; @@@ -1186,7 -1182,7 +1186,7 @@@ static bool batadv_iv_ogm_calc_tq(struc u8 total_count; u8 orig_eq_count, neigh_rq_count, neigh_rq_inv, tq_own; unsigned int neigh_rq_inv_cube, neigh_rq_max_cube; - int if_num; + unsigned int if_num; unsigned int tq_asym_penalty, inv_asym_penalty; unsigned int combined_tq; unsigned int tq_iface_penalty; @@@ -1706,9 -1702,9 +1706,9 @@@ static void batadv_iv_ogm_process(cons
if (is_my_orig) { unsigned long *word; - int offset; + size_t offset; s32 bit_pos; - s16 if_num; + unsigned int if_num; u8 *weight;
orig_neigh_node = batadv_iv_ogm_orig_get(bat_priv, @@@ -2733,7 -2729,7 +2733,7 @@@ static int batadv_iv_gw_dump_entry(stru struct batadv_neigh_ifinfo *router_ifinfo = NULL; struct batadv_neigh_node *router; struct batadv_gw_node *curr_gw; - int ret = -EINVAL; + int ret = 0; void *hdr;
router = batadv_orig_router_get(gw_node->orig_node, BATADV_IF_DEFAULT); diff --combined net/batman-adv/bat_v.c index c74f81341dab,9c3a34b65b15..ec93337ee259 --- a/net/batman-adv/bat_v.c +++ b/net/batman-adv/bat_v.c @@@ -1,5 -1,5 +1,5 @@@ // SPDX-License-Identifier: GPL-2.0 - /* Copyright (C) 2013-2017 B.A.T.M.A.N. contributors: + /* Copyright (C) 2013-2018 B.A.T.M.A.N. contributors: * * Linus Lüssing, Marek Lindner * @@@ -928,7 -928,7 +928,7 @@@ static int batadv_v_gw_dump_entry(struc struct batadv_neigh_ifinfo *router_ifinfo = NULL; struct batadv_neigh_node *router; struct batadv_gw_node *curr_gw; - int ret = -EINVAL; + int ret = 0; void *hdr;
router = batadv_orig_router_get(gw_node->orig_node, BATADV_IF_DEFAULT); diff --combined net/batman-adv/bridge_loop_avoidance.c index b1a08374088b,8ff81346ff0c..a2de5a44bd41 --- a/net/batman-adv/bridge_loop_avoidance.c +++ b/net/batman-adv/bridge_loop_avoidance.c @@@ -1,5 -1,5 +1,5 @@@ // SPDX-License-Identifier: GPL-2.0 - /* Copyright (C) 2011-2017 B.A.T.M.A.N. contributors: + /* Copyright (C) 2011-2018 B.A.T.M.A.N. contributors: * * Simon Wunderlich * @@@ -2161,25 -2161,22 +2161,25 @@@ batadv_bla_claim_dump_bucket(struct sk_ { struct batadv_bla_claim *claim; int idx = 0; + int ret = 0;
rcu_read_lock(); hlist_for_each_entry_rcu(claim, head, hash_entry) { if (idx++ < *idx_skip) continue; - if (batadv_bla_claim_dump_entry(msg, portid, seq, - primary_if, claim)) { + + ret = batadv_bla_claim_dump_entry(msg, portid, seq, + primary_if, claim); + if (ret) { *idx_skip = idx - 1; goto unlock; } }
- *idx_skip = idx; + *idx_skip = 0; unlock: rcu_read_unlock(); - return 0; + return ret; }
/** @@@ -2394,25 -2391,22 +2394,25 @@@ batadv_bla_backbone_dump_bucket(struct { struct batadv_bla_backbone_gw *backbone_gw; int idx = 0; + int ret = 0;
rcu_read_lock(); hlist_for_each_entry_rcu(backbone_gw, head, hash_entry) { if (idx++ < *idx_skip) continue; - if (batadv_bla_backbone_dump_entry(msg, portid, seq, - primary_if, backbone_gw)) { + + ret = batadv_bla_backbone_dump_entry(msg, portid, seq, + primary_if, backbone_gw); + if (ret) { *idx_skip = idx - 1; goto unlock; } }
- *idx_skip = idx; + *idx_skip = 0; unlock: rcu_read_unlock(); - return 0; + return ret; }
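Editor's note: both bridge-loop-avoidance dump fixes above repair the same two bugs in the resumable-dump logic. On a full message the per-bucket skip index is recorded for the next round, but on completion it must be reset to 0 (not left at the bucket length, which would wrongly skip entries of the following bucket), and the entry callback's error must be propagated instead of flattened to 0. Skeleton of the corrected walk:

    /* returns 0 when the bucket was fully emitted, <0 when the message
     * ran out of room and the dump must resume at *idx_skip */
    static int dump_bucket(const int *bucket, int n, int *idx_skip,
                           int (*emit)(int entry))
    {
            int idx = 0;
            int ret = 0;
            int i;

            for (i = 0; i < n; i++) {
                    if (idx++ < *idx_skip)
                            continue;       /* already sent in a previous pass */
                    ret = emit(bucket[i]);
                    if (ret) {
                            *idx_skip = idx - 1; /* resume from this entry */
                            return ret;
                    }
            }
            *idx_skip = 0;  /* done: the next bucket starts from its top */
            return 0;
    }

    static int emit_ok(int entry) { (void)entry; return 0; }

    int main(void)
    {
            int bucket[3] = { 1, 2, 3 };
            int idx_skip = 0;

            return dump_bucket(bucket, 3, &idx_skip, emit_ok);
    }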
/** diff --combined net/batman-adv/fragmentation.c index 5afe641ee4b0,d815acc13c35..0fddc17106bd --- a/net/batman-adv/fragmentation.c +++ b/net/batman-adv/fragmentation.c @@@ -1,5 -1,5 +1,5 @@@ // SPDX-License-Identifier: GPL-2.0 - /* Copyright (C) 2013-2017 B.A.T.M.A.N. contributors: + /* Copyright (C) 2013-2018 B.A.T.M.A.N. contributors: * * Martin Hundebøll martin@hundeboll.net * @@@ -288,8 -288,7 +288,8 @@@ batadv_frag_merge_packets(struct hlist_ /* Move the existing MAC header to just before the payload. (Override * the fragment header.) */ - skb_pull_rcsum(skb_out, hdr_size); + skb_pull(skb_out, hdr_size); + skb_out->ip_summed = CHECKSUM_NONE; memmove(skb_out->data - ETH_HLEN, skb_mac_header(skb_out), ETH_HLEN); skb_set_mac_header(skb_out, -ETH_HLEN); skb_reset_network_header(skb_out); diff --combined net/batman-adv/hard-interface.c index 68b54a39c51d,fd4a263dd6b7..c405d15befd6 --- a/net/batman-adv/hard-interface.c +++ b/net/batman-adv/hard-interface.c @@@ -1,5 -1,5 +1,5 @@@ // SPDX-License-Identifier: GPL-2.0 - /* Copyright (C) 2007-2017 B.A.T.M.A.N. contributors: + /* Copyright (C) 2007-2018 B.A.T.M.A.N. contributors: * * Marek Lindner, Simon Wunderlich * @@@ -763,11 -763,6 +763,11 @@@ int batadv_hardif_enable_interface(stru hard_iface->soft_iface = soft_iface; bat_priv = netdev_priv(hard_iface->soft_iface);
+ if (bat_priv->num_ifaces >= UINT_MAX) { + ret = -ENOSPC; + goto err_dev; + } + ret = netdev_master_upper_dev_link(hard_iface->net_dev, soft_iface, NULL, NULL, NULL); if (ret) @@@ -881,7 -876,7 +881,7 @@@ void batadv_hardif_disable_interface(st batadv_hardif_recalc_extra_skbroom(hard_iface->soft_iface);
/* nobody uses this interface anymore */ - if (!bat_priv->num_ifaces) { + if (bat_priv->num_ifaces == 0) { batadv_gw_check_client_stop(bat_priv);
if (autodel == BATADV_IF_CLEANUP_AUTO) @@@ -917,7 -912,7 +917,7 @@@ batadv_hardif_add_interface(struct net_ if (ret) goto free_if;
- hard_iface->if_num = -1; + hard_iface->if_num = 0; hard_iface->net_dev = net_dev; hard_iface->soft_iface = NULL; hard_iface->if_status = BATADV_IF_NOT_IN_USE; diff --combined net/batman-adv/originator.c index 74782426bb77,2a51a0cbb82a..716e5b43acfa --- a/net/batman-adv/originator.c +++ b/net/batman-adv/originator.c @@@ -1,5 -1,5 +1,5 @@@ // SPDX-License-Identifier: GPL-2.0 - /* Copyright (C) 2009-2017 B.A.T.M.A.N. contributors: + /* Copyright (C) 2009-2018 B.A.T.M.A.N. contributors: * * Marek Lindner, Simon Wunderlich * @@@ -1569,7 -1569,7 +1569,7 @@@ int batadv_orig_dump(struct sk_buff *ms * Return: 0 on success or negative error number in case of failure */ int batadv_orig_hash_add_if(struct batadv_hard_iface *hard_iface, - int max_if_num) + unsigned int max_if_num) { struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface); struct batadv_algo_ops *bao = bat_priv->algo_ops; @@@ -1611,7 -1611,7 +1611,7 @@@ err * Return: 0 on success or negative error number in case of failure */ int batadv_orig_hash_del_if(struct batadv_hard_iface *hard_iface, - int max_if_num) + unsigned int max_if_num) { struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface); struct batadv_hashtable *hash = bat_priv->orig_hash; diff --combined net/batman-adv/originator.h index 15d896b2de6f,f3601ab0872e..3b3f59b881e1 --- a/net/batman-adv/originator.h +++ b/net/batman-adv/originator.h @@@ -1,5 -1,5 +1,5 @@@ /* SPDX-License-Identifier: GPL-2.0 */ - /* Copyright (C) 2007-2017 B.A.T.M.A.N. contributors: + /* Copyright (C) 2007-2018 B.A.T.M.A.N. contributors: * * Marek Lindner, Simon Wunderlich * @@@ -73,9 -73,9 +73,9 @@@ int batadv_orig_seq_print_text(struct s int batadv_orig_dump(struct sk_buff *msg, struct netlink_callback *cb); int batadv_orig_hardif_seq_print_text(struct seq_file *seq, void *offset); int batadv_orig_hash_add_if(struct batadv_hard_iface *hard_iface, - int max_if_num); + unsigned int max_if_num); int batadv_orig_hash_del_if(struct batadv_hard_iface *hard_iface, - int max_if_num); + unsigned int max_if_num); struct batadv_orig_node_vlan * batadv_orig_node_vlan_new(struct batadv_orig_node *orig_node, unsigned short vid); diff --combined net/batman-adv/soft-interface.c index 367a81fb785f,c95e2b2677fd..edeffcb9f3a2 --- a/net/batman-adv/soft-interface.c +++ b/net/batman-adv/soft-interface.c @@@ -1,5 -1,5 +1,5 @@@ // SPDX-License-Identifier: GPL-2.0 - /* Copyright (C) 2007-2017 B.A.T.M.A.N. contributors: + /* Copyright (C) 2007-2018 B.A.T.M.A.N. contributors: * * Marek Lindner, Simon Wunderlich * @@@ -459,7 -459,13 +459,7 @@@ void batadv_interface_rx(struct net_dev
/* skb->dev & skb->pkt_type are set here */ skb->protocol = eth_type_trans(skb, soft_iface); - - /* should not be necessary anymore as we use skb_pull_rcsum() - * TODO: please verify this and remove this TODO - * -- Dec 21st 2009, Simon Wunderlich - */ - - /* skb->ip_summed = CHECKSUM_UNNECESSARY; */ + skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);
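The two batman-adv checksum hunks above are two halves of the same fix: once header bytes are pulled off the front of an skb whose checksum covers them, the stored sum must either be folded back out (skb_postpull_rcsum()) or declared invalid (ip_summed = CHECKSUM_NONE). A minimal standalone sketch of the adjustment arithmetic — userspace C with simplified, invented helpers, not the kernel's csum API:

#include <stdint.h>
#include <stdio.h>
#include <stddef.h>

/* Fold a 32-bit accumulator down to 16 bits, ones'-complement style. */
static uint32_t csum_fold16(uint32_t sum)
{
	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);
	return sum;
}

/* Folded Internet-checksum partial sum over a byte range. */
static uint32_t csum_over(const uint8_t *buf, size_t len)
{
	uint32_t sum = 0;

	while (len > 1) {
		sum += ((uint32_t)buf[0] << 8) | buf[1];
		buf += 2;
		len -= 2;
	}
	if (len)
		sum += (uint32_t)buf[0] << 8;
	return csum_fold16(sum);
}

int main(void)
{
	uint8_t pkt[32];
	size_t hdr = 8;		/* bytes "pulled" off the front */

	for (size_t i = 0; i < sizeof(pkt); i++)
		pkt[i] = (uint8_t)i;

	uint32_t full = csum_over(pkt, sizeof(pkt));
	uint32_t pulled = csum_over(pkt, hdr);

	/* Ones'-complement subtraction of the pulled bytes, the moral
	 * equivalent of skb_postpull_rcsum(): sum - x == sum + ~x. */
	uint32_t adjusted = csum_fold16(full + (0xffff - pulled));

	/* Both lines print the same value (0xe4f0 for this data). */
	printf("recomputed over payload: %#x\n",
	       csum_over(pkt + hdr, sizeof(pkt) - hdr));
	printf("adjusted running sum:    %#x\n", adjusted);
	return 0;
}

When the adjustment is not practical, as in the fragment-merge path, invalidating the checksum outright is the conservative alternative the hunk chooses.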
batadv_inc_counter(bat_priv, BATADV_CNT_RX); batadv_add_counter(bat_priv, BATADV_CNT_RX_BYTES, diff --combined net/batman-adv/types.h index a5aa6d61f4e2,4a3b8837e1b5..476b052ad982 --- a/net/batman-adv/types.h +++ b/net/batman-adv/types.h @@@ -1,5 -1,5 +1,5 @@@ /* SPDX-License-Identifier: GPL-2.0 */ - /* Copyright (C) 2007-2017 B.A.T.M.A.N. contributors: + /* Copyright (C) 2007-2018 B.A.T.M.A.N. contributors: * * Marek Lindner, Simon Wunderlich * @@@ -167,7 -167,7 +167,7 @@@ struct batadv_hard_iface struct list_head list;
/** @if_num: identifier of the interface */ - s16 if_num; + unsigned int if_num;
/** @if_status: status of the interface for batman-adv */ char if_status; @@@ -1596,7 -1596,7 +1596,7 @@@ struct batadv_priv atomic_t batman_queue_left;
/** @num_ifaces: number of interfaces assigned to this mesh interface */ - char num_ifaces; + unsigned int num_ifaces;
/** @mesh_obj: kobject for sysfs mesh subdirectory */ struct kobject *mesh_obj; @@@ -2186,16 -2186,15 +2186,16 @@@ struct batadv_algo_orig_ops * orig_node due to a new hard-interface being added into the mesh * (optional) */ - int (*add_if)(struct batadv_orig_node *orig_node, int max_if_num); + int (*add_if)(struct batadv_orig_node *orig_node, + unsigned int max_if_num);
/** * @del_if: ask the routing algorithm to apply the needed changes to the * orig_node due to a hard-interface being removed from the mesh * (optional) */ - int (*del_if)(struct batadv_orig_node *orig_node, int max_if_num, - int del_if_num); + int (*del_if)(struct batadv_orig_node *orig_node, + unsigned int max_if_num, unsigned int del_if_num);
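Alongside these prototype changes, the earlier hard-interface hunks replace narrow counters (s16 if_num, char num_ifaces) with unsigned int and add an explicit -ENOSPC guard before incrementing. A standalone sketch of the failure mode being closed — invented struct and names, not batman-adv code:

#include <limits.h>
#include <stdio.h>

struct mesh {
	unsigned int num_ifaces;
};

static int add_iface(struct mesh *m)
{
	if (m->num_ifaces >= UINT_MAX)	/* mirrors the new -ENOSPC check */
		return -1;
	m->num_ifaces++;
	return 0;
}

int main(void)
{
	/* What a plain 'char' counter allowed: on the usual signed-char
	 * ABIs the 128th interface wraps the count negative. */
	signed char old_style = 127;
	old_style++;
	printf("signed char after one more add: %d\n", old_style);

	struct mesh m = { .num_ifaces = 0 };
	for (int i = 0; i < 1000; i++)
		add_iface(&m);
	printf("unsigned int counter: %u\n", m.num_ifaces);
	return 0;
}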
#ifdef CONFIG_BATMAN_ADV_DEBUGFS /** @print: print the originator table (optional) */ diff --combined net/bridge/br_netfilter_hooks.c index 9b16eaf33819,484f54150525..c2120eb889a9 --- a/net/bridge/br_netfilter_hooks.c +++ b/net/bridge/br_netfilter_hooks.c @@@ -214,7 -214,7 +214,7 @@@ static int br_validate_ipv4(struct net
iph = ip_hdr(skb); if (unlikely(ip_fast_csum((u8 *)iph, iph->ihl))) - goto inhdr_error; + goto csum_error;
len = ntohs(iph->tot_len); if (skb->len < len) { @@@ -236,8 -236,6 +236,8 @@@ */ return 0;
+csum_error: + __IP_INC_STATS(net, IPSTATS_MIB_CSUMERRORS); inhdr_error: __IP_INC_STATS(net, IPSTATS_MIB_INHDRERRORS); drop: @@@ -969,6 -967,7 +969,7 @@@ static struct pernet_operations brnf_ne .exit = brnf_exit_net, .id = &brnf_net_id, .size = sizeof(struct brnf_net), + .async = true, };
static struct notifier_block brnf_notifier __read_mostly = { diff --combined net/core/dev.c index 2cedf520cb28,8b51f923ce99..e5b8d42b6410 --- a/net/core/dev.c +++ b/net/core/dev.c @@@ -2378,7 -2378,7 +2378,7 @@@ EXPORT_SYMBOL(netdev_set_num_tc)
/* * Routine to help set real_num_tx_queues. To avoid skbs mapped to queues - * greater then real_num_tx_queues stale skbs on the qdisc must be flushed. + * greater than real_num_tx_queues stale skbs on the qdisc must be flushed. */ int netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq) { @@@ -6396,7 -6396,6 +6396,7 @@@ static int __netdev_upper_dev_link(stru .linking = true, .upper_info = upper_info, }; + struct net_device *master_dev; int ret = 0;
ASSERT_RTNL(); @@@ -6408,14 -6407,11 +6408,14 @@@ if (netdev_has_upper_dev(upper_dev, dev)) return -EBUSY;
- if (netdev_has_upper_dev(dev, upper_dev)) - return -EEXIST; - - if (master && netdev_master_upper_dev_get(dev)) - return -EBUSY; + if (!master) { + if (netdev_has_upper_dev(dev, upper_dev)) + return -EEXIST; + } else { + master_dev = netdev_master_upper_dev_get(dev); + if (master_dev) + return master_dev == upper_dev ? -EEXIST : -EBUSY; + }
ret = call_netdevice_notifiers_info(NETDEV_PRECHANGEUPPER, &changeupper_info.info); @@@ -7546,6 -7542,12 +7546,12 @@@ static netdev_features_t netdev_fix_fea } }
+ /* LRO feature cannot be combined with RX-FCS */ + if ((features & NETIF_F_LRO) && (features & NETIF_F_RXFCS)) { + netdev_dbg(dev, "Dropping LRO feature since RX-FCS is requested.\n"); + features &= ~NETIF_F_LRO; + } + return features; }
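The netdev_fix_features() hunk above makes LRO and RX-FCS mutually exclusive, presumably because coalescing received frames cannot preserve each individual frame's trailing FCS. The fixup pattern is worth seeing in isolation; a standalone sketch with invented bit values:

#include <stdio.h>

#define F_LRO	(1u << 0)
#define F_RXFCS	(1u << 1)

/* Resolve conflicting offload requests: keep RX-FCS, drop LRO. */
static unsigned int fix_features(unsigned int features)
{
	if ((features & F_LRO) && (features & F_RXFCS)) {
		printf("dropping LRO since RX-FCS is requested\n");
		features &= ~F_LRO;
	}
	return features;
}

int main(void)
{
	unsigned int f = fix_features(F_LRO | F_RXFCS);

	printf("resulting mask: %#x\n", f);	/* F_RXFCS only */
	return 0;
}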
@@@ -8145,8 -8147,9 +8151,9 @@@ void netdev_run_todo(void BUG_ON(!list_empty(&dev->ptype_specific)); WARN_ON(rcu_access_pointer(dev->ip_ptr)); WARN_ON(rcu_access_pointer(dev->ip6_ptr)); + #if IS_ENABLED(CONFIG_DECNET) WARN_ON(dev->dn_ptr); - + #endif if (dev->priv_destructor) dev->priv_destructor(dev); if (dev->needs_free_netdev) @@@ -8844,6 -8847,7 +8851,7 @@@ static void __net_exit netdev_exit(stru static struct pernet_operations __net_initdata netdev_net_ops = { .init = netdev_init, .exit = netdev_exit, + .async = true, };
static void __net_exit default_device_exit(struct net *net) @@@ -8944,6 -8948,7 +8952,7 @@@ static void __net_exit default_device_e static struct pernet_operations __net_initdata default_device_ops = { .exit = default_device_exit, .exit_batch = default_device_exit_batch, + .async = true, };
/* diff --combined net/core/devlink.c index 2f2307d94787,88e846779269..1b5bf0d1cee9 --- a/net/core/devlink.c +++ b/net/core/devlink.c @@@ -1695,11 -1695,10 +1695,11 @@@ static int devlink_dpipe_table_put(stru goto nla_put_failure;
if (table->resource_valid) { - nla_put_u64_64bit(skb, DEVLINK_ATTR_DPIPE_TABLE_RESOURCE_ID, - table->resource_id, DEVLINK_ATTR_PAD); - nla_put_u64_64bit(skb, DEVLINK_ATTR_DPIPE_TABLE_RESOURCE_UNITS, - table->resource_units, DEVLINK_ATTR_PAD); + if (nla_put_u64_64bit(skb, DEVLINK_ATTR_DPIPE_TABLE_RESOURCE_ID, + table->resource_id, DEVLINK_ATTR_PAD) || + nla_put_u64_64bit(skb, DEVLINK_ATTR_DPIPE_TABLE_RESOURCE_UNITS, + table->resource_units, DEVLINK_ATTR_PAD)) + goto nla_put_failure; } if (devlink_dpipe_matches_put(table, skb)) goto nla_put_failure; @@@ -2333,12 -2332,38 +2333,38 @@@ devlink_resource_validate_children(stru list_for_each_entry(child_resource, &resource->resource_list, list) parts_size += child_resource->size_new;
- if (parts_size > resource->size) + if (parts_size > resource->size_new) size_valid = false; out: resource->size_valid = size_valid; }
+ static int + devlink_resource_validate_size(struct devlink_resource *resource, u64 size, + struct netlink_ext_ack *extack) + { + u64 remainder; + int err = 0; + - if (size > resource->size_params->size_max) { ++ if (size > resource->size_params.size_max) { + NL_SET_ERR_MSG_MOD(extack, "Size larger than maximum"); + err = -EINVAL; + } + - if (size < resource->size_params->size_min) { ++ if (size < resource->size_params.size_min) { + NL_SET_ERR_MSG_MOD(extack, "Size smaller than minimum"); + err = -EINVAL; + } + - div64_u64_rem(size, resource->size_params->size_granularity, &remainder); ++ div64_u64_rem(size, resource->size_params.size_granularity, &remainder); + if (remainder) { + NL_SET_ERR_MSG_MOD(extack, "Wrong granularity"); + err = -EINVAL; + } + + return err; + } + static int devlink_nl_cmd_resource_set(struct sk_buff *skb, struct genl_info *info) { @@@ -2357,12 -2382,8 +2383,8 @@@ if (!resource) return -EINVAL;
- if (!resource->resource_ops->size_validate) - return -EINVAL; - size = nla_get_u64(info->attrs[DEVLINK_ATTR_RESOURCE_SIZE]); - err = resource->resource_ops->size_validate(devlink, size, - info->extack); + err = devlink_resource_validate_size(resource, size, info->extack); if (err) return err;
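With the per-driver size_validate callback removed, devlink now validates resource sizes centrally: the new size must not exceed the maximum, not fall below the minimum, and be a multiple of the granularity. A standalone sketch of the same three checks — plain strings standing in for extack, returning at the first failure rather than accumulating, and assuming a nonzero granularity:

#include <stdint.h>
#include <stdio.h>

struct size_params {
	uint64_t size_min;
	uint64_t size_max;
	uint64_t size_granularity;	/* assumed nonzero */
};

static int validate_size(const struct size_params *p, uint64_t size,
			 const char **errmsg)
{
	if (size > p->size_max) {
		*errmsg = "Size larger than maximum";
		return -1;
	}
	if (size < p->size_min) {
		*errmsg = "Size smaller than minimum";
		return -1;
	}
	if (size % p->size_granularity) {	/* div64_u64_rem() upstream */
		*errmsg = "Wrong granularity";
		return -1;
	}
	return 0;
}

int main(void)
{
	struct size_params p = { .size_min = 128, .size_max = 4096,
				 .size_granularity = 128 };
	uint64_t sizes[] = { 256, 100, 8192, 300 };
	const char *msg = "";

	for (int i = 0; i < 4; i++) {
		if (validate_size(&p, sizes[i], &msg))
			printf("%llu rejected: %s\n",
			       (unsigned long long)sizes[i], msg);
		else
			printf("%llu ok\n", (unsigned long long)sizes[i]);
	}
	return 0;
}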
@@@ -2373,22 -2394,20 +2395,22 @@@ return 0; }
-static void +static int devlink_resource_size_params_put(struct devlink_resource *resource, struct sk_buff *skb) { struct devlink_resource_size_params *size_params;
- size_params = resource->size_params; - nla_put_u64_64bit(skb, DEVLINK_ATTR_RESOURCE_SIZE_GRAN, - size_params->size_granularity, DEVLINK_ATTR_PAD); - nla_put_u64_64bit(skb, DEVLINK_ATTR_RESOURCE_SIZE_MAX, - size_params->size_max, DEVLINK_ATTR_PAD); - nla_put_u64_64bit(skb, DEVLINK_ATTR_RESOURCE_SIZE_MIN, - size_params->size_min, DEVLINK_ATTR_PAD); - nla_put_u8(skb, DEVLINK_ATTR_RESOURCE_UNIT, size_params->unit); + size_params = &resource->size_params; + if (nla_put_u64_64bit(skb, DEVLINK_ATTR_RESOURCE_SIZE_GRAN, + size_params->size_granularity, DEVLINK_ATTR_PAD) || + nla_put_u64_64bit(skb, DEVLINK_ATTR_RESOURCE_SIZE_MAX, + size_params->size_max, DEVLINK_ATTR_PAD) || + nla_put_u64_64bit(skb, DEVLINK_ATTR_RESOURCE_SIZE_MIN, + size_params->size_min, DEVLINK_ATTR_PAD) || + nla_put_u8(skb, DEVLINK_ATTR_RESOURCE_UNIT, size_params->unit)) + return -EMSGSIZE; + return 0; }
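Several devlink hunks here share a theme: bare nla_put_*() calls become short-circuited || chains that bail out to nla_put_failure, because each put can fail with -EMSGSIZE once the message buffer fills. A standalone sketch of what ignoring those returns loses — toy fixed-size buffer, invented names:

#include <stdio.h>
#include <string.h>

struct msg {
	char buf[16];
	size_t used;
};

static int put_bytes(struct msg *m, const char *data, size_t len)
{
	if (m->used + len > sizeof(m->buf))
		return -1;		/* -EMSGSIZE upstream */
	memcpy(m->buf + m->used, data, len);
	m->used += len;
	return 0;
}

static int fill(struct msg *m)
{
	/* Short-circuit: the first failing put stops the chain and the
	 * caller learns the message was truncated. */
	if (put_bytes(m, "GRAN", 4) ||
	    put_bytes(m, "MAX_", 4) ||
	    put_bytes(m, "MIN_", 4) ||
	    put_bytes(m, "UNIT", 4) ||
	    put_bytes(m, "OVER", 4))	/* no longer fits */
		return -1;
	return 0;
}

int main(void)
{
	struct msg m = { .used = 0 };

	printf("fill: %d (used %zu of %zu)\n",
	       fill(&m), m.used, sizeof(m.buf));
	return 0;
}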
static int devlink_resource_put(struct devlink *devlink, struct sk_buff *skb, @@@ -2412,12 -2431,10 +2434,12 @@@ nla_put_u64_64bit(skb, DEVLINK_ATTR_RESOURCE_SIZE_NEW, resource->size_new, DEVLINK_ATTR_PAD); if (resource->resource_ops && resource->resource_ops->occ_get) - nla_put_u64_64bit(skb, DEVLINK_ATTR_RESOURCE_OCC, - resource->resource_ops->occ_get(devlink), - DEVLINK_ATTR_PAD); - devlink_resource_size_params_put(resource, skb); + if (nla_put_u64_64bit(skb, DEVLINK_ATTR_RESOURCE_OCC, + resource->resource_ops->occ_get(devlink), + DEVLINK_ATTR_PAD)) + goto nla_put_failure; + if (devlink_resource_size_params_put(resource, skb)) + goto nla_put_failure; if (list_empty(&resource->resource_list)) goto out;
@@@ -3156,7 -3173,7 +3178,7 @@@ int devlink_resource_register(struct de u64 resource_size, u64 resource_id, u64 parent_resource_id, - struct devlink_resource_size_params *size_params, + const struct devlink_resource_size_params *size_params, const struct devlink_resource_ops *resource_ops) { struct devlink_resource *resource; @@@ -3199,8 -3216,7 +3221,8 @@@ resource->id = resource_id; resource->resource_ops = resource_ops; resource->size_valid = true; - resource->size_params = size_params; + memcpy(&resource->size_params, size_params, + sizeof(resource->size_params)); INIT_LIST_HEAD(&resource->resource_list); list_add_tail(&resource->list, resource_list); out: diff --combined net/core/filter.c index 48aa7c7320db,33edfa8372fd..948d9a8ddcad --- a/net/core/filter.c +++ b/net/core/filter.c @@@ -2087,10 -2087,6 +2087,10 @@@ static int bpf_skb_proto_4_to_6(struct u32 off = skb_mac_header_len(skb); int ret;
+ /* SCTP uses GSO_BY_FRAGS, thus cannot adjust it. */ + if (skb_is_gso(skb) && unlikely(skb_is_gso_sctp(skb))) + return -ENOTSUPP; + ret = skb_cow(skb, len_diff); if (unlikely(ret < 0)) return ret; @@@ -2100,21 -2096,19 +2100,21 @@@ return ret;
if (skb_is_gso(skb)) { + struct skb_shared_info *shinfo = skb_shinfo(skb); + /* SKB_GSO_TCPV4 needs to be changed into * SKB_GSO_TCPV6. */ - if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4) { - skb_shinfo(skb)->gso_type &= ~SKB_GSO_TCPV4; - skb_shinfo(skb)->gso_type |= SKB_GSO_TCPV6; + if (shinfo->gso_type & SKB_GSO_TCPV4) { + shinfo->gso_type &= ~SKB_GSO_TCPV4; + shinfo->gso_type |= SKB_GSO_TCPV6; }
/* Due to IPv6 header, MSS needs to be downgraded. */ - skb_shinfo(skb)->gso_size -= len_diff; + skb_decrease_gso_size(shinfo, len_diff); /* Header must be checked, and gso_segs recomputed. */ - skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY; - skb_shinfo(skb)->gso_segs = 0; + shinfo->gso_type |= SKB_GSO_DODGY; + shinfo->gso_segs = 0; }
skb->protocol = htons(ETH_P_IPV6); @@@ -2129,10 -2123,6 +2129,10 @@@ static int bpf_skb_proto_6_to_4(struct u32 off = skb_mac_header_len(skb); int ret;
+ /* SCTP uses GSO_BY_FRAGS, thus cannot adjust it. */ + if (skb_is_gso(skb) && unlikely(skb_is_gso_sctp(skb))) + return -ENOTSUPP; + ret = skb_unclone(skb, GFP_ATOMIC); if (unlikely(ret < 0)) return ret; @@@ -2142,21 -2132,19 +2142,21 @@@ return ret;
if (skb_is_gso(skb)) { + struct skb_shared_info *shinfo = skb_shinfo(skb); + /* SKB_GSO_TCPV6 needs to be changed into * SKB_GSO_TCPV4. */ - if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) { - skb_shinfo(skb)->gso_type &= ~SKB_GSO_TCPV6; - skb_shinfo(skb)->gso_type |= SKB_GSO_TCPV4; + if (shinfo->gso_type & SKB_GSO_TCPV6) { + shinfo->gso_type &= ~SKB_GSO_TCPV6; + shinfo->gso_type |= SKB_GSO_TCPV4; }
/* Due to IPv4 header, MSS can be upgraded. */ - skb_shinfo(skb)->gso_size += len_diff; + skb_increase_gso_size(shinfo, len_diff); /* Header must be checked, and gso_segs recomputed. */ - skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY; - skb_shinfo(skb)->gso_segs = 0; + shinfo->gso_type |= SKB_GSO_DODGY; + shinfo->gso_segs = 0; }
skb->protocol = htons(ETH_P_IP); @@@ -2255,10 -2243,6 +2255,10 @@@ static int bpf_skb_net_grow(struct sk_b u32 off = skb_mac_header_len(skb) + bpf_skb_net_base_len(skb); int ret;
+ /* SCTP uses GSO_BY_FRAGS, thus cannot adjust it. */ + if (skb_is_gso(skb) && unlikely(skb_is_gso_sctp(skb))) + return -ENOTSUPP; + ret = skb_cow(skb, len_diff); if (unlikely(ret < 0)) return ret; @@@ -2268,13 -2252,11 +2268,13 @@@ return ret;
if (skb_is_gso(skb)) { + struct skb_shared_info *shinfo = skb_shinfo(skb); + /* Due to header grow, MSS needs to be downgraded. */ - skb_shinfo(skb)->gso_size -= len_diff; + skb_decrease_gso_size(shinfo, len_diff); /* Header must be checked, and gso_segs recomputed. */ - skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY; - skb_shinfo(skb)->gso_segs = 0; + shinfo->gso_type |= SKB_GSO_DODGY; + shinfo->gso_segs = 0; }
return 0; @@@ -2285,10 -2267,6 +2285,10 @@@ static int bpf_skb_net_shrink(struct sk u32 off = skb_mac_header_len(skb) + bpf_skb_net_base_len(skb); int ret;
+ /* SCTP uses GSO_BY_FRAGS, thus cannot adjust it. */ + if (skb_is_gso(skb) && unlikely(skb_is_gso_sctp(skb))) + return -ENOTSUPP; + ret = skb_unclone(skb, GFP_ATOMIC); if (unlikely(ret < 0)) return ret; @@@ -2298,13 -2276,11 +2298,13 @@@ return ret;
if (skb_is_gso(skb)) { + struct skb_shared_info *shinfo = skb_shinfo(skb); + /* Due to header shrink, MSS can be upgraded. */ - skb_shinfo(skb)->gso_size += len_diff; + skb_increase_gso_size(shinfo, len_diff); /* Header must be checked, and gso_segs recomputed. */ - skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY; - skb_shinfo(skb)->gso_segs = 0; + shinfo->gso_type |= SKB_GSO_DODGY; + shinfo->gso_segs = 0; }
return 0; @@@ -3015,7 -2991,7 +3015,7 @@@ BPF_CALL_4(bpf_skb_set_tunnel_key, stru struct ip_tunnel_info *info;
if (unlikely(flags & ~(BPF_F_TUNINFO_IPV6 | BPF_F_ZERO_CSUM_TX | - BPF_F_DONT_FRAGMENT))) + BPF_F_DONT_FRAGMENT | BPF_F_SEQ_NUMBER))) return -EINVAL; if (unlikely(size != sizeof(struct bpf_tunnel_key))) { switch (size) { @@@ -3049,6 -3025,8 +3049,8 @@@ info->key.tun_flags |= TUNNEL_DONT_FRAGMENT; if (flags & BPF_F_ZERO_CSUM_TX) info->key.tun_flags &= ~TUNNEL_CSUM; + if (flags & BPF_F_SEQ_NUMBER) + info->key.tun_flags |= TUNNEL_SEQ;
info->key.tun_id = cpu_to_be64(from->tunnel_id); info->key.tos = from->tunnel_tos; diff --combined net/core/skbuff.c index 0bb0d8877954,96d36b81a3a5..715c13495ba6 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c @@@ -77,8 -77,8 +77,8 @@@ #include <linux/capability.h> #include <linux/user_namespace.h>
- struct kmem_cache *skbuff_head_cache __read_mostly; - static struct kmem_cache *skbuff_fclone_cache __read_mostly; + struct kmem_cache *skbuff_head_cache __ro_after_init; + static struct kmem_cache *skbuff_fclone_cache __ro_after_init; int sysctl_max_skb_frags __read_mostly = MAX_SKB_FRAGS; EXPORT_SYMBOL(sysctl_max_skb_frags);
@@@ -890,7 -890,7 +890,7 @@@ struct sk_buff *skb_morph(struct sk_buf } EXPORT_SYMBOL_GPL(skb_morph);
- static int mm_account_pinned_pages(struct mmpin *mmp, size_t size) + int mm_account_pinned_pages(struct mmpin *mmp, size_t size) { unsigned long max_pg, num_pg, new_pg, old_pg; struct user_struct *user; @@@ -919,14 -919,16 +919,16 @@@
return 0; } + EXPORT_SYMBOL_GPL(mm_account_pinned_pages);
- static void mm_unaccount_pinned_pages(struct mmpin *mmp) + void mm_unaccount_pinned_pages(struct mmpin *mmp) { if (mmp->user) { atomic_long_sub(mmp->num_pg, &mmp->user->locked_vm); free_uid(mmp->user); } } + EXPORT_SYMBOL_GPL(mm_unaccount_pinned_pages);
struct ubuf_info *sock_zerocopy_alloc(struct sock *sk, size_t size) { @@@ -4891,7 -4893,7 +4893,7 @@@ EXPORT_SYMBOL_GPL(skb_scrub_packet) * * The MAC/L2 or network (IP, IPv6) headers are not accounted for. */ -unsigned int skb_gso_transport_seglen(const struct sk_buff *skb) +static unsigned int skb_gso_transport_seglen(const struct sk_buff *skb) { const struct skb_shared_info *shinfo = skb_shinfo(skb); unsigned int thlen = 0; @@@ -4913,40 -4915,7 +4915,40 @@@ */ return thlen + shinfo->gso_size; } -EXPORT_SYMBOL_GPL(skb_gso_transport_seglen); + +/** + * skb_gso_network_seglen - Return length of individual segments of a gso packet + * + * @skb: GSO skb + * + * skb_gso_network_seglen is used to determine the real size of the + * individual segments, including Layer3 (IP, IPv6) and L4 headers (TCP/UDP). + * + * The MAC/L2 header is not accounted for. + */ +static unsigned int skb_gso_network_seglen(const struct sk_buff *skb) +{ + unsigned int hdr_len = skb_transport_header(skb) - + skb_network_header(skb); + + return hdr_len + skb_gso_transport_seglen(skb); +} + +/** + * skb_gso_mac_seglen - Return length of individual segments of a gso packet + * + * @skb: GSO skb + * + * skb_gso_mac_seglen is used to determine the real size of the + * individual segments, including MAC/L2, Layer3 (IP, IPv6) and L4 + * headers (TCP/UDP). + */ +static unsigned int skb_gso_mac_seglen(const struct sk_buff *skb) +{ + unsigned int hdr_len = skb_transport_header(skb) - skb_mac_header(skb); + + return hdr_len + skb_gso_transport_seglen(skb); +}
/** * skb_gso_size_check - check the skb size, considering GSO_BY_FRAGS @@@ -4988,20 -4957,19 +4990,20 @@@ static inline bool skb_gso_size_check(c }
/** - * skb_gso_validate_mtu - Return in case such skb fits a given MTU + * skb_gso_validate_network_len - Will a split GSO skb fit into a given MTU? * * @skb: GSO skb * @mtu: MTU to validate against * - * skb_gso_validate_mtu validates if a given skb will fit a wanted MTU - * once split. + * skb_gso_validate_network_len validates if a given skb will fit a + * wanted MTU once split. It considers L3 headers, L4 headers, and the + * payload. */ -bool skb_gso_validate_mtu(const struct sk_buff *skb, unsigned int mtu) +bool skb_gso_validate_network_len(const struct sk_buff *skb, unsigned int mtu) { return skb_gso_size_check(skb, skb_gso_network_seglen(skb), mtu); } -EXPORT_SYMBOL_GPL(skb_gso_validate_mtu); +EXPORT_SYMBOL_GPL(skb_gso_validate_network_len);
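The rename from skb_gso_validate_mtu() to skb_gso_validate_network_len(), next to the new mac_len variant above it, spells out which headers each check accounts for: a GSO packet fits a limit only if header length plus gso_size fits for every resulting segment. A standalone sketch of that arithmetic with invented types:

#include <stdbool.h>
#include <stdio.h>

struct gso_pkt {
	unsigned int net_hdr_len;	/* L3+L4 headers, e.g. 20+20 for TCPv4 */
	unsigned int gso_size;		/* payload carried per segment */
};

static bool gso_validate_network_len(const struct gso_pkt *p,
				     unsigned int mtu)
{
	return p->net_hdr_len + p->gso_size <= mtu;
}

int main(void)
{
	struct gso_pkt p = { .net_hdr_len = 40, .gso_size = 1448 };

	printf("fits 1500: %d\n", gso_validate_network_len(&p, 1500)); /* 1 */
	printf("fits 1400: %d\n", gso_validate_network_len(&p, 1400)); /* 0 */
	return 0;
}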
/** * skb_gso_validate_mac_len - Will a split GSO skb fit in a given length? diff --combined net/ipv4/ip_gre.c index 0901de42ed85,95fd225f402e..2fa2ef2e2af9 --- a/net/ipv4/ip_gre.c +++ b/net/ipv4/ip_gre.c @@@ -522,6 -522,7 +522,7 @@@ err_free_skb static void gre_fb_xmit(struct sk_buff *skb, struct net_device *dev, __be16 proto) { + struct ip_tunnel *tunnel = netdev_priv(dev); struct ip_tunnel_info *tun_info; const struct ip_tunnel_key *key; struct rtable *rt = NULL; @@@ -545,9 -546,11 +546,11 @@@ if (gre_handle_offloads(skb, !!(tun_info->key.tun_flags & TUNNEL_CSUM))) goto err_free_rt;
- flags = tun_info->key.tun_flags & (TUNNEL_CSUM | TUNNEL_KEY); + flags = tun_info->key.tun_flags + (TUNNEL_CSUM | TUNNEL_KEY | TUNNEL_SEQ); gre_build_header(skb, tunnel_hlen, flags, proto, - tunnel_id_to_key32(tun_info->key.tun_id), 0); + tunnel_id_to_key32(tun_info->key.tun_id), + (flags & TUNNEL_SEQ) ? htonl(tunnel->o_seqno++) : 0);
df = key->tun_flags & TUNNEL_DONT_FRAGMENT ? htons(IP_DF) : 0;
@@@ -970,6 -973,9 +973,6 @@@ static void __gre_tunnel_init(struct ne
t_hlen = tunnel->hlen + sizeof(struct iphdr);
- dev->needed_headroom = LL_MAX_HEADER + t_hlen + 4; - dev->mtu = ETH_DATA_LEN - t_hlen - 4; - dev->features |= GRE_FEATURES; dev->hw_features |= GRE_FEATURES;
@@@ -1041,6 -1047,7 +1044,7 @@@ static struct pernet_operations ipgre_n .exit_batch = ipgre_exit_batch_net, .id = &ipgre_net_id, .size = sizeof(struct ip_tunnel_net), + .async = true, };
static int ipgre_tunnel_validate(struct nlattr *tb[], struct nlattr *data[], @@@ -1287,6 -1294,8 +1291,6 @@@ static int erspan_tunnel_init(struct ne erspan_hdr_len(tunnel->erspan_ver); t_hlen = tunnel->hlen + sizeof(struct iphdr);
- dev->needed_headroom = LL_MAX_HEADER + t_hlen + 4; - dev->mtu = ETH_DATA_LEN - t_hlen - 4; dev->features |= GRE_FEATURES; dev->hw_features |= GRE_FEATURES; dev->priv_flags |= IFF_LIVE_ADDR_CHANGE; @@@ -1317,6 -1326,12 +1321,12 @@@ static void ipgre_tap_setup(struct net_ ip_tunnel_setup(dev, gre_tap_net_id); }
+ bool is_gretap_dev(const struct net_device *dev) + { + return dev->netdev_ops == &gre_tap_netdev_ops; + } + EXPORT_SYMBOL_GPL(is_gretap_dev); + static int ipgre_newlink(struct net *src_net, struct net_device *dev, struct nlattr *tb[], struct nlattr *data[], struct netlink_ext_ack *extack) @@@ -1618,6 -1633,7 +1628,7 @@@ static struct pernet_operations ipgre_t .exit_batch = ipgre_tap_exit_batch_net, .id = &gre_tap_net_id, .size = sizeof(struct ip_tunnel_net), + .async = true, };
static int __net_init erspan_init_net(struct net *net) @@@ -1636,6 -1652,7 +1647,7 @@@ static struct pernet_operations erspan_ .exit_batch = erspan_exit_batch_net, .id = &erspan_net_id, .size = sizeof(struct ip_tunnel_net), + .async = true, };
static int __init ipgre_init(void) diff --combined net/ipv4/ip_tunnel.c index 6d21068f9b55,b2117d89bc83..d2a099ff6f48 --- a/net/ipv4/ip_tunnel.c +++ b/net/ipv4/ip_tunnel.c @@@ -290,22 -290,6 +290,6 @@@ failed return ERR_PTR(err); }
- static inline void init_tunnel_flow(struct flowi4 *fl4, - int proto, - __be32 daddr, __be32 saddr, - __be32 key, __u8 tos, int oif, - __u32 mark) - { - memset(fl4, 0, sizeof(*fl4)); - fl4->flowi4_oif = oif; - fl4->daddr = daddr; - fl4->saddr = saddr; - fl4->flowi4_tos = tos; - fl4->flowi4_proto = proto; - fl4->fl4_gre_key = key; - fl4->flowi4_mark = mark; - } - static int ip_tunnel_bind_dev(struct net_device *dev) { struct net_device *tdev = NULL; @@@ -322,10 -306,10 +306,10 @@@ struct flowi4 fl4; struct rtable *rt;
- init_tunnel_flow(&fl4, iph->protocol, iph->daddr, - iph->saddr, tunnel->parms.o_key, - RT_TOS(iph->tos), tunnel->parms.link, - tunnel->fwmark); + ip_tunnel_init_flow(&fl4, iph->protocol, iph->daddr, + iph->saddr, tunnel->parms.o_key, + RT_TOS(iph->tos), tunnel->parms.link, + tunnel->fwmark); rt = ip_route_output_key(tunnel->net, &fl4);
if (!IS_ERR(rt)) { @@@ -581,8 -565,8 +565,8 @@@ void ip_md_tunnel_xmit(struct sk_buff * else if (skb->protocol == htons(ETH_P_IPV6)) tos = ipv6_get_dsfield((const struct ipv6hdr *)inner_iph); } - init_tunnel_flow(&fl4, proto, key->u.ipv4.dst, key->u.ipv4.src, 0, - RT_TOS(tos), tunnel->parms.link, tunnel->fwmark); + ip_tunnel_init_flow(&fl4, proto, key->u.ipv4.dst, key->u.ipv4.src, 0, + RT_TOS(tos), tunnel->parms.link, tunnel->fwmark); if (tunnel->encap.type != TUNNEL_ENCAP_NONE) goto tx_error; rt = ip_route_output_key(tunnel->net, &fl4); @@@ -710,9 -694,16 +694,9 @@@ void ip_tunnel_xmit(struct sk_buff *skb } }
- init_tunnel_flow(&fl4, protocol, dst, tnl_params->saddr, - tunnel->parms.o_key, RT_TOS(tos), tunnel->parms.link, - tunnel->fwmark); - if (tunnel->fwmark) { - ip_tunnel_init_flow(&fl4, protocol, dst, tnl_params->saddr, - tunnel->parms.o_key, RT_TOS(tos), - tunnel->parms.link, tunnel->fwmark); - } - else { - ip_tunnel_init_flow(&fl4, protocol, dst, tnl_params->saddr, - tunnel->parms.o_key, RT_TOS(tos), - tunnel->parms.link, skb->mark); - } ++ ip_tunnel_init_flow(&fl4, protocol, dst, tnl_params->saddr, ++ tunnel->parms.o_key, RT_TOS(tos), ++ tunnel->parms.link, tunnel->fwmark);
if (ip_tunnel_encap(skb, tunnel, &protocol, &fl4) < 0) goto tx_error; diff --combined net/ipv4/netfilter/ipt_CLUSTERIP.c index 8a8ae61cea71,08b3e48f44fc..0fc88fa7a4dc --- a/net/ipv4/netfilter/ipt_CLUSTERIP.c +++ b/net/ipv4/netfilter/ipt_CLUSTERIP.c @@@ -232,6 -232,7 +232,6 @@@ clusterip_config_init(struct net *net, c->hash_mode = i->hash_mode; c->hash_initval = i->hash_initval; refcount_set(&c->refcount, 1); - refcount_set(&c->entries, 1);
spin_lock_bh(&cn->lock); if (__clusterip_config_find(net, ip)) { @@@ -262,10 -263,8 +262,10 @@@
c->notifier.notifier_call = clusterip_netdev_event; err = register_netdevice_notifier(&c->notifier); - if (!err) + if (!err) { + refcount_set(&c->entries, 1); return c; + }
#ifdef CONFIG_PROC_FS proc_remove(c->pde); @@@ -274,7 -273,7 +274,7 @@@ err spin_lock_bh(&cn->lock); list_del_rcu(&c->list); spin_unlock_bh(&cn->lock); - kfree(c); + clusterip_config_put(c);
return ERR_PTR(err); } @@@ -497,15 -496,12 +497,15 @@@ static int clusterip_tg_check(const str return PTR_ERR(config); } } - cipinfo->config = config;
ret = nf_ct_netns_get(par->net, par->family); - if (ret < 0) + if (ret < 0) { pr_info("cannot load conntrack support for proto=%u\n", par->family); + clusterip_config_entry_put(par->net, config); + clusterip_config_put(config); + return ret; + }
if (!par->net->xt.clusterip_deprecated_warning) { pr_info("ipt_CLUSTERIP is deprecated and it will be removed soon, " @@@ -513,7 -509,6 +513,7 @@@ par->net->xt.clusterip_deprecated_warning = true; }
+ cipinfo->config = config; return ret; }
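The CLUSTERIP hunks above tighten the error paths: the entries refcount is only taken once the netdevice notifier registers, failures drop exactly the references already taken, and cipinfo->config is published only after every step has succeeded. A standalone sketch of that acquire/rollback/publish ordering — toy refcounts, not the netfilter code:

#include <stdio.h>

struct config {
	int refcount;
	int entries;
};

static void put(struct config *c)	{ c->refcount--; }
static void entry_put(struct config *c)	{ c->entries--; }

static int setup(struct config *c, int conntrack_ok, struct config **out)
{
	c->refcount++;
	c->entries++;

	if (!conntrack_ok) {
		entry_put(c);		/* unwind in reverse order */
		put(c);
		return -1;
	}
	*out = c;			/* publish only on full success */
	return 0;
}

int main(void)
{
	struct config c = { 0, 0 };
	struct config *published = NULL;

	setup(&c, 0, &published);
	printf("failure: refs=%d entries=%d published=%s\n",
	       c.refcount, c.entries, published ? "yes" : "no");

	setup(&c, 1, &published);
	printf("success: refs=%d entries=%d published=%s\n",
	       c.refcount, c.entries, published ? "yes" : "no");
	return 0;
}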
@@@ -845,6 -840,7 +845,7 @@@ static struct pernet_operations cluster .exit = clusterip_net_exit, .id = &clusterip_net_id, .size = sizeof(struct clusterip_net), + .async = true, };
static int __init clusterip_tg_init(void) diff --combined net/ipv4/route.c index f9dbb8cb66bf,6a7b3cba3972..d7431164b33e --- a/net/ipv4/route.c +++ b/net/ipv4/route.c @@@ -128,11 -128,10 +128,11 @@@ static int ip_rt_redirect_silence __rea static int ip_rt_error_cost __read_mostly = HZ; static int ip_rt_error_burst __read_mostly = 5 * HZ; static int ip_rt_mtu_expires __read_mostly = 10 * 60 * HZ; -static int ip_rt_min_pmtu __read_mostly = 512 + 20 + 20; +static u32 ip_rt_min_pmtu __read_mostly = 512 + 20 + 20; static int ip_rt_min_advmss __read_mostly = 256;
static int ip_rt_gc_timeout __read_mostly = RT_GC_TIMEOUT; + /* * Interface to generic destination cache. */ @@@ -418,6 -417,7 +418,7 @@@ static void __net_exit ip_rt_do_proc_ex static struct pernet_operations ip_rt_proc_ops __net_initdata = { .init = ip_rt_do_proc_init, .exit = ip_rt_do_proc_exit, + .async = true, };
static int __init ip_rt_proc_init(void) @@@ -931,23 -931,14 +932,23 @@@ out_put_peer
static int ip_error(struct sk_buff *skb) { - struct in_device *in_dev = __in_dev_get_rcu(skb->dev); struct rtable *rt = skb_rtable(skb); + struct net_device *dev = skb->dev; + struct in_device *in_dev; struct inet_peer *peer; unsigned long now; struct net *net; bool send; int code;
+ if (netif_is_l3_master(skb->dev)) { + dev = __dev_get_by_index(dev_net(skb->dev), IPCB(skb)->iif); + if (!dev) + goto out; + } + + in_dev = __in_dev_get_rcu(dev); + /* IP on this device is disabled. */ if (!in_dev) goto out; @@@ -1393,7 -1384,7 +1394,7 @@@ struct uncached_list
static DEFINE_PER_CPU_ALIGNED(struct uncached_list, rt_uncached_list);
-static void rt_add_uncached_list(struct rtable *rt) +void rt_add_uncached_list(struct rtable *rt) { struct uncached_list *ul = raw_cpu_ptr(&rt_uncached_list);
@@@ -1404,8 -1395,14 +1405,8 @@@ spin_unlock_bh(&ul->lock); }
-static void ipv4_dst_destroy(struct dst_entry *dst) +void rt_del_uncached_list(struct rtable *rt) { - struct dst_metrics *p = (struct dst_metrics *)DST_METRICS_PTR(dst); - struct rtable *rt = (struct rtable *) dst; - - if (p != &dst_default_metrics && refcount_dec_and_test(&p->refcnt)) - kfree(p); - if (!list_empty(&rt->rt_uncached)) { struct uncached_list *ul = rt->rt_uncached_list;
@@@ -1415,17 -1412,6 +1416,17 @@@ } }
+static void ipv4_dst_destroy(struct dst_entry *dst) +{ + struct dst_metrics *p = (struct dst_metrics *)DST_METRICS_PTR(dst); + struct rtable *rt = (struct rtable *)dst; + + if (p != &dst_default_metrics && refcount_dec_and_test(&p->refcnt)) + kfree(p); + + rt_del_uncached_list(rt); +} + void rt_flush_dev(struct net_device *dev) { struct net *net = dev_net(dev); @@@ -1523,7 -1509,6 +1524,6 @@@ struct rtable *rt_dst_alloc(struct net_ rt->rt_pmtu = 0; rt->rt_gateway = 0; rt->rt_uses_gateway = 0; - rt->rt_table_id = 0; INIT_LIST_HEAD(&rt->rt_uncached);
rt->dst.output = ip_output; @@@ -1659,19 -1644,6 +1659,6 @@@ static void ip_del_fnhe(struct fib_nh * spin_unlock_bh(&fnhe_lock); }
- static void set_lwt_redirect(struct rtable *rth) - { - if (lwtunnel_output_redirect(rth->dst.lwtstate)) { - rth->dst.lwtstate->orig_output = rth->dst.output; - rth->dst.output = lwtunnel_output; - } - - if (lwtunnel_input_redirect(rth->dst.lwtstate)) { - rth->dst.lwtstate->orig_input = rth->dst.input; - rth->dst.input = lwtunnel_input; - } - } - /* called in rcu_read_lock() section */ static int __mkroute_input(struct sk_buff *skb, const struct fib_result *res, @@@ -1754,15 -1726,13 +1741,13 @@@ rt_cache }
rth->rt_is_input = 1; - if (res->table) - rth->rt_table_id = res->table->tb_id; RT_CACHE_STAT_INC(in_slow_tot);
rth->dst.input = ip_forward;
rt_set_nexthop(rth, daddr, res, fnhe, res->fi, res->type, itag, do_cache); - set_lwt_redirect(rth); + lwtunnel_set_redirect(&rth->dst); skb_dst_set(skb, &rth->dst); out: err = 0; @@@ -1778,44 -1748,45 +1763,45 @@@ static void ip_multipath_l3_keys(const struct flow_keys *hash_keys) { const struct iphdr *outer_iph = ip_hdr(skb); + const struct iphdr *key_iph = outer_iph; const struct iphdr *inner_iph; const struct icmphdr *icmph; struct iphdr _inner_iph; struct icmphdr _icmph;
- hash_keys->addrs.v4addrs.src = outer_iph->saddr; - hash_keys->addrs.v4addrs.dst = outer_iph->daddr; if (likely(outer_iph->protocol != IPPROTO_ICMP)) - return; + goto out;
if (unlikely((outer_iph->frag_off & htons(IP_OFFSET)) != 0)) - return; + goto out;
icmph = skb_header_pointer(skb, outer_iph->ihl * 4, sizeof(_icmph), &_icmph); if (!icmph) - return; + goto out;
if (icmph->type != ICMP_DEST_UNREACH && icmph->type != ICMP_REDIRECT && icmph->type != ICMP_TIME_EXCEEDED && icmph->type != ICMP_PARAMETERPROB) - return; + goto out;
inner_iph = skb_header_pointer(skb, outer_iph->ihl * 4 + sizeof(_icmph), sizeof(_inner_iph), &_inner_iph); if (!inner_iph) - return; - hash_keys->addrs.v4addrs.src = inner_iph->saddr; - hash_keys->addrs.v4addrs.dst = inner_iph->daddr; + goto out; + + key_iph = inner_iph; + out: + hash_keys->addrs.v4addrs.src = key_iph->saddr; + hash_keys->addrs.v4addrs.dst = key_iph->daddr; }
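The restructured ip_multipath_l3_keys() above funnels everything through one out: label: the hash keys start from the outer IP header and switch to the embedded inner header only when the packet is a usable ICMP error, so the error hashes onto the same path as the flow it refers to. A standalone sketch of the key selection with trimmed-down structs:

#include <stdio.h>

struct iphdr_lite {
	unsigned int saddr, daddr;
};

struct hash_keys_lite {
	unsigned int src, dst;
};

static void l3_keys(const struct iphdr_lite *outer,
		    const struct iphdr_lite *inner,	/* NULL if absent */
		    int is_icmp_err, struct hash_keys_lite *k)
{
	const struct iphdr_lite *key_iph = outer;

	if (is_icmp_err && inner)
		key_iph = inner;	/* hash the offending flow instead */

	k->src = key_iph->saddr;	/* single fill point, as in the hunk */
	k->dst = key_iph->daddr;
}

int main(void)
{
	struct iphdr_lite outer = { 0x0a000001, 0x0a000002 };
	struct iphdr_lite inner = { 0xc0a80001, 0xc0a80002 };
	struct hash_keys_lite k;

	l3_keys(&outer, &inner, 1, &k);
	printf("icmp error keyed on inner:  %#x -> %#x\n", k.src, k.dst);
	l3_keys(&outer, NULL, 0, &k);
	printf("plain packet keyed on outer: %#x -> %#x\n", k.src, k.dst);
	return 0;
}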
/* if skb is set it will be used and fl4 can be NULL */ - int fib_multipath_hash(const struct fib_info *fi, const struct flowi4 *fl4, - const struct sk_buff *skb) + int fib_multipath_hash(const struct net *net, const struct flowi4 *fl4, + const struct sk_buff *skb, struct flow_keys *flkeys) { - struct net *net = fi->fib_net; struct flow_keys hash_keys; u32 mhash;
@@@ -1839,15 -1810,20 +1825,20 @@@ /* short-circuit if we already have L4 hash present */ if (skb->l4_hash) return skb_get_hash_raw(skb) >> 1; + memset(&hash_keys, 0, sizeof(hash_keys)); - skb_flow_dissect_flow_keys(skb, &keys, flag); + + if (!flkeys) { + skb_flow_dissect_flow_keys(skb, &keys, flag); + flkeys = &keys; + }
hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS; - hash_keys.addrs.v4addrs.src = keys.addrs.v4addrs.src; - hash_keys.addrs.v4addrs.dst = keys.addrs.v4addrs.dst; - hash_keys.ports.src = keys.ports.src; - hash_keys.ports.dst = keys.ports.dst; - hash_keys.basic.ip_proto = keys.basic.ip_proto; + hash_keys.addrs.v4addrs.src = flkeys->addrs.v4addrs.src; + hash_keys.addrs.v4addrs.dst = flkeys->addrs.v4addrs.dst; + hash_keys.ports.src = flkeys->ports.src; + hash_keys.ports.dst = flkeys->ports.dst; + hash_keys.basic.ip_proto = flkeys->basic.ip_proto; } else { memset(&hash_keys, 0, sizeof(hash_keys)); hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS; @@@ -1863,17 -1839,17 +1854,17 @@@
return mhash >> 1; } - EXPORT_SYMBOL_GPL(fib_multipath_hash); #endif /* CONFIG_IP_ROUTE_MULTIPATH */
static int ip_mkroute_input(struct sk_buff *skb, struct fib_result *res, struct in_device *in_dev, - __be32 daddr, __be32 saddr, u32 tos) + __be32 daddr, __be32 saddr, u32 tos, + struct flow_keys *hkeys) { #ifdef CONFIG_IP_ROUTE_MULTIPATH if (res->fi && res->fi->fib_nhs > 1) { - int h = fib_multipath_hash(res->fi, NULL, skb); + int h = fib_multipath_hash(res->fi->fib_net, NULL, skb, hkeys);
fib_select_multipath(res, h); } @@@ -1899,13 -1875,14 +1890,14 @@@ static int ip_route_input_slow(struct s struct fib_result *res) { struct in_device *in_dev = __in_dev_get_rcu(dev); + struct flow_keys *flkeys = NULL, _flkeys; + struct net *net = dev_net(dev); struct ip_tunnel_info *tun_info; - struct flowi4 fl4; + int err = -EINVAL; unsigned int flags = 0; u32 itag = 0; struct rtable *rth; - int err = -EINVAL; - struct net *net = dev_net(dev); + struct flowi4 fl4; bool do_cache;
/* IP on this device is disabled. */ @@@ -1964,6 -1941,10 +1956,10 @@@ fl4.daddr = daddr; fl4.saddr = saddr; fl4.flowi4_uid = sock_net_uid(net, NULL); + + if (fib4_rules_early_flow_dissect(net, skb, &fl4, &_flkeys)) + flkeys = &_flkeys; + err = fib_lookup(net, &fl4, res, 0); if (err != 0) { if (!IN_DEV_FORWARD(in_dev)) @@@ -1989,7 -1970,7 +1985,7 @@@ if (res->type != RTN_UNICAST) goto martian_destination;
- err = ip_mkroute_input(skb, res, in_dev, daddr, saddr, tos); + err = ip_mkroute_input(skb, res, in_dev, daddr, saddr, tos, flkeys); out: return err;
brd_input: @@@ -2031,8 -2012,6 +2027,6 @@@ local_input rth->dst.tclassid = itag; #endif rth->rt_is_input = 1; - if (res->table) - rth->rt_table_id = res->table->tb_id;
RT_CACHE_STAT_INC(in_slow_tot); if (res->type == RTN_UNREACHABLE) { @@@ -2261,8 -2240,6 +2255,6 @@@ add return ERR_PTR(-ENOBUFS);
rth->rt_iif = orig_oif; - if (res->table) - rth->rt_table_id = res->table->tb_id;
RT_CACHE_STAT_INC(out_slow_tot);
@@@ -2284,7 -2261,7 +2276,7 @@@ }
rt_set_nexthop(rth, fl4->daddr, res, fnhe, fi, type, 0, do_cache); - set_lwt_redirect(rth); + lwtunnel_set_redirect(&rth->dst);
return rth; } @@@ -2792,7 -2769,7 +2784,7 @@@ static int inet_rtm_getroute(struct sk_ rt->rt_flags |= RTCF_NOTIFY;
if (rtm->rtm_flags & RTM_F_LOOKUP_TABLE) - table_id = rt->rt_table_id; + table_id = res.table ? res.table->tb_id : 0;
if (rtm->rtm_flags & RTM_F_FIB_MATCH) { if (!res.fi) { @@@ -2833,7 -2810,6 +2825,7 @@@ void ip_rt_multicast_event(struct in_de static int ip_rt_gc_interval __read_mostly = 60 * HZ; static int ip_rt_gc_min_interval __read_mostly = HZ / 2; static int ip_rt_gc_elasticity __read_mostly = 8; +static int ip_min_valid_pmtu __read_mostly = IPV4_MIN_MTU;
static int ipv4_sysctl_rtcache_flush(struct ctl_table *__ctl, int write, void __user *buffer, @@@ -2949,8 -2925,7 +2941,8 @@@ static struct ctl_table ipv4_route_tabl .data = &ip_rt_min_pmtu, .maxlen = sizeof(int), .mode = 0644, - .proc_handler = proc_dointvec, + .proc_handler = proc_dointvec_minmax, + .extra1 = &ip_min_valid_pmtu, }, { .procname = "min_adv_mss", @@@ -3013,6 -2988,7 +3005,7 @@@ static __net_exit void sysctl_route_net static __net_initdata struct pernet_operations sysctl_route_ops = { .init = sysctl_route_net_init, .exit = sysctl_route_net_exit, + .async = true, }; #endif
@@@ -3026,6 -3002,7 +3019,7 @@@ static __net_init int rt_genid_init(str
static __net_initdata struct pernet_operations rt_genid_ops = { .init = rt_genid_init, + .async = true, };
static int __net_init ipv4_inetpeer_init(struct net *net) @@@ -3051,6 -3028,7 +3045,7 @@@ static void __net_exit ipv4_inetpeer_ex static __net_initdata struct pernet_operations ipv4_inetpeer_ops = { .init = ipv4_inetpeer_init, .exit = ipv4_inetpeer_exit, + .async = true, };
#ifdef CONFIG_IP_ROUTE_CLASSID diff --combined net/ipv4/tcp_input.c index 9a1b3c1c1c14,06b9c4765f42..451ef3012636 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@@ -1358,9 -1358,6 +1358,6 @@@ static struct sk_buff *tcp_shift_skb_da int len; int in_sack;
- if (!sk_can_gso(sk)) - goto fallback; - /* Normally R but no L won't result in plain S */ if (!dup_sack && (TCP_SKB_CB(skb)->sacked & (TCPCB_LOST|TCPCB_SACKED_RETRANS)) == TCPCB_SACKED_RETRANS) @@@ -1971,6 -1968,11 +1968,6 @@@ void tcp_enter_loss(struct sock *sk /* F-RTO RFC5682 sec 3.1 step 1: retransmit SND.UNA if no previous * loss recovery is underway except recurring timeout(s) on * the same SND.UNA (sec 3.2). Disable F-RTO on path MTU probing - * - * In theory F-RTO can be used repeatedly during loss recovery. - * In practice this interacts badly with broken middle-boxes that - * falsely raise the receive window, which results in repeated - * timeouts and stop-and-go behavior. */ tp->frto = net->ipv4.sysctl_tcp_frto && (new_recovery || icsk->icsk_retransmits) && @@@ -2626,14 -2628,18 +2623,14 @@@ static void tcp_process_loss(struct soc tcp_try_undo_loss(sk, false)) return;
- /* The ACK (s)acks some never-retransmitted data meaning not all - * the data packets before the timeout were lost. Therefore we - * undo the congestion window and state. This is essentially - * the operation in F-RTO (RFC5682 section 3.1 step 3.b). Since - * a retransmitted skb is permantly marked, we can apply such an - * operation even if F-RTO was not used. - */ - if ((flag & FLAG_ORIG_SACK_ACKED) && - tcp_try_undo_loss(sk, tp->undo_marker)) - return; - if (tp->frto) { /* F-RTO RFC5682 sec 3.1 (sack enhanced version). */ + /* Step 3.b. A timeout is spurious if not all data are + * lost, i.e., never-retransmitted data are (s)acked. + */ + if ((flag & FLAG_ORIG_SACK_ACKED) && + tcp_try_undo_loss(sk, true)) + return; + if (after(tp->snd_nxt, tp->high_seq)) { if (flag & FLAG_DATA_SACKED || is_dupack) tp->frto = 0; /* Step 3.a. loss was real */ @@@ -3992,7 -3998,6 +3989,7 @@@ void tcp_reset(struct sock *sk /* This barrier is coupled with smp_rmb() in tcp_poll() */ smp_wmb();
+ tcp_write_queue_purge(sk); tcp_done(sk);
if (!sock_flag(sk, SOCK_DEAD)) @@@ -5862,10 -5867,12 +5859,12 @@@ int tcp_rcv_state_process(struct sock * tp->rx_opt.saw_tstamp = 0; req = tp->fastopen_rsk; if (req) { + bool req_stolen; + WARN_ON_ONCE(sk->sk_state != TCP_SYN_RECV && sk->sk_state != TCP_FIN_WAIT1);
- if (!tcp_check_req(sk, skb, req, true)) + if (!tcp_check_req(sk, skb, req, true, &req_stolen)) goto discard; }
diff --combined net/ipv4/xfrm4_policy.c index 8d33f7b311f4,0c752dc3f93b..1b567fe5493c --- a/net/ipv4/xfrm4_policy.c +++ b/net/ipv4/xfrm4_policy.c @@@ -100,9 -100,7 +100,8 @@@ static int xfrm4_fill_dst(struct xfrm_d xdst->u.rt.rt_gateway = rt->rt_gateway; xdst->u.rt.rt_uses_gateway = rt->rt_uses_gateway; xdst->u.rt.rt_pmtu = rt->rt_pmtu; - xdst->u.rt.rt_table_id = rt->rt_table_id; INIT_LIST_HEAD(&xdst->u.rt.rt_uncached); + rt_add_uncached_list(&xdst->u.rt);
return 0; } @@@ -242,8 -240,7 +241,8 @@@ static void xfrm4_dst_destroy(struct ds struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
dst_destroy_metrics_generic(dst); - + if (xdst->u.rt.rt_uncached_list) + rt_del_uncached_list(&xdst->u.rt); xfrm_dst_destroy(xdst); }
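The xfrm4 hunks pair rt_add_uncached_list() at fill time with rt_del_uncached_list() at destroy time, so the route copy embedded in an xfrm_dst shows up on the list that device teardown walks and flushes. A standalone sketch of the track/untrack pairing, with a toy intrusive list standing in for the per-cpu uncached list:

#include <stdio.h>

struct rt {
	struct rt *next, **pprev;	/* intrusive linkage */
};

static struct rt *uncached_head;

static void uncached_add(struct rt *r)
{
	r->next = uncached_head;
	r->pprev = &uncached_head;
	if (uncached_head)
		uncached_head->pprev = &r->next;
	uncached_head = r;
}

static void uncached_del(struct rt *r)
{
	if (!r->pprev)
		return;			/* never tracked: nothing to unlink */
	*r->pprev = r->next;
	if (r->next)
		r->next->pprev = r->pprev;
	r->pprev = NULL;
}

int main(void)
{
	struct rt a = { 0 }, b = { 0 };
	int n = 0;

	uncached_add(&a);		/* what xfrm4_fill_dst() now does */
	uncached_add(&b);
	uncached_del(&a);		/* what xfrm4_dst_destroy() now does */

	for (struct rt *r = uncached_head; r; r = r->next)
		n++;
	printf("still tracked: %d\n", n);	/* 1 */
	return 0;
}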
@@@ -367,6 -364,7 +366,7 @@@ static void __net_exit xfrm4_net_exit(s static struct pernet_operations __net_initdata xfrm4_net_ops = { .init = xfrm4_net_init, .exit = xfrm4_net_exit, + .async = true, };
static void __init xfrm4_policy_init(void) @@@ -381,4 -379,3 +381,3 @@@ void __init xfrm4_init(void xfrm4_protocol_init(); register_pernet_subsys(&xfrm4_net_ops); } - diff --combined net/ipv6/ip6_output.c index a8a919520090,a6eb0e699b15..2c7f09c3c39e --- a/net/ipv6/ip6_output.c +++ b/net/ipv6/ip6_output.c @@@ -71,7 -71,7 +71,7 @@@ static int ip6_finish_output2(struct ne struct inet6_dev *idev = ip6_dst_idev(skb_dst(skb));
if (!(dev->flags & IFF_LOOPBACK) && sk_mc_loop(sk) && - ((mroute6_socket(net, skb) && + ((mroute6_is_socket(net, skb) && !(IP6CB(skb)->flags & IP6SKB_FORWARDED)) || ipv6_chk_mcast_addr(dev, &ipv6_hdr(skb)->daddr, &ipv6_hdr(skb)->saddr))) { @@@ -412,7 -412,7 +412,7 @@@ static bool ip6_pkt_too_big(const struc if (skb->ignore_df) return false;
- if (skb_is_gso(skb) && skb_gso_validate_mtu(skb, mtu)) + if (skb_is_gso(skb) && skb_gso_validate_network_len(skb, mtu)) return false;
return true; diff --combined net/ipv6/ip6_tunnel.c index 6e0f21eed88a,1124f310df5a..56c4967f1868 --- a/net/ipv6/ip6_tunnel.c +++ b/net/ipv6/ip6_tunnel.c @@@ -679,7 -679,7 +679,7 @@@ ip6ip6_err(struct sk_buff *skb, struct
/* Try to guess incoming interface */ rt = rt6_lookup(dev_net(skb->dev), &ipv6_hdr(skb2)->saddr, - NULL, 0, 0); + NULL, 0, skb2, 0);
if (rt && rt->dst.dev) skb2->dev = rt->dst.dev; @@@ -1444,7 -1444,7 +1444,7 @@@ static void ip6_tnl_link_config(struct
struct rt6_info *rt = rt6_lookup(t->net, &p->raddr, &p->laddr, - p->link, strict); + p->link, NULL, strict);
if (!rt) return; @@@ -1982,14 -1982,14 +1982,14 @@@ static int ip6_tnl_newlink(struct net * { struct net *net = dev_net(dev); struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id); - struct ip6_tnl *nt, *t; struct ip_tunnel_encap ipencap; + struct ip6_tnl *nt, *t; + int err;
nt = netdev_priv(dev);
if (ip6_tnl_netlink_encap_parms(data, &ipencap)) { - int err = ip6_tnl_encap_setup(nt, &ipencap); - + err = ip6_tnl_encap_setup(nt, &ipencap); if (err < 0) return err; } @@@ -2005,11 -2005,7 +2005,11 @@@ return -EEXIST; }
- return ip6_tnl_create2(dev); + err = ip6_tnl_create2(dev); + if (!err && tb[IFLA_MTU]) + ip6_tnl_change_mtu(dev, nla_get_u32(tb[IFLA_MTU])); + + return err; }
static int ip6_tnl_changelink(struct net_device *dev, struct nlattr *tb[], @@@ -2254,6 -2250,7 +2254,7 @@@ static struct pernet_operations ip6_tnl .exit_batch = ip6_tnl_exit_batch_net, .id = &ip6_tnl_net_id, .size = sizeof(struct ip6_tnl_net), + .async = true, };
/** diff --combined net/ipv6/netfilter/ip6t_rpfilter.c index 91ed25a24b79,910a27318f58..d12f511929f5 --- a/net/ipv6/netfilter/ip6t_rpfilter.c +++ b/net/ipv6/netfilter/ip6t_rpfilter.c @@@ -48,8 -48,12 +48,8 @@@ static bool rpfilter_lookup_reverse6(st }
fl6.flowi6_mark = flags & XT_RPFILTER_VALID_MARK ? skb->mark : 0; - if ((flags & XT_RPFILTER_LOOSE) == 0) { - fl6.flowi6_oif = dev->ifindex; - lookup_flags |= RT6_LOOKUP_F_IFACE; - }
- rt = (void *) ip6_route_lookup(net, &fl6, lookup_flags); + rt = (void *)ip6_route_lookup(net, &fl6, skb, lookup_flags); if (rt->dst.error) goto out;
diff --combined net/ipv6/netfilter/nft_fib_ipv6.c index 62fc84d7bdff,3230b3d7b11b..36be3cf0adef --- a/net/ipv6/netfilter/nft_fib_ipv6.c +++ b/net/ipv6/netfilter/nft_fib_ipv6.c @@@ -180,7 -180,9 +180,8 @@@ void nft_fib6_eval(const struct nft_exp }
*dest = 0; - rt = (void *)ip6_route_lookup(nft_net(pkt), &fl6, lookup_flags); - again: + rt = (void *)ip6_route_lookup(nft_net(pkt), &fl6, pkt->skb, + lookup_flags); if (rt->dst.error) goto put_rt_err;
@@@ -188,8 -190,15 +189,8 @@@ if (rt->rt6i_flags & (RTF_REJECT | RTF_ANYCAST | RTF_LOCAL)) goto put_rt_err;
- if (oif && oif != rt->rt6i_idev->dev) { - /* multipath route? Try again with F_IFACE */ - if ((lookup_flags & RT6_LOOKUP_F_IFACE) == 0) { - lookup_flags |= RT6_LOOKUP_F_IFACE; - fl6.flowi6_oif = oif->ifindex; - ip6_rt_put(rt); - goto again; - } - } + if (oif && oif != rt->rt6i_idev->dev) + goto put_rt_err;
switch (priv->result) { case NFT_FIB_RESULT_OIF: diff --combined net/ipv6/route.c index cef76eb9b9c4,f0ae58424c45..6c5dd170c61f --- a/net/ipv6/route.c +++ b/net/ipv6/route.c @@@ -128,7 -128,7 +128,7 @@@ struct uncached_list
static DEFINE_PER_CPU_ALIGNED(struct uncached_list, rt6_uncached_list);
-static void rt6_uncached_list_add(struct rt6_info *rt) +void rt6_uncached_list_add(struct rt6_info *rt) { struct uncached_list *ul = raw_cpu_ptr(&rt6_uncached_list);
@@@ -139,7 -139,7 +139,7 @@@ spin_unlock_bh(&ul->lock); }
-static void rt6_uncached_list_del(struct rt6_info *rt) +void rt6_uncached_list_del(struct rt6_info *rt) { if (!list_empty(&rt->rt6i_uncached)) { struct uncached_list *ul = rt->rt6i_uncached_list; @@@ -450,8 -450,10 +450,10 @@@ static bool rt6_check_expired(const str return false; }
- static struct rt6_info *rt6_multipath_select(struct rt6_info *match, + static struct rt6_info *rt6_multipath_select(const struct net *net, + struct rt6_info *match, struct flowi6 *fl6, int oif, + const struct sk_buff *skb, int strict) { struct rt6_info *sibling, *next_sibling; @@@ -460,7 -462,7 +462,7 @@@ * case it will always be non-zero. Otherwise now is the time to do it. */ if (!fl6->mp_hash) - fl6->mp_hash = rt6_multipath_hash(fl6, NULL); + fl6->mp_hash = rt6_multipath_hash(net, fl6, skb, NULL);
if (fl6->mp_hash <= atomic_read(&match->rt6i_nh_upper_bound)) return match; @@@ -914,7 -916,9 +916,9 @@@ static bool ip6_hold_safe(struct net *n
static struct rt6_info *ip6_pol_route_lookup(struct net *net, struct fib6_table *table, - struct flowi6 *fl6, int flags) + struct flowi6 *fl6, + const struct sk_buff *skb, + int flags) { struct rt6_info *rt, *rt_cache; struct fib6_node *fn; @@@ -929,8 -933,8 +933,8 @@@ restart rt = rt6_device_match(net, rt, &fl6->saddr, fl6->flowi6_oif, flags); if (rt->rt6i_nsiblings && fl6->flowi6_oif == 0) - rt = rt6_multipath_select(rt, fl6, - fl6->flowi6_oif, flags); + rt = rt6_multipath_select(net, rt, fl6, fl6->flowi6_oif, + skb, flags); } if (rt == net->ipv6.ip6_null_entry) { fn = fib6_backtrack(fn, &fl6->saddr); @@@ -954,14 -958,15 +958,15 @@@ }
struct dst_entry *ip6_route_lookup(struct net *net, struct flowi6 *fl6, - int flags) + const struct sk_buff *skb, int flags) { - return fib6_rule_lookup(net, fl6, flags, ip6_pol_route_lookup); + return fib6_rule_lookup(net, fl6, skb, flags, ip6_pol_route_lookup); } EXPORT_SYMBOL_GPL(ip6_route_lookup);
struct rt6_info *rt6_lookup(struct net *net, const struct in6_addr *daddr, - const struct in6_addr *saddr, int oif, int strict) + const struct in6_addr *saddr, int oif, + const struct sk_buff *skb, int strict) { struct flowi6 fl6 = { .flowi6_oif = oif, @@@ -975,7 -980,7 +980,7 @@@ flags |= RT6_LOOKUP_F_HAS_SADDR; }
- dst = fib6_rule_lookup(net, &fl6, flags, ip6_pol_route_lookup); + dst = fib6_rule_lookup(net, &fl6, skb, flags, ip6_pol_route_lookup); if (dst->error == 0) return (struct rt6_info *) dst;
@@@ -1647,7 -1652,8 +1652,8 @@@ void rt6_age_exceptions(struct rt6_inf }
struct rt6_info *ip6_pol_route(struct net *net, struct fib6_table *table, - int oif, struct flowi6 *fl6, int flags) + int oif, struct flowi6 *fl6, + const struct sk_buff *skb, int flags) { struct fib6_node *fn, *saved_fn; struct rt6_info *rt, *rt_cache; @@@ -1669,7 -1675,7 +1675,7 @@@ redo_rt6_select: rt = rt6_select(net, fn, oif, strict); if (rt->rt6i_nsiblings) - rt = rt6_multipath_select(rt, fl6, oif, strict); + rt = rt6_multipath_select(net, rt, fl6, oif, skb, strict); if (rt == net->ipv6.ip6_null_entry) { fn = fib6_backtrack(fn, &fl6->saddr); if (fn) @@@ -1768,28 -1774,35 +1774,35 @@@ uncached_rt_out } EXPORT_SYMBOL_GPL(ip6_pol_route);
- static struct rt6_info *ip6_pol_route_input(struct net *net, struct fib6_table *table, - struct flowi6 *fl6, int flags) + static struct rt6_info *ip6_pol_route_input(struct net *net, + struct fib6_table *table, + struct flowi6 *fl6, + const struct sk_buff *skb, + int flags) { - return ip6_pol_route(net, table, fl6->flowi6_iif, fl6, flags); + return ip6_pol_route(net, table, fl6->flowi6_iif, fl6, skb, flags); }
struct dst_entry *ip6_route_input_lookup(struct net *net, struct net_device *dev, - struct flowi6 *fl6, int flags) + struct flowi6 *fl6, + const struct sk_buff *skb, + int flags) { if (rt6_need_strict(&fl6->daddr) && dev->type != ARPHRD_PIMREG) flags |= RT6_LOOKUP_F_IFACE;
- return fib6_rule_lookup(net, fl6, flags, ip6_pol_route_input); + return fib6_rule_lookup(net, fl6, skb, flags, ip6_pol_route_input); } EXPORT_SYMBOL_GPL(ip6_route_input_lookup);
static void ip6_multipath_l3_keys(const struct sk_buff *skb, - struct flow_keys *keys) + struct flow_keys *keys, + struct flow_keys *flkeys) { const struct ipv6hdr *outer_iph = ipv6_hdr(skb); const struct ipv6hdr *key_iph = outer_iph; + struct flow_keys *_flkeys = flkeys; const struct ipv6hdr *inner_iph; const struct icmp6hdr *icmph; struct ipv6hdr _inner_iph; @@@ -1811,26 -1824,76 +1824,76 @@@ goto out;
key_iph = inner_iph; + _flkeys = NULL; out: - memset(keys, 0, sizeof(*keys)); - keys->control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS; - keys->addrs.v6addrs.src = key_iph->saddr; - keys->addrs.v6addrs.dst = key_iph->daddr; - keys->tags.flow_label = ip6_flowinfo(key_iph); - keys->basic.ip_proto = key_iph->nexthdr; + if (_flkeys) { + keys->addrs.v6addrs.src = _flkeys->addrs.v6addrs.src; + keys->addrs.v6addrs.dst = _flkeys->addrs.v6addrs.dst; + keys->tags.flow_label = _flkeys->tags.flow_label; + keys->basic.ip_proto = _flkeys->basic.ip_proto; + } else { + keys->addrs.v6addrs.src = key_iph->saddr; + keys->addrs.v6addrs.dst = key_iph->daddr; + keys->tags.flow_label = ip6_flowinfo(key_iph); + keys->basic.ip_proto = key_iph->nexthdr; + } }
/* if skb is set it will be used and fl6 can be NULL */ - u32 rt6_multipath_hash(const struct flowi6 *fl6, const struct sk_buff *skb) + u32 rt6_multipath_hash(const struct net *net, const struct flowi6 *fl6, + const struct sk_buff *skb, struct flow_keys *flkeys) { struct flow_keys hash_keys; + u32 mhash;
- if (skb) { - ip6_multipath_l3_keys(skb, &hash_keys); - return flow_hash_from_keys(&hash_keys) >> 1; + switch (net->ipv6.sysctl.multipath_hash_policy) { + case 0: + memset(&hash_keys, 0, sizeof(hash_keys)); + hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS; + if (skb) { + ip6_multipath_l3_keys(skb, &hash_keys, flkeys); + } else { + hash_keys.addrs.v6addrs.src = fl6->saddr; + hash_keys.addrs.v6addrs.dst = fl6->daddr; + hash_keys.tags.flow_label = (__force u32)fl6->flowlabel; + hash_keys.basic.ip_proto = fl6->flowi6_proto; + } + break; + case 1: + if (skb) { + unsigned int flag = FLOW_DISSECTOR_F_STOP_AT_ENCAP; + struct flow_keys keys; + + /* short-circuit if we already have L4 hash present */ + if (skb->l4_hash) + return skb_get_hash_raw(skb) >> 1; + + memset(&hash_keys, 0, sizeof(hash_keys)); + + if (!flkeys) { + skb_flow_dissect_flow_keys(skb, &keys, flag); + flkeys = &keys; + } + hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS; + hash_keys.addrs.v6addrs.src = flkeys->addrs.v6addrs.src; + hash_keys.addrs.v6addrs.dst = flkeys->addrs.v6addrs.dst; + hash_keys.ports.src = flkeys->ports.src; + hash_keys.ports.dst = flkeys->ports.dst; + hash_keys.basic.ip_proto = flkeys->basic.ip_proto; + } else { + memset(&hash_keys, 0, sizeof(hash_keys)); + hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS; + hash_keys.addrs.v6addrs.src = fl6->saddr; + hash_keys.addrs.v6addrs.dst = fl6->daddr; + hash_keys.ports.src = fl6->fl6_sport; + hash_keys.ports.dst = fl6->fl6_dport; + hash_keys.basic.ip_proto = fl6->flowi6_proto; + } + break; } + mhash = flow_hash_from_keys(&hash_keys);
- return get_hash_from_flowi6(fl6) >> 1; + return mhash >> 1; }
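rt6_multipath_hash() now branches on net->ipv6.sysctl.multipath_hash_policy: policy 0 keeps the L3 inputs (addresses, flow label, protocol) while policy 1 hashes the five-tuple, and both return mhash >> 1. A standalone sketch of the policy split — toy mixer and flattened fields, not the kernel's flow_hash_from_keys():

#include <stdint.h>
#include <stdio.h>

struct flow_lite {
	uint32_t saddr, daddr;		/* stand-ins for 128-bit addresses */
	uint32_t flowlabel;
	uint16_t sport, dport;
	uint8_t  proto;
};

static uint32_t mix(uint32_t h, uint32_t v)
{
	h ^= v;
	h *= 0x9e3779b1u;		/* toy mixer */
	return h;
}

static uint32_t multipath_hash(const struct flow_lite *f, int policy)
{
	uint32_t h = 0;

	h = mix(h, f->saddr);
	h = mix(h, f->daddr);
	h = mix(h, f->proto);
	if (policy == 0)		/* L3: flow label, no ports */
		h = mix(h, f->flowlabel);
	else				/* L4: the port pair */
		h = mix(h, ((uint32_t)f->sport << 16) | f->dport);

	return h >> 1;			/* returned as mhash >> 1, as above */
}

int main(void)
{
	struct flow_lite f = { 1, 2, 0xbeef, 1000, 2000, 6 };

	printf("policy 0 (L3): %#x\n", multipath_hash(&f, 0));
	printf("policy 1 (L4): %#x\n", multipath_hash(&f, 1));
	return 0;
}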
void ip6_route_input(struct sk_buff *skb) @@@ -1847,20 -1910,29 +1910,29 @@@ .flowi6_mark = skb->mark, .flowi6_proto = iph->nexthdr, }; + struct flow_keys *flkeys = NULL, _flkeys;
tun_info = skb_tunnel_info(skb); if (tun_info && !(tun_info->mode & IP_TUNNEL_INFO_TX)) fl6.flowi6_tun_key.tun_id = tun_info->key.tun_id; + + if (fib6_rules_early_flow_dissect(net, skb, &fl6, &_flkeys)) + flkeys = &_flkeys; + if (unlikely(fl6.flowi6_proto == IPPROTO_ICMPV6)) - fl6.mp_hash = rt6_multipath_hash(&fl6, skb); + fl6.mp_hash = rt6_multipath_hash(net, &fl6, skb, flkeys); skb_dst_drop(skb); - skb_dst_set(skb, ip6_route_input_lookup(net, skb->dev, &fl6, flags)); + skb_dst_set(skb, + ip6_route_input_lookup(net, skb->dev, &fl6, skb, flags)); }
- static struct rt6_info *ip6_pol_route_output(struct net *net, struct fib6_table *table, - struct flowi6 *fl6, int flags) + static struct rt6_info *ip6_pol_route_output(struct net *net, + struct fib6_table *table, + struct flowi6 *fl6, + const struct sk_buff *skb, + int flags) { - return ip6_pol_route(net, table, fl6->flowi6_oif, fl6, flags); + return ip6_pol_route(net, table, fl6->flowi6_oif, fl6, skb, flags); }
struct dst_entry *ip6_route_output_flags(struct net *net, const struct sock *sk, @@@ -1888,7 -1960,7 +1960,7 @@@ else if (sk) flags |= rt6_srcprefs2flags(inet6_sk(sk)->srcprefs);
- return fib6_rule_lookup(net, fl6, flags, ip6_pol_route_output); + return fib6_rule_lookup(net, fl6, NULL, flags, ip6_pol_route_output); } EXPORT_SYMBOL_GPL(ip6_route_output_flags);
@@@ -2137,6 -2209,7 +2209,7 @@@ struct ip6rd_flowi static struct rt6_info *__ip6_route_redirect(struct net *net, struct fib6_table *table, struct flowi6 *fl6, + const struct sk_buff *skb, int flags) { struct ip6rd_flowi *rdfl = (struct ip6rd_flowi *)fl6; @@@ -2210,8 -2283,9 +2283,9 @@@ out };
static struct dst_entry *ip6_route_redirect(struct net *net, - const struct flowi6 *fl6, - const struct in6_addr *gateway) + const struct flowi6 *fl6, + const struct sk_buff *skb, + const struct in6_addr *gateway) { int flags = RT6_LOOKUP_F_HAS_SADDR; struct ip6rd_flowi rdfl; @@@ -2219,7 -2293,7 +2293,7 @@@ rdfl.fl6 = *fl6; rdfl.gateway = *gateway;
- return fib6_rule_lookup(net, &rdfl.fl6, + return fib6_rule_lookup(net, &rdfl.fl6, skb, flags, __ip6_route_redirect); }
@@@ -2239,7 -2313,7 +2313,7 @@@ void ip6_redirect(struct sk_buff *skb, fl6.flowlabel = ip6_flowinfo(iph); fl6.flowi6_uid = uid;
- dst = ip6_route_redirect(net, &fl6, &ipv6_hdr(skb)->saddr); + dst = ip6_route_redirect(net, &fl6, skb, &ipv6_hdr(skb)->saddr); rt6_do_redirect(dst, NULL, skb); dst_release(dst); } @@@ -2261,7 -2335,7 +2335,7 @@@ void ip6_redirect_no_header(struct sk_b fl6.saddr = iph->daddr; fl6.flowi6_uid = sock_net_uid(net, NULL);
- dst = ip6_route_redirect(net, &fl6, &iph->saddr); + dst = ip6_route_redirect(net, &fl6, skb, &iph->saddr); rt6_do_redirect(dst, NULL, skb); dst_release(dst); } @@@ -2463,7 -2537,7 +2537,7 @@@ static struct rt6_info *ip6_nh_lookup_t flags |= RT6_LOOKUP_F_HAS_SADDR;
flags |= RT6_LOOKUP_F_IGNORE_LINKSTATE; - rt = ip6_pol_route(net, table, cfg->fc_ifindex, &fl6, flags); + rt = ip6_pol_route(net, table, cfg->fc_ifindex, &fl6, NULL, flags);
/* if table lookup failed, fall back to full lookup */ if (rt == net->ipv6.ip6_null_entry) { @@@ -2526,7 -2600,7 +2600,7 @@@ static int ip6_route_check_nh(struct ne }
if (!grt) - grt = rt6_lookup(net, gw_addr, NULL, cfg->fc_ifindex, 1); + grt = rt6_lookup(net, gw_addr, NULL, cfg->fc_ifindex, NULL, 1);
if (!grt) goto out; @@@ -2671,14 -2745,7 +2745,7 @@@ static struct rt6_info *ip6_route_info_ if (err) goto out; rt->dst.lwtstate = lwtstate_get(lwtstate); - if (lwtunnel_output_redirect(rt->dst.lwtstate)) { - rt->dst.lwtstate->orig_output = rt->dst.output; - rt->dst.output = lwtunnel_output; - } - if (lwtunnel_input_redirect(rt->dst.lwtstate)) { - rt->dst.lwtstate->orig_input = rt->dst.input; - rt->dst.input = lwtunnel_input; - } + lwtunnel_set_redirect(&rt->dst); }
ipv6_addr_prefix(&rt->rt6i_dst.addr, &cfg->fc_dst, cfg->fc_dst_len); @@@ -4598,7 -4665,7 +4665,7 @@@ static int inet6_rtm_getroute(struct sk if (!ipv6_addr_any(&fl6.saddr)) flags |= RT6_LOOKUP_F_HAS_SADDR;
- dst = ip6_route_input_lookup(net, dev, &fl6, flags); + dst = ip6_route_input_lookup(net, dev, &fl6, NULL, flags);
rcu_read_unlock(); } else { @@@ -4979,6 -5046,7 +5046,7 @@@ static void __net_exit ip6_route_net_ex static struct pernet_operations ip6_route_net_ops = { .init = ip6_route_net_init, .exit = ip6_route_net_exit, + .async = true, };
static int __net_init ipv6_inetpeer_init(struct net *net) @@@ -5004,11 -5072,13 +5072,13 @@@ static void __net_exit ipv6_inetpeer_ex static struct pernet_operations ipv6_inetpeer_ops = { .init = ipv6_inetpeer_init, .exit = ipv6_inetpeer_exit, + .async = true, };
static struct pernet_operations ip6_route_net_late_ops = { .init = ip6_route_net_init_late, .exit = ip6_route_net_exit_late, + .async = true, };
static struct notifier_block ip6_route_dev_notifier = { diff --combined net/ipv6/sit.c index 0195598f7bb5,182db078f01e..a9c4ac6efe22 --- a/net/ipv6/sit.c +++ b/net/ipv6/sit.c @@@ -1578,13 -1578,6 +1578,13 @@@ static int ipip6_newlink(struct net *sr if (err < 0) return err;
+ if (tb[IFLA_MTU]) { + u32 mtu = nla_get_u32(tb[IFLA_MTU]); + + if (mtu >= IPV6_MIN_MTU && mtu <= 0xFFF8 - dev->hard_header_len) + dev->mtu = mtu; + } + #ifdef CONFIG_IPV6_SIT_6RD if (ipip6_netlink_6rd_parms(data, &ip6rd)) err = ipip6_tunnel_update_6rd(nt, &ip6rd); @@@ -1885,6 -1878,7 +1885,7 @@@ static struct pernet_operations sit_net .exit_batch = sit_exit_batch_net, .id = &sit_net_id, .size = sizeof(struct sit_net), + .async = true, };
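The sit newlink hunk above applies a netlink-supplied MTU only when it lies inside [IPV6_MIN_MTU, 0xFFF8 - hard_header_len], silently keeping the current value otherwise. A standalone sketch of that acceptance test:

#include <stdio.h>

#define IPV6_MIN_MTU 1280u

static unsigned int apply_mtu(unsigned int cur, unsigned int requested,
			      unsigned int hard_header_len)
{
	if (requested >= IPV6_MIN_MTU &&
	    requested <= 0xFFF8 - hard_header_len)
		return requested;
	return cur;			/* out of range: keep current MTU */
}

int main(void)
{
	printf("%u\n", apply_mtu(1480, 9000, 14));	/* accepted */
	printf("%u\n", apply_mtu(1480, 500, 14));	/* too small, kept */
	printf("%u\n", apply_mtu(1480, 70000, 14));	/* too large, kept */
	return 0;
}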
static void __exit sit_cleanup(void) diff --combined net/ipv6/xfrm6_policy.c index 416fe67271a9,88cd0c90fa81..cbb270bd81b0 --- a/net/ipv6/xfrm6_policy.c +++ b/net/ipv6/xfrm6_policy.c @@@ -113,9 -113,6 +113,9 @@@ static int xfrm6_fill_dst(struct xfrm_d xdst->u.rt6.rt6i_gateway = rt->rt6i_gateway; xdst->u.rt6.rt6i_dst = rt->rt6i_dst; xdst->u.rt6.rt6i_src = rt->rt6i_src; + INIT_LIST_HEAD(&xdst->u.rt6.rt6i_uncached); + rt6_uncached_list_add(&xdst->u.rt6); + atomic_inc(&dev_net(dev)->ipv6.rt6_stats->fib_rt_uncache);
return 0; } @@@ -247,8 -244,6 +247,8 @@@ static void xfrm6_dst_destroy(struct ds if (likely(xdst->u.rt6.rt6i_idev)) in6_dev_put(xdst->u.rt6.rt6i_idev); dst_destroy_metrics_generic(dst); + if (xdst->u.rt6.rt6i_uncached_list) + rt6_uncached_list_del(&xdst->u.rt6); xfrm_dst_destroy(xdst); }
@@@ -400,6 -395,7 +400,7 @@@ static void __net_exit xfrm6_net_exit(s static struct pernet_operations xfrm6_net_ops = { .init = xfrm6_net_init, .exit = xfrm6_net_exit, + .async = true, };
int __init xfrm6_init(void) diff --combined net/l2tp/l2tp_ip.c index 3428fba6f2b7,4614585e1720..a9c05b2bc1b0 --- a/net/l2tp/l2tp_ip.c +++ b/net/l2tp/l2tp_ip.c @@@ -234,13 -234,17 +234,13 @@@ static void l2tp_ip_close(struct sock * static void l2tp_ip_destroy_sock(struct sock *sk) { struct sk_buff *skb; - struct l2tp_tunnel *tunnel = l2tp_sock_to_tunnel(sk); + struct l2tp_tunnel *tunnel = sk->sk_user_data;
while ((skb = __skb_dequeue_tail(&sk->sk_write_queue)) != NULL) kfree_skb(skb);
- if (tunnel) { - l2tp_tunnel_closeall(tunnel); - sock_put(sk); - } - - sk_refcnt_debug_dec(sk); + if (tunnel) + l2tp_tunnel_delete(tunnel); }
static int l2tp_ip_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len) @@@ -345,7 -349,7 +345,7 @@@ static int l2tp_ip_disconnect(struct so }
static int l2tp_ip_getname(struct socket *sock, struct sockaddr *uaddr, - int *uaddr_len, int peer) + int peer) { struct sock *sk = sock->sk; struct inet_sock *inet = inet_sk(sk); @@@ -366,8 -370,7 +366,7 @@@ lsa->l2tp_conn_id = lsk->conn_id; lsa->l2tp_addr.s_addr = addr; } - *uaddr_len = sizeof(*lsa); - return 0; + return sizeof(*lsa); }
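This is the first of several hunks in this merge (l2tp_ip6, pppol2tp, smc and tipc below follow the same shape) adapting to a tree-wide change of the getname() method: the int *uaddr_len out-parameter is gone, and a non-negative return value now carries the address length. In miniature, with a hypothetical protocol:

static int example_getname(struct socket *sock, struct sockaddr *uaddr,
			   int peer)
{
	struct sockaddr_in *sin = (struct sockaddr_in *)uaddr;

	/* ... fill *sin from sock->sk exactly as before ... */

	return sizeof(*sin);	/* was: *uaddr_len = sizeof(*sin); return 0; */
}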
static int l2tp_ip_backlog_recv(struct sock *sk, struct sk_buff *skb) diff --combined net/l2tp/l2tp_ip6.c index 6f009eaa5fbe,efea58b66295..957369192ca1 --- a/net/l2tp/l2tp_ip6.c +++ b/net/l2tp/l2tp_ip6.c @@@ -248,14 -248,16 +248,14 @@@ static void l2tp_ip6_close(struct sock
static void l2tp_ip6_destroy_sock(struct sock *sk) { - struct l2tp_tunnel *tunnel = l2tp_sock_to_tunnel(sk); + struct l2tp_tunnel *tunnel = sk->sk_user_data;
lock_sock(sk); ip6_flush_pending_frames(sk); release_sock(sk);
- if (tunnel) { - l2tp_tunnel_closeall(tunnel); - sock_put(sk); - } + if (tunnel) + l2tp_tunnel_delete(tunnel);
inet6_destroy_sock(sk); } @@@ -419,7 -421,7 +419,7 @@@ static int l2tp_ip6_disconnect(struct s }
static int l2tp_ip6_getname(struct socket *sock, struct sockaddr *uaddr, - int *uaddr_len, int peer) + int peer) { struct sockaddr_l2tpip6 *lsa = (struct sockaddr_l2tpip6 *)uaddr; struct sock *sk = sock->sk; @@@ -447,8 -449,7 +447,7 @@@ } if (ipv6_addr_type(&lsa->l2tp_addr) & IPV6_ADDR_LINKLOCAL) lsa->l2tp_scope_id = sk->sk_bound_dev_if; - *uaddr_len = sizeof(*lsa); - return 0; + return sizeof(*lsa); }
static int l2tp_ip6_backlog_recv(struct sock *sk, struct sk_buff *skb) diff --combined net/l2tp/l2tp_ppp.c index 3b02f24ea9ec,0c4f49a6a0cb..977bca659787 --- a/net/l2tp/l2tp_ppp.c +++ b/net/l2tp/l2tp_ppp.c @@@ -416,28 -416,20 +416,28 @@@ abort * Session (and tunnel control) socket create/destroy. *****************************************************************************/
+static void pppol2tp_put_sk(struct rcu_head *head) +{ + struct pppol2tp_session *ps; + + ps = container_of(head, typeof(*ps), rcu); + sock_put(ps->__sk); +} + /* Called by l2tp_core when a session socket is being closed. */ static void pppol2tp_session_close(struct l2tp_session *session) { - struct sock *sk; - - BUG_ON(session->magic != L2TP_SESSION_MAGIC); + struct pppol2tp_session *ps;
- sk = pppol2tp_session_get_sock(session); - if (sk) { - if (sk->sk_socket) - inet_shutdown(sk->sk_socket, SEND_SHUTDOWN); - sock_put(sk); - } + ps = l2tp_session_priv(session); + mutex_lock(&ps->sk_lock); + ps->__sk = rcu_dereference_protected(ps->sk, + lockdep_is_held(&ps->sk_lock)); + RCU_INIT_POINTER(ps->sk, NULL); + if (ps->__sk) + call_rcu(&ps->rcu, pppol2tp_put_sk); + mutex_unlock(&ps->sk_lock); }
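The rewritten pppol2tp_session_close() above follows the standard RCU deferred-release shape: detach the published pointer under the lock, leave NULL behind for new readers, and postpone the final sock_put() until a grace period has elapsed, so in-flight RCU readers of ps->sk can never touch a freed socket. The skeleton of the pattern:

mutex_lock(&ps->sk_lock);
ps->__sk = rcu_dereference_protected(ps->sk,
				     lockdep_is_held(&ps->sk_lock));
RCU_INIT_POINTER(ps->sk, NULL);		/* new readers now observe NULL */
if (ps->__sk)
	call_rcu(&ps->rcu, pppol2tp_put_sk); /* sock_put() after grace period */
mutex_unlock(&ps->sk_lock);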
/* Really kill the session socket. (Called from sock_put() if @@@ -457,6 -449,14 +457,6 @@@ static void pppol2tp_session_destruct(s } }
-static void pppol2tp_put_sk(struct rcu_head *head) -{ - struct pppol2tp_session *ps; - - ps = container_of(head, typeof(*ps), rcu); - sock_put(ps->__sk); -} - /* Called when the PPPoX socket (session) is closed. */ static int pppol2tp_release(struct socket *sock) @@@ -480,17 -480,26 +480,17 @@@ sock_orphan(sk); sock->sk = NULL;
+ /* If the socket is associated with a session, + * l2tp_session_delete will call pppol2tp_session_close which + * will drop the session's ref on the socket. + */ session = pppol2tp_sock_to_session(sk); - - if (session != NULL) { - struct pppol2tp_session *ps; - + if (session) { l2tp_session_delete(session); - - ps = l2tp_session_priv(session); - mutex_lock(&ps->sk_lock); - ps->__sk = rcu_dereference_protected(ps->sk, - lockdep_is_held(&ps->sk_lock)); - RCU_INIT_POINTER(ps->sk, NULL); - mutex_unlock(&ps->sk_lock); - call_rcu(&ps->rcu, pppol2tp_put_sk); - - /* Rely on the sock_put() call at the end of the function for - * dropping the reference held by pppol2tp_sock_to_session(). - * The last reference will be dropped by pppol2tp_put_sk(). - */ + /* drop the ref obtained by pppol2tp_sock_to_session */ + sock_put(sk); } + release_sock(sk);
/* This will delete the session context via @@@ -787,7 -796,6 +787,7 @@@ static int pppol2tp_connect(struct sock
out_no_ppp: /* This is how we get the session context from the socket. */ + sock_hold(sk); sk->sk_user_data = session; rcu_assign_pointer(ps->sk, sk); mutex_unlock(&ps->sk_lock); @@@ -862,7 -870,7 +862,7 @@@ err /* getname() support. */ static int pppol2tp_getname(struct socket *sock, struct sockaddr *uaddr, - int *usockaddr_len, int peer) + int peer) { int len = 0; int error = 0; @@@ -961,8 -969,7 +961,7 @@@ memcpy(uaddr, &sp, len); }
- *usockaddr_len = len; - error = 0; + error = len;
sock_put(sk); end: @@@ -1763,6 -1770,7 +1762,7 @@@ static struct pernet_operations pppol2t .init = pppol2tp_init_net, .exit = pppol2tp_exit_net, .id = &pppol2tp_net_id, + .async = true, };
/***************************************************************************** diff --combined net/mac80211/rx.c index 56fe16b07538,de7d10732fd5..d01743234cf6 --- a/net/mac80211/rx.c +++ b/net/mac80211/rx.c @@@ -439,6 -439,10 +439,10 @@@ ieee80211_add_rx_radiotap_header(struc flags |= IEEE80211_RADIOTAP_AMPDU_DELIM_CRC_ERR; if (status->flag & RX_FLAG_AMPDU_DELIM_CRC_KNOWN) flags |= IEEE80211_RADIOTAP_AMPDU_DELIM_CRC_KNOWN; + if (status->flag & RX_FLAG_AMPDU_EOF_BIT_KNOWN) + flags |= IEEE80211_RADIOTAP_AMPDU_EOF_KNOWN; + if (status->flag & RX_FLAG_AMPDU_EOF_BIT) + flags |= IEEE80211_RADIOTAP_AMPDU_EOF; put_unaligned_le16(flags, pos); pos += 2; if (status->flag & RX_FLAG_AMPDU_DELIM_CRC_KNOWN) @@@ -1185,7 -1189,7 +1189,7 @@@ static void ieee80211_rx_reorder_ampdu(
ack_policy = *ieee80211_get_qos_ctl(hdr) & IEEE80211_QOS_CTL_ACK_POLICY_MASK; - tid = *ieee80211_get_qos_ctl(hdr) & IEEE80211_QOS_CTL_TID_MASK; + tid = ieee80211_get_tid(hdr);
tid_agg_rx = rcu_dereference(sta->ampdu_mlme.tid_rx[tid]); if (!tid_agg_rx) { @@@ -1524,9 -1528,7 +1528,7 @@@ ieee80211_rx_h_uapsd_and_pspoll(struct ieee80211_has_pm(hdr->frame_control) && (ieee80211_is_data_qos(hdr->frame_control) || ieee80211_is_qos_nullfunc(hdr->frame_control))) { - u8 tid; - - tid = *ieee80211_get_qos_ctl(hdr) & IEEE80211_QOS_CTL_TID_MASK; + u8 tid = ieee80211_get_tid(hdr);
ieee80211_sta_uapsd_trigger(&rx->sta->sta, tid); } @@@ -2351,39 -2353,17 +2353,17 @@@ ieee80211_deliver_skb(struct ieee80211_ }
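This and several later mac80211 hunks (including two in tx.c below) swap the open-coded QoS-control masking for an ieee80211_get_tid() helper. A sketch consistent with the pattern being replaced (illustrative):

static inline u8 ieee80211_get_tid(struct ieee80211_hdr *hdr)
{
	u8 *qc = ieee80211_get_qos_ctl(hdr);

	return *qc & IEEE80211_QOS_CTL_TID_MASK;
}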
static ieee80211_rx_result debug_noinline - ieee80211_rx_h_amsdu(struct ieee80211_rx_data *rx) + __ieee80211_rx_h_amsdu(struct ieee80211_rx_data *rx, u8 data_offset) { struct net_device *dev = rx->sdata->dev; struct sk_buff *skb = rx->skb; struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; __le16 fc = hdr->frame_control; struct sk_buff_head frame_list; - struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb); struct ethhdr ethhdr; const u8 *check_da = ethhdr.h_dest, *check_sa = ethhdr.h_source;
- if (unlikely(!ieee80211_is_data(fc))) - return RX_CONTINUE; - - if (unlikely(!ieee80211_is_data_present(fc))) - return RX_DROP_MONITOR; - - if (!(status->rx_flags & IEEE80211_RX_AMSDU)) - return RX_CONTINUE; - if (unlikely(ieee80211_has_a4(hdr->frame_control))) { - switch (rx->sdata->vif.type) { - case NL80211_IFTYPE_AP_VLAN: - if (!rx->sdata->u.vlan.sta) - return RX_DROP_UNUSABLE; - break; - case NL80211_IFTYPE_STATION: - if (!rx->sdata->u.mgd.use_4addr) - return RX_DROP_UNUSABLE; - break; - default: - return RX_DROP_UNUSABLE; - } check_da = NULL; check_sa = NULL; } else switch (rx->sdata->vif.type) { @@@ -2403,15 -2383,13 +2383,13 @@@ break; }
- if (is_multicast_ether_addr(hdr->addr1)) - return RX_DROP_UNUSABLE; - skb->dev = dev; __skb_queue_head_init(&frame_list);
if (ieee80211_data_to_8023_exthdr(skb, ðhdr, rx->sdata->vif.addr, - rx->sdata->vif.type)) + rx->sdata->vif.type, + data_offset)) return RX_DROP_UNUSABLE;
ieee80211_amsdu_to_8023s(skb, &frame_list, dev->dev_addr, @@@ -2433,6 -2411,44 +2411,44 @@@ return RX_QUEUED; }
+ static ieee80211_rx_result debug_noinline + ieee80211_rx_h_amsdu(struct ieee80211_rx_data *rx) + { + struct sk_buff *skb = rx->skb; + struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; + __le16 fc = hdr->frame_control; + + if (!(status->rx_flags & IEEE80211_RX_AMSDU)) + return RX_CONTINUE; + + if (unlikely(!ieee80211_is_data(fc))) + return RX_CONTINUE; + + if (unlikely(!ieee80211_is_data_present(fc))) + return RX_DROP_MONITOR; + + if (unlikely(ieee80211_has_a4(hdr->frame_control))) { + switch (rx->sdata->vif.type) { + case NL80211_IFTYPE_AP_VLAN: + if (!rx->sdata->u.vlan.sta) + return RX_DROP_UNUSABLE; + break; + case NL80211_IFTYPE_STATION: + if (!rx->sdata->u.mgd.use_4addr) + return RX_DROP_UNUSABLE; + break; + default: + return RX_DROP_UNUSABLE; + } + } + + if (is_multicast_ether_addr(hdr->addr1)) + return RX_DROP_UNUSABLE; + + return __ieee80211_rx_h_amsdu(rx, 0); + } + #ifdef CONFIG_MAC80211_MESH static ieee80211_rx_result ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx) @@@ -2848,6 -2864,7 +2864,7 @@@ ieee80211_rx_h_action(struct ieee80211_ case WLAN_HT_ACTION_SMPS: { struct ieee80211_supported_band *sband; enum ieee80211_smps_mode smps_mode; + struct sta_opmode_info sta_opmode = {};
/* convert to HT capability */ switch (mgmt->u.action.u.ht_smps.smps_control) { @@@ -2868,17 -2885,24 +2885,24 @@@ if (rx->sta->sta.smps_mode == smps_mode) goto handled; rx->sta->sta.smps_mode = smps_mode; + sta_opmode.smps_mode = smps_mode; + sta_opmode.changed = STA_OPMODE_SMPS_MODE_CHANGED;
sband = rx->local->hw.wiphy->bands[status->band];
rate_control_rate_update(local, sband, rx->sta, IEEE80211_RC_SMPS_CHANGED); + cfg80211_sta_opmode_change_notify(sdata->dev, + rx->sta->addr, + &sta_opmode, + GFP_KERNEL); goto handled; } case WLAN_HT_ACTION_NOTIFY_CHANWIDTH: { struct ieee80211_supported_band *sband; u8 chanwidth = mgmt->u.action.u.ht_notify_cw.chanwidth; enum ieee80211_sta_rx_bandwidth max_bw, new_bw; + struct sta_opmode_info sta_opmode = {};
/* If it doesn't support 40 MHz it can't change ... */ if (!(rx->sta->sta.ht_cap.cap & @@@ -2899,9 -2923,15 +2923,15 @@@
rx->sta->sta.bandwidth = new_bw; sband = rx->local->hw.wiphy->bands[status->band]; + sta_opmode.bw = new_bw; + sta_opmode.changed = STA_OPMODE_MAX_BW_CHANGED;
rate_control_rate_update(local, sband, rx->sta, IEEE80211_RC_BW_CHANGED); + cfg80211_sta_opmode_change_notify(sdata->dev, + rx->sta->addr, + &sta_opmode, + GFP_KERNEL); goto handled; } default: @@@ -3731,15 -3761,6 +3761,6 @@@ void ieee80211_check_fast_rx(struct sta
switch (sdata->vif.type) { case NL80211_IFTYPE_STATION: - /* 4-addr is harder to deal with, later maybe */ - if (sdata->u.mgd.use_4addr) - goto clear; - /* software powersave is a huge mess, avoid all of it */ - if (ieee80211_hw_check(&local->hw, PS_NULLFUNC_STACK)) - goto clear; - if (ieee80211_hw_check(&local->hw, SUPPORTS_PS) && - !ieee80211_hw_check(&local->hw, SUPPORTS_DYNAMIC_PS)) - goto clear; if (sta->sta.tdls) { fastrx.da_offs = offsetof(struct ieee80211_hdr, addr1); fastrx.sa_offs = offsetof(struct ieee80211_hdr, addr2); @@@ -3751,6 -3772,23 +3772,23 @@@ fastrx.expected_ds_bits = cpu_to_le16(IEEE80211_FCTL_FROMDS); } + + if (sdata->u.mgd.use_4addr && !sta->sta.tdls) { + fastrx.expected_ds_bits |= + cpu_to_le16(IEEE80211_FCTL_TODS); + fastrx.da_offs = offsetof(struct ieee80211_hdr, addr3); + fastrx.sa_offs = offsetof(struct ieee80211_hdr, addr4); + } + + if (!sdata->u.mgd.powersave) + break; + + /* software powersave is a huge mess, avoid all of it */ + if (ieee80211_hw_check(&local->hw, PS_NULLFUNC_STACK)) + goto clear; + if (ieee80211_hw_check(&local->hw, SUPPORTS_PS) && + !ieee80211_hw_check(&local->hw, SUPPORTS_DYNAMIC_PS)) + goto clear; break; case NL80211_IFTYPE_AP_VLAN: case NL80211_IFTYPE_AP: @@@ -3767,6 -3805,15 +3805,15 @@@ !(sdata->flags & IEEE80211_SDATA_DONT_BRIDGE_PACKETS) && (sdata->vif.type != NL80211_IFTYPE_AP_VLAN || !sdata->u.vlan.sta); + + if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN && + sdata->u.vlan.sta) { + fastrx.expected_ds_bits |= + cpu_to_le16(IEEE80211_FCTL_FROMDS); + fastrx.sa_offs = offsetof(struct ieee80211_hdr, addr4); + fastrx.internal_forward = 0; + } + break; default: goto clear; @@@ -3865,7 -3912,8 +3912,8 @@@ static bool ieee80211_invoke_fast_rx(st struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); struct sta_info *sta = rx->sta; int orig_len = skb->len; - int snap_offs = ieee80211_hdrlen(hdr->frame_control); + int hdrlen = ieee80211_hdrlen(hdr->frame_control); + int snap_offs = hdrlen; struct { u8 snap[sizeof(rfc1042_header)]; __be16 proto; @@@ -3896,10 -3944,6 +3944,6 @@@ (status->flag & FAST_RX_CRYPT_FLAGS) != FAST_RX_CRYPT_FLAGS) return false;
- /* we don't deal with A-MSDU deaggregation here */ - if (status->rx_flags & IEEE80211_RX_AMSDU) - return false; - if (unlikely(!ieee80211_is_data_present(hdr->frame_control))) return false;
@@@ -3921,7 -3965,7 +3965,7 @@@ if ((hdr->frame_control & cpu_to_le16(IEEE80211_FCTL_FROMDS | IEEE80211_FCTL_TODS)) != fast_rx->expected_ds_bits) - goto drop; + return false;
/* assign the key to drop unencrypted frames (later) * and strip the IV/MIC if necessary @@@ -3931,21 -3975,24 +3975,24 @@@ snap_offs += IEEE80211_CCMP_HDR_LEN; }
- if (!pskb_may_pull(skb, snap_offs + sizeof(*payload))) - goto drop; - payload = (void *)(skb->data + snap_offs); + if (!(status->rx_flags & IEEE80211_RX_AMSDU)) { + if (!pskb_may_pull(skb, snap_offs + sizeof(*payload))) + goto drop;
- if (!ether_addr_equal(payload->snap, fast_rx->rfc1042_hdr)) - return false; + payload = (void *)(skb->data + snap_offs);
- /* Don't handle these here since they require special code. - * Accept AARP and IPX even though they should come with a - * bridge-tunnel header - but if we get them this way then - * there's little point in discarding them. - */ - if (unlikely(payload->proto == cpu_to_be16(ETH_P_TDLS) || - payload->proto == fast_rx->control_port_protocol)) - return false; + if (!ether_addr_equal(payload->snap, fast_rx->rfc1042_hdr)) + return false; + + /* Don't handle these here since they require special code. + * Accept AARP and IPX even though they should come with a + * bridge-tunnel header - but if we get them this way then + * there's little point in discarding them. + */ + if (unlikely(payload->proto == cpu_to_be16(ETH_P_TDLS) || + payload->proto == fast_rx->control_port_protocol)) + return false; + }
/* after this point, don't punt to the slowpath! */
@@@ -3959,12 -4006,6 +4006,6 @@@ }
/* statistics part of ieee80211_rx_h_sta_process() */ - stats->last_rx = jiffies; - stats->last_rate = sta_stats_encode_rate(status); - - stats->fragments++; - stats->packets++; - if (!(status->flag & RX_FLAG_NO_SIGNAL_VAL)) { stats->last_signal = status->signal; if (!fast_rx->uses_rss) @@@ -3993,6 -4034,20 +4034,20 @@@ if (rx->key && !ieee80211_has_protected(hdr->frame_control)) goto drop;
+ if (status->rx_flags & IEEE80211_RX_AMSDU) { + if (__ieee80211_rx_h_amsdu(rx, snap_offs - hdrlen) != + RX_QUEUED) + goto drop; + + return true; + } + + stats->last_rx = jiffies; + stats->last_rate = sta_stats_encode_rate(status); + + stats->fragments++; + stats->packets++; + /* do the header conversion - first grab the addresses */ ether_addr_copy(addrs.da, skb->data + fast_rx->da_offs); ether_addr_copy(addrs.sa, skb->data + fast_rx->sa_offs); diff --combined net/mac80211/tx.c index 69722504e3e1,7643178ef132..933c67b5f845 --- a/net/mac80211/tx.c +++ b/net/mac80211/tx.c @@@ -797,7 -797,6 +797,6 @@@ ieee80211_tx_h_sequence(struct ieee8021 { struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb); struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx->skb->data; - u8 *qc; int tid;
/* @@@ -844,9 -843,7 +843,7 @@@ return TX_CONTINUE;
/* include per-STA, per-TID sequence counter */ - - qc = ieee80211_get_qos_ctl(hdr); - tid = *qc & IEEE80211_QOS_CTL_TID_MASK; + tid = ieee80211_get_tid(hdr); tx->sta->tx_stats.msdu[tid]++;
hdr->seq_ctrl = ieee80211_tx_next_seq(tx->sta, tid); @@@ -1158,7 -1155,6 +1155,6 @@@ ieee80211_tx_prepare(struct ieee80211_s struct ieee80211_hdr *hdr; struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); int tid; - u8 *qc;
memset(tx, 0, sizeof(*tx)); tx->skb = skb; @@@ -1198,8 -1194,7 +1194,7 @@@ !ieee80211_hw_check(&local->hw, TX_AMPDU_SETUP_IN_HW)) { struct tid_ampdu_tx *tid_tx;
- qc = ieee80211_get_qos_ctl(hdr); - tid = *qc & IEEE80211_QOS_CTL_TID_MASK; + tid = ieee80211_get_tid(hdr);
tid_tx = rcu_dereference(tx->sta->ampdu_mlme.tid_tx[tid]); if (tid_tx) { @@@ -1921,7 -1916,7 +1916,7 @@@ void ieee80211_xmit(struct ieee80211_su { struct ieee80211_local *local = sdata->local; struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); - struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; + struct ieee80211_hdr *hdr; int headroom; bool may_encrypt;
@@@ -3574,14 -3569,6 +3569,14 @@@ void __ieee80211_subif_start_xmit(struc if (!IS_ERR_OR_NULL(sta)) { struct ieee80211_fast_tx *fast_tx;
+ /* We need a bit of data queued to build aggregates properly, so + * instruct the TCP stack to allow more than a single ms of data + * to be queued in the stack. The value is a bit-shift of 1 + * second, so 8 is ~4ms of queued data. Only affects local TCP + * sockets. + */ + sk_pacing_shift_update(skb->sk, 8); + fast_tx = rcu_dereference(sta->fast_tx);
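The budget in the comment above really is a bit-shift of one second, so the arithmetic works out as follows (the default shift of 10 is an assumption about this kernel generation):

/*
 * shift 10: 1000 ms / 2^10 ~= 0.98 ms   (assumed stack default)
 * shift  9: 1000 ms / 2^9  ~= 1.95 ms
 * shift  8: 1000 ms / 2^8  ~= 3.91 ms   (value set here)
 */

So sk_pacing_shift_update(skb->sk, 8) roughly quadruples how much locally generated TCP data may sit queued, enough for full A-MPDU aggregates to form.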
if (fast_tx && diff --combined net/smc/af_smc.c index 8cc97834d4f6,26684e086750..2c6f4e0a9f3d --- a/net/smc/af_smc.c +++ b/net/smc/af_smc.c @@@ -7,7 -7,6 +7,6 @@@ * applicable with RoCE-cards only * * Initial restrictions: - * - non-blocking connect postponed * - IPv6 support postponed * - support for alternate links postponed * - partial support for non-blocking sockets only @@@ -24,7 -23,6 +23,6 @@@
#include <linux/module.h> #include <linux/socket.h> - #include <linux/inetdevice.h> #include <linux/workqueue.h> #include <linux/in.h> #include <linux/sched/signal.h> @@@ -273,47 -271,7 +271,7 @@@ static void smc_copy_sock_settings_to_s smc_copy_sock_settings(&smc->sk, smc->clcsock->sk, SK_FLAGS_CLC_TO_SMC); }
- /* determine subnet and mask of internal TCP socket */ - int smc_netinfo_by_tcpsk(struct socket *clcsock, - __be32 *subnet, u8 *prefix_len) - { - struct dst_entry *dst = sk_dst_get(clcsock->sk); - struct in_device *in_dev; - struct sockaddr_in addr; - int rc = -ENOENT; - int len; - - if (!dst) { - rc = -ENOTCONN; - goto out; - } - if (!dst->dev) { - rc = -ENODEV; - goto out_rel; - } - - /* get address to which the internal TCP socket is bound */ - kernel_getsockname(clcsock, (struct sockaddr *)&addr, &len); - /* analyze IPv4 specific data of net_device belonging to TCP socket */ - rcu_read_lock(); - in_dev = __in_dev_get_rcu(dst->dev); - for_ifa(in_dev) { - if (!inet_ifa_match(addr.sin_addr.s_addr, ifa)) - continue; - *prefix_len = inet_mask_len(ifa->ifa_mask); - *subnet = ifa->ifa_address & ifa->ifa_mask; - rc = 0; - break; - } endfor_ifa(in_dev); - rcu_read_unlock(); - - out_rel: - dst_release(dst); - out: - return rc; - } - - static int smc_clnt_conf_first_link(struct smc_sock *smc, union ib_gid *gid) + static int smc_clnt_conf_first_link(struct smc_sock *smc) { struct smc_link_group *lgr = smc->conn.lgr; struct smc_link *link; @@@ -333,6 -291,9 +291,9 @@@ return rc; }
+ if (link->llc_confirm_rc) + return SMC_CLC_DECL_RMBE_EC; + rc = smc_ib_modify_qp_rts(link); if (rc) return SMC_CLC_DECL_INTERR; @@@ -347,11 -308,33 +308,33 @@@ /* send CONFIRM LINK response over RoCE fabric */ rc = smc_llc_send_confirm_link(link, link->smcibdev->mac[link->ibport - 1], - gid, SMC_LLC_RESP); + &link->smcibdev->gid[link->ibport - 1], + SMC_LLC_RESP); if (rc < 0) return SMC_CLC_DECL_TCL;
- return rc; + /* receive ADD LINK request from server over RoCE fabric */ + rest = wait_for_completion_interruptible_timeout(&link->llc_add, + SMC_LLC_WAIT_TIME); + if (rest <= 0) { + struct smc_clc_msg_decline dclc; + + rc = smc_clc_wait_msg(smc, &dclc, sizeof(dclc), + SMC_CLC_DECLINE); + return rc; + } + + /* send add link reject message, only one link supported for now */ + rc = smc_llc_send_add_link(link, + link->smcibdev->mac[link->ibport - 1], + &link->smcibdev->gid[link->ibport - 1], + SMC_LLC_RESP); + if (rc < 0) + return SMC_CLC_DECL_TCL; + + link->state = SMC_LNK_ACTIVE; + + return 0; }
static void smc_conn_save_peer_info(struct smc_sock *smc, @@@ -373,19 -356,9 +356,9 @@@ static void smc_link_save_peer_info(str link->peer_mtu = clc->qp_mtu; }
- static void smc_lgr_forget(struct smc_link_group *lgr) - { - spin_lock_bh(&smc_lgr_list.lock); - /* do not use this link group for new connections */ - if (!list_empty(&lgr->list)) - list_del_init(&lgr->list); - spin_unlock_bh(&smc_lgr_list.lock); - } - /* setup for RDMA connection of client */ static int smc_connect_rdma(struct smc_sock *smc) { - struct sockaddr_in *inaddr = (struct sockaddr_in *)smc->addr; struct smc_clc_msg_accept_confirm aclc; int local_contact = SMC_FIRST_CONTACT; struct smc_ib_device *smcibdev; @@@ -439,8 -412,8 +412,8 @@@
srv_first_contact = aclc.hdr.flag; mutex_lock(&smc_create_lgr_pending); - local_contact = smc_conn_create(smc, inaddr->sin_addr.s_addr, smcibdev, - ibport, &aclc.lcl, srv_first_contact); + local_contact = smc_conn_create(smc, smcibdev, ibport, &aclc.lcl, + srv_first_contact); if (local_contact < 0) { rc = local_contact; if (rc == -ENOMEM) @@@ -499,8 -472,7 +472,7 @@@
if (local_contact == SMC_FIRST_CONTACT) { /* QP confirmation over RoCE fabric */ - reason_code = smc_clnt_conf_first_link( - smc, &smcibdev->gid[ibport - 1]); + reason_code = smc_clnt_conf_first_link(smc); if (reason_code < 0) { rc = reason_code; goto out_err_unlock; @@@ -559,7 -531,6 +531,6 @@@ static int smc_connect(struct socket *s goto out_err; if (addr->sa_family != AF_INET) goto out_err; - smc->addr = addr; /* needed for nonblocking connect */
lock_sock(sk); switch (sk->sk_state) { @@@ -749,9 -720,34 +720,34 @@@ static int smc_serv_conf_first_link(str
rc = smc_clc_wait_msg(smc, &dclc, sizeof(dclc), SMC_CLC_DECLINE); + return rc; }
- return rc; + if (link->llc_confirm_resp_rc) + return SMC_CLC_DECL_RMBE_EC; + + /* send ADD LINK request to client over the RoCE fabric */ + rc = smc_llc_send_add_link(link, + link->smcibdev->mac[link->ibport - 1], + &link->smcibdev->gid[link->ibport - 1], + SMC_LLC_REQ); + if (rc < 0) + return SMC_CLC_DECL_TCL; + + /* receive ADD LINK response from client over the RoCE fabric */ + rest = wait_for_completion_interruptible_timeout(&link->llc_add_resp, + SMC_LLC_WAIT_TIME); + if (rest <= 0) { + struct smc_clc_msg_decline dclc; + + rc = smc_clc_wait_msg(smc, &dclc, sizeof(dclc), + SMC_CLC_DECLINE); + return rc; + } + + link->state = SMC_LNK_ACTIVE; + + return 0; }
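Taken together with the client-side hunk earlier in this file, first-contact link setup is now a two-exchange LLC conversation before either side marks the link active; a timeout on any wait falls back to reading a CLC DECLINE. In outline (as a comment; the server initiates both exchanges):

/*
 * server                                 client
 * ------                                 ------
 * CONFIRM LINK (REQ)    ------------->   wait llc_confirm
 * wait llc_confirm_resp <-------------   CONFIRM LINK (RESP)
 * ADD LINK (REQ)        ------------->   wait llc_add
 * wait llc_add_resp     <-------------   ADD LINK (RESP, alt path rejected)
 * link->state = SMC_LNK_ACTIVE           link->state = SMC_LNK_ACTIVE
 */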
/* setup for RDMA connection of server */ @@@ -767,11 -763,10 +763,10 @@@ static void smc_listen_work(struct work struct sock *newsmcsk = &new_smc->sk; struct smc_clc_msg_proposal *pclc; struct smc_ib_device *smcibdev; - struct sockaddr_in peeraddr; u8 buf[SMC_CLC_MAX_LEN]; struct smc_link *link; int reason_code = 0; - int rc = 0, len; + int rc = 0; __be32 subnet; u8 prefix_len; u8 ibport; @@@ -809,7 -804,7 +804,7 @@@ }
/* determine subnet and mask from internal TCP socket */ - rc = smc_netinfo_by_tcpsk(newclcsock, &subnet, &prefix_len); + rc = smc_clc_netinfo_by_tcpsk(newclcsock, &subnet, &prefix_len); if (rc) { reason_code = SMC_CLC_DECL_CNFERR; /* configuration error */ goto decline_rdma; @@@ -823,13 -818,10 +818,10 @@@ goto decline_rdma; }
- /* get address of the peer connected to the internal TCP socket */ - kernel_getpeername(newclcsock, (struct sockaddr *)&peeraddr, &len); - /* allocate connection / link group */ mutex_lock(&smc_create_lgr_pending); - local_contact = smc_conn_create(new_smc, peeraddr.sin_addr.s_addr, - smcibdev, ibport, &pclc->lcl, 0); + local_contact = smc_conn_create(new_smc, smcibdev, ibport, &pclc->lcl, + 0); if (local_contact < 0) { rc = local_contact; if (rc == -ENOMEM) @@@ -1075,7 -1067,7 +1067,7 @@@ out }
static int smc_getname(struct socket *sock, struct sockaddr *addr, - int *len, int peer) + int peer) { struct smc_sock *smc;
@@@ -1085,7 -1077,7 +1077,7 @@@
smc = smc_sk(sock->sk);
- return smc->clcsock->ops->getname(smc->clcsock, addr, len, peer); + return smc->clcsock->ops->getname(smc->clcsock, addr, peer); }
static int smc_sendmsg(struct socket *sock, struct msghdr *msg, size_t len) @@@ -1406,10 -1398,8 +1398,10 @@@ static int smc_create(struct net *net, smc->use_fallback = false; /* assume rdma capability first */ rc = sock_create_kern(net, PF_INET, SOCK_STREAM, IPPROTO_TCP, &smc->clcsock); - if (rc) + if (rc) { sk_common_release(sk); + goto out; + } smc->sk.sk_sndbuf = max(smc->clcsock->sk->sk_sndbuf, SMC_BUF_MIN_SIZE); smc->sk.sk_rcvbuf = max(smc->clcsock->sk->sk_rcvbuf, SMC_BUF_MIN_SIZE);
diff --combined net/smc/smc_core.c index 645dd226177b,702ce5f85e97..0e326d93916b --- a/net/smc/smc_core.c +++ b/net/smc/smc_core.c @@@ -144,7 -144,7 +144,7 @@@ free }
/* create a new SMC link group */ - static int smc_lgr_create(struct smc_sock *smc, __be32 peer_in_addr, + static int smc_lgr_create(struct smc_sock *smc, struct smc_ib_device *smcibdev, u8 ibport, char *peer_systemid, unsigned short vlan_id) { @@@ -161,7 -161,6 +161,6 @@@ } lgr->role = smc->listen_smc ? SMC_SERV : SMC_CLNT; lgr->sync_err = false; - lgr->daddr = peer_in_addr; memcpy(lgr->peer_systemid, peer_systemid, SMC_SYSTEMID_LEN); lgr->vlan_id = vlan_id; rwlock_init(&lgr->sndbufs_lock); @@@ -177,7 -176,7 +176,8 @@@
lnk = &lgr->lnk[SMC_SINGLE_LINK]; /* initialize link */ + lnk->link_id = SMC_SINGLE_LINK; + lnk->state = SMC_LNK_ACTIVATING; lnk->smcibdev = smcibdev; lnk->ibport = ibport; lnk->path_mtu = smcibdev->pattr[ibport - 1].active_mtu; @@@ -199,6 -198,8 +199,8 @@@ goto destroy_qp; init_completion(&lnk->llc_confirm); init_completion(&lnk->llc_confirm_resp); + init_completion(&lnk->llc_add); + init_completion(&lnk->llc_add_resp);
smc->conn.lgr = lgr; rwlock_init(&lgr->conns_lock); @@@ -307,6 -308,15 +309,15 @@@ void smc_lgr_free(struct smc_link_grou kfree(lgr); }
+ void smc_lgr_forget(struct smc_link_group *lgr) + { + spin_lock_bh(&smc_lgr_list.lock); + /* do not use this link group for new connections */ + if (!list_empty(&lgr->list)) + list_del_init(&lgr->list); + spin_unlock_bh(&smc_lgr_list.lock); + } + /* terminate linkgroup abnormally */ void smc_lgr_terminate(struct smc_link_group *lgr) { @@@ -314,15 -324,7 +325,7 @@@ struct smc_sock *smc; struct rb_node *node;
- spin_lock_bh(&smc_lgr_list.lock); - if (list_empty(&lgr->list)) { - /* termination already triggered */ - spin_unlock_bh(&smc_lgr_list.lock); - return; - } - /* do not use this link group for new connections */ - list_del_init(&lgr->list); - spin_unlock_bh(&smc_lgr_list.lock); + smc_lgr_forget(lgr);
write_lock_bh(&lgr->conns_lock); node = rb_first(&lgr->conns_all); @@@ -401,7 -403,7 +404,7 @@@ static int smc_link_determine_gid(struc }
/* create a new SMC connection (and a new link group if necessary) */ - int smc_conn_create(struct smc_sock *smc, __be32 peer_in_addr, + int smc_conn_create(struct smc_sock *smc, struct smc_ib_device *smcibdev, u8 ibport, struct smc_clc_msg_local *lcl, int srv_first_contact) { @@@ -458,7 -460,7 +461,7 @@@
create: if (local_contact == SMC_FIRST_CONTACT) { - rc = smc_lgr_create(smc, peer_in_addr, smcibdev, ibport, + rc = smc_lgr_create(smc, smcibdev, ibport, lcl->id_for_peer, vlan_id); if (rc) goto out; @@@ -466,7 -468,7 +469,7 @@@ rc = smc_link_determine_gid(conn->lgr); } conn->local_tx_ctrl.common.type = SMC_CDC_MSG_TYPE; - conn->local_tx_ctrl.len = sizeof(struct smc_cdc_msg); + conn->local_tx_ctrl.len = SMC_WR_TX_SIZE; #ifndef KERNEL_HAS_ATOMIC64 spin_lock_init(&conn->acurs_lock); #endif @@@ -699,27 -701,55 +702,55 @@@ static inline int smc_rmb_reserve_rtoke return -ENOSPC; }
- /* save rkey and dma_addr received from peer during clc handshake */ - int smc_rmb_rtoken_handling(struct smc_connection *conn, - struct smc_clc_msg_accept_confirm *clc) + /* add a new rtoken from peer */ + int smc_rtoken_add(struct smc_link_group *lgr, __be64 nw_vaddr, __be32 nw_rkey) { - u64 dma_addr = be64_to_cpu(clc->rmb_dma_addr); - struct smc_link_group *lgr = conn->lgr; - u32 rkey = ntohl(clc->rmb_rkey); + u64 dma_addr = be64_to_cpu(nw_vaddr); + u32 rkey = ntohl(nw_rkey); int i;
for (i = 0; i < SMC_RMBS_PER_LGR_MAX; i++) { if ((lgr->rtokens[i][SMC_SINGLE_LINK].rkey == rkey) && (lgr->rtokens[i][SMC_SINGLE_LINK].dma_addr == dma_addr) && test_bit(i, lgr->rtokens_used_mask)) { - conn->rtoken_idx = i; + /* already in list */ + return i; + } + } + i = smc_rmb_reserve_rtoken_idx(lgr); + if (i < 0) + return i; + lgr->rtokens[i][SMC_SINGLE_LINK].rkey = rkey; + lgr->rtokens[i][SMC_SINGLE_LINK].dma_addr = dma_addr; + return i; + } + + /* delete an rtoken */ + int smc_rtoken_delete(struct smc_link_group *lgr, __be32 nw_rkey) + { + u32 rkey = ntohl(nw_rkey); + int i; + + for (i = 0; i < SMC_RMBS_PER_LGR_MAX; i++) { + if (lgr->rtokens[i][SMC_SINGLE_LINK].rkey == rkey && + test_bit(i, lgr->rtokens_used_mask)) { + lgr->rtokens[i][SMC_SINGLE_LINK].rkey = 0; + lgr->rtokens[i][SMC_SINGLE_LINK].dma_addr = 0; + + clear_bit(i, lgr->rtokens_used_mask); return 0; } } - conn->rtoken_idx = smc_rmb_reserve_rtoken_idx(lgr); + return -ENOENT; + } + + /* save rkey and dma_addr received from peer during clc handshake */ + int smc_rmb_rtoken_handling(struct smc_connection *conn, + struct smc_clc_msg_accept_confirm *clc) + { + conn->rtoken_idx = smc_rtoken_add(conn->lgr, clc->rmb_dma_addr, + clc->rmb_rkey); if (conn->rtoken_idx < 0) return conn->rtoken_idx; - lgr->rtokens[conn->rtoken_idx][SMC_SINGLE_LINK].rkey = rkey; - lgr->rtokens[conn->rtoken_idx][SMC_SINGLE_LINK].dma_addr = dma_addr; return 0; } diff --combined net/smc/smc_llc.c index b4aa4fcedb96,54e8d6dc9201..ea4b21981b4b --- a/net/smc/smc_llc.c +++ b/net/smc/smc_llc.c @@@ -4,9 -4,6 +4,6 @@@ * * Link Layer Control (LLC) * - * For now, we only support the necessary "confirm link" functionality - * which happens for the first RoCE link after successful CLC handshake. - * * Copyright IBM Corp. 2016 * * Author(s): Klaus Wacker Klaus.Wacker@de.ibm.com @@@ -21,6 -18,122 +18,122 @@@ #include "smc_clc.h" #include "smc_llc.h"
+ #define SMC_LLC_DATA_LEN 40 + + struct smc_llc_hdr { + struct smc_wr_rx_hdr common; + u8 length; /* 44 */ + #if defined(__BIG_ENDIAN_BITFIELD) + u8 reserved:4, + add_link_rej_rsn:4; + #elif defined(__LITTLE_ENDIAN_BITFIELD) + u8 add_link_rej_rsn:4, + reserved:4; + #endif + u8 flags; + }; + + #define SMC_LLC_FLAG_NO_RMBE_EYEC 0x03 + + struct smc_llc_msg_confirm_link { /* type 0x01 */ + struct smc_llc_hdr hd; + u8 sender_mac[ETH_ALEN]; + u8 sender_gid[SMC_GID_SIZE]; + u8 sender_qp_num[3]; + u8 link_num; + u8 link_uid[SMC_LGR_ID_SIZE]; + u8 max_links; + u8 reserved[9]; + }; + + #define SMC_LLC_FLAG_ADD_LNK_REJ 0x40 + #define SMC_LLC_REJ_RSN_NO_ALT_PATH 1 + + #define SMC_LLC_ADD_LNK_MAX_LINKS 2 + + struct smc_llc_msg_add_link { /* type 0x02 */ + struct smc_llc_hdr hd; + u8 sender_mac[ETH_ALEN]; + u8 reserved2[2]; + u8 sender_gid[SMC_GID_SIZE]; + u8 sender_qp_num[3]; + u8 link_num; + u8 flags2; /* QP mtu */ + u8 initial_psn[3]; + u8 reserved[8]; + }; + + #define SMC_LLC_FLAG_DEL_LINK_ALL 0x40 + #define SMC_LLC_FLAG_DEL_LINK_ORDERLY 0x20 + + struct smc_llc_msg_del_link { /* type 0x04 */ + struct smc_llc_hdr hd; + u8 link_num; + __be32 reason; + u8 reserved[35]; + } __packed; /* format defined in RFC7609 */ + + struct smc_llc_msg_test_link { /* type 0x07 */ + struct smc_llc_hdr hd; + u8 user_data[16]; + u8 reserved[24]; + }; + + struct smc_rmb_rtoken { + union { + u8 num_rkeys; /* first rtoken byte of CONFIRM LINK msg */ + /* is actually the num of rtokens, first */ + /* rtoken is always for the current link */ + u8 link_id; /* link id of the rtoken */ + }; + __be32 rmb_key; + __be64 rmb_vaddr; + } __packed; /* format defined in RFC7609 */ + + #define SMC_LLC_RKEYS_PER_MSG 3 + + struct smc_llc_msg_confirm_rkey { /* type 0x06 */ + struct smc_llc_hdr hd; + struct smc_rmb_rtoken rtoken[SMC_LLC_RKEYS_PER_MSG]; + u8 reserved; + }; + + struct smc_llc_msg_confirm_rkey_cont { /* type 0x08 */ + struct smc_llc_hdr hd; + u8 num_rkeys; + struct smc_rmb_rtoken rtoken[SMC_LLC_RKEYS_PER_MSG]; + }; + + #define SMC_LLC_DEL_RKEY_MAX 8 + #define SMC_LLC_FLAG_RKEY_NEG 0x20 + + struct smc_llc_msg_delete_rkey { /* type 0x09 */ + struct smc_llc_hdr hd; + u8 num_rkeys; + u8 err_mask; + u8 reserved[2]; + __be32 rkey[8]; + u8 reserved2[4]; + }; + + union smc_llc_msg { + struct smc_llc_msg_confirm_link confirm_link; + struct smc_llc_msg_add_link add_link; + struct smc_llc_msg_del_link delete_link; + + struct smc_llc_msg_confirm_rkey confirm_rkey; + struct smc_llc_msg_confirm_rkey_cont confirm_rkey_cont; + struct smc_llc_msg_delete_rkey delete_rkey; + + struct smc_llc_msg_test_link test_link; + struct { + struct smc_llc_hdr hdr; + u8 data[SMC_LLC_DATA_LEN]; + } raw; + }; + + #define SMC_LLC_FLAG_RESP 0x80 + /********************************** send *************************************/
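One invariant worth noting in the definitions above: every LLC message occupies the same 44-byte buffer, i.e. the smc_llc_hdr (whose length field is documented as 44) plus SMC_LLC_DATA_LEN == 40 bytes of type-specific body, which is why union smc_llc_msg includes a raw view carrying data[SMC_LLC_DATA_LEN]. Assuming the header really packs to 4 bytes, a compile-time check could read (hypothetical, not part of the patch):

BUILD_BUG_ON(sizeof(union smc_llc_msg) !=
	     SMC_LLC_DATA_LEN + sizeof(struct smc_llc_hdr));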
struct smc_llc_tx_pend { @@@ -87,14 -200,112 +200,112 @@@ int smc_llc_send_confirm_link(struct sm memset(confllc, 0, sizeof(*confllc)); confllc->hd.common.type = SMC_LLC_CONFIRM_LINK; confllc->hd.length = sizeof(struct smc_llc_msg_confirm_link); + confllc->hd.flags |= SMC_LLC_FLAG_NO_RMBE_EYEC; if (reqresp == SMC_LLC_RESP) confllc->hd.flags |= SMC_LLC_FLAG_RESP; memcpy(confllc->sender_mac, mac, ETH_ALEN); memcpy(confllc->sender_gid, gid, SMC_GID_SIZE); hton24(confllc->sender_qp_num, link->roce_qp->qp_num); - /* confllc->link_num = SMC_SINGLE_LINK; already done by memset above */ + confllc->link_num = link->link_id; memcpy(confllc->link_uid, lgr->id, SMC_LGR_ID_SIZE); - confllc->max_links = SMC_LINKS_PER_LGR_MAX; + confllc->max_links = SMC_LLC_ADD_LNK_MAX_LINKS; /* enforce peer resp. */ + /* send llc message */ + rc = smc_wr_tx_send(link, pend); + return rc; + } + + /* send ADD LINK request or response */ + int smc_llc_send_add_link(struct smc_link *link, u8 mac[], + union ib_gid *gid, + enum smc_llc_reqresp reqresp) + { + struct smc_llc_msg_add_link *addllc; + struct smc_wr_tx_pend_priv *pend; + struct smc_wr_buf *wr_buf; + int rc; + + rc = smc_llc_add_pending_send(link, &wr_buf, &pend); + if (rc) + return rc; + addllc = (struct smc_llc_msg_add_link *)wr_buf; + memset(addllc, 0, sizeof(*addllc)); + addllc->hd.common.type = SMC_LLC_ADD_LINK; + addllc->hd.length = sizeof(struct smc_llc_msg_add_link); + if (reqresp == SMC_LLC_RESP) { + addllc->hd.flags |= SMC_LLC_FLAG_RESP; + /* always reject more links for now */ + addllc->hd.flags |= SMC_LLC_FLAG_ADD_LNK_REJ; + addllc->hd.add_link_rej_rsn = SMC_LLC_REJ_RSN_NO_ALT_PATH; + } + memcpy(addllc->sender_mac, mac, ETH_ALEN); + memcpy(addllc->sender_gid, gid, SMC_GID_SIZE); + /* send llc message */ + rc = smc_wr_tx_send(link, pend); + return rc; + } + + /* send DELETE LINK request or response */ + int smc_llc_send_delete_link(struct smc_link *link, + enum smc_llc_reqresp reqresp) + { + struct smc_llc_msg_del_link *delllc; + struct smc_wr_tx_pend_priv *pend; + struct smc_wr_buf *wr_buf; + int rc; + + rc = smc_llc_add_pending_send(link, &wr_buf, &pend); + if (rc) + return rc; + delllc = (struct smc_llc_msg_del_link *)wr_buf; + memset(delllc, 0, sizeof(*delllc)); + delllc->hd.common.type = SMC_LLC_DELETE_LINK; + delllc->hd.length = sizeof(struct smc_llc_msg_add_link); + if (reqresp == SMC_LLC_RESP) + delllc->hd.flags |= SMC_LLC_FLAG_RESP; + /* DEL_LINK_ALL because only 1 link supported */ + delllc->hd.flags |= SMC_LLC_FLAG_DEL_LINK_ALL; + delllc->hd.flags |= SMC_LLC_FLAG_DEL_LINK_ORDERLY; + delllc->link_num = link->link_id; + /* send llc message */ + rc = smc_wr_tx_send(link, pend); + return rc; + } + + /* send LLC test link request or response */ + int smc_llc_send_test_link(struct smc_link *link, u8 user_data[16], + enum smc_llc_reqresp reqresp) + { + struct smc_llc_msg_test_link *testllc; + struct smc_wr_tx_pend_priv *pend; + struct smc_wr_buf *wr_buf; + int rc; + + rc = smc_llc_add_pending_send(link, &wr_buf, &pend); + if (rc) + return rc; + testllc = (struct smc_llc_msg_test_link *)wr_buf; + memset(testllc, 0, sizeof(*testllc)); + testllc->hd.common.type = SMC_LLC_TEST_LINK; + testllc->hd.length = sizeof(struct smc_llc_msg_test_link); + if (reqresp == SMC_LLC_RESP) + testllc->hd.flags |= SMC_LLC_FLAG_RESP; + memcpy(testllc->user_data, user_data, sizeof(testllc->user_data)); + /* send llc message */ + rc = smc_wr_tx_send(link, pend); + return rc; + } + + /* send a prepared message */ + static int smc_llc_send_message(struct smc_link *link, void 
*llcbuf, int llclen) + { + struct smc_wr_tx_pend_priv *pend; + struct smc_wr_buf *wr_buf; + int rc; + + rc = smc_llc_add_pending_send(link, &wr_buf, &pend); + if (rc) + return rc; + memcpy(wr_buf, llcbuf, llclen); /* send llc message */ rc = smc_wr_tx_send(link, pend); return rc; @@@ -106,19 -317,156 +317,156 @@@ static void smc_llc_rx_confirm_link(str struct smc_llc_msg_confirm_link *llc) { struct smc_link_group *lgr; + int conf_rc;
lgr = container_of(link, struct smc_link_group, lnk[SMC_SINGLE_LINK]); + + /* RMBE eyecatchers are not supported */ + if (llc->hd.flags & SMC_LLC_FLAG_NO_RMBE_EYEC) + conf_rc = 0; + else + conf_rc = ENOTSUPP; + if (llc->hd.flags & SMC_LLC_FLAG_RESP) { - if (lgr->role == SMC_SERV) + if (lgr->role == SMC_SERV && + link->state == SMC_LNK_ACTIVATING) { + link->llc_confirm_resp_rc = conf_rc; complete(&link->llc_confirm_resp); + } } else { - if (lgr->role == SMC_CLNT) { + if (lgr->role == SMC_CLNT && + link->state == SMC_LNK_ACTIVATING) { + link->llc_confirm_rc = conf_rc; link->link_id = llc->link_num; complete(&link->llc_confirm); } } }
+ static void smc_llc_rx_add_link(struct smc_link *link, + struct smc_llc_msg_add_link *llc) + { + struct smc_link_group *lgr = container_of(link, struct smc_link_group, + lnk[SMC_SINGLE_LINK]); + + if (llc->hd.flags & SMC_LLC_FLAG_RESP) { + if (link->state == SMC_LNK_ACTIVATING) + complete(&link->llc_add_resp); + } else { + if (link->state == SMC_LNK_ACTIVATING) { + complete(&link->llc_add); + return; + } + + if (lgr->role == SMC_SERV) { + smc_llc_send_add_link(link, + link->smcibdev->mac[link->ibport - 1], + &link->smcibdev->gid[link->ibport - 1], + SMC_LLC_REQ); + + } else { + smc_llc_send_add_link(link, + link->smcibdev->mac[link->ibport - 1], + &link->smcibdev->gid[link->ibport - 1], + SMC_LLC_RESP); + } + } + } + + static void smc_llc_rx_delete_link(struct smc_link *link, + struct smc_llc_msg_del_link *llc) + { + struct smc_link_group *lgr = container_of(link, struct smc_link_group, + lnk[SMC_SINGLE_LINK]); + + if (llc->hd.flags & SMC_LLC_FLAG_RESP) { + if (lgr->role == SMC_SERV) + smc_lgr_terminate(lgr); + } else { + if (lgr->role == SMC_SERV) { + smc_lgr_forget(lgr); + smc_llc_send_delete_link(link, SMC_LLC_REQ); + } else { + smc_llc_send_delete_link(link, SMC_LLC_RESP); + smc_lgr_terminate(lgr); + } + } + } + + static void smc_llc_rx_test_link(struct smc_link *link, + struct smc_llc_msg_test_link *llc) + { + if (llc->hd.flags & SMC_LLC_FLAG_RESP) { + /* unused as long as we don't send this type of msg */ + } else { + smc_llc_send_test_link(link, llc->user_data, SMC_LLC_RESP); + } + } + + static void smc_llc_rx_confirm_rkey(struct smc_link *link, + struct smc_llc_msg_confirm_rkey *llc) + { + struct smc_link_group *lgr; + int rc; + + lgr = container_of(link, struct smc_link_group, lnk[SMC_SINGLE_LINK]); + + if (llc->hd.flags & SMC_LLC_FLAG_RESP) { + /* unused as long as we don't send this type of msg */ + } else { + rc = smc_rtoken_add(lgr, + llc->rtoken[0].rmb_vaddr, + llc->rtoken[0].rmb_key); + + /* ignore rtokens for other links, we have only one link */ + + llc->hd.flags |= SMC_LLC_FLAG_RESP; + if (rc < 0) + llc->hd.flags |= SMC_LLC_FLAG_RKEY_NEG; + smc_llc_send_message(link, (void *)llc, sizeof(*llc)); + } + } + + static void smc_llc_rx_confirm_rkey_cont(struct smc_link *link, + struct smc_llc_msg_confirm_rkey_cont *llc) + { + if (llc->hd.flags & SMC_LLC_FLAG_RESP) { + /* unused as long as we don't send this type of msg */ + } else { + /* ignore rtokens for other links, we have only one link */ + llc->hd.flags |= SMC_LLC_FLAG_RESP; + smc_llc_send_message(link, (void *)llc, sizeof(*llc)); + } + } + + static void smc_llc_rx_delete_rkey(struct smc_link *link, + struct smc_llc_msg_delete_rkey *llc) + { + struct smc_link_group *lgr; + u8 err_mask = 0; + int i, max; + + lgr = container_of(link, struct smc_link_group, lnk[SMC_SINGLE_LINK]); + + if (llc->hd.flags & SMC_LLC_FLAG_RESP) { + /* unused as long as we don't send this type of msg */ + } else { + max = min_t(u8, llc->num_rkeys, SMC_LLC_DEL_RKEY_MAX); + for (i = 0; i < max; i++) { + if (smc_rtoken_delete(lgr, llc->rkey[i])) + err_mask |= 1 << (SMC_LLC_DEL_RKEY_MAX - 1 - i); + } + + if (err_mask) { + llc->hd.flags |= SMC_LLC_FLAG_RKEY_NEG; + llc->err_mask = err_mask; + } + + llc->hd.flags |= SMC_LLC_FLAG_RESP; + smc_llc_send_message(link, (void *)llc, sizeof(*llc)); + } + } + static void smc_llc_rx_handler(struct ib_wc *wc, void *buf) { struct smc_link *link = (struct smc_link *)wc->qp->qp_context; @@@ -128,8 -476,30 +476,30 @@@ return; /* short message */ if (llc->raw.hdr.length != sizeof(*llc)) return; /* invalid message */ - 
if (llc->raw.hdr.common.type == SMC_LLC_CONFIRM_LINK) + + switch (llc->raw.hdr.common.type) { + case SMC_LLC_TEST_LINK: + smc_llc_rx_test_link(link, &llc->test_link); + break; + case SMC_LLC_CONFIRM_LINK: smc_llc_rx_confirm_link(link, &llc->confirm_link); + break; + case SMC_LLC_ADD_LINK: + smc_llc_rx_add_link(link, &llc->add_link); + break; + case SMC_LLC_DELETE_LINK: + smc_llc_rx_delete_link(link, &llc->delete_link); + break; + case SMC_LLC_CONFIRM_RKEY: + smc_llc_rx_confirm_rkey(link, &llc->confirm_rkey); + break; + case SMC_LLC_CONFIRM_RKEY_CONT: + smc_llc_rx_confirm_rkey_cont(link, &llc->confirm_rkey_cont); + break; + case SMC_LLC_DELETE_RKEY: + smc_llc_rx_delete_rkey(link, &llc->delete_rkey); + break; + } }
/***************************** init, exit, misc ******************************/ @@@ -140,6 -510,30 +510,30 @@@ static struct smc_wr_rx_handler smc_llc .type = SMC_LLC_CONFIRM_LINK }, { + .handler = smc_llc_rx_handler, + .type = SMC_LLC_TEST_LINK + }, + { + .handler = smc_llc_rx_handler, + .type = SMC_LLC_ADD_LINK + }, + { + .handler = smc_llc_rx_handler, + .type = SMC_LLC_DELETE_LINK + }, + { + .handler = smc_llc_rx_handler, + .type = SMC_LLC_CONFIRM_RKEY + }, + { + .handler = smc_llc_rx_handler, + .type = SMC_LLC_CONFIRM_RKEY_CONT + }, + { + .handler = smc_llc_rx_handler, + .type = SMC_LLC_DELETE_RKEY + }, + { .handler = NULL, } }; diff --combined net/tipc/group.c index 04e516d18054,03086ccb7746..d7a7befeddd4 --- a/net/tipc/group.c +++ b/net/tipc/group.c @@@ -37,7 -37,7 +37,7 @@@ #include "addr.h" #include "group.h" #include "bcast.h" - #include "server.h" + #include "topsrv.h" #include "msg.h" #include "socket.h" #include "node.h" @@@ -189,7 -189,6 +189,7 @@@ struct tipc_group *tipc_group_create(st grp->loopback = mreq->flags & TIPC_GROUP_LOOPBACK; grp->events = mreq->flags & TIPC_GROUP_MEMBER_EVTS; grp->open = group_is_open; + *grp->open = false; filter |= global ? TIPC_SUB_CLUSTER_SCOPE : TIPC_SUB_NODE_SCOPE; if (tipc_topsrv_kern_subscr(net, portid, type, 0, ~0, filter, &grp->subid)) diff --combined net/tipc/socket.c index 7dfa9fc99ec3,f93477187a90..8b04e601311c --- a/net/tipc/socket.c +++ b/net/tipc/socket.c @@@ -473,7 -473,6 +473,7 @@@ static int tipc_sk_create(struct net *n sk->sk_write_space = tipc_write_space; sk->sk_destruct = tipc_sock_destruct; tsk->conn_timeout = CONN_TIMEOUT_DEFAULT; + tsk->group_is_open = true; atomic_set(&tsk->dupl_rcvcnt, 0);
/* Start out with safe limits until we receive an advertised window */ @@@ -666,7 -665,7 +666,7 @@@ exit * a completely predictable manner). */ static int tipc_getname(struct socket *sock, struct sockaddr *uaddr, - int *uaddr_len, int peer) + int peer) { struct sockaddr_tipc *addr = (struct sockaddr_tipc *)uaddr; struct sock *sk = sock->sk; @@@ -685,13 -684,12 +685,12 @@@ addr->addr.id.node = tn->own_addr; }
- *uaddr_len = sizeof(*addr); addr->addrtype = TIPC_ADDR_ID; addr->family = AF_TIPC; addr->scope = 0; addr->addr.name.domain = 0;
- return 0; + return sizeof(*addr); }
/** diff --combined net/xfrm/xfrm_policy.c index 625b3fca5704,77d9d1ab05ce..cb3bb9ae4407 --- a/net/xfrm/xfrm_policy.c +++ b/net/xfrm/xfrm_policy.c @@@ -1458,13 -1458,10 +1458,13 @@@ xfrm_tmpl_resolve(struct xfrm_policy ** static int xfrm_get_tos(const struct flowi *fl, int family) { const struct xfrm_policy_afinfo *afinfo; - int tos = 0; + int tos;
afinfo = xfrm_policy_get_afinfo(family); - tos = afinfo ? afinfo->get_tos(fl) : 0; + if (!afinfo) + return 0; + + tos = afinfo->get_tos(fl);
rcu_read_unlock();
@@@ -1894,7 -1891,7 +1894,7 @@@ static void xfrm_policy_queue_process(s spin_unlock(&pq->hold_queue.lock);
dst_hold(xfrm_dst_path(dst)); - dst = xfrm_lookup(net, xfrm_dst_path(dst), &fl, sk, 0); + dst = xfrm_lookup(net, xfrm_dst_path(dst), &fl, sk, XFRM_LOOKUP_QUEUE); if (IS_ERR(dst)) goto purge_queue;
@@@ -2732,14 -2729,14 +2732,14 @@@ static const void *xfrm_get_dst_nexthop while (dst->xfrm) { const struct xfrm_state *xfrm = dst->xfrm;
+ dst = xfrm_dst_child(dst); + if (xfrm->props.mode == XFRM_MODE_TRANSPORT) continue; if (xfrm->type->flags & XFRM_TYPE_REMOTE_COADDR) daddr = xfrm->coaddr; else if (!(xfrm->type->flags & XFRM_TYPE_LOCAL_COADDR)) daddr = &xfrm->id.daddr; - - dst = xfrm_dst_child(dst); } return daddr; } @@@ -2985,6 -2982,7 +2985,7 @@@ static void __net_exit xfrm_net_exit(st static struct pernet_operations __net_initdata xfrm_net_ops = { .init = xfrm_net_init, .exit = xfrm_net_exit, + .async = true, };
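The reordering in the hunk above is a correctness fix rather than a cleanup: with the advance at the bottom of the loop, the continue taken for XFRM_MODE_TRANSPORT skipped it, and a transport-mode state could make the loop spin forever on the same dst. Moving the child lookup to the top guarantees progress on every iteration:

while (dst->xfrm) {
	const struct xfrm_state *xfrm = dst->xfrm;

	dst = xfrm_dst_child(dst);	/* advance before any continue */

	if (xfrm->props.mode == XFRM_MODE_TRANSPORT)
		continue;	/* transport mode contributes no nexthop */
	/* ... tunnel modes update daddr as in the hunk above ... */
}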
void __init xfrm_init(void) diff --combined tools/testing/selftests/bpf/test_verifier.c index 437c0b1c9d21,9eb05f3135ac..cc8f435570b4 --- a/tools/testing/selftests/bpf/test_verifier.c +++ b/tools/testing/selftests/bpf/test_verifier.c @@@ -24,7 -24,6 +24,6 @@@ #include <limits.h>
#include <sys/capability.h> - #include <sys/resource.h>
#include <linux/unistd.h> #include <linux/filter.h> @@@ -41,7 -40,7 +40,7 @@@ # define CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS 1 # endif #endif - + #include "bpf_rlimit.h" #include "../../../include/linux/filter.h"
#ifndef ARRAY_SIZE @@@ -57,6 -56,9 +56,9 @@@ #define F_NEEDS_EFFICIENT_UNALIGNED_ACCESS (1 << 0) #define F_LOAD_WITH_STRICT_ALIGNMENT (1 << 1)
+ #define UNPRIV_SYSCTL "kernel/unprivileged_bpf_disabled" + static bool unpriv_disabled = false; + struct bpf_test { const char *descr; struct bpf_insn insns[MAX_INSNS]; @@@ -2587,17 -2589,74 +2589,74 @@@ static struct bpf_test tests[] = .result = ACCEPT, }, { + "runtime/jit: tail_call within bounds, prog once", + .insns = { + BPF_MOV64_IMM(BPF_REG_3, 0), + BPF_LD_MAP_FD(BPF_REG_2, 0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, + BPF_FUNC_tail_call), + BPF_MOV64_IMM(BPF_REG_0, 1), + BPF_EXIT_INSN(), + }, + .fixup_prog = { 1 }, + .result = ACCEPT, + .retval = 42, + }, + { + "runtime/jit: tail_call within bounds, prog loop", + .insns = { + BPF_MOV64_IMM(BPF_REG_3, 1), + BPF_LD_MAP_FD(BPF_REG_2, 0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, + BPF_FUNC_tail_call), + BPF_MOV64_IMM(BPF_REG_0, 1), + BPF_EXIT_INSN(), + }, + .fixup_prog = { 1 }, + .result = ACCEPT, + .retval = 41, + }, + { + "runtime/jit: tail_call within bounds, no prog", + .insns = { + BPF_MOV64_IMM(BPF_REG_3, 2), + BPF_LD_MAP_FD(BPF_REG_2, 0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, + BPF_FUNC_tail_call), + BPF_MOV64_IMM(BPF_REG_0, 1), + BPF_EXIT_INSN(), + }, + .fixup_prog = { 1 }, + .result = ACCEPT, + .retval = 1, + }, + { + "runtime/jit: tail_call out of bounds", + .insns = { + BPF_MOV64_IMM(BPF_REG_3, 256), + BPF_LD_MAP_FD(BPF_REG_2, 0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, + BPF_FUNC_tail_call), + BPF_MOV64_IMM(BPF_REG_0, 2), + BPF_EXIT_INSN(), + }, + .fixup_prog = { 1 }, + .result = ACCEPT, + .retval = 2, + }, + { "runtime/jit: pass negative index to tail_call", .insns = { BPF_MOV64_IMM(BPF_REG_3, -1), BPF_LD_MAP_FD(BPF_REG_2, 0), BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_tail_call), - BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_MOV64_IMM(BPF_REG_0, 2), BPF_EXIT_INSN(), }, .fixup_prog = { 1 }, .result = ACCEPT, + .retval = 2, }, { "runtime/jit: pass > 32bit index to tail_call", @@@ -2606,11 -2665,12 +2665,12 @@@ BPF_LD_MAP_FD(BPF_REG_2, 0), BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_tail_call), - BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_MOV64_IMM(BPF_REG_0, 2), BPF_EXIT_INSN(), }, .fixup_prog = { 2 }, .result = ACCEPT, + .retval = 42, }, { "stack pointer arithmetic", @@@ -11164,63 -11224,94 +11224,152 @@@ .prog_type = BPF_PROG_TYPE_TRACEPOINT, }, { + "xadd/w check unaligned stack", + .insns = { + BPF_MOV64_IMM(BPF_REG_0, 1), + BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8), + BPF_STX_XADD(BPF_W, BPF_REG_10, BPF_REG_0, -7), + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8), + BPF_EXIT_INSN(), + }, + .result = REJECT, + .errstr = "misaligned stack access off", + .prog_type = BPF_PROG_TYPE_SCHED_CLS, + }, + { + "xadd/w check unaligned map", + .insns = { + BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, + BPF_FUNC_map_lookup_elem), + BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1), + BPF_EXIT_INSN(), + BPF_MOV64_IMM(BPF_REG_1, 1), + BPF_STX_XADD(BPF_W, BPF_REG_0, BPF_REG_1, 3), + BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0, 3), + BPF_EXIT_INSN(), + }, + .fixup_map1 = { 3 }, + .result = REJECT, + .errstr = "misaligned value access off", + .prog_type = BPF_PROG_TYPE_SCHED_CLS, + }, + { + "xadd/w check unaligned pkt", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, + offsetof(struct xdp_md, data)), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, + offsetof(struct xdp_md, data_end)), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8), + 
BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 2), + BPF_MOV64_IMM(BPF_REG_0, 99), + BPF_JMP_IMM(BPF_JA, 0, 0, 6), + BPF_MOV64_IMM(BPF_REG_0, 1), + BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0), + BPF_ST_MEM(BPF_W, BPF_REG_2, 3, 0), + BPF_STX_XADD(BPF_W, BPF_REG_2, BPF_REG_0, 1), + BPF_STX_XADD(BPF_W, BPF_REG_2, BPF_REG_0, 2), + BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_2, 1), + BPF_EXIT_INSN(), + }, + .result = REJECT, + .errstr = "BPF_XADD stores into R2 packet", + .prog_type = BPF_PROG_TYPE_XDP, + }, ++ { + "jit: lsh, rsh, arsh by 1", + .insns = { + BPF_MOV64_IMM(BPF_REG_0, 1), + BPF_MOV64_IMM(BPF_REG_1, 0xff), + BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 1), + BPF_ALU32_IMM(BPF_LSH, BPF_REG_1, 1), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0x3fc, 1), + BPF_EXIT_INSN(), + BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 1), + BPF_ALU32_IMM(BPF_RSH, BPF_REG_1, 1), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0xff, 1), + BPF_EXIT_INSN(), + BPF_ALU64_IMM(BPF_ARSH, BPF_REG_1, 1), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0x7f, 1), + BPF_EXIT_INSN(), + BPF_MOV64_IMM(BPF_REG_0, 2), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .retval = 2, + }, + { + "jit: mov32 for ldimm64, 1", + .insns = { + BPF_MOV64_IMM(BPF_REG_0, 2), + BPF_LD_IMM64(BPF_REG_1, 0xfeffffffffffffffULL), + BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 32), + BPF_LD_IMM64(BPF_REG_2, 0xfeffffffULL), + BPF_JMP_REG(BPF_JEQ, BPF_REG_1, BPF_REG_2, 1), + BPF_MOV64_IMM(BPF_REG_0, 1), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .retval = 2, + }, + { + "jit: mov32 for ldimm64, 2", + .insns = { + BPF_MOV64_IMM(BPF_REG_0, 1), + BPF_LD_IMM64(BPF_REG_1, 0x1ffffffffULL), + BPF_LD_IMM64(BPF_REG_2, 0xffffffffULL), + BPF_JMP_REG(BPF_JEQ, BPF_REG_1, BPF_REG_2, 1), + BPF_MOV64_IMM(BPF_REG_0, 2), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .retval = 2, + }, + { + "jit: various mul tests", + .insns = { + BPF_LD_IMM64(BPF_REG_2, 0xeeff0d413122ULL), + BPF_LD_IMM64(BPF_REG_0, 0xfefefeULL), + BPF_LD_IMM64(BPF_REG_1, 0xefefefULL), + BPF_ALU64_REG(BPF_MUL, BPF_REG_0, BPF_REG_1), + BPF_JMP_REG(BPF_JEQ, BPF_REG_0, BPF_REG_2, 2), + BPF_MOV64_IMM(BPF_REG_0, 1), + BPF_EXIT_INSN(), + BPF_LD_IMM64(BPF_REG_3, 0xfefefeULL), + BPF_ALU64_REG(BPF_MUL, BPF_REG_3, BPF_REG_1), + BPF_JMP_REG(BPF_JEQ, BPF_REG_3, BPF_REG_2, 2), + BPF_MOV64_IMM(BPF_REG_0, 1), + BPF_EXIT_INSN(), + BPF_MOV32_REG(BPF_REG_2, BPF_REG_2), + BPF_LD_IMM64(BPF_REG_0, 0xfefefeULL), + BPF_ALU32_REG(BPF_MUL, BPF_REG_0, BPF_REG_1), + BPF_JMP_REG(BPF_JEQ, BPF_REG_0, BPF_REG_2, 2), + BPF_MOV64_IMM(BPF_REG_0, 1), + BPF_EXIT_INSN(), + BPF_LD_IMM64(BPF_REG_3, 0xfefefeULL), + BPF_ALU32_REG(BPF_MUL, BPF_REG_3, BPF_REG_1), + BPF_JMP_REG(BPF_JEQ, BPF_REG_3, BPF_REG_2, 2), + BPF_MOV64_IMM(BPF_REG_0, 1), + BPF_EXIT_INSN(), + BPF_LD_IMM64(BPF_REG_0, 0x952a7bbcULL), + BPF_LD_IMM64(BPF_REG_1, 0xfefefeULL), + BPF_LD_IMM64(BPF_REG_2, 0xeeff0d413122ULL), + BPF_ALU32_REG(BPF_MUL, BPF_REG_2, BPF_REG_1), + BPF_JMP_REG(BPF_JEQ, BPF_REG_2, BPF_REG_0, 2), + BPF_MOV64_IMM(BPF_REG_0, 1), + BPF_EXIT_INSN(), + BPF_MOV64_IMM(BPF_REG_0, 2), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .retval = 2, + }, + };
static int probe_filter_length(const struct bpf_insn *fp) @@@ -11245,16 -11336,61 +11394,61 @@@ static int create_map(uint32_t size_val return fd; }
+ static int create_prog_dummy1(void) + { + struct bpf_insn prog[] = { + BPF_MOV64_IMM(BPF_REG_0, 42), + BPF_EXIT_INSN(), + }; + + return bpf_load_program(BPF_PROG_TYPE_SOCKET_FILTER, prog, + ARRAY_SIZE(prog), "GPL", 0, NULL, 0); + } + + static int create_prog_dummy2(int mfd, int idx) + { + struct bpf_insn prog[] = { + BPF_MOV64_IMM(BPF_REG_3, idx), + BPF_LD_MAP_FD(BPF_REG_2, mfd), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, + BPF_FUNC_tail_call), + BPF_MOV64_IMM(BPF_REG_0, 41), + BPF_EXIT_INSN(), + }; + + return bpf_load_program(BPF_PROG_TYPE_SOCKET_FILTER, prog, + ARRAY_SIZE(prog), "GPL", 0, NULL, 0); + } + static int create_prog_array(void) { - int fd; + int p1key = 0, p2key = 1; + int mfd, p1fd, p2fd;
- fd = bpf_create_map(BPF_MAP_TYPE_PROG_ARRAY, sizeof(int), - sizeof(int), 4, 0); - if (fd < 0) + mfd = bpf_create_map(BPF_MAP_TYPE_PROG_ARRAY, sizeof(int), + sizeof(int), 4, 0); + if (mfd < 0) { printf("Failed to create prog array '%s'!\n", strerror(errno)); + return -1; + }
- return fd; + p1fd = create_prog_dummy1(); + p2fd = create_prog_dummy2(mfd, p2key); + if (p1fd < 0 || p2fd < 0) + goto out; + if (bpf_map_update_elem(mfd, &p1key, &p1fd, BPF_ANY) < 0) + goto out; + if (bpf_map_update_elem(mfd, &p2key, &p2fd, BPF_ANY) < 0) + goto out; + close(p2fd); + close(p1fd); + + return mfd; + out: + close(p2fd); + close(p1fd); + close(mfd); + return -1; }
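For readers matching these fixtures to the new tail_call tests earlier in the file: slot 0 holds create_prog_dummy1(), which simply returns 42 (the "prog once" expectation); slot 1 holds create_prog_dummy2(mfd, 1), which tail-calls its own slot until the kernel's tail-call limit trips and then returns 41 ("prog loop"); slots 2 and 3 stay empty, so a tail_call there falls through and the caller's own return value, 1 or 2, is what the test observes ("no prog" and "out of bounds").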
static int create_map_in_map(void) @@@ -11375,7 -11511,8 +11569,8 @@@ static void do_test_single(struct bpf_t goto fail_log; } if (!strstr(bpf_vlog, expected_err) && !reject_from_alignment) { - printf("FAIL\nUnexpected error message!\n"); + printf("FAIL\nUnexpected error message!\n\tEXP: %s\n\tRES: %s\n", + expected_err, bpf_vlog); goto fail_log; } } @@@ -11459,9 -11596,20 +11654,20 @@@ out return ret; }
+ static void get_unpriv_disabled() + { + char buf[2]; + FILE *fd; + + fd = fopen("/proc/sys/"UNPRIV_SYSCTL, "r"); + if (fgets(buf, 2, fd) == buf && atoi(buf)) + unpriv_disabled = true; + fclose(fd); + } + static int do_test(bool unpriv, unsigned int from, unsigned int to) { - int i, passes = 0, errors = 0; + int i, passes = 0, errors = 0, skips = 0;
for (i = from; i < to; i++) { struct bpf_test *test = &tests[i]; @@@ -11469,7 -11617,10 +11675,10 @@@ /* Program types that are not supported by non-root we * skip right away. */ - if (!test->prog_type) { + if (!test->prog_type && unpriv_disabled) { + printf("#%d/u %s SKIP\n", i, test->descr); + skips++; + } else if (!test->prog_type) { if (!unpriv) set_admin(false); printf("#%d/u %s ", i, test->descr); @@@ -11478,20 -11629,22 +11687,22 @@@ set_admin(true); }
- if (!unpriv) { + if (unpriv) { + printf("#%d/p %s SKIP\n", i, test->descr); + skips++; + } else { printf("#%d/p %s ", i, test->descr); do_test_single(test, false, &passes, &errors); } }
- printf("Summary: %d PASSED, %d FAILED\n", passes, errors); + printf("Summary: %d PASSED, %d SKIPPED, %d FAILED\n", passes, + skips, errors); return errors ? EXIT_FAILURE : EXIT_SUCCESS; }
int main(int argc, char **argv) { - struct rlimit rinf = { RLIM_INFINITY, RLIM_INFINITY }; - struct rlimit rlim = { 1 << 20, 1 << 20 }; unsigned int from = 0, to = ARRAY_SIZE(tests); bool unpriv = !is_admin();
@@@ -11512,6 -11665,12 +11723,12 @@@ } }
- setrlimit(RLIMIT_MEMLOCK, unpriv ? &rlim : &rinf); + get_unpriv_disabled(); + if (unpriv && unpriv_disabled) { + printf("Cannot run as unprivileged user with sysctl %s.\n", + UNPRIV_SYSCTL); + return EXIT_FAILURE; + } + return do_test(unpriv, from, to); }