The following commit has been merged in the master branch:

commit aa2eaa8c272a3211dec07ce9c6c863a7e355c10e
Merge: a3d3c74da49c65fc63a937fa559186b0e16adca3 1609d7604b847a9820e63393d1a3b6cac7286d40
Author: David S. Miller <davem@davemloft.net>
Date:   Sun Sep 15 14:17:27 2019 +0200
Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net
Minor overlapping changes in the btusb and ixgbe drivers.
Signed-off-by: David S. Miller <davem@davemloft.net>
diff --combined MAINTAINERS index 84bb34727f81,a50e97a63bc8..16fc09defd39 --- a/MAINTAINERS +++ b/MAINTAINERS @@@ -938,14 -938,6 +938,14 @@@ S: Supporte F: drivers/mux/adgs1408.c F: Documentation/devicetree/bindings/mux/adi,adgs1408.txt
+ANALOG DEVICES INC ADIN DRIVER +M: Alexandru Ardelean alexaundru.ardelean@analog.com +L: netdev@vger.kernel.org +W: http://ez.analog.com/community/linux-device-drivers +S: Supported +F: drivers/net/phy/adin.c +F: Documentation/devicetree/bindings/net/adi,adin.yaml + ANALOG DEVICES INC ADIS DRIVER LIBRARY M: Alexandru Ardelean alexandru.ardelean@analog.com S: Supported @@@ -2923,7 -2915,6 +2923,7 @@@ BATMAN ADVANCE M: Marek Lindner mareklindner@neomailbox.ch M: Simon Wunderlich sw@simonwunderlich.de M: Antonio Quartulli a@unstable.cc +M: Sven Eckelmann sven@narfation.org L: b.a.t.m.a.n@lists.open-mesh.org (moderated for non-subscribers) W: https://www.open-mesh.org/ B: https://www.open-mesh.org/projects/batman-adv/issues @@@ -3644,12 -3635,9 +3644,12 @@@ S: Maintaine F: Documentation/devicetree/bindings/net/can/ F: drivers/net/can/ F: include/linux/can/dev.h +F: include/linux/can/led.h +F: include/linux/can/rx-offload.h F: include/linux/can/platform/ F: include/uapi/linux/can/error.h F: include/uapi/linux/can/netlink.h +F: include/uapi/linux/can/vxcan.h
CAN NETWORK LAYER M: Oliver Hartkopp socketcan@hartkopp.net @@@ -3662,23 -3650,11 +3662,23 @@@ S: Maintaine F: Documentation/networking/can.rst F: net/can/ F: include/linux/can/core.h +F: include/linux/can/skb.h +F: include/net/netns/can.h F: include/uapi/linux/can.h F: include/uapi/linux/can/bcm.h F: include/uapi/linux/can/raw.h F: include/uapi/linux/can/gw.h
+CAN-J1939 NETWORK LAYER +M: Robin van der Gracht robin@protonic.nl +M: Oleksij Rempel o.rempel@pengutronix.de +R: Pengutronix Kernel Team kernel@pengutronix.de +L: linux-can@vger.kernel.org +S: Maintained +F: Documentation/networking/j1939.txt +F: net/can/j1939/ +F: include/uapi/linux/can/j1939.h + CAPABILITIES M: Serge Hallyn serge@hallyn.com L: linux-security-module@vger.kernel.org @@@ -7480,7 -7456,6 +7480,7 @@@ F: drivers/hid/hid-hyperv. F: drivers/hv/ F: drivers/input/serio/hyperv-keyboard.c F: drivers/pci/controller/pci-hyperv.c +F: drivers/pci/controller/pci-hyperv-intf.c F: drivers/net/hyperv/ F: drivers/scsi/storvsc_drv.c F: drivers/uio/uio_hv_generic.c @@@ -11191,7 -11166,6 +11191,7 @@@ S: Maintaine W: https://fedorahosted.org/dropwatch/ F: net/core/drop_monitor.c F: include/uapi/linux/net_dropmon.h +F: include/net/drop_monitor.h
NETWORKING DRIVERS M: "David S. Miller" davem@davemloft.net @@@ -11371,6 -11345,7 +11371,6 @@@ F: include/net/nfc F: include/uapi/linux/nfc.h F: drivers/nfc/ F: include/linux/platform_data/nfcmrvl.h -F: include/linux/platform_data/nxp-nci.h F: Documentation/devicetree/bindings/net/nfc/
NFS, SUNRPC, AND LOCKD CLIENTS @@@ -12618,14 -12593,6 +12618,14 @@@ L: platform-driver-x86@vger.kernel.or S: Maintained F: drivers/platform/x86/peaq-wmi.c
+PENSANDO ETHERNET DRIVERS +M: Shannon Nelson snelson@pensando.io +M: Pensando Drivers drivers@pensando.io +L: netdev@vger.kernel.org +S: Supported +F: Documentation/networking/device_drivers/pensando/ionic.rst +F: drivers/net/ethernet/pensando/ + PER-CPU MEMORY ALLOCATOR M: Dennis Zhou dennis@kernel.org M: Tejun Heo tj@kernel.org @@@ -13273,7 -13240,7 +13273,7 @@@ M: Manish Chopra <manishc@marvell.com M: GR-Linux-NIC-Dev@marvell.com L: netdev@vger.kernel.org S: Supported -F: drivers/net/ethernet/qlogic/qlge/ +F: drivers/staging/qlge/
QM1D1B0004 MEDIA DRIVER M: Akihiro Tsukada tskd08@gmail.com @@@ -17732,8 -17699,7 +17732,7 @@@ F: include/uapi/linux/dqblk_xfs. F: include/uapi/linux/fsmap.h
XILINX AXI ETHERNET DRIVER - M: Anirudha Sarangi anirudh@xilinx.com - M: John Linn John.Linn@xilinx.com + M: Radhey Shyam Pandey radhey.shyam.pandey@xilinx.com S: Maintained F: drivers/net/ethernet/xilinx/xilinx_axienet*
diff --combined drivers/bluetooth/btusb.c index ed455de598ea,ba4149054304..a9c35ebb30f8 --- a/drivers/bluetooth/btusb.c +++ b/drivers/bluetooth/btusb.c @@@ -384,6 -384,9 +384,9 @@@ static const struct usb_device_id black { USB_DEVICE(0x13d3, 0x3526), .driver_info = BTUSB_REALTEK }, { USB_DEVICE(0x0b05, 0x185c), .driver_info = BTUSB_REALTEK },
+ /* Additional Realtek 8822CE Bluetooth devices */ + { USB_DEVICE(0x04ca, 0x4005), .driver_info = BTUSB_REALTEK }, + /* Silicon Wave based devices */ { USB_DEVICE(0x0c10, 0x0000), .driver_info = BTUSB_SWAVE },
@@@ -435,7 -438,6 +438,7 @@@ static const struct dmi_system_id btusb #define BTUSB_OOB_WAKE_ENABLED 11 #define BTUSB_HW_RESET_ACTIVE 12 #define BTUSB_TX_WAIT_VND_EVT 13 +#define BTUSB_WAKEUP_DISABLE 14
struct btusb_data { struct hci_dev *hdev; @@@ -524,36 -526,6 +527,36 @@@ static void btusb_intel_cmd_timeout(str gpiod_set_value_cansleep(reset_gpio, 0); }
+static void btusb_rtl_cmd_timeout(struct hci_dev *hdev) +{ + struct btusb_data *data = hci_get_drvdata(hdev); + struct gpio_desc *reset_gpio = data->reset_gpio; + + if (++data->cmd_timeout_cnt < 5) + return; + + if (!reset_gpio) { + bt_dev_err(hdev, "No gpio to reset Realtek device, ignoring"); + return; + } + + /* Toggle the hard reset line. The Realtek device is going to + * yank itself off the USB and then replug. The cleanup is handled + * correctly on the way out (standard USB disconnect), and the new + * device is detected cleanly and bound to the driver again like + * it should be. + */ + if (test_and_set_bit(BTUSB_HW_RESET_ACTIVE, &data->flags)) { + bt_dev_err(hdev, "last reset failed? Not resetting again"); + return; + } + + bt_dev_err(hdev, "Reset Realtek device via gpio"); + gpiod_set_value_cansleep(reset_gpio, 0); + msleep(200); + gpiod_set_value_cansleep(reset_gpio, 1); +} + static inline void btusb_free_frags(struct btusb_data *data) { unsigned long flags; @@@ -1201,18 -1173,7 +1204,14 @@@ static int btusb_open(struct hci_dev *h }
data->intf->needs_remote_wakeup = 1; - /* device specific wakeup source enabled and required for USB - * remote wakeup while host is suspended - */ - device_wakeup_enable(&data->udev->dev);
+ /* Disable device remote wakeup when host is suspended + * For Realtek chips, global suspend without + * SET_FEATURE (DEVICE_REMOTE_WAKEUP) can save more power in device. + */ + if (test_bit(BTUSB_WAKEUP_DISABLE, &data->flags)) + device_wakeup_disable(&data->udev->dev); + if (test_and_set_bit(BTUSB_INTR_RUNNING, &data->flags)) goto done;
@@@ -1276,12 -1237,6 +1275,11 @@@ static int btusb_close(struct hci_dev * goto failed;
data->intf->needs_remote_wakeup = 0; + + /* Enable remote wake up for auto-suspend */ + if (test_bit(BTUSB_WAKEUP_DISABLE, &data->flags)) + data->intf->needs_remote_wakeup = 1; + - device_wakeup_disable(&data->udev->dev); usb_autopm_put_interface(data->intf);
failed: @@@ -3813,13 -3768,12 +3811,13 @@@ static int btusb_probe(struct usb_inter if (id->driver_info & BTUSB_REALTEK) { hdev->setup = btrtl_setup_realtek; hdev->shutdown = btrtl_shutdown_realtek; + hdev->cmd_timeout = btusb_rtl_cmd_timeout;
- /* Realtek devices lose their updated firmware over suspend, - * but the USB hub doesn't notice any status change. - * Explicitly request a device reset on resume. + /* Realtek devices lose their updated firmware over global + * suspend that means host doesn't send SET_FEATURE + * (DEVICE_REMOTE_WAKEUP) */ - interface_to_usbdev(intf)->quirks |= USB_QUIRK_RESET_RESUME; + set_bit(BTUSB_WAKEUP_DISABLE, &data->flags); } #endif
@@@ -3993,19 -3947,6 +3991,19 @@@ static int btusb_suspend(struct usb_int enable_irq(data->oob_wake_irq); }
+ /* For global suspend, Realtek devices lose the loaded fw + * in them. But for autosuspend, firmware should remain. + * Actually, it depends on whether the usb host sends + * set feature (enable wakeup) or not. + */ + if (test_bit(BTUSB_WAKEUP_DISABLE, &data->flags)) { + if (PMSG_IS_AUTO(message) && + device_can_wakeup(&data->udev->dev)) + data->udev->do_remote_wakeup = 1; + else if (!PMSG_IS_AUTO(message)) + data->udev->reset_resume = 1; + } + return 0; }
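Read together, the btusb.c hunks above implement a single policy for Realtek controllers flagged with BTUSB_WAKEUP_DISABLE: remote wakeup is left off so a system-wide suspend can power the device fully down, and the USB core is asked to reset it on resume so the firmware is reloaded, while autosuspend keeps wakeup (and the downloaded firmware) intact. The sketch below condenses that decision into one helper; it assumes btusb.c's own context (struct btusb_data, the BTUSB_WAKEUP_DISABLE flag) and is an illustration, not code from the patch.

/* Illustrative sketch only, not from the patch: the Realtek suspend
 * policy added above, written as a single helper in btusb.c's context.
 */
static void btusb_rtl_suspend_policy(struct btusb_data *data,
                                     pm_message_t message)
{
        if (!test_bit(BTUSB_WAKEUP_DISABLE, &data->flags))
                return;                 /* default wakeup handling */

        if (PMSG_IS_AUTO(message)) {
                /* autosuspend: keep remote wakeup so the link stays live
                 * and the downloaded firmware is preserved
                 */
                if (device_can_wakeup(&data->udev->dev))
                        data->udev->do_remote_wakeup = 1;
        } else {
                /* system suspend: no SET_FEATURE(DEVICE_REMOTE_WAKEUP) is
                 * sent, the controller loses its firmware, so have the USB
                 * core reset and re-probe the device on resume
                 */
                data->udev->reset_resume = 1;
        }
}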
diff --combined drivers/bluetooth/hci_qca.c index d33828fef89f,31dec435767b..e3164c200eac --- a/drivers/bluetooth/hci_qca.c +++ b/drivers/bluetooth/hci_qca.c @@@ -309,13 -309,14 +309,14 @@@ static void qca_wq_awake_device(struct ws_awake_device); struct hci_uart *hu = qca->hu; unsigned long retrans_delay; + unsigned long flags;
BT_DBG("hu %p wq awake device", hu);
/* Vote for serial clock */ serial_clock_vote(HCI_IBS_TX_VOTE_CLOCK_ON, hu);
- spin_lock(&qca->hci_ibs_lock); + spin_lock_irqsave(&qca->hci_ibs_lock, flags);
/* Send wake indication to device */ if (send_hci_ibs_cmd(HCI_IBS_WAKE_IND, hu) < 0) @@@ -327,7 -328,7 +328,7 @@@ retrans_delay = msecs_to_jiffies(qca->wake_retrans); mod_timer(&qca->wake_retrans_timer, jiffies + retrans_delay);
- spin_unlock(&qca->hci_ibs_lock); + spin_unlock_irqrestore(&qca->hci_ibs_lock, flags);
/* Actually send the packets */ hci_uart_tx_wakeup(hu); @@@ -338,12 -339,13 +339,13 @@@ static void qca_wq_awake_rx(struct work struct qca_data *qca = container_of(work, struct qca_data, ws_awake_rx); struct hci_uart *hu = qca->hu; + unsigned long flags;
BT_DBG("hu %p wq awake rx", hu);
serial_clock_vote(HCI_IBS_RX_VOTE_CLOCK_ON, hu);
- spin_lock(&qca->hci_ibs_lock); + spin_lock_irqsave(&qca->hci_ibs_lock, flags); qca->rx_ibs_state = HCI_IBS_RX_AWAKE;
/* Always acknowledge device wake up, @@@ -354,7 -356,7 +356,7 @@@
qca->ibs_sent_wacks++;
- spin_unlock(&qca->hci_ibs_lock); + spin_unlock_irqrestore(&qca->hci_ibs_lock, flags);
/* Actually send the packets */ hci_uart_tx_wakeup(hu); @@@ -502,7 -504,26 +504,7 @@@ static int qca_open(struct hci_uart *hu qca->tx_ibs_state = HCI_IBS_TX_ASLEEP; qca->rx_ibs_state = HCI_IBS_RX_ASLEEP;
- /* clocks actually on, but we start votes off */ - qca->tx_vote = false; - qca->rx_vote = false; - qca->flags = 0; - - qca->ibs_sent_wacks = 0; - qca->ibs_sent_slps = 0; - qca->ibs_sent_wakes = 0; - qca->ibs_recv_wacks = 0; - qca->ibs_recv_slps = 0; - qca->ibs_recv_wakes = 0; qca->vote_last_jif = jiffies; - qca->vote_on_ms = 0; - qca->vote_off_ms = 0; - qca->votes_on = 0; - qca->votes_off = 0; - qca->tx_votes_on = 0; - qca->tx_votes_off = 0; - qca->rx_votes_on = 0; - qca->rx_votes_off = 0;
hu->priv = qca;
@@@ -1242,11 -1263,6 +1244,11 @@@ static int qca_setup(struct hci_uart *h /* Patch downloading has to be done without IBS mode */ clear_bit(QCA_IBS_ENABLED, &qca->flags);
+ /* Enable controller to do both LE scan and BR/EDR inquiry + * simultaneously. + */ + set_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks); + if (qca_is_wcn399x(soc_type)) { bt_dev_info(hdev, "setting up wcn3990");
@@@ -1312,7 -1328,7 +1314,7 @@@ return ret; }
-static struct hci_uart_proto qca_proto = { +static const struct hci_uart_proto qca_proto = { .id = HCI_UART_QCA, .name = "QCA", .manufacturer = 29, @@@ -1375,8 -1391,6 +1377,8 @@@ static int qca_power_off(struct hci_de /* Perform pre shutdown command */ qca_send_pre_shutdown_cmd(hdev);
+ usleep_range(8000, 10000); + qca_power_shutdown(hu); return 0; } diff --combined drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c index 58c6231aaa00,85a3b062371d..87dece0e745d --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c @@@ -98,7 -98,7 +98,7 @@@ static const struct hclge_hw_error hclg .reset_level = HNAE3_GLOBAL_RESET }, { .int_msk = BIT(1), .msg = "rx_stp_fifo_overflow", .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(2), .msg = "rx_stp_fifo_undeflow", + { .int_msk = BIT(2), .msg = "rx_stp_fifo_underflow", .reset_level = HNAE3_GLOBAL_RESET }, { .int_msk = BIT(3), .msg = "tx_buf_overflow", .reset_level = HNAE3_GLOBAL_RESET }, @@@ -637,8 -637,8 +637,8 @@@ static void hclge_log_error(struct devi { while (err->msg) { if (err->int_msk & err_sts) { - dev_warn(dev, "%s %s found [error status=0x%x]\n", - reg, err->msg, err_sts); + dev_err(dev, "%s %s found [error status=0x%x]\n", + reg, err->msg, err_sts); if (err->reset_level && err->reset_level != HNAE3_NONE_RESET) set_bit(err->reset_level, reset_requests); @@@ -652,11 -652,16 +652,11 @@@ * @desc: descriptor for describing the command * @cmd: command opcode * @flag: flag for extended command structure - * @w_num: offset for setting the read interrupt type. - * @int_type: select which type of the interrupt for which the error - * info will be read(RAS-CE/RAS-NFE/RAS-FE etc). * * This function query the error info from hw register/s using command */ static int hclge_cmd_query_error(struct hclge_dev *hdev, - struct hclge_desc *desc, u32 cmd, - u16 flag, u8 w_num, - enum hclge_err_int_type int_type) + struct hclge_desc *desc, u32 cmd, u16 flag) { struct device *dev = &hdev->pdev->dev; int desc_num = 1; @@@ -668,6 -673,8 +668,6 @@@ hclge_cmd_setup_basic_desc(&desc[1], cmd, true); desc_num = 2; } - if (w_num) - desc[0].data[w_num] = cpu_to_le32(int_type);
ret = hclge_cmd_send(&hdev->hw, &desc[0], desc_num); if (ret) @@@ -865,7 -872,8 +865,7 @@@ static int hclge_config_tm_hw_err_int(s }
/* configure TM QCN hw errors */ - ret = hclge_cmd_query_error(hdev, &desc, HCLGE_TM_QCN_MEM_INT_CFG, - 0, 0, 0); + ret = hclge_cmd_query_error(hdev, &desc, HCLGE_TM_QCN_MEM_INT_CFG, 0); if (ret) { dev_err(dev, "fail(%d) to read TM QCN CFG status\n", ret); return ret; @@@ -930,44 -938,32 +930,44 @@@ static int hclge_config_ppu_error_inter /* configure PPU error interrupts */ if (cmd == HCLGE_PPU_MPF_ECC_INT_CMD) { hclge_cmd_setup_basic_desc(&desc[0], cmd, false); - desc[0].flag |= HCLGE_CMD_FLAG_NEXT; + desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT); hclge_cmd_setup_basic_desc(&desc[1], cmd, false); if (en) { - desc[0].data[0] = HCLGE_PPU_MPF_ABNORMAL_INT0_EN; - desc[0].data[1] = HCLGE_PPU_MPF_ABNORMAL_INT1_EN; - desc[1].data[3] = HCLGE_PPU_MPF_ABNORMAL_INT3_EN; - desc[1].data[4] = HCLGE_PPU_MPF_ABNORMAL_INT2_EN; + desc[0].data[0] = + cpu_to_le32(HCLGE_PPU_MPF_ABNORMAL_INT0_EN); + desc[0].data[1] = + cpu_to_le32(HCLGE_PPU_MPF_ABNORMAL_INT1_EN); + desc[1].data[3] = + cpu_to_le32(HCLGE_PPU_MPF_ABNORMAL_INT3_EN); + desc[1].data[4] = + cpu_to_le32(HCLGE_PPU_MPF_ABNORMAL_INT2_EN); }
- desc[1].data[0] = HCLGE_PPU_MPF_ABNORMAL_INT0_EN_MASK; - desc[1].data[1] = HCLGE_PPU_MPF_ABNORMAL_INT1_EN_MASK; - desc[1].data[2] = HCLGE_PPU_MPF_ABNORMAL_INT2_EN_MASK; - desc[1].data[3] |= HCLGE_PPU_MPF_ABNORMAL_INT3_EN_MASK; + desc[1].data[0] = + cpu_to_le32(HCLGE_PPU_MPF_ABNORMAL_INT0_EN_MASK); + desc[1].data[1] = + cpu_to_le32(HCLGE_PPU_MPF_ABNORMAL_INT1_EN_MASK); + desc[1].data[2] = + cpu_to_le32(HCLGE_PPU_MPF_ABNORMAL_INT2_EN_MASK); + desc[1].data[3] |= + cpu_to_le32(HCLGE_PPU_MPF_ABNORMAL_INT3_EN_MASK); desc_num = 2; } else if (cmd == HCLGE_PPU_MPF_OTHER_INT_CMD) { hclge_cmd_setup_basic_desc(&desc[0], cmd, false); if (en) - desc[0].data[0] = HCLGE_PPU_MPF_ABNORMAL_INT2_EN2; + desc[0].data[0] = + cpu_to_le32(HCLGE_PPU_MPF_ABNORMAL_INT2_EN2);
- desc[0].data[2] = HCLGE_PPU_MPF_ABNORMAL_INT2_EN2_MASK; + desc[0].data[2] = + cpu_to_le32(HCLGE_PPU_MPF_ABNORMAL_INT2_EN2_MASK); } else if (cmd == HCLGE_PPU_PF_OTHER_INT_CMD) { hclge_cmd_setup_basic_desc(&desc[0], cmd, false); if (en) - desc[0].data[0] = HCLGE_PPU_PF_ABNORMAL_INT_EN; + desc[0].data[0] = + cpu_to_le32(HCLGE_PPU_PF_ABNORMAL_INT_EN);
- desc[0].data[2] = HCLGE_PPU_PF_ABNORMAL_INT_EN_MASK; + desc[0].data[2] = + cpu_to_le32(HCLGE_PPU_PF_ABNORMAL_INT_EN_MASK); } else { dev_err(dev, "Invalid cmd to configure PPU error interrupts\n"); return -EINVAL; @@@ -1175,8 -1171,8 +1175,8 @@@ static int hclge_handle_mpf_ras_error(s
status = le32_to_cpu(*(desc_data + 3)) & BIT(0); if (status) { - dev_warn(dev, "SSU_ECC_MULTI_BIT_INT_1 ssu_mem32_ecc_mbit_err found [error status=0x%x]\n", - status); + dev_err(dev, "SSU_ECC_MULTI_BIT_INT_1 ssu_mem32_ecc_mbit_err found [error status=0x%x]\n", + status); set_bit(HNAE3_GLOBAL_RESET, &ae_dev->hw_err_reset_req); }
@@@ -1212,8 -1208,8 +1212,8 @@@ desc_data = (__le32 *)&desc[5]; status = le32_to_cpu(*(desc_data + 1)); if (status) { - dev_warn(dev, "PPU_MPF_ABNORMAL_INT_ST1 %s found\n", - "rpu_rx_pkt_ecc_mbit_err"); + dev_err(dev, + "PPU_MPF_ABNORMAL_INT_ST1 rpu_rx_pkt_ecc_mbit_err found\n"); set_bit(HNAE3_GLOBAL_RESET, &ae_dev->hw_err_reset_req); }
@@@ -1325,12 -1321,10 +1325,12 @@@ static int hclge_handle_pf_ras_error(st /* log PPU(RCB) errors */ desc_data = (__le32 *)&desc[3]; status = le32_to_cpu(*desc_data) & HCLGE_PPU_PF_INT_RAS_MASK; - if (status) + if (status) { hclge_log_error(dev, "PPU_PF_ABNORMAL_INT_ST0", &hclge_ppu_pf_abnormal_int[0], status, &ae_dev->hw_err_reset_req); + hclge_report_hw_error(hdev, HNAE3_PPU_POISON_ERROR); + }
/* clear all PF RAS errors */ hclge_cmd_reuse_desc(&desc[0], false); @@@ -1393,17 -1387,17 +1393,17 @@@ static int hclge_log_rocee_axi_error(st return ret; }
- dev_info(dev, "AXI1: %08X %08X %08X %08X %08X %08X\n", - le32_to_cpu(desc[0].data[0]), le32_to_cpu(desc[0].data[1]), - le32_to_cpu(desc[0].data[2]), le32_to_cpu(desc[0].data[3]), - le32_to_cpu(desc[0].data[4]), le32_to_cpu(desc[0].data[5])); - dev_info(dev, "AXI2: %08X %08X %08X %08X %08X %08X\n", - le32_to_cpu(desc[1].data[0]), le32_to_cpu(desc[1].data[1]), - le32_to_cpu(desc[1].data[2]), le32_to_cpu(desc[1].data[3]), - le32_to_cpu(desc[1].data[4]), le32_to_cpu(desc[1].data[5])); - dev_info(dev, "AXI3: %08X %08X %08X %08X\n", - le32_to_cpu(desc[2].data[0]), le32_to_cpu(desc[2].data[1]), - le32_to_cpu(desc[2].data[2]), le32_to_cpu(desc[2].data[3])); + dev_err(dev, "AXI1: %08X %08X %08X %08X %08X %08X\n", + le32_to_cpu(desc[0].data[0]), le32_to_cpu(desc[0].data[1]), + le32_to_cpu(desc[0].data[2]), le32_to_cpu(desc[0].data[3]), + le32_to_cpu(desc[0].data[4]), le32_to_cpu(desc[0].data[5])); + dev_err(dev, "AXI2: %08X %08X %08X %08X %08X %08X\n", + le32_to_cpu(desc[1].data[0]), le32_to_cpu(desc[1].data[1]), + le32_to_cpu(desc[1].data[2]), le32_to_cpu(desc[1].data[3]), + le32_to_cpu(desc[1].data[4]), le32_to_cpu(desc[1].data[5])); + dev_err(dev, "AXI3: %08X %08X %08X %08X\n", + le32_to_cpu(desc[2].data[0]), le32_to_cpu(desc[2].data[1]), + le32_to_cpu(desc[2].data[2]), le32_to_cpu(desc[2].data[3]));
return 0; } @@@ -1416,18 -1410,18 +1416,18 @@@ static int hclge_log_rocee_ecc_error(st
ret = hclge_cmd_query_error(hdev, &desc[0], HCLGE_QUERY_ROCEE_ECC_RAS_INFO_CMD, - HCLGE_CMD_FLAG_NEXT, 0, 0); + HCLGE_CMD_FLAG_NEXT); if (ret) { dev_err(dev, "failed(%d) to query ROCEE ECC error sts\n", ret); return ret; }
- dev_info(dev, "ECC1: %08X %08X %08X %08X %08X %08X\n", - le32_to_cpu(desc[0].data[0]), le32_to_cpu(desc[0].data[1]), - le32_to_cpu(desc[0].data[2]), le32_to_cpu(desc[0].data[3]), - le32_to_cpu(desc[0].data[4]), le32_to_cpu(desc[0].data[5])); - dev_info(dev, "ECC2: %08X %08X %08X\n", le32_to_cpu(desc[1].data[0]), - le32_to_cpu(desc[1].data[1]), le32_to_cpu(desc[1].data[2])); + dev_err(dev, "ECC1: %08X %08X %08X %08X %08X %08X\n", + le32_to_cpu(desc[0].data[0]), le32_to_cpu(desc[0].data[1]), + le32_to_cpu(desc[0].data[2]), le32_to_cpu(desc[0].data[3]), + le32_to_cpu(desc[0].data[4]), le32_to_cpu(desc[0].data[5])); + dev_err(dev, "ECC2: %08X %08X %08X\n", le32_to_cpu(desc[1].data[0]), + le32_to_cpu(desc[1].data[1]), le32_to_cpu(desc[1].data[2]));
return 0; } @@@ -1440,7 -1434,7 +1440,7 @@@ static int hclge_log_rocee_ovf_error(st
/* read overflow error status */ ret = hclge_cmd_query_error(hdev, &desc[0], HCLGE_ROCEE_PF_RAS_INT_CMD, - 0, 0, 0); + 0); if (ret) { dev_err(dev, "failed(%d) to query ROCEE OVF error sts\n", ret); return ret; @@@ -1456,9 -1450,9 +1456,9 @@@ le32_to_cpu(desc[0].data[0]); while (err->msg) { if (err->int_msk == err_sts) { - dev_warn(dev, "%s [error status=0x%x] found\n", - err->msg, - le32_to_cpu(desc[0].data[0])); + dev_err(dev, "%s [error status=0x%x] found\n", + err->msg, + le32_to_cpu(desc[0].data[0])); break; } err++; @@@ -1466,13 -1460,13 +1466,13 @@@ }
if (le32_to_cpu(desc[0].data[1]) & HCLGE_ROCEE_OVF_ERR_INT_MASK) { - dev_warn(dev, "ROCEE TSP OVF [error status=0x%x] found\n", - le32_to_cpu(desc[0].data[1])); + dev_err(dev, "ROCEE TSP OVF [error status=0x%x] found\n", + le32_to_cpu(desc[0].data[1])); }
if (le32_to_cpu(desc[0].data[2]) & HCLGE_ROCEE_OVF_ERR_INT_MASK) { - dev_warn(dev, "ROCEE SCC OVF [error status=0x%x] found\n", - le32_to_cpu(desc[0].data[2])); + dev_err(dev, "ROCEE SCC OVF [error status=0x%x] found\n", + le32_to_cpu(desc[0].data[2])); }
return 0; @@@ -1489,7 -1483,8 +1489,7 @@@ hclge_log_and_clear_rocee_ras_error(str
/* read RAS error interrupt status */ ret = hclge_cmd_query_error(hdev, &desc[0], - HCLGE_QUERY_CLEAR_ROCEE_RAS_INT, - 0, 0, 0); + HCLGE_QUERY_CLEAR_ROCEE_RAS_INT, 0); if (ret) { dev_err(dev, "failed(%d) to query ROCEE RAS INT SRC\n", ret); /* reset everything for now */ @@@ -1500,10 -1495,10 +1500,10 @@@
if (status & HCLGE_ROCEE_AXI_ERR_INT_MASK) { if (status & HCLGE_ROCEE_RERR_INT_MASK) - dev_warn(dev, "ROCEE RAS AXI rresp error\n"); + dev_err(dev, "ROCEE RAS AXI rresp error\n");
if (status & HCLGE_ROCEE_BERR_INT_MASK) - dev_warn(dev, "ROCEE RAS AXI bresp error\n"); + dev_err(dev, "ROCEE RAS AXI bresp error\n");
reset_type = HNAE3_FUNC_RESET;
@@@ -1513,7 -1508,7 +1513,7 @@@ }
if (status & HCLGE_ROCEE_ECC_INT_MASK) { - dev_warn(dev, "ROCEE RAS 2bit ECC error\n"); + dev_err(dev, "ROCEE RAS 2bit ECC error\n"); reset_type = HNAE3_GLOBAL_RESET;
ret = hclge_log_rocee_ecc_error(hdev); @@@ -1571,8 -1566,8 +1571,8 @@@ int hclge_config_rocee_ras_interrupt(st
static void hclge_handle_rocee_ras_error(struct hnae3_ae_dev *ae_dev) { - enum hnae3_reset_type reset_type = HNAE3_NONE_RESET; struct hclge_dev *hdev = ae_dev->priv; + enum hnae3_reset_type reset_type;
if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) || hdev->pdev->revision < 0x21) @@@ -1654,16 -1649,16 +1654,16 @@@ pci_ers_result_t hclge_handle_hw_ras_er
/* Handling Non-fatal HNS RAS errors */ if (status & HCLGE_RAS_REG_NFE_MASK) { - dev_warn(dev, - "HNS Non-Fatal RAS error(status=0x%x) identified\n", - status); + dev_err(dev, + "HNS Non-Fatal RAS error(status=0x%x) identified\n", + status); hclge_handle_all_ras_errors(hdev); }
/* Handling Non-fatal Rocee RAS errors */ if (hdev->pdev->revision >= 0x21 && status & HCLGE_RAS_REG_ROCEE_ERR_MASK) { - dev_warn(dev, "ROCEE Non-Fatal RAS error identified\n"); + dev_err(dev, "ROCEE Non-Fatal RAS error identified\n"); hclge_handle_rocee_ras_error(ae_dev); }
@@@ -1742,8 -1737,8 +1742,8 @@@ static void hclge_handle_over_8bd_err(s return; }
- dev_warn(dev, "PPU_PF_ABNORMAL_INT_ST over_8bd_no_fe found, vf_id(%d), queue_id(%d)\n", - vf_id, q_id); + dev_err(dev, "PPU_PF_ABNORMAL_INT_ST over_8bd_no_fe found, vf_id(%u), queue_id(%u)\n", + vf_id, q_id);
if (vf_id) { if (vf_id >= hdev->num_alloc_vport) { @@@ -1760,8 -1755,8 +1760,8 @@@
ret = hclge_inform_reset_assert_to_vf(&hdev->vport[vf_id]); if (ret) - dev_warn(dev, "inform reset to vf(%d) failed %d!\n", - hdev->vport->vport_id, ret); + dev_err(dev, "inform reset to vf(%u) failed %d!\n", + hdev->vport->vport_id, ret); } else { set_bit(HNAE3_FUNC_RESET, reset_requests); } @@@ -1807,8 -1802,8 +1807,8 @@@ static int hclge_handle_mpf_msix_error( status = le32_to_cpu(*(desc_data + 2)) & HCLGE_PPU_MPF_INT_ST2_MSIX_MASK; if (status) - dev_warn(dev, "PPU_MPF_ABNORMAL_INT_ST2 rx_q_search_miss found [dfx status=0x%x\n]", - status); + dev_err(dev, "PPU_MPF_ABNORMAL_INT_ST2 rx_q_search_miss found [dfx status=0x%x\n]", + status);
/* clear all main PF MSIx errors */ ret = hclge_clear_hw_msix_error(hdev, desc, true, mpf_bd_num); @@@ -2002,7 -1997,7 +2002,7 @@@ void hclge_handle_all_hns_hw_errors(str
/* Handle Non-fatal HNS RAS errors */ if (status & HCLGE_RAS_REG_NFE_MASK) { - dev_warn(dev, "HNS hw error(RAS) identified during init\n"); + dev_err(dev, "HNS hw error(RAS) identified during init\n"); hclge_handle_all_ras_errors(hdev); }
diff --combined drivers/net/ethernet/ibm/ibmvnic.c index 4f83f97ffe8b,5cb55ea671e3..2e5172f61564 --- a/drivers/net/ethernet/ibm/ibmvnic.c +++ b/drivers/net/ethernet/ibm/ibmvnic.c @@@ -1485,7 -1485,7 +1485,7 @@@ static netdev_tx_t ibmvnic_xmit(struct
memcpy(dst + cur, page_address(skb_frag_page(frag)) + - frag->page_offset, skb_frag_size(frag)); + skb_frag_off(frag), skb_frag_size(frag)); cur += skb_frag_size(frag); } } else { @@@ -1984,8 -1984,11 +1984,11 @@@ static void __ibmvnic_reset(struct work rwi = get_next_rwi(adapter); while (rwi) { if (adapter->state == VNIC_REMOVING || - adapter->state == VNIC_REMOVED) - goto out; + adapter->state == VNIC_REMOVED) { + kfree(rwi); + rc = EBUSY; + break; + }
if (adapter->force_reset_recovery) { adapter->force_reset_recovery = false; @@@ -2011,7 -2014,7 +2014,7 @@@ netdev_dbg(adapter->netdev, "Reset failed\n"); free_all_rwi(adapter); } - out: + adapter->resetting = false; if (we_lock_rtnl) rtnl_unlock(); diff --combined drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index dc034f4e8cf6,51c696b6bf64..1ce2397306b9 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@@ -36,6 -36,7 +36,7 @@@ #include <net/vxlan.h> #include <net/mpls.h> #include <net/xdp_sock.h> + #include <net/xfrm.h>
#include "ixgbe.h" #include "ixgbe_common.h" @@@ -1785,7 -1786,7 +1786,7 @@@ static bool ixgbe_is_non_eop(struct ixg static void ixgbe_pull_tail(struct ixgbe_ring *rx_ring, struct sk_buff *skb) { - struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0]; + skb_frag_t *frag = &skb_shinfo(skb)->frags[0]; unsigned char *va; unsigned int pull_len;
@@@ -1807,7 -1808,7 +1808,7 @@@
/* update all of the pointers */ skb_frag_size_sub(frag, pull_len); - frag->page_offset += pull_len; + skb_frag_off_add(frag, pull_len); skb->data_len -= pull_len; skb->tail += pull_len; } @@@ -1825,7 -1826,13 +1826,7 @@@ static void ixgbe_dma_sync_frag(struct ixgbe_ring *rx_ring, struct sk_buff *skb) { - /* if the page was released unmap it, else just sync our portion */ - if (unlikely(IXGBE_CB(skb)->page_released)) { - dma_unmap_page_attrs(rx_ring->dev, IXGBE_CB(skb)->dma, - ixgbe_rx_pg_size(rx_ring), - DMA_FROM_DEVICE, - IXGBE_RX_DMA_ATTR); - } else if (ring_uses_build_skb(rx_ring)) { + if (ring_uses_build_skb(rx_ring)) { unsigned long offset = (unsigned long)(skb->data) & ~PAGE_MASK;
dma_sync_single_range_for_cpu(rx_ring->dev, @@@ -1834,22 -1841,14 +1835,22 @@@ skb_headlen(skb), DMA_FROM_DEVICE); } else { - struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0]; + skb_frag_t *frag = &skb_shinfo(skb)->frags[0];
dma_sync_single_range_for_cpu(rx_ring->dev, IXGBE_CB(skb)->dma, - frag->page_offset, + skb_frag_off(frag), skb_frag_size(frag), DMA_FROM_DEVICE); } + + /* If the page was released, just unmap it. */ + if (unlikely(IXGBE_CB(skb)->page_released)) { + dma_unmap_page_attrs(rx_ring->dev, IXGBE_CB(skb)->dma, + ixgbe_rx_pg_size(rx_ring), + DMA_FROM_DEVICE, + IXGBE_RX_DMA_ATTR); + } }
/** @@@ -2623,7 -2622,7 +2624,7 @@@ adjust_by_size /* 16K ints/sec to 9.2K ints/sec */ avg_wire_size *= 15; avg_wire_size += 11452; - } else if (avg_wire_size <= 1980) { + } else if (avg_wire_size < 1968) { /* 9.2K ints/sec to 8K ints/sec */ avg_wire_size *= 5; avg_wire_size += 22420; @@@ -2656,6 -2655,8 +2657,8 @@@ case IXGBE_LINK_SPEED_2_5GB_FULL: case IXGBE_LINK_SPEED_1GB_FULL: case IXGBE_LINK_SPEED_10_FULL: + if (avg_wire_size > 8064) + avg_wire_size = 8064; itr += DIV_ROUND_UP(avg_wire_size, IXGBE_ITR_ADAPTIVE_MIN_INC * 64) * IXGBE_ITR_ADAPTIVE_MIN_INC; @@@ -8185,7 -8186,7 +8188,7 @@@ static int ixgbe_tx_map(struct ixgbe_ri struct sk_buff *skb = first->skb; struct ixgbe_tx_buffer *tx_buffer; union ixgbe_adv_tx_desc *tx_desc; - struct skb_frag_struct *frag; + skb_frag_t *frag; dma_addr_t dma; unsigned int data_len, size; u32 tx_flags = first->tx_flags; @@@ -8604,8 -8605,7 +8607,8 @@@ netdev_tx_t ixgbe_xmit_frame_ring(struc * otherwise try next time */ for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) - count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size); + count += TXD_USE_COUNT(skb_frag_size( + &skb_shinfo(skb)->frags[f]));
if (ixgbe_maybe_stop_tx(tx_ring, count + 3)) { tx_ring->tx_stats.tx_busy++; @@@ -8698,7 -8698,7 +8701,7 @@@ #endif /* IXGBE_FCOE */
#ifdef CONFIG_IXGBE_IPSEC - if (secpath_exists(skb) && + if (xfrm_offload(skb) && !ixgbe_ipsec_tx(tx_ring, first, &ipsec_tx)) goto out_drop; #endif @@@ -8748,7 -8748,7 +8751,7 @@@ static netdev_tx_t __ixgbe_xmit_frame(s if (skb_put_padto(skb, 17)) return NETDEV_TX_OK;
- tx_ring = ring ? ring : adapter->tx_ring[skb->queue_mapping]; + tx_ring = ring ? ring : adapter->tx_ring[skb_get_queue_mapping(skb)]; if (unlikely(test_bit(__IXGBE_TX_DISABLED, &tx_ring->state))) return NETDEV_TX_BUSY;
@@@ -9490,10 -9490,6 +9493,10 @@@ static int ixgbe_configure_clsu32(struc jump->mat = nexthdr[i].jump; adapter->jump_tables[link_uhtid] = jump; break; + } else { + kfree(mask); + kfree(input); + kfree(jump); } } return 0; @@@ -10266,8 -10262,7 +10269,8 @@@ static int ixgbe_xdp_setup(struct net_d if (need_reset && prog) for (i = 0; i < adapter->num_rx_queues; i++) if (adapter->xdp_ring[i]->xsk_umem) - (void)ixgbe_xsk_async_xmit(adapter->netdev, i); + (void)ixgbe_xsk_wakeup(adapter->netdev, i, + XDP_WAKEUP_RX);
return 0; } @@@ -10386,7 -10381,7 +10389,7 @@@ static const struct net_device_ops ixgb .ndo_features_check = ixgbe_features_check, .ndo_bpf = ixgbe_xdp, .ndo_xdp_xmit = ixgbe_xdp_xmit, - .ndo_xsk_async_xmit = ixgbe_xsk_async_xmit, + .ndo_xsk_wakeup = ixgbe_xsk_wakeup, };
static void ixgbe_disable_txr_hw(struct ixgbe_adapter *adapter, diff --combined drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c index ad802a8909e0,a3b6d8c89127..a37dcd140f63 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c @@@ -100,7 -100,7 +100,7 @@@ static int ixgbe_xsk_umem_enable(struc ixgbe_txrx_ring_enable(adapter, qid);
/* Kick start the NAPI context so that receiving will start */ - err = ixgbe_xsk_async_xmit(adapter->netdev, qid); + err = ixgbe_xsk_wakeup(adapter->netdev, qid, XDP_WAKEUP_RX); if (err) return err; } @@@ -143,9 -143,7 +143,9 @@@ static int ixgbe_run_xdp_zc(struct ixgb struct ixgbe_ring *rx_ring, struct xdp_buff *xdp) { + struct xdp_umem *umem = rx_ring->xsk_umem; int err, result = IXGBE_XDP_PASS; + u64 offset = umem->headroom; struct bpf_prog *xdp_prog; struct xdp_frame *xdpf; u32 act; @@@ -153,10 -151,7 +153,10 @@@ rcu_read_lock(); xdp_prog = READ_ONCE(rx_ring->xdp_prog); act = bpf_prog_run_xdp(xdp_prog, xdp); - xdp->handle += xdp->data - xdp->data_hard_start; + offset += xdp->data - xdp->data_hard_start; + + xdp->handle = xsk_umem_adjust_offset(umem, xdp->handle, offset); + switch (act) { case XDP_PASS: break; @@@ -206,6 -201,8 +206,6 @@@ ixgbe_rx_buffer *ixgbe_get_rx_buffer_zc static void ixgbe_reuse_rx_buffer_zc(struct ixgbe_ring *rx_ring, struct ixgbe_rx_buffer *obi) { - unsigned long mask = (unsigned long)rx_ring->xsk_umem->chunk_mask; - u64 hr = rx_ring->xsk_umem->headroom + XDP_PACKET_HEADROOM; u16 nta = rx_ring->next_to_alloc; struct ixgbe_rx_buffer *nbi;
@@@ -215,9 -212,14 +215,9 @@@ rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;
/* transfer page from old buffer to new buffer */ - nbi->dma = obi->dma & mask; - nbi->dma += hr; - - nbi->addr = (void *)((unsigned long)obi->addr & mask); - nbi->addr += hr; - - nbi->handle = obi->handle & mask; - nbi->handle += rx_ring->xsk_umem->headroom; + nbi->dma = obi->dma; + nbi->addr = obi->addr; + nbi->handle = obi->handle;
obi->addr = NULL; obi->skb = NULL; @@@ -248,8 -250,7 +248,8 @@@ void ixgbe_zca_free(struct zero_copy_al bi->addr = xdp_umem_get_data(rx_ring->xsk_umem, handle); bi->addr += hr;
- bi->handle = (u64)handle + rx_ring->xsk_umem->headroom; + bi->handle = xsk_umem_adjust_offset(rx_ring->xsk_umem, (u64)handle, + rx_ring->xsk_umem->headroom); }
static bool ixgbe_alloc_buffer_zc(struct ixgbe_ring *rx_ring, @@@ -275,7 -276,7 +275,7 @@@ bi->addr = xdp_umem_get_data(umem, handle); bi->addr += hr;
- bi->handle = handle + umem->headroom; + bi->handle = xsk_umem_adjust_offset(umem, handle, umem->headroom);
xsk_umem_discard_addr(umem); return true; @@@ -302,7 -303,7 +302,7 @@@ static bool ixgbe_alloc_buffer_slow_zc( bi->addr = xdp_umem_get_data(umem, handle); bi->addr += hr;
- bi->handle = handle + umem->headroom; + bi->handle = xsk_umem_adjust_offset(umem, handle, umem->headroom);
xsk_umem_discard_addr_rq(umem); return true; @@@ -546,14 -547,6 +546,14 @@@ int ixgbe_clean_rx_irq_zc(struct ixgbe_ q_vector->rx.total_packets += total_rx_packets; q_vector->rx.total_bytes += total_rx_bytes;
+ if (xsk_umem_uses_need_wakeup(rx_ring->xsk_umem)) { + if (failure || rx_ring->next_to_clean == rx_ring->next_to_use) + xsk_set_rx_need_wakeup(rx_ring->xsk_umem); + else + xsk_clear_rx_need_wakeup(rx_ring->xsk_umem); + + return (int)total_rx_packets; + } return failure ? budget : (int)total_rx_packets; }
@@@ -622,8 -615,6 +622,8 @@@ static bool ixgbe_xmit_zc(struct ixgbe_ if (tx_desc) { ixgbe_xdp_ring_update_tail(xdp_ring); xsk_umem_consume_tx_done(xdp_ring->xsk_umem); + if (xsk_umem_uses_need_wakeup(xdp_ring->xsk_umem)) + xsk_clear_tx_need_wakeup(xdp_ring->xsk_umem); }
return !!budget && work_done; @@@ -642,19 -633,17 +642,17 @@@ static void ixgbe_clean_xdp_tx_buffer(s bool ixgbe_clean_xdp_tx_irq(struct ixgbe_q_vector *q_vector, struct ixgbe_ring *tx_ring, int napi_budget) { + u16 ntc = tx_ring->next_to_clean, ntu = tx_ring->next_to_use; unsigned int total_packets = 0, total_bytes = 0; - u32 i = tx_ring->next_to_clean, xsk_frames = 0; - unsigned int budget = q_vector->tx.work_limit; struct xdp_umem *umem = tx_ring->xsk_umem; union ixgbe_adv_tx_desc *tx_desc; struct ixgbe_tx_buffer *tx_bi; - bool xmit_done; + u32 xsk_frames = 0;
- tx_bi = &tx_ring->tx_buffer_info[i]; - tx_desc = IXGBE_TX_DESC(tx_ring, i); - i -= tx_ring->count; + tx_bi = &tx_ring->tx_buffer_info[ntc]; + tx_desc = IXGBE_TX_DESC(tx_ring, ntc);
- do { + while (ntc != ntu) { if (!(tx_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD))) break;
@@@ -670,22 -659,18 +668,18 @@@
tx_bi++; tx_desc++; - i++; - if (unlikely(!i)) { - i -= tx_ring->count; + ntc++; + if (unlikely(ntc == tx_ring->count)) { + ntc = 0; tx_bi = tx_ring->tx_buffer_info; tx_desc = IXGBE_TX_DESC(tx_ring, 0); }
/* issue prefetch for next Tx descriptor */ prefetch(tx_desc); + }
- /* update budget accounting */ - budget--; - } while (likely(budget)); - - i += tx_ring->count; - tx_ring->next_to_clean = i; + tx_ring->next_to_clean = ntc;
u64_stats_update_begin(&tx_ring->syncp); tx_ring->stats.bytes += total_bytes; @@@ -697,19 -682,10 +691,17 @@@ if (xsk_frames) xsk_umem_complete_tx(umem, xsk_frames);
+ if (xsk_umem_uses_need_wakeup(tx_ring->xsk_umem)) { + if (tx_ring->next_to_clean == tx_ring->next_to_use) + xsk_set_tx_need_wakeup(tx_ring->xsk_umem); + else + xsk_clear_tx_need_wakeup(tx_ring->xsk_umem); + } + - xmit_done = ixgbe_xmit_zc(tx_ring, q_vector->tx.work_limit); - - return budget > 0 && xmit_done; + return ixgbe_xmit_zc(tx_ring, q_vector->tx.work_limit); }
-int ixgbe_xsk_async_xmit(struct net_device *dev, u32 qid) +int ixgbe_xsk_wakeup(struct net_device *dev, u32 qid, u32 flags) { struct ixgbe_adapter *adapter = netdev_priv(dev); struct ixgbe_ring *ring; diff --combined drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c index 75e93ce2ed99,72872d6ca80c..076f2da36f27 --- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c +++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c @@@ -30,6 -30,7 +30,7 @@@ #include <linux/bpf.h> #include <linux/bpf_trace.h> #include <linux/atomic.h> + #include <net/xfrm.h>
#include "ixgbevf.h"
@@@ -2261,14 -2262,12 +2262,14 @@@ static void ixgbevf_init_last_counter_s static void ixgbevf_negotiate_api(struct ixgbevf_adapter *adapter) { struct ixgbe_hw *hw = &adapter->hw; - int api[] = { ixgbe_mbox_api_14, - ixgbe_mbox_api_13, - ixgbe_mbox_api_12, - ixgbe_mbox_api_11, - ixgbe_mbox_api_10, - ixgbe_mbox_api_unknown }; + static const int api[] = { + ixgbe_mbox_api_14, + ixgbe_mbox_api_13, + ixgbe_mbox_api_12, + ixgbe_mbox_api_11, + ixgbe_mbox_api_10, + ixgbe_mbox_api_unknown + }; int err, idx = 0;
spin_lock_bh(&adapter->mbx_lock); @@@ -2519,7 -2518,6 +2520,7 @@@ void ixgbevf_reinit_locked(struct ixgbe msleep(1);
ixgbevf_down(adapter); + pci_set_master(adapter->pdev); ixgbevf_up(adapter);
clear_bit(__IXGBEVF_RESETTING, &adapter->state); @@@ -3952,7 -3950,7 +3953,7 @@@ static void ixgbevf_tx_map(struct ixgbe struct sk_buff *skb = first->skb; struct ixgbevf_tx_buffer *tx_buffer; union ixgbe_adv_tx_desc *tx_desc; - struct skb_frag_struct *frag; + skb_frag_t *frag; dma_addr_t dma; unsigned int data_len, size; u32 tx_flags = first->tx_flags; @@@ -4137,11 -4135,8 +4138,11 @@@ static int ixgbevf_xmit_frame_ring(stru * otherwise try next time */ #if PAGE_SIZE > IXGBE_MAX_DATA_PER_TXD - for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) - count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size); + for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) { + skb_frag_t *frag = &skb_shinfo(skb)->frags[f]; + + count += TXD_USE_COUNT(skb_frag_size(frag)); + } #else count += skb_shinfo(skb)->nr_frags; #endif @@@ -4167,7 -4162,7 +4168,7 @@@ first->protocol = vlan_get_protocol(skb);
#ifdef CONFIG_IXGBEVF_IPSEC - if (secpath_exists(skb) && !ixgbevf_ipsec_tx(tx_ring, first, &ipsec_tx)) + if (xfrm_offload(skb) && !ixgbevf_ipsec_tx(tx_ring, first, &ipsec_tx)) goto out_drop; #endif tso = ixgbevf_tso(tx_ring, first, &hdr_len, &ipsec_tx); diff --combined drivers/net/ethernet/mellanox/mlx4/main.c index ef3f3d06ff1e,309470ec0219..fce9b3a24347 --- a/drivers/net/ethernet/mellanox/mlx4/main.c +++ b/drivers/net/ethernet/mellanox/mlx4/main.c @@@ -2240,7 -2240,7 +2240,7 @@@ static int mlx4_validate_optimized_stee for (i = 1; i <= dev->caps.num_ports; i++) { if (mlx4_dev_port(dev, i, &port_cap)) { mlx4_err(dev, - "QUERY_DEV_CAP command failed, can't veify DMFS high rate steering.\n"); + "QUERY_DEV_CAP command failed, can't verify DMFS high rate steering.\n"); } else if ((dev->caps.dmfs_high_steer_mode != MLX4_STEERING_DMFS_A0_DEFAULT) && (port_cap.dmfs_optimized_state == @@@ -2292,31 -2292,23 +2292,31 @@@ static int mlx4_init_fw(struct mlx4_de static int mlx4_init_hca(struct mlx4_dev *dev) { struct mlx4_priv *priv = mlx4_priv(dev); + struct mlx4_init_hca_param *init_hca = NULL; + struct mlx4_dev_cap *dev_cap = NULL; struct mlx4_adapter adapter; - struct mlx4_dev_cap dev_cap; struct mlx4_profile profile; - struct mlx4_init_hca_param init_hca; u64 icm_size; struct mlx4_config_dev_params params; int err;
if (!mlx4_is_slave(dev)) { - err = mlx4_dev_cap(dev, &dev_cap); + dev_cap = kzalloc(sizeof(*dev_cap), GFP_KERNEL); + init_hca = kzalloc(sizeof(*init_hca), GFP_KERNEL); + + if (!dev_cap || !init_hca) { + err = -ENOMEM; + goto out_free; + } + + err = mlx4_dev_cap(dev, dev_cap); if (err) { mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting\n"); - return err; + goto out_free; }
- choose_steering_mode(dev, &dev_cap); - choose_tunnel_offload_mode(dev, &dev_cap); + choose_steering_mode(dev, dev_cap); + choose_tunnel_offload_mode(dev, dev_cap);
if (dev->caps.dmfs_high_steer_mode == MLX4_STEERING_DMFS_A0_STATIC && mlx4_is_master(dev)) @@@ -2339,48 -2331,48 +2339,48 @@@ MLX4_STEERING_MODE_DEVICE_MANAGED) profile.num_mcg = MLX4_FS_NUM_MCG;
- icm_size = mlx4_make_profile(dev, &profile, &dev_cap, - &init_hca); + icm_size = mlx4_make_profile(dev, &profile, dev_cap, + init_hca); if ((long long) icm_size < 0) { err = icm_size; - return err; + goto out_free; }
dev->caps.max_fmr_maps = (1 << (32 - ilog2(dev->caps.num_mpts))) - 1;
if (enable_4k_uar || !dev->persist->num_vfs) { - init_hca.log_uar_sz = ilog2(dev->caps.num_uars) + + init_hca->log_uar_sz = ilog2(dev->caps.num_uars) + PAGE_SHIFT - DEFAULT_UAR_PAGE_SHIFT; - init_hca.uar_page_sz = DEFAULT_UAR_PAGE_SHIFT - 12; + init_hca->uar_page_sz = DEFAULT_UAR_PAGE_SHIFT - 12; } else { - init_hca.log_uar_sz = ilog2(dev->caps.num_uars); - init_hca.uar_page_sz = PAGE_SHIFT - 12; + init_hca->log_uar_sz = ilog2(dev->caps.num_uars); + init_hca->uar_page_sz = PAGE_SHIFT - 12; }
- init_hca.mw_enabled = 0; + init_hca->mw_enabled = 0; if (dev->caps.flags & MLX4_DEV_CAP_FLAG_MEM_WINDOW || dev->caps.bmme_flags & MLX4_BMME_FLAG_TYPE_2_WIN) - init_hca.mw_enabled = INIT_HCA_TPT_MW_ENABLE; + init_hca->mw_enabled = INIT_HCA_TPT_MW_ENABLE;
- err = mlx4_init_icm(dev, &dev_cap, &init_hca, icm_size); + err = mlx4_init_icm(dev, dev_cap, init_hca, icm_size); if (err) - return err; + goto out_free;
- err = mlx4_INIT_HCA(dev, &init_hca); + err = mlx4_INIT_HCA(dev, init_hca); if (err) { mlx4_err(dev, "INIT_HCA command failed, aborting\n"); goto err_free_icm; }
- if (dev_cap.flags2 & MLX4_DEV_CAP_FLAG2_SYS_EQS) { - err = mlx4_query_func(dev, &dev_cap); + if (dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_SYS_EQS) { + err = mlx4_query_func(dev, dev_cap); if (err < 0) { mlx4_err(dev, "QUERY_FUNC command failed, aborting.\n"); goto err_close; } else if (err & MLX4_QUERY_FUNC_NUM_SYS_EQS) { - dev->caps.num_eqs = dev_cap.max_eqs; - dev->caps.reserved_eqs = dev_cap.reserved_eqs; - dev->caps.reserved_uars = dev_cap.reserved_uars; + dev->caps.num_eqs = dev_cap->max_eqs; + dev->caps.reserved_eqs = dev_cap->reserved_eqs; + dev->caps.reserved_uars = dev_cap->reserved_uars; } }
@@@ -2389,13 -2381,14 +2389,13 @@@ * read HCA frequency by QUERY_HCA command */ if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS) { - memset(&init_hca, 0, sizeof(init_hca)); - err = mlx4_QUERY_HCA(dev, &init_hca); + err = mlx4_QUERY_HCA(dev, init_hca); if (err) { mlx4_err(dev, "QUERY_HCA command failed, disable timestamp\n"); dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_TS; } else { dev->caps.hca_core_clock = - init_hca.hca_core_clock; + init_hca->hca_core_clock; }
/* In case we got HCA frequency 0 - disable timestamping @@@ -2471,8 -2464,7 +2471,8 @@@ priv->eq_table.inta_pin = adapter.inta_pin; memcpy(dev->board_id, adapter.board_id, sizeof(dev->board_id));
- return 0; + err = 0; + goto out_free;
unmap_bf: unmap_internal_clock(dev); @@@ -2491,10 -2483,6 +2491,10 @@@ err_free_icm if (!mlx4_is_slave(dev)) mlx4_free_icms(dev);
+out_free: + kfree(dev_cap); + kfree(init_hca); + return err; }
@@@ -3931,43 -3919,26 +3931,43 @@@ static void mlx4_devlink_param_load_dri } }
-static int mlx4_devlink_reload(struct devlink *devlink, - struct netlink_ext_ack *extack) +static void mlx4_restart_one_down(struct pci_dev *pdev); +static int mlx4_restart_one_up(struct pci_dev *pdev, bool reload, + struct devlink *devlink); + +static int mlx4_devlink_reload_down(struct devlink *devlink, + struct netlink_ext_ack *extack) { struct mlx4_priv *priv = devlink_priv(devlink); struct mlx4_dev *dev = &priv->dev; struct mlx4_dev_persistent *persist = dev->persist; - int err;
if (persist->num_vfs) mlx4_warn(persist->dev, "Reload performed on PF, will cause reset on operating Virtual Functions\n"); - err = mlx4_restart_one(persist->pdev, true, devlink); + mlx4_restart_one_down(persist->pdev); + return 0; +} + +static int mlx4_devlink_reload_up(struct devlink *devlink, + struct netlink_ext_ack *extack) +{ + struct mlx4_priv *priv = devlink_priv(devlink); + struct mlx4_dev *dev = &priv->dev; + struct mlx4_dev_persistent *persist = dev->persist; + int err; + + err = mlx4_restart_one_up(persist->pdev, true, devlink); if (err) - mlx4_err(persist->dev, "mlx4_restart_one failed, ret=%d\n", err); + mlx4_err(persist->dev, "mlx4_restart_one_up failed, ret=%d\n", + err);
return err; }
static const struct devlink_ops mlx4_devlink_ops = { .port_type_set = mlx4_devlink_port_type_set, - .reload = mlx4_devlink_reload, + .reload_down = mlx4_devlink_reload_down, + .reload_up = mlx4_devlink_reload_up, };
static int mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id) @@@ -4180,13 -4151,7 +4180,13 @@@ static int restore_current_port_types(s return err; }
-int mlx4_restart_one(struct pci_dev *pdev, bool reload, struct devlink *devlink) +static void mlx4_restart_one_down(struct pci_dev *pdev) +{ + mlx4_unload_one(pdev); +} + +static int mlx4_restart_one_up(struct pci_dev *pdev, bool reload, + struct devlink *devlink) { struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev); struct mlx4_dev *dev = persist->dev; @@@ -4198,6 -4163,7 +4198,6 @@@ total_vfs = dev->persist->num_vfs; memcpy(nvfs, dev->persist->nvfs, sizeof(dev->persist->nvfs));
- mlx4_unload_one(pdev); if (reload) mlx4_devlink_param_load_driverinit_values(devlink); err = mlx4_load_one(pdev, pci_dev_data, total_vfs, nvfs, priv, 1); @@@ -4216,12 -4182,6 +4216,12 @@@ return err; }
+int mlx4_restart_one(struct pci_dev *pdev) +{ + mlx4_restart_one_down(pdev); + return mlx4_restart_one_up(pdev, false, NULL); +} + #define MLX_SP(id) { PCI_VDEVICE(MELLANOX, id), MLX4_PCI_DEV_FORCE_SENSE_PORT } #define MLX_VF(id) { PCI_VDEVICE(MELLANOX, id), MLX4_PCI_DEV_IS_VF } #define MLX_GN(id) { PCI_VDEVICE(MELLANOX, id), 0 } diff --combined drivers/net/ethernet/nvidia/forcedeth.c index ecca794c55e2,a6b4bfae4684..05d2b478c99b --- a/drivers/net/ethernet/nvidia/forcedeth.c +++ b/drivers/net/ethernet/nvidia/forcedeth.c @@@ -713,6 -713,21 +713,21 @@@ struct nv_skb_map struct nv_skb_map *next_tx_ctx; };
+ struct nv_txrx_stats { + u64 stat_rx_packets; + u64 stat_rx_bytes; /* not always available in HW */ + u64 stat_rx_missed_errors; + u64 stat_rx_dropped; + u64 stat_tx_packets; /* not always available in HW */ + u64 stat_tx_bytes; + u64 stat_tx_dropped; + }; + + #define nv_txrx_stats_inc(member) \ + __this_cpu_inc(np->txrx_stats->member) + #define nv_txrx_stats_add(member, count) \ + __this_cpu_add(np->txrx_stats->member, (count)) + /* * SMP locking: * All hardware access under netdev_priv(dev)->lock, except the performance @@@ -797,10 -812,7 +812,7 @@@ struct fe_priv
/* RX software stats */ struct u64_stats_sync swstats_rx_syncp; - u64 stat_rx_packets; - u64 stat_rx_bytes; /* not always available in HW */ - u64 stat_rx_missed_errors; - u64 stat_rx_dropped; + struct nv_txrx_stats __percpu *txrx_stats;
/* media detection workaround. * Locking: Within irq hander or disable_irq+spin_lock(&np->lock); @@@ -826,9 -838,6 +838,6 @@@
/* TX software stats */ struct u64_stats_sync swstats_tx_syncp; - u64 stat_tx_packets; /* not always available in HW */ - u64 stat_tx_bytes; - u64 stat_tx_dropped;
/* msi/msi-x fields */ u32 msi_flags; @@@ -1721,6 -1730,39 +1730,39 @@@ static void nv_update_stats(struct net_ } }
+ static void nv_get_stats(int cpu, struct fe_priv *np, + struct rtnl_link_stats64 *storage) + { + struct nv_txrx_stats *src = per_cpu_ptr(np->txrx_stats, cpu); + unsigned int syncp_start; + u64 rx_packets, rx_bytes, rx_dropped, rx_missed_errors; + u64 tx_packets, tx_bytes, tx_dropped; + + do { + syncp_start = u64_stats_fetch_begin_irq(&np->swstats_rx_syncp); + rx_packets = src->stat_rx_packets; + rx_bytes = src->stat_rx_bytes; + rx_dropped = src->stat_rx_dropped; + rx_missed_errors = src->stat_rx_missed_errors; + } while (u64_stats_fetch_retry_irq(&np->swstats_rx_syncp, syncp_start)); + + storage->rx_packets += rx_packets; + storage->rx_bytes += rx_bytes; + storage->rx_dropped += rx_dropped; + storage->rx_missed_errors += rx_missed_errors; + + do { + syncp_start = u64_stats_fetch_begin_irq(&np->swstats_tx_syncp); + tx_packets = src->stat_tx_packets; + tx_bytes = src->stat_tx_bytes; + tx_dropped = src->stat_tx_dropped; + } while (u64_stats_fetch_retry_irq(&np->swstats_tx_syncp, syncp_start)); + + storage->tx_packets += tx_packets; + storage->tx_bytes += tx_bytes; + storage->tx_dropped += tx_dropped; + } + /* * nv_get_stats64: dev->ndo_get_stats64 function * Get latest stats value from the nic. @@@ -1733,7 -1775,7 +1775,7 @@@ nv_get_stats64(struct net_device *dev, __releases(&netdev_priv(dev)->hwstats_lock) { struct fe_priv *np = netdev_priv(dev); - unsigned int syncp_start; + int cpu;
/* * Note: because HW stats are not always available and for @@@ -1746,20 -1788,8 +1788,8 @@@ */
/* software stats */ - do { - syncp_start = u64_stats_fetch_begin_irq(&np->swstats_rx_syncp); - storage->rx_packets = np->stat_rx_packets; - storage->rx_bytes = np->stat_rx_bytes; - storage->rx_dropped = np->stat_rx_dropped; - storage->rx_missed_errors = np->stat_rx_missed_errors; - } while (u64_stats_fetch_retry_irq(&np->swstats_rx_syncp, syncp_start)); - - do { - syncp_start = u64_stats_fetch_begin_irq(&np->swstats_tx_syncp); - storage->tx_packets = np->stat_tx_packets; - storage->tx_bytes = np->stat_tx_bytes; - storage->tx_dropped = np->stat_tx_dropped; - } while (u64_stats_fetch_retry_irq(&np->swstats_tx_syncp, syncp_start)); + for_each_online_cpu(cpu) + nv_get_stats(cpu, np, storage);
/* If the nic supports hw counters then retrieve latest values */ if (np->driver_data & DEV_HAS_STATISTICS_V123) { @@@ -1827,7 -1857,7 +1857,7 @@@ static int nv_alloc_rx(struct net_devic } else { packet_dropped: u64_stats_update_begin(&np->swstats_rx_syncp); - np->stat_rx_dropped++; + nv_txrx_stats_inc(stat_rx_dropped); u64_stats_update_end(&np->swstats_rx_syncp); return 1; } @@@ -1869,7 -1899,7 +1899,7 @@@ static int nv_alloc_rx_optimized(struc } else { packet_dropped: u64_stats_update_begin(&np->swstats_rx_syncp); - np->stat_rx_dropped++; + nv_txrx_stats_inc(stat_rx_dropped); u64_stats_update_end(&np->swstats_rx_syncp); return 1; } @@@ -2013,7 -2043,7 +2043,7 @@@ static void nv_drain_tx(struct net_devi } if (nv_release_txskb(np, &np->tx_skb[i])) { u64_stats_update_begin(&np->swstats_tx_syncp); - np->stat_tx_dropped++; + nv_txrx_stats_inc(stat_tx_dropped); u64_stats_update_end(&np->swstats_tx_syncp); } np->tx_skb[i].dma = 0; @@@ -2227,7 -2257,7 +2257,7 @@@ static netdev_tx_t nv_start_xmit(struc /* on DMA mapping error - drop the packet */ dev_kfree_skb_any(skb); u64_stats_update_begin(&np->swstats_tx_syncp); - np->stat_tx_dropped++; + nv_txrx_stats_inc(stat_tx_dropped); u64_stats_update_end(&np->swstats_tx_syncp); return NETDEV_TX_OK; } @@@ -2273,7 -2303,7 +2303,7 @@@ dev_kfree_skb_any(skb); np->put_tx_ctx = start_tx_ctx; u64_stats_update_begin(&np->swstats_tx_syncp); - np->stat_tx_dropped++; + nv_txrx_stats_inc(stat_tx_dropped); u64_stats_update_end(&np->swstats_tx_syncp); return NETDEV_TX_OK; } @@@ -2384,7 -2414,7 +2414,7 @@@ static netdev_tx_t nv_start_xmit_optimi /* on DMA mapping error - drop the packet */ dev_kfree_skb_any(skb); u64_stats_update_begin(&np->swstats_tx_syncp); - np->stat_tx_dropped++; + nv_txrx_stats_inc(stat_tx_dropped); u64_stats_update_end(&np->swstats_tx_syncp); return NETDEV_TX_OK; } @@@ -2431,7 -2461,7 +2461,7 @@@ dev_kfree_skb_any(skb); np->put_tx_ctx = start_tx_ctx; u64_stats_update_begin(&np->swstats_tx_syncp); - np->stat_tx_dropped++; + nv_txrx_stats_inc(stat_tx_dropped); u64_stats_update_end(&np->swstats_tx_syncp); return NETDEV_TX_OK; } @@@ -2560,9 -2590,12 +2590,12 @@@ static int nv_tx_done(struct net_devic && !(flags & NV_TX_RETRYCOUNT_MASK)) nv_legacybackoff_reseed(dev); } else { + unsigned int len; + u64_stats_update_begin(&np->swstats_tx_syncp); - np->stat_tx_packets++; - np->stat_tx_bytes += np->get_tx_ctx->skb->len; + nv_txrx_stats_inc(stat_tx_packets); + len = np->get_tx_ctx->skb->len; + nv_txrx_stats_add(stat_tx_bytes, len); u64_stats_update_end(&np->swstats_tx_syncp); } bytes_compl += np->get_tx_ctx->skb->len; @@@ -2577,9 -2610,12 +2610,12 @@@ && !(flags & NV_TX2_RETRYCOUNT_MASK)) nv_legacybackoff_reseed(dev); } else { + unsigned int len; + u64_stats_update_begin(&np->swstats_tx_syncp); - np->stat_tx_packets++; - np->stat_tx_bytes += np->get_tx_ctx->skb->len; + nv_txrx_stats_inc(stat_tx_packets); + len = np->get_tx_ctx->skb->len; + nv_txrx_stats_add(stat_tx_bytes, len); u64_stats_update_end(&np->swstats_tx_syncp); } bytes_compl += np->get_tx_ctx->skb->len; @@@ -2627,9 -2663,12 +2663,12 @@@ static int nv_tx_done_optimized(struct nv_legacybackoff_reseed(dev); } } else { + unsigned int len; + u64_stats_update_begin(&np->swstats_tx_syncp); - np->stat_tx_packets++; - np->stat_tx_bytes += np->get_tx_ctx->skb->len; + nv_txrx_stats_inc(stat_tx_packets); + len = np->get_tx_ctx->skb->len; + nv_txrx_stats_add(stat_tx_bytes, len); u64_stats_update_end(&np->swstats_tx_syncp); }
@@@ -2806,6 -2845,15 +2845,15 @@@ static int nv_getlen(struct net_device } }
+ static void rx_missing_handler(u32 flags, struct fe_priv *np) + { + if (flags & NV_RX_MISSEDFRAME) { + u64_stats_update_begin(&np->swstats_rx_syncp); + nv_txrx_stats_inc(stat_rx_missed_errors); + u64_stats_update_end(&np->swstats_rx_syncp); + } + } + static int nv_rx_process(struct net_device *dev, int limit) { struct fe_priv *np = netdev_priv(dev); @@@ -2848,11 -2896,7 +2896,7 @@@ } /* the rest are hard errors */ else { - if (flags & NV_RX_MISSEDFRAME) { - u64_stats_update_begin(&np->swstats_rx_syncp); - np->stat_rx_missed_errors++; - u64_stats_update_end(&np->swstats_rx_syncp); - } + rx_missing_handler(flags, np); dev_kfree_skb(skb); goto next_pkt; } @@@ -2896,8 -2940,8 +2940,8 @@@ skb->protocol = eth_type_trans(skb, dev); napi_gro_receive(&np->napi, skb); u64_stats_update_begin(&np->swstats_rx_syncp); - np->stat_rx_packets++; - np->stat_rx_bytes += len; + nv_txrx_stats_inc(stat_rx_packets); + nv_txrx_stats_add(stat_rx_bytes, len); u64_stats_update_end(&np->swstats_rx_syncp); next_pkt: if (unlikely(np->get_rx.orig++ == np->last_rx.orig)) @@@ -2982,8 -3026,8 +3026,8 @@@ static int nv_rx_process_optimized(stru } napi_gro_receive(&np->napi, skb); u64_stats_update_begin(&np->swstats_rx_syncp); - np->stat_rx_packets++; - np->stat_rx_bytes += len; + nv_txrx_stats_inc(stat_rx_packets); + nv_txrx_stats_add(stat_rx_bytes, len); u64_stats_update_end(&np->swstats_rx_syncp); } else { dev_kfree_skb(skb); @@@ -5651,6 -5695,12 +5695,12 @@@ static int nv_probe(struct pci_dev *pci SET_NETDEV_DEV(dev, &pci_dev->dev); u64_stats_init(&np->swstats_rx_syncp); u64_stats_init(&np->swstats_tx_syncp); + np->txrx_stats = alloc_percpu(struct nv_txrx_stats); + if (!np->txrx_stats) { + pr_err("np->txrx_stats, alloc memory error.\n"); + err = -ENOMEM; + goto out_alloc_percpu; + }
timer_setup(&np->oom_kick, nv_do_rx_refill, 0); timer_setup(&np->nic_poll, nv_do_nic_poll, 0); @@@ -6060,6 -6110,8 +6110,8 @@@ out_relreg out_disable: pci_disable_device(pci_dev); out_free: + free_percpu(np->txrx_stats); + out_alloc_percpu: free_netdev(dev); out: return err; @@@ -6105,6 -6157,9 +6157,9 @@@ static void nv_restore_mac_addr(struct static void nv_remove(struct pci_dev *pci_dev) { struct net_device *dev = pci_get_drvdata(pci_dev); + struct fe_priv *np = netdev_priv(dev); + + free_percpu(np->txrx_stats);
unregister_netdev(dev);
@@@ -6126,7 -6181,8 +6181,7 @@@ #ifdef CONFIG_PM_SLEEP static int nv_suspend(struct device *device) { - struct pci_dev *pdev = to_pci_dev(device); - struct net_device *dev = pci_get_drvdata(pdev); + struct net_device *dev = dev_get_drvdata(device); struct fe_priv *np = netdev_priv(dev); u8 __iomem *base = get_hwbase(dev); int i; diff --combined drivers/net/wireless/intel/iwlwifi/pcie/drv.c index e5ca1f2685b6,3b12e7ad35e1..e29c47744ef5 --- a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c @@@ -65,6 -65,7 +65,6 @@@ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h> -#include <linux/pm_runtime.h> #include <linux/pci.h> #include <linux/acpi.h>
@@@ -72,7 -73,6 +72,7 @@@
#include "iwl-trans.h" #include "iwl-drv.h" +#include "iwl-prph.h" #include "internal.h"
#define IWL_PCI_DEVICE(dev, subdev, cfg) \ @@@ -994,22 -994,15 +994,22 @@@ static int iwl_pci_probe(struct pci_de const struct iwl_cfg *cfg = (struct iwl_cfg *)(ent->driver_data); const struct iwl_cfg *cfg_7265d __maybe_unused = NULL; struct iwl_trans *iwl_trans; + unsigned long flags; int ret;
- if (WARN_ONCE(!cfg->csr, "CSR addresses aren't configured\n")) - return -EINVAL; - - iwl_trans = iwl_trans_pcie_alloc(pdev, ent, cfg); + iwl_trans = iwl_trans_pcie_alloc(pdev, ent, &cfg->trans); if (IS_ERR(iwl_trans)) return PTR_ERR(iwl_trans);
+ /* the trans_cfg should never change, so set it now */ + iwl_trans->trans_cfg = &cfg->trans; + + if (WARN_ONCE(!iwl_trans->trans_cfg->csr, + "CSR addresses aren't configured\n")) { + ret = -EINVAL; + goto out_free_trans; + } + #if IS_ENABLED(CONFIG_IWLMVM) /* * special-case 7265D, it has the same PCI IDs. @@@ -1025,70 -1018,29 +1025,70 @@@ else if (cfg == &iwl7265_n_cfg) cfg_7265d = &iwl7265d_n_cfg; if (cfg_7265d && - (iwl_trans->hw_rev & CSR_HW_REV_TYPE_MSK) == CSR_HW_REV_TYPE_7265D) { + (iwl_trans->hw_rev & CSR_HW_REV_TYPE_MSK) == CSR_HW_REV_TYPE_7265D) cfg = cfg_7265d; - iwl_trans->cfg = cfg_7265d; - }
- if (iwl_trans->cfg->rf_id && cfg == &iwl22000_2ac_cfg_hr_cdb && - iwl_trans->hw_rev != CSR_HW_REV_TYPE_HR_CDB) { - u32 rf_id_chp = CSR_HW_RF_ID_TYPE_CHIP_ID(iwl_trans->hw_rf_id); - u32 jf_chp_id = CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_JF); - u32 hr_chp_id = CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_HR); - - if (rf_id_chp == jf_chp_id) { - if (iwl_trans->hw_rev == CSR_HW_REV_TYPE_QNJ) - cfg = &iwl9560_2ac_cfg_qnj_jf_b0; - else - cfg = &iwl22000_2ac_cfg_jf; - } else if (rf_id_chp == hr_chp_id) { - if (iwl_trans->hw_rev == CSR_HW_REV_TYPE_QNJ) - cfg = &iwl22000_2ax_cfg_qnj_hr_a0; - else - cfg = &iwl22000_2ac_cfg_hr; + iwl_trans->hw_rf_id = iwl_read32(iwl_trans, CSR_HW_RF_ID); + + if (cfg == &iwlax210_2ax_cfg_so_hr_a0) { + if (iwl_trans->hw_rev == CSR_HW_REV_TYPE_TY) { + cfg = &iwlax210_2ax_cfg_ty_gf_a0; + } else if (CSR_HW_RF_ID_TYPE_CHIP_ID(iwl_trans->hw_rf_id) == + CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_JF)) { + cfg = &iwlax210_2ax_cfg_so_jf_a0; + } else if (CSR_HW_RF_ID_TYPE_CHIP_ID(iwl_trans->hw_rf_id) == + CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_GF)) { + cfg = &iwlax211_2ax_cfg_so_gf_a0; + } else if (CSR_HW_RF_ID_TYPE_CHIP_ID(iwl_trans->hw_rf_id) == + CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_GF4)) { + cfg = &iwlax411_2ax_cfg_so_gf4_a0; + } + } else if (cfg == &iwl_ax101_cfg_qu_hr) { + if ((CSR_HW_RF_ID_TYPE_CHIP_ID(iwl_trans->hw_rf_id) == + CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_HR) && + iwl_trans->hw_rev == CSR_HW_REV_TYPE_QNJ_B0) || + (CSR_HW_RF_ID_TYPE_CHIP_ID(iwl_trans->hw_rf_id) == + CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_HR1))) { + cfg = &iwl22000_2ax_cfg_qnj_hr_b0; + } else if (CSR_HW_RF_ID_TYPE_CHIP_ID(iwl_trans->hw_rf_id) == + CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_HR)) { + cfg = &iwl_ax101_cfg_qu_hr; + } else if (CSR_HW_RF_ID_TYPE_CHIP_ID(iwl_trans->hw_rf_id) == + CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_JF)) { + cfg = &iwl22000_2ax_cfg_jf; + } else if (CSR_HW_RF_ID_TYPE_CHIP_ID(iwl_trans->hw_rf_id) == + CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_HRCDB)) { + IWL_ERR(iwl_trans, "RF ID HRCDB is not supported\n"); + return -EINVAL; + } else { + IWL_ERR(iwl_trans, "Unrecognized RF ID 0x%08x\n", + CSR_HW_RF_ID_TYPE_CHIP_ID(iwl_trans->hw_rf_id)); + return -EINVAL; + } + } else if (CSR_HW_RF_ID_TYPE_CHIP_ID(iwl_trans->hw_rf_id) == + CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_HR) && + ((cfg != &iwl_ax200_cfg_cc && + cfg != &killer1650x_2ax_cfg && + cfg != &killer1650w_2ax_cfg && + cfg != &iwl_ax201_cfg_quz_hr) || + iwl_trans->hw_rev == CSR_HW_REV_TYPE_QNJ_B0)) { + u32 hw_status; + + hw_status = iwl_read_prph(iwl_trans, UMAG_GEN_HW_STATUS); + if (CSR_HW_RF_STEP(iwl_trans->hw_rf_id) == SILICON_B_STEP) + /* + * b step fw is the same for physical card and fpga + */ + cfg = &iwl22000_2ax_cfg_qnj_hr_b0; + else if ((hw_status & UMAG_GEN_HW_IS_FPGA) && + CSR_HW_RF_STEP(iwl_trans->hw_rf_id) == SILICON_A_STEP) { + cfg = &iwl22000_2ax_cfg_qnj_hr_a0_f0; + } else { + /* + * a step no FPGA + */ + cfg = &iwl22000_2ac_cfg_hr; } - iwl_trans->cfg = cfg; }
/* @@@ -1098,54 -1050,41 +1098,54 @@@ * thing to do to support Qu C-step. */ if (iwl_trans->hw_rev == CSR_HW_REV_TYPE_QU_C0) { - if (iwl_trans->cfg == &iwl_ax101_cfg_qu_hr) - iwl_trans->cfg = &iwl_ax101_cfg_qu_c0_hr_b0; - else if (iwl_trans->cfg == &iwl_ax201_cfg_qu_hr) - iwl_trans->cfg = &iwl_ax201_cfg_qu_c0_hr_b0; - else if (iwl_trans->cfg == &iwl9461_2ac_cfg_qu_b0_jf_b0) - iwl_trans->cfg = &iwl9461_2ac_cfg_qu_c0_jf_b0; - else if (iwl_trans->cfg == &iwl9462_2ac_cfg_qu_b0_jf_b0) - iwl_trans->cfg = &iwl9462_2ac_cfg_qu_c0_jf_b0; - else if (iwl_trans->cfg == &iwl9560_2ac_cfg_qu_b0_jf_b0) - iwl_trans->cfg = &iwl9560_2ac_cfg_qu_c0_jf_b0; - else if (iwl_trans->cfg == &iwl9560_2ac_160_cfg_qu_b0_jf_b0) - iwl_trans->cfg = &iwl9560_2ac_160_cfg_qu_c0_jf_b0; - else if (iwl_trans->cfg == &killer1650s_2ax_cfg_qu_b0_hr_b0) - iwl_trans->cfg = &killer1650s_2ax_cfg_qu_c0_hr_b0; - else if (iwl_trans->cfg == &killer1650i_2ax_cfg_qu_b0_hr_b0) - iwl_trans->cfg = &killer1650i_2ax_cfg_qu_c0_hr_b0; + if (cfg == &iwl_ax101_cfg_qu_hr) + cfg = &iwl_ax101_cfg_qu_c0_hr_b0; + else if (cfg == &iwl_ax201_cfg_qu_hr) + cfg = &iwl_ax201_cfg_qu_c0_hr_b0; + else if (cfg == &iwl9461_2ac_cfg_qu_b0_jf_b0) + cfg = &iwl9461_2ac_cfg_qu_c0_jf_b0; + else if (cfg == &iwl9462_2ac_cfg_qu_b0_jf_b0) + cfg = &iwl9462_2ac_cfg_qu_c0_jf_b0; + else if (cfg == &iwl9560_2ac_cfg_qu_b0_jf_b0) + cfg = &iwl9560_2ac_cfg_qu_c0_jf_b0; + else if (cfg == &iwl9560_2ac_160_cfg_qu_b0_jf_b0) + cfg = &iwl9560_2ac_160_cfg_qu_c0_jf_b0; }
/* same thing for QuZ... */ if (iwl_trans->hw_rev == CSR_HW_REV_TYPE_QUZ) { - if (cfg == &iwl_ax101_cfg_qu_hr) - cfg = &iwl_ax101_cfg_quz_hr; - else if (cfg == &iwl_ax201_cfg_qu_hr) - cfg = &iwl_ax201_cfg_quz_hr; - else if (cfg == &iwl9461_2ac_cfg_qu_b0_jf_b0) - cfg = &iwl9461_2ac_cfg_quz_a0_jf_b0_soc; - else if (cfg == &iwl9462_2ac_cfg_qu_b0_jf_b0) - cfg = &iwl9462_2ac_cfg_quz_a0_jf_b0_soc; - else if (cfg == &iwl9560_2ac_cfg_qu_b0_jf_b0) - cfg = &iwl9560_2ac_cfg_quz_a0_jf_b0_soc; - else if (cfg == &iwl9560_2ac_160_cfg_qu_b0_jf_b0) - cfg = &iwl9560_2ac_160_cfg_quz_a0_jf_b0_soc; + if (iwl_trans->cfg == &iwl_ax101_cfg_qu_hr) + iwl_trans->cfg = &iwl_ax101_cfg_quz_hr; + else if (iwl_trans->cfg == &iwl_ax201_cfg_qu_hr) + iwl_trans->cfg = &iwl_ax201_cfg_quz_hr; + else if (iwl_trans->cfg == &iwl9461_2ac_cfg_qu_b0_jf_b0) + iwl_trans->cfg = &iwl9461_2ac_cfg_quz_a0_jf_b0_soc; + else if (iwl_trans->cfg == &iwl9462_2ac_cfg_qu_b0_jf_b0) + iwl_trans->cfg = &iwl9462_2ac_cfg_quz_a0_jf_b0_soc; + else if (iwl_trans->cfg == &iwl9560_2ac_cfg_qu_b0_jf_b0) + iwl_trans->cfg = &iwl9560_2ac_cfg_quz_a0_jf_b0_soc; + else if (iwl_trans->cfg == &iwl9560_2ac_160_cfg_qu_b0_jf_b0) + iwl_trans->cfg = &iwl9560_2ac_160_cfg_quz_a0_jf_b0_soc; }
#endif + /* now set the real cfg we decided to use */ + iwl_trans->cfg = cfg; + + if (iwl_trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_8000 && + iwl_trans_grab_nic_access(iwl_trans, &flags)) { + u32 hw_step; + + hw_step = iwl_read_umac_prph_no_grab(iwl_trans, WFPM_CTRL_REG); + hw_step |= ENABLE_WFPM; + iwl_write_umac_prph_no_grab(iwl_trans, WFPM_CTRL_REG, hw_step); + hw_step = iwl_read_prph_no_grab(iwl_trans, CNVI_AUX_MISC_CHIP); + hw_step = (hw_step >> HW_STEP_LOCATION_BITS) & 0xF; + if (hw_step == 0x3) + iwl_trans->hw_rev = (iwl_trans->hw_rev & 0xFFFFFFF3) | + (SILICON_C_STEP << 2); + iwl_trans_release_nic_access(iwl_trans, &flags); + }
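For reference, the hunk above reads the step nibble out of CNVI_AUX_MISC_CHIP and, when it reads 0x3, rewrites only bits 2-3 of hw_rev to the C-step encoding. A minimal userspace sketch of that bit surgery follows; it is illustrative only and not part of the merged patch. STEP_SHIFT stands in for HW_STEP_LOCATION_BITS and its value here is an assumption, while the 0xFFFFFFF3 mask and the step-in-bits-2-3 placement mirror the hunk.

/* Illustrative sketch, not driver code: hypothetical constants. */
#include <stdint.h>
#include <stdio.h>

#define STEP_SHIFT      24   /* assumed position of the step nibble */
#define SILICON_C_STEP  2    /* assumed C-step encoding */

static uint32_t apply_c_step(uint32_t hw_rev, uint32_t chip_reg)
{
    uint32_t step = (chip_reg >> STEP_SHIFT) & 0xF;   /* 4-bit step field */

    if (step == 0x3)                                  /* C step detected */
        hw_rev = (hw_rev & 0xFFFFFFF3) | (SILICON_C_STEP << 2);
    return hw_rev;
}

int main(void)
{
    printf("0x%08x\n", apply_c_step(0x00000321, 0x03000000));
    return 0;
}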
pci_set_drvdata(pdev, iwl_trans); iwl_trans->drv = iwl_drv_start(iwl_trans); @@@ -1158,6 -1097,25 +1158,6 @@@ /* register transport layer debugfs here */ iwl_trans_pcie_dbgfs_register(iwl_trans);
- /* if RTPM is in use, enable it in our device */ - if (iwl_trans->runtime_pm_mode != IWL_PLAT_PM_MODE_DISABLED) { - /* We explicitly set the device to active here to - * clear contingent errors. - */ - pm_runtime_set_active(&pdev->dev); - - pm_runtime_set_autosuspend_delay(&pdev->dev, - iwlwifi_mod_params.d0i3_timeout); - pm_runtime_use_autosuspend(&pdev->dev); - - /* We are not supposed to call pm_runtime_allow() by - * ourselves, but let userspace enable runtime PM via - * sysfs. However, since we don't enable this from - * userspace yet, we need to allow/forbid() ourselves. - */ - pm_runtime_allow(&pdev->dev); - } - /* The PCI device starts with a reference taken and we are * supposed to release it here. But to simplify the * interaction with the opmode, we don't do it now, but let @@@ -1175,6 -1133,15 +1175,6 @@@ static void iwl_pci_remove(struct pci_d { struct iwl_trans *trans = pci_get_drvdata(pdev);
- /* if RTPM was in use, restore it to the state before probe */ - if (trans->runtime_pm_mode != IWL_PLAT_PM_MODE_DISABLED) { - /* We should not call forbid here, but we do for now. - * Check the comment to pm_runtime_allow() in - * iwl_pci_probe(). - */ - pm_runtime_forbid(trans->dev); - } - iwl_drv_stop(trans->drv);
iwl_trans_pcie_free(trans); @@@ -1232,9 -1199,164 +1232,9 @@@ static int iwl_pci_resume(struct devic return 0; }
-int iwl_pci_fw_enter_d0i3(struct iwl_trans *trans) -{ - struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - int ret; - - if (test_bit(STATUS_FW_ERROR, &trans->status)) - return 0; - - set_bit(STATUS_TRANS_GOING_IDLE, &trans->status); - - /* config the fw */ - ret = iwl_op_mode_enter_d0i3(trans->op_mode); - if (ret == 1) { - IWL_DEBUG_RPM(trans, "aborting d0i3 entrance\n"); - clear_bit(STATUS_TRANS_GOING_IDLE, &trans->status); - return -EBUSY; - } - if (ret) - goto err; - - ret = wait_event_timeout(trans_pcie->d0i3_waitq, - test_bit(STATUS_TRANS_IDLE, &trans->status), - msecs_to_jiffies(IWL_TRANS_IDLE_TIMEOUT)); - if (!ret) { - IWL_ERR(trans, "Timeout entering D0i3\n"); - ret = -ETIMEDOUT; - goto err; - } - - clear_bit(STATUS_TRANS_GOING_IDLE, &trans->status); - - return 0; -err: - clear_bit(STATUS_TRANS_GOING_IDLE, &trans->status); - iwl_trans_fw_error(trans); - return ret; -} - -int iwl_pci_fw_exit_d0i3(struct iwl_trans *trans) -{ - struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - int ret; - - /* sometimes a D0i3 entry is not followed through */ - if (!test_bit(STATUS_TRANS_IDLE, &trans->status)) - return 0; - - /* config the fw */ - ret = iwl_op_mode_exit_d0i3(trans->op_mode); - if (ret) - goto err; - - /* we clear STATUS_TRANS_IDLE only when D0I3_END command is completed */ - - ret = wait_event_timeout(trans_pcie->d0i3_waitq, - !test_bit(STATUS_TRANS_IDLE, &trans->status), - msecs_to_jiffies(IWL_TRANS_IDLE_TIMEOUT)); - if (!ret) { - IWL_ERR(trans, "Timeout exiting D0i3\n"); - ret = -ETIMEDOUT; - goto err; - } - - return 0; -err: - clear_bit(STATUS_TRANS_IDLE, &trans->status); - iwl_trans_fw_error(trans); - return ret; -} - -#ifdef CONFIG_IWLWIFI_PCIE_RTPM -static int iwl_pci_runtime_suspend(struct device *device) -{ - struct pci_dev *pdev = to_pci_dev(device); - struct iwl_trans *trans = pci_get_drvdata(pdev); - int ret; - - IWL_DEBUG_RPM(trans, "entering runtime suspend\n"); - - if (test_bit(STATUS_DEVICE_ENABLED, &trans->status)) { - ret = iwl_pci_fw_enter_d0i3(trans); - if (ret < 0) - return ret; - } - - trans->system_pm_mode = IWL_PLAT_PM_MODE_D0I3; - - iwl_trans_d3_suspend(trans, false, false); - - return 0; -} - -static int iwl_pci_runtime_resume(struct device *device) -{ - struct pci_dev *pdev = to_pci_dev(device); - struct iwl_trans *trans = pci_get_drvdata(pdev); - enum iwl_d3_status d3_status; - - IWL_DEBUG_RPM(trans, "exiting runtime suspend (resume)\n"); - - iwl_trans_d3_resume(trans, &d3_status, false, false); - - if (test_bit(STATUS_DEVICE_ENABLED, &trans->status)) - return iwl_pci_fw_exit_d0i3(trans); - - return 0; -} - -static int iwl_pci_system_prepare(struct device *device) -{ - struct pci_dev *pdev = to_pci_dev(device); - struct iwl_trans *trans = pci_get_drvdata(pdev); - - IWL_DEBUG_RPM(trans, "preparing for system suspend\n"); - - /* This is called before entering system suspend and before - * the runtime resume is called. Set the suspending flag to - * prevent the wakelock from being taken. - */ - trans->suspending = true; - - /* Wake the device up from runtime suspend before going to - * platform suspend. This is needed because we don't know - * whether wowlan any is set and, if it's not, mac80211 will - * disconnect (in which case, we can't be in D0i3). 
- */ - pm_runtime_resume(device); - - return 0; -} - -static void iwl_pci_system_complete(struct device *device) -{ - struct pci_dev *pdev = to_pci_dev(device); - struct iwl_trans *trans = pci_get_drvdata(pdev); - - IWL_DEBUG_RPM(trans, "completing system suspend\n"); - - /* This is called as a counterpart to the prepare op. It is - * called either when suspending fails or when suspend - * completed successfully. Now there's no risk of grabbing - * the wakelock anymore, so we can release the suspending - * flag. - */ - trans->suspending = false; -} -#endif /* CONFIG_IWLWIFI_PCIE_RTPM */ - static const struct dev_pm_ops iwl_dev_pm_ops = { SET_SYSTEM_SLEEP_PM_OPS(iwl_pci_suspend, iwl_pci_resume) -#ifdef CONFIG_IWLWIFI_PCIE_RTPM - SET_RUNTIME_PM_OPS(iwl_pci_runtime_suspend, - iwl_pci_runtime_resume, - NULL) - .prepare = iwl_pci_system_prepare, - .complete = iwl_pci_system_complete, -#endif /* CONFIG_IWLWIFI_PCIE_RTPM */ };
#define IWL_PM_OPS (&iwl_dev_pm_ops) diff --combined drivers/net/wireless/mediatek/mt76/mt76x0/pci.c index f84a7df296ea,6117e6ca08cb..7705e55aa3d1 --- a/drivers/net/wireless/mediatek/mt76/mt76x0/pci.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x0/pci.c @@@ -1,6 -1,17 +1,6 @@@ +// SPDX-License-Identifier: ISC /* * Copyright (C) 2016 Felix Fietkau nbd@nbd.name - * - * Permission to use, copy, modify, and/or distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */
#include <linux/kernel.h> @@@ -51,6 -62,19 +51,19 @@@ static void mt76x0e_stop(struct ieee802 mt76x0e_stop_hw(dev); }
+ static int + mt76x0e_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, + struct ieee80211_vif *vif, struct ieee80211_sta *sta, + struct ieee80211_key_conf *key) + { + struct mt76x02_dev *dev = hw->priv; + + if (is_mt7630(dev)) + return -EOPNOTSUPP; + + return mt76x02_set_key(hw, cmd, vif, sta, key); + } + static void mt76x0e_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif, u32 queues, bool drop) @@@ -67,9 -91,9 +80,9 @@@ static const struct ieee80211_ops mt76x .configure_filter = mt76x02_configure_filter, .bss_info_changed = mt76x02_bss_info_changed, .sta_state = mt76_sta_state, - .set_key = mt76x02_set_key, + .set_key = mt76x0e_set_key, .conf_tx = mt76x02_conf_tx, - .sw_scan_start = mt76x02_sw_scan, + .sw_scan_start = mt76_sw_scan, .sw_scan_complete = mt76x02_sw_scan_complete, .ampdu_action = mt76x02_ampdu_action, .sta_rate_tbl_update = mt76x02_sta_rate_tbl_update, diff --combined kernel/bpf/verifier.c index 3fb50757e812,c36a719fee6d..315798037d6c --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@@ -1772,16 -1772,21 +1772,21 @@@ static int __mark_chain_precision(struc bitmap_from_u64(mask, stack_mask); for_each_set_bit(i, mask, 64) { if (i >= func->allocated_stack / BPF_REG_SIZE) { - /* This can happen if backtracking - * is propagating stack precision where - * caller has larger stack frame - * than callee, but backtrack_insn() should - * have returned -ENOTSUPP. + /* the sequence of instructions: + * 2: (bf) r3 = r10 + * 3: (7b) *(u64 *)(r3 -8) = r0 + * 4: (79) r4 = *(u64 *)(r10 -8) + * doesn't contain jmps. It's backtracked + * as a single block. + * During backtracking insn 3 is not recognized as + * stack access, so at the end of backtracking + * stack slot fp-8 is still marked in stack_mask. + * However the parent state may not have accessed + * fp-8 and it's "unallocated" stack space. + * In such case fallback to conservative. */ - verbose(env, "BUG spi %d stack_size %d\n", - i, func->allocated_stack); - WARN_ONCE(1, "verifier backtracking bug"); - return -EFAULT; + mark_all_scalars_precise(env, st); + return 0; }
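The verifier hunk just above drops the "verifier backtracking bug" warning and instead falls back to mark_all_scalars_precise() when backtracking lands on a stack slot the current frame never allocated. A small self-contained sketch of that conservative fallback is below; the types, the bitmask walk, and the all_precise flag are hypothetical stand-ins, not the verifier's own structures.

/* Illustrative sketch only: conservative fallback during backtracking. */
#include <stdint.h>
#include <stdio.h>

#define SLOT_SIZE 8

struct frame {
    int allocated_stack;   /* bytes */
    int all_precise;       /* conservative flag */
};

static void propagate_precision(struct frame *f, uint64_t stack_mask)
{
    for (int i = 0; i < 64; i++) {
        if (!(stack_mask & (1ULL << i)))
            continue;
        if (i >= f->allocated_stack / SLOT_SIZE) {
            /* slot never allocated in this frame: give up tracking */
            f->all_precise = 1;
            return;
        }
        printf("slot %d marked precise\n", i);
    }
}

int main(void)
{
    struct frame f = { .allocated_stack = 8, .all_precise = 0 };

    propagate_precision(&f, 1ULL << 1);   /* slot beyond the 8-byte frame */
    printf("all_precise=%d\n", f.all_precise);
    return 0;
}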
if (func->stack[i].slot_type[0] != STACK_SPILL) { @@@ -3458,7 -3463,6 +3463,7 @@@ static int check_map_func_compatibility goto error; break; case BPF_MAP_TYPE_DEVMAP: + case BPF_MAP_TYPE_DEVMAP_HASH: if (func_id != BPF_FUNC_redirect_map && func_id != BPF_FUNC_map_lookup_elem) goto error; @@@ -3541,7 -3545,6 +3546,7 @@@ break; case BPF_FUNC_redirect_map: if (map->map_type != BPF_MAP_TYPE_DEVMAP && + map->map_type != BPF_MAP_TYPE_DEVMAP_HASH && map->map_type != BPF_MAP_TYPE_CPUMAP && map->map_type != BPF_MAP_TYPE_XSKMAP) goto error; @@@ -7223,7 -7226,7 +7228,7 @@@ static int is_state_visited(struct bpf_ struct bpf_verifier_state_list *sl, **pprev; struct bpf_verifier_state *cur = env->cur_state, *new; int i, j, err, states_cnt = 0; - bool add_new_state = false; + bool add_new_state = env->test_state_freq ? true : false;
cur->last_insn_idx = env->prev_insn_idx; if (!env->insn_aux_data[insn_idx].prune_point) @@@ -9263,9 -9266,6 +9268,9 @@@ int bpf_check(struct bpf_prog **prog, u
env->allow_ptr_leaks = is_priv;
+ if (is_priv) + env->test_state_freq = attr->prog_flags & BPF_F_TEST_STATE_FREQ; + ret = replace_map_fd_with_map_ptr(env); if (ret < 0) goto skip_full_check; diff --combined net/bridge/br_mdb.c index 44594635a972,63f9c08625f0..da5ed4cf9233 --- a/net/bridge/br_mdb.c +++ b/net/bridge/br_mdb.c @@@ -60,8 -60,6 +60,8 @@@ static void __mdb_entry_fill_flags(stru e->flags = 0; if (flags & MDB_PG_FLAGS_OFFLOAD) e->flags |= MDB_FLAGS_OFFLOAD; + if (flags & MDB_PG_FLAGS_FAST_LEAVE) + e->flags |= MDB_FLAGS_FAST_LEAVE; }
static void __mdb_entry_to_br_ip(struct br_mdb_entry *entry, struct br_ip *ip) @@@ -77,53 -75,6 +77,53 @@@ #endif }
+static int __mdb_fill_info(struct sk_buff *skb, + struct net_bridge_mdb_entry *mp, + struct net_bridge_port_group *p) +{ + struct timer_list *mtimer; + struct nlattr *nest_ent; + struct br_mdb_entry e; + u8 flags = 0; + int ifindex; + + memset(&e, 0, sizeof(e)); + if (p) { + ifindex = p->port->dev->ifindex; + mtimer = &p->timer; + flags = p->flags; + } else { + ifindex = mp->br->dev->ifindex; + mtimer = &mp->timer; + } + + __mdb_entry_fill_flags(&e, flags); + e.ifindex = ifindex; + e.vid = mp->addr.vid; + if (mp->addr.proto == htons(ETH_P_IP)) + e.addr.u.ip4 = mp->addr.u.ip4; +#if IS_ENABLED(CONFIG_IPV6) + if (mp->addr.proto == htons(ETH_P_IPV6)) + e.addr.u.ip6 = mp->addr.u.ip6; +#endif + e.addr.proto = mp->addr.proto; + nest_ent = nla_nest_start_noflag(skb, + MDBA_MDB_ENTRY_INFO); + if (!nest_ent) + return -EMSGSIZE; + + if (nla_put_nohdr(skb, sizeof(e), &e) || + nla_put_u32(skb, + MDBA_MDB_EATTR_TIMER, + br_timer_value(mtimer))) { + nla_nest_cancel(skb, nest_ent); + return -EMSGSIZE; + } + nla_nest_end(skb, nest_ent); + + return 0; +} + static int br_mdb_fill_info(struct sk_buff *skb, struct netlink_callback *cb, struct net_device *dev) { @@@ -142,6 -93,7 +142,6 @@@ hlist_for_each_entry_rcu(mp, &br->mdb_list, mdb_node) { struct net_bridge_port_group *p; struct net_bridge_port_group __rcu **pp; - struct net_bridge_port *port;
if (idx < s_idx) goto skip; @@@ -152,24 -104,43 +152,24 @@@ break; }
+ if (mp->host_joined) { + err = __mdb_fill_info(skb, mp, NULL); + if (err) { + nla_nest_cancel(skb, nest2); + break; + } + } + for (pp = &mp->ports; (p = rcu_dereference(*pp)) != NULL; pp = &p->next) { - struct nlattr *nest_ent; - struct br_mdb_entry e; - - port = p->port; - if (!port) + if (!p->port) continue;
- memset(&e, 0, sizeof(e)); - e.ifindex = port->dev->ifindex; - e.vid = p->addr.vid; - __mdb_entry_fill_flags(&e, p->flags); - if (p->addr.proto == htons(ETH_P_IP)) - e.addr.u.ip4 = p->addr.u.ip4; -#if IS_ENABLED(CONFIG_IPV6) - if (p->addr.proto == htons(ETH_P_IPV6)) - e.addr.u.ip6 = p->addr.u.ip6; -#endif - e.addr.proto = p->addr.proto; - nest_ent = nla_nest_start_noflag(skb, - MDBA_MDB_ENTRY_INFO); - if (!nest_ent) { - nla_nest_cancel(skb, nest2); - err = -EMSGSIZE; - goto out; - } - if (nla_put_nohdr(skb, sizeof(e), &e) || - nla_put_u32(skb, - MDBA_MDB_EATTR_TIMER, - br_timer_value(&p->timer))) { - nla_nest_cancel(skb, nest_ent); + err = __mdb_fill_info(skb, mp, p); + if (err) { nla_nest_cancel(skb, nest2); - err = -EMSGSIZE; goto out; } - nla_nest_end(skb, nest_ent); } nla_nest_end(skb, nest2); skip: @@@ -466,7 -437,7 +466,7 @@@ static int nlmsg_populate_rtr_fill(stru struct nlmsghdr *nlh; struct nlattr *nest;
- nlh = nlmsg_put(skb, pid, seq, type, sizeof(*bpm), NLM_F_MULTI); + nlh = nlmsg_put(skb, pid, seq, type, sizeof(*bpm), 0); if (!nlh) return -EMSGSIZE;
@@@ -616,19 -587,6 +616,19 @@@ static int br_mdb_add_group(struct net_ return err; }
+ /* host join */ + if (!port) { + /* don't allow any flags for host-joined groups */ + if (state) + return -EINVAL; + if (mp->host_joined) + return -EEXIST; + + br_multicast_host_join(mp, false); + + return 0; + } + for (pp = &mp->ports; (p = mlock_dereference(*pp, br)) != NULL; pp = &p->next) { @@@ -653,21 -611,19 +653,21 @@@ static int __br_mdb_add(struct net *net { struct br_ip ip; struct net_device *dev; - struct net_bridge_port *p; + struct net_bridge_port *p = NULL; int ret;
if (!netif_running(br->dev) || !br_opt_get(br, BROPT_MULTICAST_ENABLED)) return -EINVAL;
- dev = __dev_get_by_index(net, entry->ifindex); - if (!dev) - return -ENODEV; + if (entry->ifindex != br->dev->ifindex) { + dev = __dev_get_by_index(net, entry->ifindex); + if (!dev) + return -ENODEV;
- p = br_port_get_rtnl(dev); - if (!p || p->br != br || p->state == BR_STATE_DISABLED) - return -EINVAL; + p = br_port_get_rtnl(dev); + if (!p || p->br != br || p->state == BR_STATE_DISABLED) + return -EINVAL; + }
__mdb_entry_to_br_ip(entry, &ip);
@@@ -682,9 -638,9 +682,9 @@@ static int br_mdb_add(struct sk_buff *s { struct net *net = sock_net(skb->sk); struct net_bridge_vlan_group *vg; + struct net_bridge_port *p = NULL; struct net_device *dev, *pdev; struct br_mdb_entry *entry; - struct net_bridge_port *p; struct net_bridge_vlan *v; struct net_bridge *br; int err; @@@ -695,22 -651,18 +695,22 @@@
br = netdev_priv(dev);
+ if (entry->ifindex != br->dev->ifindex) { + pdev = __dev_get_by_index(net, entry->ifindex); + if (!pdev) + return -ENODEV; + + p = br_port_get_rtnl(pdev); + if (!p || p->br != br || p->state == BR_STATE_DISABLED) + return -EINVAL; + vg = nbp_vlan_group(p); + } else { + vg = br_vlan_group(br); + } + /* If vlan filtering is enabled and VLAN is not specified * install mdb entry on all vlans configured on the port. */ - pdev = __dev_get_by_index(net, entry->ifindex); - if (!pdev) - return -ENODEV; - - p = br_port_get_rtnl(pdev); - if (!p || p->br != br || p->state == BR_STATE_DISABLED) - return -EINVAL; - - vg = nbp_vlan_group(p); if (br_vlan_enabled(br->dev) && vg && entry->vid == 0) { list_for_each_entry(v, &vg->vlan_list, vlist) { entry->vid = v->vid; @@@ -746,15 -698,6 +746,15 @@@ static int __br_mdb_del(struct net_brid if (!mp) goto unlock;
+ /* host leave */ + if (entry->ifindex == mp->br->dev->ifindex && mp->host_joined) { + br_multicast_host_leave(mp, false); + err = 0; + if (!mp->ports && netif_running(br->dev)) + mod_timer(&mp->timer, jiffies); + goto unlock; + } + for (pp = &mp->ports; (p = mlock_dereference(*pp, br)) != NULL; pp = &p->next) { @@@ -787,9 -730,9 +787,9 @@@ static int br_mdb_del(struct sk_buff *s { struct net *net = sock_net(skb->sk); struct net_bridge_vlan_group *vg; + struct net_bridge_port *p = NULL; struct net_device *dev, *pdev; struct br_mdb_entry *entry; - struct net_bridge_port *p; struct net_bridge_vlan *v; struct net_bridge *br; int err; @@@ -800,22 -743,18 +800,22 @@@
br = netdev_priv(dev);
+ if (entry->ifindex != br->dev->ifindex) { + pdev = __dev_get_by_index(net, entry->ifindex); + if (!pdev) + return -ENODEV; + + p = br_port_get_rtnl(pdev); + if (!p || p->br != br || p->state == BR_STATE_DISABLED) + return -EINVAL; + vg = nbp_vlan_group(p); + } else { + vg = br_vlan_group(br); + } + /* If vlan filtering is enabled and VLAN is not specified * delete mdb entry on all vlans configured on the port. */ - pdev = __dev_get_by_index(net, entry->ifindex); - if (!pdev) - return -ENODEV; - - p = br_port_get_rtnl(pdev); - if (!p || p->br != br || p->state == BR_STATE_DISABLED) - return -EINVAL; - - vg = nbp_vlan_group(p); if (br_vlan_enabled(br->dev) && vg && entry->vid == 0) { list_for_each_entry(v, &vg->vlan_list, vlist) { entry->vid = v->vid; diff --combined net/core/dev.c index b1afafee3e2a,5156c0edebe8..a9775d676285 --- a/net/core/dev.c +++ b/net/core/dev.c @@@ -3963,8 -3963,6 +3963,8 @@@ int dev_weight_rx_bias __read_mostly = int dev_weight_tx_bias __read_mostly = 1; /* bias for output_queue quota */ int dev_rx_weight __read_mostly = 64; int dev_tx_weight __read_mostly = 64; +/* Maximum number of GRO_NORMAL skbs to batch up for list-RX */ +int gro_normal_batch __read_mostly = 8;
/* Called with irq disabled */ static inline void ____napi_schedule(struct softnet_data *sd, @@@ -5488,7 -5486,7 +5488,7 @@@ static void gro_pull_from_frag0(struct skb->data_len -= grow; skb->tail += grow;
- pinfo->frags[0].page_offset += grow; + skb_frag_off_add(&pinfo->frags[0], grow); skb_frag_size_sub(&pinfo->frags[0], grow);
if (unlikely(!skb_frag_size(&pinfo->frags[0]))) { @@@ -5749,26 -5747,6 +5749,26 @@@ struct sk_buff *napi_get_frags(struct n } EXPORT_SYMBOL(napi_get_frags);
+/* Pass the currently batched GRO_NORMAL SKBs up to the stack. */ +static void gro_normal_list(struct napi_struct *napi) +{ + if (!napi->rx_count) + return; + netif_receive_skb_list_internal(&napi->rx_list); + INIT_LIST_HEAD(&napi->rx_list); + napi->rx_count = 0; +} + +/* Queue one GRO_NORMAL SKB up for list processing. If batch size exceeded, + * pass the whole batch up to the stack. + */ +static void gro_normal_one(struct napi_struct *napi, struct sk_buff *skb) +{ + list_add_tail(&skb->list, &napi->rx_list); + if (++napi->rx_count >= gro_normal_batch) + gro_normal_list(napi); +} + static gro_result_t napi_frags_finish(struct napi_struct *napi, struct sk_buff *skb, gro_result_t ret) @@@ -5778,8 -5756,8 +5778,8 @@@ case GRO_HELD: __skb_push(skb, ETH_HLEN); skb->protocol = eth_type_trans(skb, skb->dev); - if (ret == GRO_NORMAL && netif_receive_skb_internal(skb)) - ret = GRO_DROP; + if (ret == GRO_NORMAL) + gro_normal_one(napi, skb); break;
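gro_normal_one() above appends each GRO_NORMAL skb to napi->rx_list and flushes the whole list to the stack once rx_count reaches gro_normal_batch (default 8 in the earlier net/core/dev.c hunk). A generic, self-contained sketch of the same batch-and-flush pattern follows; the item type and deliver() callback are hypothetical stand-ins for napi->rx_list and netif_receive_skb_list_internal().

/* Illustrative sketch only: generic batch-and-flush list processing. */
#include <stdio.h>
#include <stdlib.h>

struct item {
    int id;
    struct item *next;
};

struct batcher {
    struct item *head, **tail;
    int count;
    int batch;                    /* flush threshold, cf. gro_normal_batch */
};

static void deliver(struct item *head)
{
    for (struct item *it = head, *n; it; it = n) {
        n = it->next;
        printf("delivering item %d\n", it->id);
        free(it);
    }
}

static void batch_flush(struct batcher *b)
{
    if (!b->count)
        return;
    deliver(b->head);
    b->head = NULL;
    b->tail = &b->head;
    b->count = 0;
}

static void batch_one(struct batcher *b, struct item *it)
{
    it->next = NULL;
    *b->tail = it;
    b->tail = &it->next;
    if (++b->count >= b->batch)
        batch_flush(b);
}

int main(void)
{
    struct batcher b = { .tail = &b.head, .batch = 8 };

    for (int i = 0; i < 20; i++) {
        struct item *it = malloc(sizeof(*it));

        if (!it)
            break;
        it->id = i;
        batch_one(&b, it);
    }
    batch_flush(&b);              /* final flush, cf. napi_complete_done() */
    return 0;
}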
case GRO_DROP: @@@ -6056,8 -6034,6 +6056,8 @@@ bool napi_complete_done(struct napi_str NAPIF_STATE_IN_BUSY_POLL))) return false;
+ gro_normal_list(n); + if (n->gro_bitmask) { unsigned long timeout = 0;
@@@ -6143,19 -6119,10 +6143,19 @@@ static void busy_poll_stop(struct napi_ * Ideally, a new ndo_busy_poll_stop() could avoid another round. */ rc = napi->poll(napi, BUSY_POLL_BUDGET); + /* We can't gro_normal_list() here, because napi->poll() might have + * rearmed the napi (napi_complete_done()) in which case it could + * already be running on another CPU. + */ trace_napi_poll(napi, rc, BUSY_POLL_BUDGET); netpoll_poll_unlock(have_poll_lock); - if (rc == BUSY_POLL_BUDGET) + if (rc == BUSY_POLL_BUDGET) { + /* As the whole budget was spent, we still own the napi so can + * safely handle the rx_list. + */ + gro_normal_list(napi); __napi_schedule(napi); + } local_bh_enable(); }
@@@ -6200,7 -6167,6 +6200,7 @@@ restart } work = napi_poll(napi, BUSY_POLL_BUDGET); trace_napi_poll(napi, work, BUSY_POLL_BUDGET); + gro_normal_list(napi); count: if (work > 0) __NET_ADD_STATS(dev_net(napi->dev), @@@ -6306,8 -6272,6 +6306,8 @@@ void netif_napi_add(struct net_device * napi->timer.function = napi_watchdog; init_gro_hash(napi); napi->skb = NULL; + INIT_LIST_HEAD(&napi->rx_list); + napi->rx_count = 0; napi->poll = poll; if (weight > NAPI_POLL_WEIGHT) netdev_err_once(dev, "%s() called with weight %d\n", __func__, @@@ -6404,8 -6368,6 +6404,8 @@@ static int napi_poll(struct napi_struc goto out_unlock; }
+ gro_normal_list(n); + if (n->gro_bitmask) { /* flush too old packets * If HZ < 1000, flush all packets. @@@ -8126,15 -8088,12 +8126,15 @@@ int dev_change_xdp_fd(struct net_devic bpf_chk = generic_xdp_install;
if (fd >= 0) { + u32 prog_id; + if (!offload && __dev_xdp_query(dev, bpf_chk, XDP_QUERY_PROG)) { NL_SET_ERR_MSG(extack, "native and generic XDP can't be active at the same time"); return -EEXIST; } - if ((flags & XDP_FLAGS_UPDATE_IF_NOEXIST) && - __dev_xdp_query(dev, bpf_op, query)) { + + prog_id = __dev_xdp_query(dev, bpf_op, query); + if ((flags & XDP_FLAGS_UPDATE_IF_NOEXIST) && prog_id) { NL_SET_ERR_MSG(extack, "XDP program already attached"); return -EBUSY; } @@@ -8149,14 -8108,6 +8149,14 @@@ bpf_prog_put(prog); return -EINVAL; } + + if (prog->aux->id == prog_id) { + bpf_prog_put(prog); + return 0; + } + } else { + if (!__dev_xdp_query(dev, bpf_op, query)) + return 0; }
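The dev_change_xdp_fd() hunk above makes attach and detach idempotent: re-attaching the program that is already installed (matched by prog->aux->id against the queried id) and detaching when nothing is installed both return 0 without reinstalling. A trivial sketch of that check with hypothetical types:

/* Illustrative sketch only: idempotent attach/detach. */
#include <stdio.h>

struct prog { unsigned int id; };

static unsigned int installed_id;      /* 0 == nothing attached */

static int change_prog(const struct prog *new_prog)
{
    if (new_prog) {
        if (new_prog->id == installed_id)
            return 0;                  /* same program: nothing to do */
        installed_id = new_prog->id;
    } else {
        if (!installed_id)
            return 0;                  /* already detached: nothing to do */
        installed_id = 0;
    }
    printf("state changed, now id=%u\n", installed_id);
    return 0;
}

int main(void)
{
    struct prog p = { .id = 42 };

    change_prog(&p);        /* installs */
    change_prog(&p);        /* no-op    */
    change_prog(NULL);      /* detaches */
    change_prog(NULL);      /* no-op    */
    return 0;
}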
err = dev_xdp_install(dev, bpf_op, extack, flags, prog); @@@ -8807,6 -8758,8 +8807,8 @@@ int register_netdevice(struct net_devic ret = notifier_to_errno(ret); if (ret) { rollback_registered(dev); + rcu_barrier(); + dev->reg_state = NETREG_UNREGISTERED; } /* diff --combined net/core/skbuff.c index 2b40b5a9425b,982d8d12830e..f12e8a050edb --- a/net/core/skbuff.c +++ b/net/core/skbuff.c @@@ -785,7 -785,7 +785,7 @@@ void skb_dump(const char *level, const struct page *p; u8 *vaddr;
- skb_frag_foreach_page(frag, frag->page_offset, + skb_frag_foreach_page(frag, skb_frag_off(frag), skb_frag_size(frag), p, p_off, p_len, copied) { seg_len = min_t(int, p_len, len); @@@ -1375,7 -1375,7 +1375,7 @@@ int skb_copy_ubufs(struct sk_buff *skb struct page *p; u8 *vaddr;
- skb_frag_foreach_page(f, f->page_offset, skb_frag_size(f), + skb_frag_foreach_page(f, skb_frag_off(f), skb_frag_size(f), p, p_off, p_len, copied) { u32 copy, done = 0; vaddr = kmap_atomic(p); @@@ -2144,12 -2144,10 +2144,12 @@@ pull_pages skb_frag_unref(skb, i); eat -= size; } else { - skb_shinfo(skb)->frags[k] = skb_shinfo(skb)->frags[i]; + skb_frag_t *frag = &skb_shinfo(skb)->frags[k]; + + *frag = skb_shinfo(skb)->frags[i]; if (eat) { - skb_shinfo(skb)->frags[k].page_offset += eat; - skb_frag_size_sub(&skb_shinfo(skb)->frags[k], eat); + skb_frag_off_add(frag, eat); + skb_frag_size_sub(frag, eat); if (!i) goto end; eat = 0; @@@ -2221,7 -2219,7 +2221,7 @@@ int skb_copy_bits(const struct sk_buff copy = len;
skb_frag_foreach_page(f, - f->page_offset + offset - start, + skb_frag_off(f) + offset - start, copy, p, p_off, p_len, copied) { vaddr = kmap_atomic(p); memcpy(to + copied, vaddr + p_off, p_len); @@@ -2397,7 -2395,7 +2397,7 @@@ static bool __skb_splice_bits(struct sk const skb_frag_t *f = &skb_shinfo(skb)->frags[seg];
if (__splice_segment(skb_frag_page(f), - f->page_offset, skb_frag_size(f), + skb_frag_off(f), skb_frag_size(f), offset, len, spd, false, sk, pipe)) return true; } @@@ -2487,20 -2485,20 +2487,20 @@@ do_frag_list for (fragidx = 0; fragidx < skb_shinfo(skb)->nr_frags; fragidx++) { skb_frag_t *frag = &skb_shinfo(skb)->frags[fragidx];
- if (offset < frag->size) + if (offset < skb_frag_size(frag)) break;
- offset -= frag->size; + offset -= skb_frag_size(frag); }
for (; len && fragidx < skb_shinfo(skb)->nr_frags; fragidx++) { skb_frag_t *frag = &skb_shinfo(skb)->frags[fragidx];
- slen = min_t(size_t, len, frag->size - offset); + slen = min_t(size_t, len, skb_frag_size(frag) - offset);
while (slen) { - ret = kernel_sendpage_locked(sk, frag->page.p, - frag->page_offset + offset, + ret = kernel_sendpage_locked(sk, skb_frag_page(frag), + skb_frag_off(frag) + offset, slen, MSG_DONTWAIT); if (ret <= 0) goto error; @@@ -2582,7 -2580,7 +2582,7 @@@ int skb_store_bits(struct sk_buff *skb copy = len;
skb_frag_foreach_page(frag, - frag->page_offset + offset - start, + skb_frag_off(frag) + offset - start, copy, p, p_off, p_len, copied) { vaddr = kmap_atomic(p); memcpy(vaddr + p_off, from + copied, p_len); @@@ -2662,7 -2660,7 +2662,7 @@@ __wsum __skb_checksum(const struct sk_b copy = len;
skb_frag_foreach_page(frag, - frag->page_offset + offset - start, + skb_frag_off(frag) + offset - start, copy, p, p_off, p_len, copied) { vaddr = kmap_atomic(p); csum2 = INDIRECT_CALL_1(ops->update, @@@ -2761,7 -2759,7 +2761,7 @@@ __wsum skb_copy_and_csum_bits(const str copy = len;
skb_frag_foreach_page(frag, - frag->page_offset + offset - start, + skb_frag_off(frag) + offset - start, copy, p, p_off, p_len, copied) { vaddr = kmap_atomic(p); csum2 = csum_partial_copy_nocheck(vaddr + p_off, @@@ -2977,15 -2975,11 +2977,15 @@@ skb_zerocopy(struct sk_buff *to, struc skb_zerocopy_clone(to, from, GFP_ATOMIC);
for (i = 0; i < skb_shinfo(from)->nr_frags; i++) { + int size; + if (!len) break; skb_shinfo(to)->frags[j] = skb_shinfo(from)->frags[i]; - skb_shinfo(to)->frags[j].size = min_t(int, skb_shinfo(to)->frags[j].size, len); - len -= skb_shinfo(to)->frags[j].size; + size = min_t(int, skb_frag_size(&skb_shinfo(to)->frags[j]), + len); + skb_frag_size_set(&skb_shinfo(to)->frags[j], size); + len -= size; skb_frag_ref(to, j); j++; } @@@ -3236,7 -3230,7 +3236,7 @@@ static inline void skb_split_no_header( * 2. Split is accurately. We make this. */ skb_frag_ref(skb, i); - skb_shinfo(skb1)->frags[0].page_offset += len - pos; + skb_frag_off_add(&skb_shinfo(skb1)->frags[0], len - pos); skb_frag_size_sub(&skb_shinfo(skb1)->frags[0], len - pos); skb_frag_size_set(&skb_shinfo(skb)->frags[i], len - pos); skb_shinfo(skb)->nr_frags++; @@@ -3299,7 -3293,7 +3299,7 @@@ static int skb_prepare_for_shift(struc int skb_shift(struct sk_buff *tgt, struct sk_buff *skb, int shiftlen) { int from, to, merge, todo; - struct skb_frag_struct *fragfrom, *fragto; + skb_frag_t *fragfrom, *fragto;
BUG_ON(shiftlen > skb->len);
@@@ -3318,7 -3312,7 +3318,7 @@@ */ if (!to || !skb_can_coalesce(tgt, to, skb_frag_page(fragfrom), - fragfrom->page_offset)) { + skb_frag_off(fragfrom))) { merge = -1; } else { merge = to - 1; @@@ -3335,7 -3329,7 +3335,7 @@@
skb_frag_size_add(fragto, shiftlen); skb_frag_size_sub(fragfrom, shiftlen); - fragfrom->page_offset += shiftlen; + skb_frag_off_add(fragfrom, shiftlen);
goto onlymerged; } @@@ -3366,11 -3360,11 +3366,11 @@@
} else { __skb_frag_ref(fragfrom); - fragto->page = fragfrom->page; - fragto->page_offset = fragfrom->page_offset; + skb_frag_page_copy(fragto, fragfrom); + skb_frag_off_copy(fragto, fragfrom); skb_frag_size_set(fragto, todo);
- fragfrom->page_offset += todo; + skb_frag_off_add(fragfrom, todo); skb_frag_size_sub(fragfrom, todo); todo = 0;
@@@ -3495,7 -3489,7 +3495,7 @@@ next_skb if (!st->frag_data) st->frag_data = kmap_atomic(skb_frag_page(frag));
- *data = (u8 *) st->frag_data + frag->page_offset + + *data = (u8 *) st->frag_data + skb_frag_off(frag) + (abs_offset - st->stepped_offset);
return block_limit - abs_offset; @@@ -3631,10 -3625,10 +3631,10 @@@ static inline skb_frag_t skb_head_frag_ struct page *page;
page = virt_to_head_page(frag_skb->head); - head_frag.page.p = page; - head_frag.page_offset = frag_skb->data - - (unsigned char *)page_address(page); - head_frag.size = skb_headlen(frag_skb); + __skb_frag_set_page(&head_frag, page); + skb_frag_off_set(&head_frag, frag_skb->data - + (unsigned char *)page_address(page)); + skb_frag_size_set(&head_frag, skb_headlen(frag_skb)); return head_frag; }
@@@ -3670,6 -3664,25 +3670,25 @@@ struct sk_buff *skb_segment(struct sk_b int pos; int dummy;
+ if (list_skb && !list_skb->head_frag && skb_headlen(list_skb) && + (skb_shinfo(head_skb)->gso_type & SKB_GSO_DODGY)) { + /* gso_size is untrusted, and we have a frag_list with a linear + * non head_frag head. + * + * (we assume checking the first list_skb member suffices; + * i.e if either of the list_skb members have non head_frag + * head, then the first one has too). + * + * If head_skb's headlen does not fit requested gso_size, it + * means that the frag_list members do NOT terminate on exact + * gso_size boundaries. Hence we cannot perform skb_frag_t page + * sharing. Therefore we must fallback to copying the frag_list + * skbs; we do so by disabling SG. + */ + if (mss != GSO_BY_FRAGS && mss != skb_headlen(head_skb)) + features &= ~NETIF_F_SG; + } + __skb_push(head_skb, doffset); proto = skb_network_protocol(head_skb, &dummy); if (unlikely(!proto)) @@@ -3877,7 -3890,7 +3896,7 @@@ normal size = skb_frag_size(nskb_frag);
if (pos < offset) { - nskb_frag->page_offset += offset - pos; + skb_frag_off_add(nskb_frag, offset - pos); skb_frag_size_sub(nskb_frag, offset - pos); }
@@@ -3998,7 -4011,7 +4017,7 @@@ int skb_gro_receive(struct sk_buff *p, *--frag = *--frag2; } while (--i);
- frag->page_offset += offset; + skb_frag_off_add(frag, offset); skb_frag_size_sub(frag, offset);
/* all fragments truesize : remove (head size + sk_buff) */ @@@ -4027,8 -4040,8 +4046,8 @@@
pinfo->nr_frags = nr_frags + 1 + skbinfo->nr_frags;
- frag->page.p = page; - frag->page_offset = first_offset; + __skb_frag_set_page(frag, page); + skb_frag_off_set(frag, first_offset); skb_frag_size_set(frag, first_size);
memcpy(frag + 1, skbinfo->frags, sizeof(*frag) * skbinfo->nr_frags); @@@ -4044,7 -4057,7 +4063,7 @@@ merge if (offset > headlen) { unsigned int eat = offset - headlen;
- skbinfo->frags[0].page_offset += eat; + skb_frag_off_add(&skbinfo->frags[0], eat); skb_frag_size_sub(&skbinfo->frags[0], eat); skb->data_len -= eat; skb->len -= eat; @@@ -4087,9 -4100,6 +4106,9 @@@ static const u8 skb_ext_type_len[] = #ifdef CONFIG_XFRM [SKB_EXT_SEC_PATH] = SKB_EXT_CHUNKSIZEOF(struct sec_path), #endif +#if IS_ENABLED(CONFIG_NET_TC_SKB_EXT) + [TC_SKB_EXT] = SKB_EXT_CHUNKSIZEOF(struct tc_skb_ext), +#endif };
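The skbuff.c conversions running through the hunks above replace direct frag->page_offset and frag->size accesses with the skb_frag_off()/skb_frag_off_add()/skb_frag_size() helpers, so call sites no longer depend on the skb_frag_t layout. A tiny sketch of that accessor pattern on a hypothetical fragment type:

/* Illustrative sketch only: accessors hide the struct layout. */
#include <stdio.h>

struct frag { unsigned int off; unsigned int size; };

static unsigned int frag_off(const struct frag *f)        { return f->off; }
static void frag_off_add(struct frag *f, int delta)       { f->off += delta; }
static unsigned int frag_size(const struct frag *f)       { return f->size; }
static void frag_size_sub(struct frag *f, unsigned int d) { f->size -= d; }

int main(void)
{
    struct frag f = { .off = 0, .size = 1500 };

    frag_off_add(&f, 64);          /* instead of f.off += 64 */
    frag_size_sub(&f, 64);
    printf("off=%u size=%u\n", frag_off(&f), frag_size(&f));
    return 0;
}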
static __always_inline unsigned int skb_ext_total_length(void) @@@ -4100,9 -4110,6 +4119,9 @@@ #endif #ifdef CONFIG_XFRM skb_ext_type_len[SKB_EXT_SEC_PATH] + +#endif +#if IS_ENABLED(CONFIG_NET_TC_SKB_EXT) + skb_ext_type_len[TC_SKB_EXT] + #endif 0; } @@@ -4175,7 -4182,7 +4194,7 @@@ __skb_to_sgvec(struct sk_buff *skb, str if (copy > len) copy = len; sg_set_page(&sg[elt], skb_frag_page(frag), copy, - frag->page_offset+offset-start); + skb_frag_off(frag) + offset - start); elt++; if (!(len -= copy)) return elt; @@@ -5846,7 -5853,7 +5865,7 @@@ static int pskb_carve_inside_nonlinear( * where splitting is expensive. * 2. Split is accurately. We make this. */ - shinfo->frags[0].page_offset += off - pos; + skb_frag_off_add(&shinfo->frags[0], off - pos); skb_frag_size_sub(&shinfo->frags[0], off - pos); } skb_frag_ref(skb, i); diff --combined net/core/sock_map.c index 01998860afaa,50916f9bc4f2..eb114ee419b6 --- a/net/core/sock_map.c +++ b/net/core/sock_map.c @@@ -345,7 -345,7 +345,7 @@@ static int sock_map_update_common(struc return -EINVAL; if (unlikely(idx >= map->max_entries)) return -E2BIG; - if (unlikely(icsk->icsk_ulp_data)) + if (unlikely(rcu_access_pointer(icsk->icsk_ulp_data))) return -EINVAL;
link = sk_psock_init_link(); @@@ -656,6 -656,7 +656,7 @@@ static int sock_hash_update_common(stru struct sock *sk, u64 flags) { struct bpf_htab *htab = container_of(map, struct bpf_htab, map); + struct inet_connection_sock *icsk = inet_csk(sk); u32 key_size = map->key_size, hash; struct bpf_htab_elem *elem, *elem_new; struct bpf_htab_bucket *bucket; @@@ -666,6 -667,8 +667,8 @@@ WARN_ON_ONCE(!rcu_read_lock_held()); if (unlikely(flags > BPF_EXIST)) return -EINVAL; + if (unlikely(icsk->icsk_ulp_data)) + return -EINVAL;
link = sk_psock_init_link(); if (!link) diff --combined net/ipv4/tcp_input.c index 706cbb3b2986,8a1cd93dbb09..7e94223fdb2b --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@@ -266,7 -266,7 +266,7 @@@ static void tcp_ecn_accept_cwr(struct s
static void tcp_ecn_withdraw_cwr(struct tcp_sock *tp) { - tp->ecn_flags &= ~TCP_ECN_DEMAND_CWR; + tp->ecn_flags &= ~TCP_ECN_QUEUE_CWR; }
static void __tcp_ecn_check_ce(struct sock *sk, const struct sk_buff *skb) @@@ -3782,49 -3782,6 +3782,49 @@@ static void smc_parse_options(const str #endif }
+/* Try to parse the MSS option from the TCP header. Return 0 on failure, clamped + * value on success. + */ +static u16 tcp_parse_mss_option(const struct tcphdr *th, u16 user_mss) +{ + const unsigned char *ptr = (const unsigned char *)(th + 1); + int length = (th->doff * 4) - sizeof(struct tcphdr); + u16 mss = 0; + + while (length > 0) { + int opcode = *ptr++; + int opsize; + + switch (opcode) { + case TCPOPT_EOL: + return mss; + case TCPOPT_NOP: /* Ref: RFC 793 section 3.1 */ + length--; + continue; + default: + if (length < 2) + return mss; + opsize = *ptr++; + if (opsize < 2) /* "silly options" */ + return mss; + if (opsize > length) + return mss; /* fail on partial options */ + if (opcode == TCPOPT_MSS && opsize == TCPOLEN_MSS) { + u16 in_mss = get_unaligned_be16(ptr); + + if (in_mss) { + if (user_mss && user_mss < in_mss) + in_mss = user_mss; + mss = in_mss; + } + } + ptr += opsize - 2; + length -= opsize; + } + } + return mss; +} + /* Look for tcp options. Normally only called on SYN and SYNACK packets. * But, this can also be called on packets in the established flow when * the fast version below fails. @@@ -6465,7 -6422,9 +6465,7 @@@ EXPORT_SYMBOL(inet_reqsk_alloc) /* * Return true if a syncookie should be sent */ -static bool tcp_syn_flood_action(const struct sock *sk, - const struct sk_buff *skb, - const char *proto) +static bool tcp_syn_flood_action(const struct sock *sk, const char *proto) { struct request_sock_queue *queue = &inet_csk(sk)->icsk_accept_queue; const char *msg = "Dropping request"; @@@ -6485,7 -6444,7 +6485,7 @@@ net->ipv4.sysctl_tcp_syncookies != 2 && xchg(&queue->synflood_warned, 1) == 0) net_info_ratelimited("%s: Possible SYN flooding on port %d. %s. Check SNMP counters.\n", - proto, ntohs(tcp_hdr(skb)->dest), msg); + proto, sk->sk_num, msg);
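The new tcp_parse_mss_option() above walks the TCP option list, skipping NOPs, stopping at EOL or malformed lengths, and clamping the peer's MSS against user_mss. A userspace restatement of the same walk, run over a hand-built option buffer, is sketched below; the numeric constants are the standard TCP option kinds (EOL=0, NOP=1, MSS=2, MSS option length=4), everything else is hypothetical.

/* Illustrative sketch only: MSS option walk over a raw byte buffer. */
#include <stdint.h>
#include <stdio.h>

static uint16_t parse_mss(const uint8_t *ptr, int length, uint16_t user_mss)
{
    uint16_t mss = 0;

    while (length > 0) {
        int opcode = *ptr++;
        int opsize;

        switch (opcode) {
        case 0:                            /* EOL */
            return mss;
        case 1:                            /* NOP */
            length--;
            continue;
        default:
            if (length < 2)
                return mss;
            opsize = *ptr++;
            if (opsize < 2 || opsize > length)
                return mss;                /* silly or truncated option */
            if (opcode == 2 && opsize == 4) {
                uint16_t in_mss = (uint16_t)((ptr[0] << 8) | ptr[1]);

                if (in_mss) {
                    if (user_mss && user_mss < in_mss)
                        in_mss = user_mss; /* clamp to user_mss */
                    mss = in_mss;
                }
            }
            ptr += opsize - 2;
            length -= opsize;
        }
    }
    return mss;
}

int main(void)
{
    /* NOP, NOP, MSS=1460 */
    const uint8_t opts[] = { 1, 1, 2, 4, 0x05, 0xb4 };

    printf("mss=%u\n", parse_mss(opts, sizeof(opts), 0));   /* 1460 */
    printf("mss=%u\n", parse_mss(opts, sizeof(opts), 536)); /* clamped */
    return 0;
}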
return want_cookie; } @@@ -6507,36 -6466,6 +6507,36 @@@ static void tcp_reqsk_record_syn(const } }
+/* If a SYN cookie is required and supported, returns a clamped MSS value to be + * used for SYN cookie generation. + */ +u16 tcp_get_syncookie_mss(struct request_sock_ops *rsk_ops, + const struct tcp_request_sock_ops *af_ops, + struct sock *sk, struct tcphdr *th) +{ + struct tcp_sock *tp = tcp_sk(sk); + u16 mss; + + if (sock_net(sk)->ipv4.sysctl_tcp_syncookies != 2 && + !inet_csk_reqsk_queue_is_full(sk)) + return 0; + + if (!tcp_syn_flood_action(sk, rsk_ops->slab_name)) + return 0; + + if (sk_acceptq_is_full(sk)) { + NET_INC_STATS(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS); + return 0; + } + + mss = tcp_parse_mss_option(th, tp->rx_opt.user_mss); + if (!mss) + mss = af_ops->mss_clamp; + + return mss; +} +EXPORT_SYMBOL_GPL(tcp_get_syncookie_mss); + int tcp_conn_request(struct request_sock_ops *rsk_ops, const struct tcp_request_sock_ops *af_ops, struct sock *sk, struct sk_buff *skb) @@@ -6558,7 -6487,7 +6558,7 @@@ */ if ((net->ipv4.sysctl_tcp_syncookies == 2 || inet_csk_reqsk_queue_is_full(sk)) && !isn) { - want_cookie = tcp_syn_flood_action(sk, skb, rsk_ops->slab_name); + want_cookie = tcp_syn_flood_action(sk, rsk_ops->slab_name); if (!want_cookie) goto drop; } diff --combined net/ipv6/route.c index 874641d4d2a1,546088e50815..a63ff85fe141 --- a/net/ipv6/route.c +++ b/net/ipv6/route.c @@@ -227,7 -227,7 +227,7 @@@ static void ip6_confirm_neigh(const str struct net_device *dev = dst->dev; struct rt6_info *rt = (struct rt6_info *)dst;
- daddr = choose_neigh_daddr(&rt->rt6i_gateway, NULL, daddr); + daddr = choose_neigh_daddr(rt6_nexthop(rt, &in6addr_any), NULL, daddr); if (!daddr) return; if (dev->flags & (IFF_NOARP | IFF_LOOPBACK)) @@@ -2725,9 -2725,10 +2725,9 @@@ static void __ip6_rt_update_pmtu(struc
rcu_read_lock(); res.f6i = rcu_dereference(rt6->from); - if (!res.f6i) { - rcu_read_unlock(); - return; - } + if (!res.f6i) + goto out_unlock; + res.fib6_flags = res.f6i->fib6_flags; res.fib6_type = res.f6i->fib6_type;
@@@ -2743,8 -2744,10 +2743,8 @@@ /* fib6_info uses a nexthop that does not have fib6_nh * using the dst->dev + gw. Should be impossible. */ - if (!arg.match) { - rcu_read_unlock(); - return; - } + if (!arg.match) + goto out_unlock;
res.nh = arg.match; } else { @@@ -2757,7 -2760,6 +2757,7 @@@ if (rt6_insert_exception(nrt6, &res)) dst_release_immediate(&nrt6->dst); } +out_unlock: rcu_read_unlock(); } } @@@ -4386,13 -4388,14 +4386,14 @@@ struct fib6_info *addrconf_f6i_alloc(st struct fib6_config cfg = { .fc_table = l3mdev_fib_table(idev->dev) ? : RT6_TABLE_LOCAL, .fc_ifindex = idev->dev->ifindex, - .fc_flags = RTF_UP | RTF_ADDRCONF | RTF_NONEXTHOP, + .fc_flags = RTF_UP | RTF_NONEXTHOP, .fc_dst = *addr, .fc_dst_len = 128, .fc_protocol = RTPROT_KERNEL, .fc_nlinfo.nl_net = net, .fc_ignore_dev_down = true, }; + struct fib6_info *f6i;
if (anycast) { cfg.fc_type = RTN_ANYCAST; @@@ -4402,7 -4405,10 +4403,10 @@@ cfg.fc_flags |= RTF_LOCAL; }
- return ip6_route_info_create(&cfg, gfp_flags, NULL); + f6i = ip6_route_info_create(&cfg, gfp_flags, NULL); + if (!IS_ERR(f6i)) + f6i->dst_nocount = true; + return f6i; }
/* remove deleted ip from prefsrc entries */ @@@ -5323,11 -5329,11 +5327,11 @@@ static int rt6_fill_node_nexthop(struc if (nexthop_is_multipath(nh)) { struct nlattr *mp;
- mp = nla_nest_start(skb, RTA_MULTIPATH); + mp = nla_nest_start_noflag(skb, RTA_MULTIPATH); if (!mp) goto nla_put_failure;
- if (nexthop_mpath_fill_node(skb, nh)) + if (nexthop_mpath_fill_node(skb, nh, AF_INET6)) goto nla_put_failure;
nla_nest_end(skb, mp); @@@ -5335,7 -5341,7 +5339,7 @@@ struct fib6_nh *fib6_nh;
fib6_nh = nexthop_fib6_nh(nh); - if (fib_nexthop_info(skb, &fib6_nh->nh_common, + if (fib_nexthop_info(skb, &fib6_nh->nh_common, AF_INET6, flags, false) < 0) goto nla_put_failure; } @@@ -5464,13 -5470,14 +5468,14 @@@ static int rt6_fill_node(struct net *ne goto nla_put_failure;
if (fib_add_nexthop(skb, &rt->fib6_nh->nh_common, - rt->fib6_nh->fib_nh_weight) < 0) + rt->fib6_nh->fib_nh_weight, AF_INET6) < 0) goto nla_put_failure;
list_for_each_entry_safe(sibling, next_sibling, &rt->fib6_siblings, fib6_siblings) { if (fib_add_nexthop(skb, &sibling->fib6_nh->nh_common, - sibling->fib6_nh->fib_nh_weight) < 0) + sibling->fib6_nh->fib_nh_weight, + AF_INET6) < 0) goto nla_put_failure; }
@@@ -5487,7 -5494,7 +5492,7 @@@
rtm->rtm_flags |= nh_flags; } else { - if (fib_nexthop_info(skb, &rt->fib6_nh->nh_common, + if (fib_nexthop_info(skb, &rt->fib6_nh->nh_common, AF_INET6, &nh_flags, false) < 0) goto nla_put_failure;
diff --combined net/mac80211/cfg.c index 7c6edb7c5f10,4105c97c7ba1..70739e746c13 --- a/net/mac80211/cfg.c +++ b/net/mac80211/cfg.c @@@ -980,8 -980,7 +980,8 @@@ static int ieee80211_start_ap(struct wi BSS_CHANGED_SSID | BSS_CHANGED_P2P_PS | BSS_CHANGED_TXPOWER | - BSS_CHANGED_TWT; + BSS_CHANGED_TWT | + BSS_CHANGED_HE_OBSS_PD; int err; int prev_beacon_int;
@@@ -1052,8 -1051,6 +1052,8 @@@ sdata->vif.bss_conf.enable_beacon = true; sdata->vif.bss_conf.allow_p2p_go_ps = sdata->vif.p2p; sdata->vif.bss_conf.twt_responder = params->twt_responder; + memcpy(&sdata->vif.bss_conf.he_obss_pd, ¶ms->he_obss_pd, + sizeof(struct ieee80211_he_obss_pd));
sdata->vif.bss_conf.ssid_len = params->ssid_len; if (params->ssid_len) @@@ -1532,7 -1529,6 +1532,6 @@@ static int ieee80211_add_station(struc struct sta_info *sta; struct ieee80211_sub_if_data *sdata; int err; - int layer2_update;
if (params->vlan) { sdata = IEEE80211_DEV_TO_SUB_IF(params->vlan); @@@ -1546,7 -1542,7 +1545,7 @@@ if (ether_addr_equal(mac, sdata->vif.addr)) return -EINVAL;
- if (is_multicast_ether_addr(mac)) + if (!is_valid_ether_addr(mac)) return -EINVAL;
if (params->sta_flags_set & BIT(NL80211_STA_FLAG_TDLS_PEER) && @@@ -1576,18 -1572,12 +1575,12 @@@ test_sta_flag(sta, WLAN_STA_ASSOC)) rate_control_rate_init(sta);
- layer2_update = sdata->vif.type == NL80211_IFTYPE_AP_VLAN || - sdata->vif.type == NL80211_IFTYPE_AP; - err = sta_info_insert_rcu(sta); if (err) { rcu_read_unlock(); return err; }
- if (layer2_update) - cfg80211_send_layer2_update(sta->sdata->dev, sta->sta.addr); - rcu_read_unlock();
return 0; @@@ -1685,10 -1675,11 +1678,11 @@@ static int ieee80211_change_station(str sta->sdata = vlansdata; ieee80211_check_fast_xmit(sta);
- if (test_sta_flag(sta, WLAN_STA_AUTHORIZED)) + if (test_sta_flag(sta, WLAN_STA_AUTHORIZED)) { ieee80211_vif_inc_num_mcast(sta->sdata); - - cfg80211_send_layer2_update(sta->sdata->dev, sta->sta.addr); + cfg80211_send_layer2_update(sta->sdata->dev, + sta->sta.addr); + } }
err = sta_apply_parameters(local, sta, params); diff --combined net/mac80211/sta_info.c index df553070206c,5fb368cc2633..bd11fef2139f --- a/net/mac80211/sta_info.c +++ b/net/mac80211/sta_info.c @@@ -1065,6 -1065,7 +1065,6 @@@ static void __sta_info_destroy_part2(st cfg80211_del_sta_sinfo(sdata->dev, sta->sta.addr, sinfo, GFP_KERNEL); kfree(sinfo);
- rate_control_remove_sta_debugfs(sta); ieee80211_sta_debugfs_remove(sta);
cleanup_single_sta(sta); @@@ -1961,7 -1962,6 +1961,7 @@@ int sta_info_move_state(struct sta_inf case IEEE80211_STA_ASSOC: if (sta->sta_state == IEEE80211_STA_AUTH) { set_bit(WLAN_STA_ASSOC, &sta->_flags); + sta->assoc_at = ktime_get_boottime_ns(); ieee80211_recalc_min_chandef(sta->sdata); if (!sta->sta.support_p2p_ps) ieee80211_recalc_p2p_go_ps_allowed(sta->sdata); @@@ -1979,6 -1979,10 +1979,10 @@@ ieee80211_check_fast_xmit(sta); ieee80211_check_fast_rx(sta); } + if (sta->sdata->vif.type == NL80211_IFTYPE_AP_VLAN || + sta->sdata->vif.type == NL80211_IFTYPE_AP) + cfg80211_send_layer2_update(sta->sdata->dev, + sta->sta.addr); break; default: break; @@@ -2191,7 -2195,6 +2195,7 @@@ void sta_set_sinfo(struct sta_info *sta BIT_ULL(NL80211_STA_INFO_STA_FLAGS) | BIT_ULL(NL80211_STA_INFO_BSS_PARAM) | BIT_ULL(NL80211_STA_INFO_CONNECTED_TIME) | + BIT_ULL(NL80211_STA_INFO_ASSOC_AT_BOOTTIME) | BIT_ULL(NL80211_STA_INFO_RX_DROP_MISC);
if (sdata->vif.type == NL80211_IFTYPE_STATION) { @@@ -2200,7 -2203,6 +2204,7 @@@ }
sinfo->connected_time = ktime_get_seconds() - sta->last_connected; + sinfo->assoc_at = sta->assoc_at; sinfo->inactive_time = jiffies_to_msecs(jiffies - ieee80211_sta_last_active(sta));
diff --combined net/netfilter/nf_flow_table_core.c index 09310a1bd91f,a0b4bf654de2..132f5228b431 --- a/net/netfilter/nf_flow_table_core.c +++ b/net/netfilter/nf_flow_table_core.c @@@ -11,7 -11,6 +11,7 @@@ #include <net/netfilter/nf_flow_table.h> #include <net/netfilter/nf_conntrack.h> #include <net/netfilter/nf_conntrack_core.h> +#include <net/netfilter/nf_conntrack_l4proto.h> #include <net/netfilter/nf_conntrack_tuple.h>
struct flow_offload_entry { @@@ -218,7 -217,7 +218,7 @@@ int flow_offload_add(struct nf_flowtabl return err; }
- flow->timeout = (u32)jiffies; + flow->timeout = (u32)jiffies + NF_FLOW_TIMEOUT; return 0; } EXPORT_SYMBOL_GPL(flow_offload_add); diff --combined net/rds/bind.c index 6dbb763bc1fd,05464fd7c17a..20c156a73e73 --- a/net/rds/bind.c +++ b/net/rds/bind.c @@@ -1,5 -1,5 +1,5 @@@ /* - * Copyright (c) 2006, 2018 Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2006, 2019 Oracle and/or its affiliates. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU @@@ -181,7 -181,7 +181,7 @@@ int rds_bind(struct socket *sock, struc if (addr_len < sizeof(struct sockaddr_in) || sin->sin_addr.s_addr == htonl(INADDR_ANY) || sin->sin_addr.s_addr == htonl(INADDR_BROADCAST) || - IN_MULTICAST(ntohl(sin->sin_addr.s_addr))) + ipv4_is_multicast(sin->sin_addr.s_addr)) return -EINVAL; ipv6_addr_set_v4mapped(sin->sin_addr.s_addr, &v6addr); binding_addr = &v6addr; @@@ -206,7 -206,7 +206,7 @@@ addr4 = sin6->sin6_addr.s6_addr32[3]; if (addr4 == htonl(INADDR_ANY) || addr4 == htonl(INADDR_BROADCAST) || - IN_MULTICAST(ntohl(addr4))) + ipv4_is_multicast(addr4)) return -EINVAL; } /* The scope ID must be specified for link local address. */ @@@ -239,34 -239,30 +239,30 @@@ goto out; }
- sock_set_flag(sk, SOCK_RCU_FREE); - ret = rds_add_bound(rs, binding_addr, &port, scope_id); - if (ret) - goto out; - - if (rs->rs_transport) { /* previously bound */ + /* The transport can be set using SO_RDS_TRANSPORT option before the + * socket is bound. + */ + if (rs->rs_transport) { trans = rs->rs_transport; if (trans->laddr_check(sock_net(sock->sk), binding_addr, scope_id) != 0) { ret = -ENOPROTOOPT; - rds_remove_bound(rs); - } else { - ret = 0; + goto out; } - goto out; - } - trans = rds_trans_get_preferred(sock_net(sock->sk), binding_addr, - scope_id); - if (!trans) { - ret = -EADDRNOTAVAIL; - rds_remove_bound(rs); - pr_info_ratelimited("RDS: %s could not find a transport for %pI6c, load rds_tcp or rds_rdma?\n", - __func__, binding_addr); - goto out; + } else { + trans = rds_trans_get_preferred(sock_net(sock->sk), + binding_addr, scope_id); + if (!trans) { + ret = -EADDRNOTAVAIL; + pr_info_ratelimited("RDS: %s could not find a transport for %pI6c, load rds_tcp or rds_rdma?\n", + __func__, binding_addr); + goto out; + } + rs->rs_transport = trans; }
- rs->rs_transport = trans; - ret = 0; + sock_set_flag(sk, SOCK_RCU_FREE); + ret = rds_add_bound(rs, binding_addr, &port, scope_id);
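The rds_bind() reordering above resolves and validates the transport before the address is inserted into the bind table, so the failure paths no longer need the rds_remove_bound() unwind. A minimal sketch of that "validate first, reserve last" ordering, with hypothetical types and helpers:

/* Illustrative sketch only: fail early so nothing needs undoing. */
#include <stdio.h>

struct sock_stub { int transport; int bound_port; };

static int check_transport(struct sock_stub *s)
{
    return s->transport ? 0 : -1;     /* analogue of -EADDRNOTAVAIL */
}

static int reserve_port(struct sock_stub *s)
{
    s->bound_port = 4000;             /* last step that can fail */
    return 0;
}

static int bind_stub(struct sock_stub *s)
{
    int ret = check_transport(s);     /* validate first, nothing to undo */

    if (ret)
        return ret;
    return reserve_port(s);
}

int main(void)
{
    struct sock_stub bad = { .transport = 0 }, good = { .transport = 1 };

    printf("bad: %d\n", bind_stub(&bad));
    printf("good: %d port=%d\n", bind_stub(&good), good.bound_port);
    return 0;
}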
out: release_sock(sk); diff --combined net/sctp/protocol.c index b48ffe845c31,53746ffeeca3..08d14d86ecfb --- a/net/sctp/protocol.c +++ b/net/sctp/protocol.c @@@ -1254,9 -1254,6 +1254,9 @@@ static int __net_init sctp_defaults_ini /* Disable AUTH by default. */ net->sctp.auth_enable = 0;
+ /* Enable ECN by default. */ + net->sctp.ecn_enable = 1; + /* Set SCOPE policy to enabled */ net->sctp.scope_policy = SCTP_SCOPE_POLICY_ENABLE;
@@@ -1339,7 -1336,7 +1339,7 @@@ static int __net_init sctp_ctrlsock_ini return status; }
- static void __net_init sctp_ctrlsock_exit(struct net *net) + static void __net_exit sctp_ctrlsock_exit(struct net *net) { /* Free the control endpoint. */ inet_ctl_sock_destroy(net->sctp.ctl_sock); diff --combined net/sctp/socket.c index 3e50a9712fb1,b083d4e66230..939b8d2595bc --- a/net/sctp/socket.c +++ b/net/sctp/socket.c @@@ -309,7 -309,7 +309,7 @@@ static int sctp_bind(struct sock *sk, s return retval; }
- static long sctp_get_port_local(struct sock *, union sctp_addr *); + static int sctp_get_port_local(struct sock *, union sctp_addr *);
/* Verify this is a valid sockaddr. */ static struct sctp_af *sctp_sockaddr_af(struct sctp_sock *opt, @@@ -399,9 -399,8 +399,8 @@@ static int sctp_do_bind(struct sock *sk * detection. */ addr->v4.sin_port = htons(snum); - if ((ret = sctp_get_port_local(sk, addr))) { + if (sctp_get_port_local(sk, addr)) return -EADDRINUSE; - }
/* Refresh ephemeral port. */ if (!bp->port) @@@ -413,11 -412,13 +412,13 @@@ ret = sctp_add_bind_addr(bp, addr, af->sockaddr_len, SCTP_ADDR_SRC, GFP_ATOMIC);
- /* Copy back into socket for getsockname() use. */ - if (!ret) { - inet_sk(sk)->inet_sport = htons(inet_sk(sk)->inet_num); - sp->pf->to_sk_saddr(addr, sk); + if (ret) { + sctp_put_port(sk); + return ret; } + /* Copy back into socket for getsockname() use. */ + inet_sk(sk)->inet_sport = htons(inet_sk(sk)->inet_num); + sp->pf->to_sk_saddr(addr, sk);
return ret; } @@@ -524,6 -525,7 +525,6 @@@ static int sctp_send_asconf_add_ip(stru struct sockaddr *addrs, int addrcnt) { - struct net *net = sock_net(sk); struct sctp_sock *sp; struct sctp_endpoint *ep; struct sctp_association *asoc; @@@ -538,12 -540,12 +539,12 @@@ int i; int retval = 0;
- if (!net->sctp.addip_enable) - return retval; - sp = sctp_sk(sk); ep = sp->ep;
+ if (!ep->asconf_enable) + return retval; + pr_debug("%s: sk:%p, addrs:%p, addrcnt:%d\n", __func__, sk, addrs, addrcnt);
@@@ -726,6 -728,7 +727,6 @@@ static int sctp_send_asconf_del_ip(stru struct sockaddr *addrs, int addrcnt) { - struct net *net = sock_net(sk); struct sctp_sock *sp; struct sctp_endpoint *ep; struct sctp_association *asoc; @@@ -741,12 -744,12 +742,12 @@@ int stored = 0;
chunk = NULL; - if (!net->sctp.addip_enable) - return retval; - sp = sctp_sk(sk); ep = sp->ep;
+ if (!ep->asconf_enable) + return retval; + pr_debug("%s: sk:%p, addrs:%p, addrcnt:%d\n", __func__, sk, addrs, addrcnt);
@@@ -1042,161 -1045,158 +1043,161 @@@ out return err; }
-/* __sctp_connect(struct sock* sk, struct sockaddr *kaddrs, int addrs_size) - * - * Common routine for handling connect() and sctp_connectx(). - * Connect will come in with just a single address. - */ -static int __sctp_connect(struct sock *sk, - struct sockaddr *kaddrs, - int addrs_size, int flags, - sctp_assoc_t *assoc_id) +static int sctp_connect_new_asoc(struct sctp_endpoint *ep, + const union sctp_addr *daddr, + const struct sctp_initmsg *init, + struct sctp_transport **tp) { + struct sctp_association *asoc; + struct sock *sk = ep->base.sk; struct net *net = sock_net(sk); - struct sctp_sock *sp; - struct sctp_endpoint *ep; - struct sctp_association *asoc = NULL; - struct sctp_association *asoc2; - struct sctp_transport *transport; - union sctp_addr to; enum sctp_scope scope; - long timeo; - int err = 0; - int addrcnt = 0; - int walk_size = 0; - union sctp_addr *sa_addr = NULL; - void *addr_buf; - unsigned short port; + int err;
- sp = sctp_sk(sk); - ep = sp->ep; + if (sctp_endpoint_is_peeled_off(ep, daddr)) + return -EADDRNOTAVAIL;
- /* connect() cannot be done on a socket that is already in ESTABLISHED - * state - UDP-style peeled off socket or a TCP-style socket that - * is already connected. - * It cannot be done even on a TCP-style listening socket. - */ - if (sctp_sstate(sk, ESTABLISHED) || sctp_sstate(sk, CLOSING) || - (sctp_style(sk, TCP) && sctp_sstate(sk, LISTENING))) { - err = -EISCONN; - goto out_free; + if (!ep->base.bind_addr.port) { + if (sctp_autobind(sk)) + return -EAGAIN; + } else { + if (ep->base.bind_addr.port < inet_prot_sock(net) && + !ns_capable(net->user_ns, CAP_NET_BIND_SERVICE)) + return -EACCES; }
- /* Walk through the addrs buffer and count the number of addresses. */ - addr_buf = kaddrs; - while (walk_size < addrs_size) { - struct sctp_af *af; - - if (walk_size + sizeof(sa_family_t) > addrs_size) { - err = -EINVAL; - goto out_free; - } + scope = sctp_scope(daddr); + asoc = sctp_association_new(ep, sk, scope, GFP_KERNEL); + if (!asoc) + return -ENOMEM;
- sa_addr = addr_buf; - af = sctp_get_af_specific(sa_addr->sa.sa_family); + err = sctp_assoc_set_bind_addr_from_ep(asoc, scope, GFP_KERNEL); + if (err < 0) + goto free;
- /* If the address family is not supported or if this address - * causes the address buffer to overflow return EINVAL. - */ - if (!af || (walk_size + af->sockaddr_len) > addrs_size) { - err = -EINVAL; - goto out_free; - } + *tp = sctp_assoc_add_peer(asoc, daddr, GFP_KERNEL, SCTP_UNKNOWN); + if (!*tp) { + err = -ENOMEM; + goto free; + }
- port = ntohs(sa_addr->v4.sin_port); + if (!init) + return 0;
- /* Save current address so we can work with it */ - memcpy(&to, sa_addr, af->sockaddr_len); + if (init->sinit_num_ostreams) { + __u16 outcnt = init->sinit_num_ostreams;
- err = sctp_verify_addr(sk, &to, af->sockaddr_len); + asoc->c.sinit_num_ostreams = outcnt; + /* outcnt has been changed, need to re-init stream */ + err = sctp_stream_init(&asoc->stream, outcnt, 0, GFP_KERNEL); if (err) - goto out_free; + goto free; + }
- /* Make sure the destination port is correctly set - * in all addresses. - */ - if (asoc && asoc->peer.port && asoc->peer.port != port) { - err = -EINVAL; - goto out_free; - } + if (init->sinit_max_instreams) + asoc->c.sinit_max_instreams = init->sinit_max_instreams;
- /* Check if there already is a matching association on the - * endpoint (other than the one created here). - */ - asoc2 = sctp_endpoint_lookup_assoc(ep, &to, &transport); - if (asoc2 && asoc2 != asoc) { - if (asoc2->state >= SCTP_STATE_ESTABLISHED) - err = -EISCONN; - else - err = -EALREADY; - goto out_free; - } + if (init->sinit_max_attempts) + asoc->max_init_attempts = init->sinit_max_attempts;
- /* If we could not find a matching association on the endpoint, - * make sure that there is no peeled-off association matching - * the peer address even on another socket. - */ - if (sctp_endpoint_is_peeled_off(ep, &to)) { - err = -EADDRNOTAVAIL; - goto out_free; - } + if (init->sinit_max_init_timeo) + asoc->max_init_timeo = + msecs_to_jiffies(init->sinit_max_init_timeo);
- if (!asoc) { - /* If a bind() or sctp_bindx() is not called prior to - * an sctp_connectx() call, the system picks an - * ephemeral port and will choose an address set - * equivalent to binding with a wildcard address. - */ - if (!ep->base.bind_addr.port) { - if (sctp_autobind(sk)) { - err = -EAGAIN; - goto out_free; - } - } else { - /* - * If an unprivileged user inherits a 1-many - * style socket with open associations on a - * privileged port, it MAY be permitted to - * accept new associations, but it SHOULD NOT - * be permitted to open new associations. - */ - if (ep->base.bind_addr.port < - inet_prot_sock(net) && - !ns_capable(net->user_ns, - CAP_NET_BIND_SERVICE)) { - err = -EACCES; - goto out_free; - } - } + return 0; +free: + sctp_association_free(asoc); + return err; +}
- scope = sctp_scope(&to); - asoc = sctp_association_new(ep, sk, scope, GFP_KERNEL); - if (!asoc) { - err = -ENOMEM; - goto out_free; - } +static int sctp_connect_add_peer(struct sctp_association *asoc, + union sctp_addr *daddr, int addr_len) +{ + struct sctp_endpoint *ep = asoc->ep; + struct sctp_association *old; + struct sctp_transport *t; + int err;
- err = sctp_assoc_set_bind_addr_from_ep(asoc, scope, - GFP_KERNEL); - if (err < 0) { - goto out_free; - } + err = sctp_verify_addr(ep->base.sk, daddr, addr_len); + if (err) + return err;
- } + old = sctp_endpoint_lookup_assoc(ep, daddr, &t); + if (old && old != asoc) + return old->state >= SCTP_STATE_ESTABLISHED ? -EISCONN + : -EALREADY; + + if (sctp_endpoint_is_peeled_off(ep, daddr)) + return -EADDRNOTAVAIL; + + t = sctp_assoc_add_peer(asoc, daddr, GFP_KERNEL, SCTP_UNKNOWN); + if (!t) + return -ENOMEM;
- /* Prime the peer's transport structures. */ - transport = sctp_assoc_add_peer(asoc, &to, GFP_KERNEL, - SCTP_UNKNOWN); - if (!transport) { - err = -ENOMEM; + return 0; +} + +/* __sctp_connect(struct sock* sk, struct sockaddr *kaddrs, int addrs_size) + * + * Common routine for handling connect() and sctp_connectx(). + * Connect will come in with just a single address. + */ +static int __sctp_connect(struct sock *sk, struct sockaddr *kaddrs, + int addrs_size, int flags, sctp_assoc_t *assoc_id) +{ + struct sctp_sock *sp = sctp_sk(sk); + struct sctp_endpoint *ep = sp->ep; + struct sctp_transport *transport; + struct sctp_association *asoc; + void *addr_buf = kaddrs; + union sctp_addr *daddr; + struct sctp_af *af; + int walk_size, err; + long timeo; + + if (sctp_sstate(sk, ESTABLISHED) || sctp_sstate(sk, CLOSING) || + (sctp_style(sk, TCP) && sctp_sstate(sk, LISTENING))) + return -EISCONN; + + daddr = addr_buf; + af = sctp_get_af_specific(daddr->sa.sa_family); + if (!af || af->sockaddr_len > addrs_size) + return -EINVAL; + + err = sctp_verify_addr(sk, daddr, af->sockaddr_len); + if (err) + return err; + + asoc = sctp_endpoint_lookup_assoc(ep, daddr, &transport); + if (asoc) + return asoc->state >= SCTP_STATE_ESTABLISHED ? -EISCONN + : -EALREADY; + + err = sctp_connect_new_asoc(ep, daddr, NULL, &transport); + if (err) + return err; + asoc = transport->asoc; + + addr_buf += af->sockaddr_len; + walk_size = af->sockaddr_len; + while (walk_size < addrs_size) { + err = -EINVAL; + if (walk_size + sizeof(sa_family_t) > addrs_size) goto out_free; - }
- addrcnt++; - addr_buf += af->sockaddr_len; + daddr = addr_buf; + af = sctp_get_af_specific(daddr->sa.sa_family); + if (!af || af->sockaddr_len + walk_size > addrs_size) + goto out_free; + + if (asoc->peer.port != ntohs(daddr->v4.sin_port)) + goto out_free; + + err = sctp_connect_add_peer(asoc, daddr, af->sockaddr_len); + if (err) + goto out_free; + + addr_buf += af->sockaddr_len; walk_size += af->sockaddr_len; }
@@@ -1209,25 -1209,40 +1210,25 @@@ goto out_free; }
- err = sctp_primitive_ASSOCIATE(net, asoc, NULL); - if (err < 0) { + err = sctp_primitive_ASSOCIATE(sock_net(sk), asoc, NULL); + if (err < 0) goto out_free; - }
/* Initialize sk's dport and daddr for getpeername() */ inet_sk(sk)->inet_dport = htons(asoc->peer.port); - sp->pf->to_sk_daddr(sa_addr, sk); + sp->pf->to_sk_daddr(daddr, sk); sk->sk_err = 0;
- timeo = sock_sndtimeo(sk, flags & O_NONBLOCK); - if (assoc_id) *assoc_id = asoc->assoc_id;
- err = sctp_wait_for_connect(asoc, &timeo); - /* Note: the asoc may be freed after the return of - * sctp_wait_for_connect. - */ - - /* Don't free association on exit. */ - asoc = NULL; + timeo = sock_sndtimeo(sk, flags & O_NONBLOCK); + return sctp_wait_for_connect(asoc, &timeo);
out_free: pr_debug("%s: took out_free path with asoc:%p kaddrs:%p err:%d\n", __func__, asoc, kaddrs, err); - - if (asoc) { - /* sctp_primitive_ASSOCIATE may have added this association - * To the hash table, try to unhash it, just in case, its a noop - * if it wasn't hashed so we're safe - */ - sctp_association_free(asoc); - } + sctp_association_free(asoc); return err; }
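For context, the packed-address layout that __sctp_connect() walks above is exactly what userspace hands in through the lksctp-tools sctp_connectx() wrapper: the kernel steps through the buffer one sockaddr at a time, bounded by addrs_size. A minimal sketch (hypothetical example addresses and port; assumes lksctp-tools' <netinet/sctp.h> and an already created SCTP socket fd):

#include <string.h>
#include <arpa/inet.h>
#include <netinet/in.h>
#include <netinet/sctp.h>	/* sctp_connectx() wrapper from lksctp-tools */

/* Connect fd to a peer reachable at two IPv4 addresses (documentation
 * addresses used here). sctp_connectx() passes the addresses packed back
 * to back, which is the addrs_size-bounded buffer __sctp_connect() walks.
 */
static int connect_two_paths(int fd, sctp_assoc_t *aid)
{
	struct sockaddr_in addrs[2];

	memset(addrs, 0, sizeof(addrs));
	addrs[0].sin_family = AF_INET;
	addrs[0].sin_port = htons(5000);	/* example port */
	inet_pton(AF_INET, "192.0.2.1", &addrs[0].sin_addr);
	addrs[1] = addrs[0];
	inet_pton(AF_INET, "192.0.2.2", &addrs[1].sin_addr);

	return sctp_connectx(fd, (struct sockaddr *)addrs, 2, aid);
}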
@@@ -1297,8 -1312,7 +1298,8 @@@ static int __sctp_setsockopt_connectx(s pr_debug("%s: sk:%p addrs:%p addrs_size:%d\n", __func__, sk, addrs, addrs_size);
- if (unlikely(addrs_size <= 0)) + /* make sure the 1st addr's sa_family is accessible later */ + if (unlikely(addrs_size < sizeof(sa_family_t))) return -EINVAL;
kaddrs = memdup_user(addrs, addrs_size); @@@ -1646,7 -1660,9 +1647,7 @@@ static int sctp_sendmsg_new_asoc(struc struct sctp_transport **tp) { struct sctp_endpoint *ep = sctp_sk(sk)->ep; - struct net *net = sock_net(sk); struct sctp_association *asoc; - enum sctp_scope scope; struct cmsghdr *cmsg; __be32 flowinfo = 0; struct sctp_af *af; @@@ -1661,6 -1677,20 +1662,6 @@@ sctp_sstate(sk, CLOSING))) return -EADDRNOTAVAIL;
- if (sctp_endpoint_is_peeled_off(ep, daddr)) - return -EADDRNOTAVAIL; - - if (!ep->base.bind_addr.port) { - if (sctp_autobind(sk)) - return -EAGAIN; - } else { - if (ep->base.bind_addr.port < inet_prot_sock(net) && - !ns_capable(net->user_ns, CAP_NET_BIND_SERVICE)) - return -EACCES; - } - - scope = sctp_scope(daddr); - /* Label connection socket for first association 1-to-many * style for client sequence socket()->sendmsg(). This * needs to be done before sctp_assoc_add_peer() as that will @@@ -1676,10 -1706,45 +1677,10 @@@ if (err < 0) return err;
- asoc = sctp_association_new(ep, sk, scope, GFP_KERNEL); - if (!asoc) - return -ENOMEM; - - if (sctp_assoc_set_bind_addr_from_ep(asoc, scope, GFP_KERNEL) < 0) { - err = -ENOMEM; - goto free; - } - - if (cmsgs->init) { - struct sctp_initmsg *init = cmsgs->init; - - if (init->sinit_num_ostreams) { - __u16 outcnt = init->sinit_num_ostreams; - - asoc->c.sinit_num_ostreams = outcnt; - /* outcnt has been changed, need to re-init stream */ - err = sctp_stream_init(&asoc->stream, outcnt, 0, - GFP_KERNEL); - if (err) - goto free; - } - - if (init->sinit_max_instreams) - asoc->c.sinit_max_instreams = init->sinit_max_instreams; - - if (init->sinit_max_attempts) - asoc->max_init_attempts = init->sinit_max_attempts; - - if (init->sinit_max_init_timeo) - asoc->max_init_timeo = - msecs_to_jiffies(init->sinit_max_init_timeo); - } - - *tp = sctp_assoc_add_peer(asoc, daddr, GFP_KERNEL, SCTP_UNKNOWN); - if (!*tp) { - err = -ENOMEM; - goto free; - } + err = sctp_connect_new_asoc(ep, daddr, cmsgs->init, tp); + if (err) + return err; + asoc = (*tp)->asoc;
if (!cmsgs->addrs_msg) return 0; @@@ -1689,6 -1754,8 +1690,6 @@@
/* sendv addr list parse */ for_each_cmsghdr(cmsg, cmsgs->addrs_msg) { - struct sctp_transport *transport; - struct sctp_association *old; union sctp_addr _daddr; int dlen;
@@@ -1722,10 -1789,30 +1723,10 @@@ daddr->v6.sin6_port = htons(asoc->peer.port); memcpy(&daddr->v6.sin6_addr, CMSG_DATA(cmsg), dlen); } - err = sctp_verify_addr(sk, daddr, sizeof(*daddr)); - if (err) - goto free; - - old = sctp_endpoint_lookup_assoc(ep, daddr, &transport); - if (old && old != asoc) { - if (old->state >= SCTP_STATE_ESTABLISHED) - err = -EISCONN; - else - err = -EALREADY; - goto free; - }
- if (sctp_endpoint_is_peeled_off(ep, daddr)) { - err = -EADDRNOTAVAIL; - goto free; - } - - transport = sctp_assoc_add_peer(asoc, daddr, GFP_KERNEL, - SCTP_UNKNOWN); - if (!transport) { - err = -ENOMEM; + err = sctp_connect_add_peer(asoc, daddr, sizeof(*daddr)); + if (err) goto free; - } }
return 0; @@@ -3328,6 -3415,7 +3329,6 @@@ static int sctp_setsockopt_maxseg(struc static int sctp_setsockopt_peer_primary_addr(struct sock *sk, char __user *optval, unsigned int optlen) { - struct net *net = sock_net(sk); struct sctp_sock *sp; struct sctp_association *asoc = NULL; struct sctp_setpeerprim prim; @@@ -3337,7 -3425,7 +3338,7 @@@
sp = sctp_sk(sk);
- if (!net->sctp.addip_enable) + if (!sp->ep->asconf_enable) return -EPERM;
if (optlen != sizeof(struct sctp_setpeerprim)) @@@ -3687,6 -3775,9 +3688,6 @@@ static int sctp_setsockopt_auth_key(str struct sctp_association *asoc; int ret = -EINVAL;
- if (!ep->auth_enable) - return -EACCES; - if (optlen <= sizeof(struct sctp_authkey)) return -EINVAL; /* authkey->sca_keylength is u16, so optlen can't be bigger than @@@ -3753,6 -3844,9 +3754,6 @@@ static int sctp_setsockopt_active_key(s struct sctp_authkeyid val; int ret = 0;
- if (!ep->auth_enable) - return -EACCES; - if (optlen != sizeof(struct sctp_authkeyid)) return -EINVAL; if (copy_from_user(&val, optval, optlen)) @@@ -3804,6 -3898,9 +3805,6 @@@ static int sctp_setsockopt_del_key(stru struct sctp_authkeyid val; int ret = 0;
- if (!ep->auth_enable) - return -EACCES; - if (optlen != sizeof(struct sctp_authkeyid)) return -EINVAL; if (copy_from_user(&val, optval, optlen)) @@@ -3854,6 -3951,9 +3855,6 @@@ static int sctp_setsockopt_deactivate_k struct sctp_authkeyid val; int ret = 0;
- if (!ep->auth_enable) - return -EACCES; - if (optlen != sizeof(struct sctp_authkeyid)) return -EINVAL; if (copy_from_user(&val, optval, optlen)) @@@ -4484,110 -4584,6 +4485,110 @@@ static int sctp_setsockopt_event(struc return retval; }
+static int sctp_setsockopt_asconf_supported(struct sock *sk, + char __user *optval, + unsigned int optlen) +{ + struct sctp_assoc_value params; + struct sctp_association *asoc; + struct sctp_endpoint *ep; + int retval = -EINVAL; + + if (optlen != sizeof(params)) + goto out; + + if (copy_from_user(&params, optval, optlen)) { + retval = -EFAULT; + goto out; + } + + asoc = sctp_id2assoc(sk, params.assoc_id); + if (!asoc && params.assoc_id != SCTP_FUTURE_ASSOC && + sctp_style(sk, UDP)) + goto out; + + ep = sctp_sk(sk)->ep; + ep->asconf_enable = !!params.assoc_value; + + if (ep->asconf_enable && ep->auth_enable) { + sctp_auth_ep_add_chunkid(ep, SCTP_CID_ASCONF); + sctp_auth_ep_add_chunkid(ep, SCTP_CID_ASCONF_ACK); + } + + retval = 0; + +out: + return retval; +} + +static int sctp_setsockopt_auth_supported(struct sock *sk, + char __user *optval, + unsigned int optlen) +{ + struct sctp_assoc_value params; + struct sctp_association *asoc; + struct sctp_endpoint *ep; + int retval = -EINVAL; + + if (optlen != sizeof(params)) + goto out; + + if (copy_from_user(&params, optval, optlen)) { + retval = -EFAULT; + goto out; + } + + asoc = sctp_id2assoc(sk, params.assoc_id); + if (!asoc && params.assoc_id != SCTP_FUTURE_ASSOC && + sctp_style(sk, UDP)) + goto out; + + ep = sctp_sk(sk)->ep; + if (params.assoc_value) { + retval = sctp_auth_init(ep, GFP_KERNEL); + if (retval) + goto out; + if (ep->asconf_enable) { + sctp_auth_ep_add_chunkid(ep, SCTP_CID_ASCONF); + sctp_auth_ep_add_chunkid(ep, SCTP_CID_ASCONF_ACK); + } + } + + ep->auth_enable = !!params.assoc_value; + retval = 0; + +out: + return retval; +} + +static int sctp_setsockopt_ecn_supported(struct sock *sk, + char __user *optval, + unsigned int optlen) +{ + struct sctp_assoc_value params; + struct sctp_association *asoc; + int retval = -EINVAL; + + if (optlen != sizeof(params)) + goto out; + + if (copy_from_user(&params, optval, optlen)) { + retval = -EFAULT; + goto out; + } + + asoc = sctp_id2assoc(sk, params.assoc_id); + if (!asoc && params.assoc_id != SCTP_FUTURE_ASSOC && + sctp_style(sk, UDP)) + goto out; + + sctp_sk(sk)->ep->ecn_enable = !!params.assoc_value; + retval = 0; + +out: + return retval; +} + /* API 6.2 setsockopt(), getsockopt() * * Applications use setsockopt() and getsockopt() to set or retrieve @@@ -4788,15 -4784,6 +4789,15 @@@ static int sctp_setsockopt(struct sock case SCTP_EVENT: retval = sctp_setsockopt_event(sk, optval, optlen); break; + case SCTP_ASCONF_SUPPORTED: + retval = sctp_setsockopt_asconf_supported(sk, optval, optlen); + break; + case SCTP_AUTH_SUPPORTED: + retval = sctp_setsockopt_auth_supported(sk, optval, optlen); + break; + case SCTP_ECN_SUPPORTED: + retval = sctp_setsockopt_ecn_supported(sk, optval, optlen); + break; default: retval = -ENOPROTOOPT; break; @@@ -6934,6 -6921,9 +6935,6 @@@ static int sctp_getsockopt_active_key(s struct sctp_authkeyid val; struct sctp_association *asoc;
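The three new setsockopt handlers above all take a struct sctp_assoc_value, with assoc_value acting as a boolean toggle and SCTP_FUTURE_ASSOC selecting the endpoint-wide setting. A hedged userspace sketch (not part of the patch; assumes uapi/libc headers recent enough to define SCTP_ECN_SUPPORTED and SCTP_FUTURE_ASSOC) of turning ECN negotiation off before any association exists:

#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/sctp.h>	/* needs headers that expose SCTP_ECN_SUPPORTED */

int main(void)
{
	struct sctp_assoc_value av;
	int fd = socket(AF_INET, SOCK_SEQPACKET, IPPROTO_SCTP);

	if (fd < 0) {
		perror("socket");
		return 1;
	}

	memset(&av, 0, sizeof(av));
	av.assoc_id = SCTP_FUTURE_ASSOC;	/* endpoint-wide: applies to future associations */
	av.assoc_value = 0;			/* 0 = do not negotiate ECN */

	if (setsockopt(fd, IPPROTO_SCTP, SCTP_ECN_SUPPORTED, &av, sizeof(av)) < 0)
		perror("setsockopt(SCTP_ECN_SUPPORTED)");

	return 0;
}

Per the handlers above, a non-zero assoc_value re-enables the feature; SCTP_ASCONF_SUPPORTED and SCTP_AUTH_SUPPORTED follow the same pattern.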
- if (!ep->auth_enable) - return -EACCES; - if (len < sizeof(struct sctp_authkeyid)) return -EINVAL;
@@@ -6945,15 -6935,10 +6946,15 @@@ if (!asoc && val.scact_assoc_id && sctp_style(sk, UDP)) return -EINVAL;
- if (asoc) + if (asoc) { + if (!asoc->peer.auth_capable) + return -EACCES; val.scact_keynumber = asoc->active_key_id; - else + } else { + if (!ep->auth_enable) + return -EACCES; val.scact_keynumber = ep->active_key_id; + }
if (put_user(len, optlen)) return -EFAULT; @@@ -6966,6 -6951,7 +6967,6 @@@ static int sctp_getsockopt_peer_auth_chunks(struct sock *sk, int len, char __user *optval, int __user *optlen) { - struct sctp_endpoint *ep = sctp_sk(sk)->ep; struct sctp_authchunks __user *p = (void __user *)optval; struct sctp_authchunks val; struct sctp_association *asoc; @@@ -6973,6 -6959,9 +6974,6 @@@ u32 num_chunks = 0; char __user *to;
- if (!ep->auth_enable) - return -EACCES; - if (len < sizeof(struct sctp_authchunks)) return -EINVAL;
@@@ -6984,9 -6973,6 +6985,9 @@@ if (!asoc) return -EINVAL;
+ if (!asoc->peer.auth_capable) + return -EACCES; + ch = asoc->peer.peer_chunks; if (!ch) goto num; @@@ -7018,6 -7004,9 +7019,6 @@@ static int sctp_getsockopt_local_auth_c u32 num_chunks = 0; char __user *to;
- if (!ep->auth_enable) - return -EACCES; - if (len < sizeof(struct sctp_authchunks)) return -EINVAL;
@@@ -7030,15 -7019,8 +7031,15 @@@ sctp_style(sk, UDP)) return -EINVAL;
- ch = asoc ? (struct sctp_chunks_param *)asoc->c.auth_chunks - : ep->auth_chunk_list; + if (asoc) { + if (!asoc->peer.auth_capable) + return -EACCES; + ch = (struct sctp_chunks_param *)asoc->c.auth_chunks; + } else { + if (!ep->auth_enable) + return -EACCES; + ch = ep->auth_chunk_list; + } if (!ch) goto num;
@@@ -7192,7 -7174,7 +7193,7 @@@ static int sctp_getsockopt_paddr_thresh val.spt_pathmaxrxt = trans->pathmaxrxt; val.spt_pathpfthld = trans->pf_retrans;
- return 0; + goto out; }
asoc = sctp_id2assoc(sk, val.spt_assoc_id); @@@ -7210,6 -7192,7 +7211,7 @@@ val.spt_pathmaxrxt = sp->pathmaxrxt; }
+ out: if (put_user(len, optlen) || copy_to_user(optval, &val, len)) return -EFAULT;
@@@ -7781,123 -7764,6 +7783,123 @@@ static int sctp_getsockopt_event(struc return 0; }
+static int sctp_getsockopt_asconf_supported(struct sock *sk, int len, + char __user *optval, + int __user *optlen) +{ + struct sctp_assoc_value params; + struct sctp_association *asoc; + int retval = -EFAULT; + + if (len < sizeof(params)) { + retval = -EINVAL; + goto out; + } + + len = sizeof(params); + if (copy_from_user(&params, optval, len)) + goto out; + + asoc = sctp_id2assoc(sk, params.assoc_id); + if (!asoc && params.assoc_id != SCTP_FUTURE_ASSOC && + sctp_style(sk, UDP)) { + retval = -EINVAL; + goto out; + } + + params.assoc_value = asoc ? asoc->peer.asconf_capable + : sctp_sk(sk)->ep->asconf_enable; + + if (put_user(len, optlen)) + goto out; + + if (copy_to_user(optval, &params, len)) + goto out; + + retval = 0; + +out: + return retval; +} + +static int sctp_getsockopt_auth_supported(struct sock *sk, int len, + char __user *optval, + int __user *optlen) +{ + struct sctp_assoc_value params; + struct sctp_association *asoc; + int retval = -EFAULT; + + if (len < sizeof(params)) { + retval = -EINVAL; + goto out; + } + + len = sizeof(params); + if (copy_from_user(&params, optval, len)) + goto out; + + asoc = sctp_id2assoc(sk, params.assoc_id); + if (!asoc && params.assoc_id != SCTP_FUTURE_ASSOC && + sctp_style(sk, UDP)) { + retval = -EINVAL; + goto out; + } + + params.assoc_value = asoc ? asoc->peer.auth_capable + : sctp_sk(sk)->ep->auth_enable; + + if (put_user(len, optlen)) + goto out; + + if (copy_to_user(optval, &params, len)) + goto out; + + retval = 0; + +out: + return retval; +} + +static int sctp_getsockopt_ecn_supported(struct sock *sk, int len, + char __user *optval, + int __user *optlen) +{ + struct sctp_assoc_value params; + struct sctp_association *asoc; + int retval = -EFAULT; + + if (len < sizeof(params)) { + retval = -EINVAL; + goto out; + } + + len = sizeof(params); + if (copy_from_user(&params, optval, len)) + goto out; + + asoc = sctp_id2assoc(sk, params.assoc_id); + if (!asoc && params.assoc_id != SCTP_FUTURE_ASSOC && + sctp_style(sk, UDP)) { + retval = -EINVAL; + goto out; + } + + params.assoc_value = asoc ? asoc->peer.ecn_capable + : sctp_sk(sk)->ep->ecn_enable; + + if (put_user(len, optlen)) + goto out; + + if (copy_to_user(optval, &params, len)) + goto out; + + retval = 0; + +out: + return retval; +} + static int sctp_getsockopt(struct sock *sk, int level, int optname, char __user *optval, int __user *optlen) { @@@ -8099,17 -7965,6 +8101,17 @@@ case SCTP_EVENT: retval = sctp_getsockopt_event(sk, len, optval, optlen); break; + case SCTP_ASCONF_SUPPORTED: + retval = sctp_getsockopt_asconf_supported(sk, len, optval, + optlen); + break; + case SCTP_AUTH_SUPPORTED: + retval = sctp_getsockopt_auth_supported(sk, len, optval, + optlen); + break; + case SCTP_ECN_SUPPORTED: + retval = sctp_getsockopt_ecn_supported(sk, len, optval, optlen); + break; default: retval = -ENOPROTOOPT; break; @@@ -8145,7 -8000,7 +8147,7 @@@ static void sctp_unhash(struct sock *sk static struct sctp_bind_bucket *sctp_bucket_create( struct sctp_bind_hashbucket *head, struct net *, unsigned short snum);
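On the read side, the getsockopt handlers above report the negotiated capability when an association id is given, and the endpoint default otherwise. A small sketch (hypothetical helper, not part of the patch; assoc_id is assumed to come from an SCTP_COMM_UP notification, or SCTP_FUTURE_ASSOC for the endpoint value):

#include <string.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/sctp.h>

/* Returns 1 if SCTP-AUTH is supported/negotiated for assoc_id on fd,
 * 0 if not, -1 on error. Mirrors sctp_getsockopt_auth_supported() above:
 * the kernel first reads assoc_id from optval, then fills in assoc_value.
 */
static int sctp_auth_negotiated(int fd, sctp_assoc_t assoc_id)
{
	struct sctp_assoc_value av;
	socklen_t len = sizeof(av);

	memset(&av, 0, sizeof(av));
	av.assoc_id = assoc_id;

	if (getsockopt(fd, IPPROTO_SCTP, SCTP_AUTH_SUPPORTED, &av, &len) < 0)
		return -1;

	return av.assoc_value ? 1 : 0;
}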
- static long sctp_get_port_local(struct sock *sk, union sctp_addr *addr) + static int sctp_get_port_local(struct sock *sk, union sctp_addr *addr) { struct sctp_sock *sp = sctp_sk(sk); bool reuse = (sk->sk_reuse || sp->reuse); @@@ -8255,7 -8110,7 +8257,7 @@@ pp_found
if (sctp_bind_addr_conflict(&ep2->base.bind_addr, addr, sp2, sp)) { - ret = (long)sk2; + ret = 1; goto fail_unlock; } } @@@ -8327,7 -8182,7 +8329,7 @@@ static int sctp_get_port(struct sock *s addr.v4.sin_port = htons(snum);
/* Note: sk->sk_num gets filled in if ephemeral port request. */ - return !!sctp_get_port_local(sk, &addr); + return sctp_get_port_local(sk, &addr); }
/* diff --combined net/tipc/name_distr.c index 61219f0b9677,241ed2274473..836e629e8f4a --- a/net/tipc/name_distr.c +++ b/net/tipc/name_distr.c @@@ -190,7 -190,7 +190,7 @@@ void tipc_named_node_up(struct net *net struct name_table *nt = tipc_name_table(net); struct sk_buff_head head;
- skb_queue_head_init(&head); + __skb_queue_head_init(&head);
read_lock_bh(&nt->cluster_scope_lock); named_distribute(net, &head, dnode, &nt->cluster_scope); @@@ -223,7 -223,8 +223,8 @@@ static void tipc_publ_purge(struct net publ->key); }
- kfree_rcu(p, rcu); + if (p) + kfree_rcu(p, rcu); }
/**