The following commit has been merged in the master branch:

commit e486463e82e4dca9e8f4413649088b21c9ff87e5
Merge: ed3b856b69a7f3748d6917e42d462c962aaa39b8 fa809e2fd6e317226c046202a88520962672eac0
Author: David S. Miller <davem@davemloft.net>
Date:   Mon Jun 25 15:50:32 2012 -0700
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Conflicts:
	drivers/net/usb/qmi_wwan.c
	net/batman-adv/translation-table.c
	net/ipv6/route.c
qmi_wwan.c resolution provided by Bjørn Mork.
The batman-adv conflict deals merely with global function names being changed to carry a proper subsystem prefix.
ipv6's route.c conflict is merely two side-by-side additions of network namespace methods.
Signed-off-by: David S. Miller <davem@davemloft.net>
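As a concrete illustration of the batman-adv part: the conflict only exists because one side of the merge renamed the subsystem's global symbols to carry a batadv_ prefix, so resolving it is just a matter of using the new names. A minimal sketch of what that rename looks like (the particular function shown is an assumption, not one of the actual conflicting hunks):

    /* before: generic name, visible outside batman-adv */
    int tt_len(int changes_num);

    /* after: same function, now carrying the subsystem prefix */
    int batadv_tt_len(int changes_num);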
diff --combined drivers/bluetooth/btmrvl_sdio.c index 2867499,0cd61d9..cf7588ed --- a/drivers/bluetooth/btmrvl_sdio.c +++ b/drivers/bluetooth/btmrvl_sdio.c @@@ -110,9 -110,6 +110,9 @@@ static const struct sdio_device_id btmr /* Marvell SD8787 Bluetooth device */ { SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, 0x911A), .driver_data = (unsigned long) &btmrvl_sdio_sd8787 }, + /* Marvell SD8787 Bluetooth AMP device */ + { SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, 0x911B), + .driver_data = (unsigned long) &btmrvl_sdio_sd8787 }, /* Marvell SD8797 Bluetooth device */ { SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, 0x912A), .driver_data = (unsigned long) &btmrvl_sdio_sd8797 }, @@@ -565,10 -562,12 +565,12 @@@ static int btmrvl_sdio_card_to_host(str skb_put(skb, buf_len); skb_pull(skb, SDIO_HEADER_LEN);
- if (type == HCI_EVENT_PKT) - btmrvl_check_evtpkt(priv, skb); + if (type == HCI_EVENT_PKT) { + if (btmrvl_check_evtpkt(priv, skb)) + hci_recv_frame(skb); + } else + hci_recv_frame(skb);
- hci_recv_frame(skb); hdev->stat.byte_rx += buf_len; break;
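For reference, the effect of the btmrvl_sdio.c hunk above: an incoming HCI event is now only handed to the Bluetooth core when btmrvl_check_evtpkt() reports that it did not consume it as a Marvell vendor event; non-event packets are still passed up unconditionally. A simplified sketch of the resulting receive dispatch (not the literal driver code; the return-value convention of btmrvl_check_evtpkt() is inferred from the hunk):

    if (type == HCI_EVENT_PKT) {
            if (btmrvl_check_evtpkt(priv, skb))
                    hci_recv_frame(skb);    /* ordinary event: up to the HCI core */
            /* else: vendor event was consumed inside the driver */
    } else {
            hci_recv_frame(skb);            /* ACL/SCO data always goes up */
    }
    hdev->stat.byte_rx += buf_len;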
diff --combined drivers/bluetooth/btusb.c index a45e717,83ebb24..e272214 --- a/drivers/bluetooth/btusb.c +++ b/drivers/bluetooth/btusb.c @@@ -21,7 -21,15 +21,7 @@@ * */
-#include <linux/kernel.h> #include <linux/module.h> -#include <linux/init.h> -#include <linux/slab.h> -#include <linux/types.h> -#include <linux/sched.h> -#include <linux/errno.h> -#include <linux/skbuff.h> - #include <linux/usb.h>
#include <net/bluetooth/bluetooth.h> @@@ -117,6 -125,7 +117,7 @@@ static struct usb_device_id blacklist_t
/* Atheros 3011 with sflash firmware */ { USB_DEVICE(0x0cf3, 0x3002), .driver_info = BTUSB_IGNORE }, + { USB_DEVICE(0x0cf3, 0xe019), .driver_info = BTUSB_IGNORE }, { USB_DEVICE(0x13d3, 0x3304), .driver_info = BTUSB_IGNORE }, { USB_DEVICE(0x0930, 0x0215), .driver_info = BTUSB_IGNORE }, { USB_DEVICE(0x0489, 0xe03d), .driver_info = BTUSB_IGNORE }, @@@ -131,6 -140,7 +132,7 @@@ { USB_DEVICE(0x04ca, 0x3005), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x13d3, 0x3362), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x0cf3, 0xe004), .driver_info = BTUSB_ATH3012 }, + { USB_DEVICE(0x0930, 0x0219), .driver_info = BTUSB_ATH3012 },
/* Atheros AR5BBU12 with sflash firmware */ { USB_DEVICE(0x0489, 0xe02c), .driver_info = BTUSB_IGNORE }, @@@ -1018,7 -1028,7 +1020,7 @@@ static int btusb_probe(struct usb_inter data->isoc = usb_ifnum_to_if(data->udev, 1);
if (!reset) - set_bit(HCI_QUIRK_NO_RESET, &hdev->quirks); + set_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks);
if (force_scofix || id->driver_info & BTUSB_WRONG_SCO_MTU) { if (!disable_scofix) @@@ -1030,7 -1040,7 +1032,7 @@@
if (id->driver_info & BTUSB_DIGIANSWER) { data->cmdreq_type = USB_TYPE_VENDOR; - set_bit(HCI_QUIRK_NO_RESET, &hdev->quirks); + set_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks); }
if (id->driver_info & BTUSB_CSR) { @@@ -1038,7 -1048,7 +1040,7 @@@
/* Old firmware would otherwise execute USB reset */ if (le16_to_cpu(udev->descriptor.bcdDevice) < 0x117) - set_bit(HCI_QUIRK_NO_RESET, &hdev->quirks); + set_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks); }
if (id->driver_info & BTUSB_SNIFFER) { diff --combined drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c index 3e662bf,6e7d5c0..e04b282 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c @@@ -40,6 -40,7 +40,7 @@@ #define I2C_BSC0 0 #define I2C_BSC1 1 #define I2C_WA_RETRY_CNT 3 + #define I2C_WA_PWR_ITER (I2C_WA_RETRY_CNT - 1) #define MCPR_IMC_COMMAND_READ_OP 1 #define MCPR_IMC_COMMAND_WRITE_OP 2
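The new I2C_WA_PWR_ITER constant (retry count minus one) feeds the SFP+ EEPROM read work-around further down in this patch: on the final retry, bnx2x_warpcore_read_sfp_module_eeprom() power-cycles the module before issuing the read again. Condensed from that later hunk:

    do {
            if (cnt == I2C_WA_PWR_ITER) {
                    /* last retry: power-cycle the SFP+ module first */
                    bnx2x_warpcore_power_module(params, phy, 0);
                    usleep_range(1000, 1000);       /* 100us is not enough here */
                    bnx2x_warpcore_power_module(params, phy, 1);
            }
            rc = bnx2x_bsc_read(params, phy, 0xa0, addr32, 0,
                                byte_cnt, data_array);
    } while ((rc != 0) && (++cnt < I2C_WA_RETRY_CNT));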
@@@ -284,6 -285,7 +285,6 @@@ #define ETS_E3B0_PBF_MIN_W_VAL (10000)
#define MAX_PACKET_SIZE (9700) -#define WC_UC_TIMEOUT 100 #define MAX_KR_LINK_RETRY 4
/**********************************************************/ @@@ -1304,94 -1306,6 +1305,94 @@@ int bnx2x_ets_strict(const struct link_
return 0; } + +/******************************************************************/ +/* EEE section */ +/******************************************************************/ +static u8 bnx2x_eee_has_cap(struct link_params *params) +{ + struct bnx2x *bp = params->bp; + + if (REG_RD(bp, params->shmem2_base) <= + offsetof(struct shmem2_region, eee_status[params->port])) + return 0; + + return 1; +} + +static int bnx2x_eee_nvram_to_time(u32 nvram_mode, u32 *idle_timer) +{ + switch (nvram_mode) { + case PORT_FEAT_CFG_EEE_POWER_MODE_BALANCED: + *idle_timer = EEE_MODE_NVRAM_BALANCED_TIME; + break; + case PORT_FEAT_CFG_EEE_POWER_MODE_AGGRESSIVE: + *idle_timer = EEE_MODE_NVRAM_AGGRESSIVE_TIME; + break; + case PORT_FEAT_CFG_EEE_POWER_MODE_LOW_LATENCY: + *idle_timer = EEE_MODE_NVRAM_LATENCY_TIME; + break; + default: + *idle_timer = 0; + break; + } + + return 0; +} + +static int bnx2x_eee_time_to_nvram(u32 idle_timer, u32 *nvram_mode) +{ + switch (idle_timer) { + case EEE_MODE_NVRAM_BALANCED_TIME: + *nvram_mode = PORT_FEAT_CFG_EEE_POWER_MODE_BALANCED; + break; + case EEE_MODE_NVRAM_AGGRESSIVE_TIME: + *nvram_mode = PORT_FEAT_CFG_EEE_POWER_MODE_AGGRESSIVE; + break; + case EEE_MODE_NVRAM_LATENCY_TIME: + *nvram_mode = PORT_FEAT_CFG_EEE_POWER_MODE_LOW_LATENCY; + break; + default: + *nvram_mode = PORT_FEAT_CFG_EEE_POWER_MODE_DISABLED; + break; + } + + return 0; +} + +static u32 bnx2x_eee_calc_timer(struct link_params *params) +{ + u32 eee_mode, eee_idle; + struct bnx2x *bp = params->bp; + + if (params->eee_mode & EEE_MODE_OVERRIDE_NVRAM) { + if (params->eee_mode & EEE_MODE_OUTPUT_TIME) { + /* time value in eee_mode --> used directly*/ + eee_idle = params->eee_mode & EEE_MODE_TIMER_MASK; + } else { + /* hsi value in eee_mode --> time */ + if (bnx2x_eee_nvram_to_time(params->eee_mode & + EEE_MODE_NVRAM_MASK, + &eee_idle)) + return 0; + } + } else { + /* hsi values in nvram --> time*/ + eee_mode = ((REG_RD(bp, params->shmem_base + + offsetof(struct shmem_region, dev_info. + port_feature_config[params->port]. + eee_power_mode)) & + PORT_FEAT_CFG_EEE_POWER_MODE_MASK) >> + PORT_FEAT_CFG_EEE_POWER_MODE_SHIFT); + + if (bnx2x_eee_nvram_to_time(eee_mode, &eee_idle)) + return 0; + } + + return eee_idle; +} + + /******************************************************************/ /* PFC section */ /******************************************************************/ @@@ -1626,7 -1540,7 +1627,7 @@@ static void bnx2x_umac_enable(struct li /* Reset UMAC */ REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, (MISC_REGISTERS_RESET_REG_2_UMAC0 << params->port)); - usleep_range(1000, 1000); + usleep_range(1000, 2000);
REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, (MISC_REGISTERS_RESET_REG_2_UMAC0 << params->port)); @@@ -1728,7 -1642,7 +1729,7 @@@ static void bnx2x_xmac_init(struct link /* Hard reset */ REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, MISC_REGISTERS_RESET_REG_2_XMAC); - usleep_range(1000, 1000); + usleep_range(1000, 2000);
REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, MISC_REGISTERS_RESET_REG_2_XMAC); @@@ -1758,7 -1672,7 +1759,7 @@@ /* Soft reset */ REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, MISC_REGISTERS_RESET_REG_2_XMAC_SOFT); - usleep_range(1000, 1000); + usleep_range(1000, 2000);
REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, MISC_REGISTERS_RESET_REG_2_XMAC_SOFT); @@@ -1816,14 -1730,6 +1817,14 @@@ static int bnx2x_xmac_enable(struct lin /* update PFC */ bnx2x_update_pfc_xmac(params, vars, 0);
+ if (vars->eee_status & SHMEM_EEE_ADV_STATUS_MASK) { + DP(NETIF_MSG_LINK, "Setting XMAC for EEE\n"); + REG_WR(bp, xmac_base + XMAC_REG_EEE_TIMERS_HI, 0x1380008); + REG_WR(bp, xmac_base + XMAC_REG_EEE_CTRL, 0x1); + } else { + REG_WR(bp, xmac_base + XMAC_REG_EEE_CTRL, 0x0); + } + /* Enable TX and RX */ val = XMAC_CTRL_REG_TX_EN | XMAC_CTRL_REG_RX_EN;
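The block above is the MAC-side half of the EEE (IEEE 802.3az) support added by this patch: the XMAC low-power-idle timers and the LPI enable bit are programmed only once eee_status shows that EEE was actually advertised. The guard pattern, with comments:

    if (vars->eee_status & SHMEM_EEE_ADV_STATUS_MASK) {
            /* EEE negotiated: set LPI entry/exit timers, then enable LPI */
            REG_WR(bp, xmac_base + XMAC_REG_EEE_TIMERS_HI, 0x1380008);
            REG_WR(bp, xmac_base + XMAC_REG_EEE_CTRL, 0x1);
    } else {
            REG_WR(bp, xmac_base + XMAC_REG_EEE_CTRL, 0x0);   /* keep LPI off */
    }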
@@@ -1879,6 -1785,11 +1880,6 @@@ static int bnx2x_emac_enable(struct lin bnx2x_bits_en(bp, emac_base + EMAC_REG_EMAC_TX_MODE, EMAC_TX_MODE_RESET);
- if (CHIP_REV_IS_SLOW(bp)) { - /* config GMII mode */ - val = REG_RD(bp, emac_base + EMAC_REG_EMAC_MODE); - EMAC_WR(bp, EMAC_REG_EMAC_MODE, (val | EMAC_MODE_PORT_GMII)); - } else { /* ASIC */ /* pause enable/disable */ bnx2x_bits_dis(bp, emac_base + EMAC_REG_EMAC_RX_MODE, EMAC_RX_MODE_FLOW_EN); @@@ -1901,6 -1812,7 +1902,6 @@@ } else bnx2x_bits_en(bp, emac_base + EMAC_REG_EMAC_TX_MODE, EMAC_TX_MODE_FLOW_EN); - }
/* KEEP_VLAN_TAG, promiscuous */ val = REG_RD(bp, emac_base + EMAC_REG_EMAC_RX_MODE); @@@ -1939,23 -1851,23 +1940,23 @@@ val &= ~0x810; EMAC_WR(bp, EMAC_REG_EMAC_MODE, val);
- /* enable emac */ + /* Enable emac */ REG_WR(bp, NIG_REG_NIG_EMAC0_EN + port*4, 1);
- /* enable emac for jumbo packets */ + /* Enable emac for jumbo packets */ EMAC_WR(bp, EMAC_REG_EMAC_RX_MTU_SIZE, (EMAC_RX_MTU_SIZE_JUMBO_ENA | (ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD)));
- /* strip CRC */ + /* Strip CRC */ REG_WR(bp, NIG_REG_NIG_INGRESS_EMAC0_NO_CRC + port*4, 0x1);
- /* disable the NIG in/out to the bmac */ + /* Disable the NIG in/out to the bmac */ REG_WR(bp, NIG_REG_BMAC0_IN_EN + port*4, 0x0); REG_WR(bp, NIG_REG_BMAC0_PAUSE_OUT_EN + port*4, 0x0); REG_WR(bp, NIG_REG_BMAC0_OUT_EN + port*4, 0x0);
- /* enable the NIG in/out to the emac */ + /* Enable the NIG in/out to the emac */ REG_WR(bp, NIG_REG_EMAC0_IN_EN + port*4, 0x1); val = 0; if ((params->feature_config_flags & @@@ -1990,7 -1902,7 +1991,7 @@@ static void bnx2x_update_pfc_bmac1(stru wb_data[1] = 0; REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_RX_CONTROL, wb_data, 2);
- /* tx control */ + /* TX control */ val = 0xc0; if (!(params->feature_config_flags & FEATURE_CONFIG_PFC_ENABLED) && @@@ -2050,7 -1962,7 +2051,7 @@@ static void bnx2x_update_pfc_bmac2(stru wb_data[0] &= ~(1<<2); } else { DP(NETIF_MSG_LINK, "PFC is disabled\n"); - /* disable PFC RX & TX & STATS and set 8 COS */ + /* Disable PFC RX & TX & STATS and set 8 COS */ wb_data[0] = 0x8; wb_data[1] = 0; } @@@ -2144,7 -2056,7 +2145,7 @@@ static int bnx2x_pfc_brb_get_config_par PFC_E2_BRB_MAC_FULL_XOFF_THR_PAUSE; config_val->pauseable_th.full_xon = PFC_E2_BRB_MAC_FULL_XON_THR_PAUSE; - /* non pause able*/ + /* Non pause able*/ config_val->non_pauseable_th.pause_xoff = PFC_E2_BRB_MAC_PAUSE_XOFF_THR_NON_PAUSE; config_val->non_pauseable_th.pause_xon = @@@ -2172,7 -2084,7 +2173,7 @@@ PFC_E3A0_BRB_MAC_FULL_XOFF_THR_PAUSE; config_val->pauseable_th.full_xon = PFC_E3A0_BRB_MAC_FULL_XON_THR_PAUSE; - /* non pause able*/ + /* Non pause able*/ config_val->non_pauseable_th.pause_xoff = PFC_E3A0_BRB_MAC_PAUSE_XOFF_THR_NON_PAUSE; config_val->non_pauseable_th.pause_xon = @@@ -2202,7 -2114,7 +2203,7 @@@ PFC_E3B0_4P_BRB_MAC_FULL_XOFF_THR_PAUSE; config_val->pauseable_th.full_xon = PFC_E3B0_4P_BRB_MAC_FULL_XON_THR_PAUSE; - /* non pause able*/ + /* Non pause able*/ config_val->non_pauseable_th.pause_xoff = PFC_E3B0_4P_BRB_MAC_PAUSE_XOFF_THR_NON_PAUSE; config_val->non_pauseable_th.pause_xon = @@@ -2220,7 -2132,7 +2221,7 @@@ PFC_E3B0_2P_BRB_MAC_FULL_XOFF_THR_PAUSE; config_val->pauseable_th.full_xon = PFC_E3B0_2P_BRB_MAC_FULL_XON_THR_PAUSE; - /* non pause able*/ + /* Non pause able*/ config_val->non_pauseable_th.pause_xoff = PFC_E3B0_2P_BRB_MAC_PAUSE_XOFF_THR_NON_PAUSE; config_val->non_pauseable_th.pause_xon = @@@ -2277,7 -2189,7 +2278,7 @@@ static void bnx2x_pfc_brb_get_e3b0_conf
if (pfc_params->cos0_pauseable != pfc_params->cos1_pauseable) { - /* nonpauseable= Lossy + pauseable = Lossless*/ + /* Nonpauseable= Lossy + pauseable = Lossless*/ e3b0_val->lb_guarantied = PFC_E3B0_2P_MIX_PAUSE_LB_GUART; e3b0_val->mac_0_class_t_guarantied = @@@ -2476,9 -2388,9 +2477,9 @@@ static int bnx2x_update_pfc_brb(struct * This function is needed because NIG ARB_CREDIT_WEIGHT_X are * not continues and ARB_CREDIT_WEIGHT_0 + offset is suitable. ******************************************************************************/ -int bnx2x_pfc_nig_rx_priority_mask(struct bnx2x *bp, - u8 cos_entry, - u32 priority_mask, u8 port) +static int bnx2x_pfc_nig_rx_priority_mask(struct bnx2x *bp, + u8 cos_entry, + u32 priority_mask, u8 port) { u32 nig_reg_rx_priority_mask_add = 0;
@@@ -2528,16 -2440,6 +2529,16 @@@ static void bnx2x_update_mng(struct lin port_mb[params->port].link_status), link_status); }
+static void bnx2x_update_mng_eee(struct link_params *params, u32 eee_status) +{ + struct bnx2x *bp = params->bp; + + if (bnx2x_eee_has_cap(params)) + REG_WR(bp, params->shmem2_base + + offsetof(struct shmem2_region, + eee_status[params->port]), eee_status); +} + static void bnx2x_update_pfc_nig(struct link_params *params, struct link_vars *vars, struct bnx2x_nig_brb_pfc_port_params *nig_params) @@@ -2605,7 -2507,7 +2606,7 @@@ REG_WR(bp, port ? NIG_REG_LLFC_EGRESS_SRC_ENABLE_1 : NIG_REG_LLFC_EGRESS_SRC_ENABLE_0, 0x7);
- /* output enable for RX_XCM # IF */ + /* Output enable for RX_XCM # IF */ REG_WR(bp, port ? NIG_REG_XCM1_OUT_EN : NIG_REG_XCM0_OUT_EN, xcm_out_en);
@@@ -2654,10 -2556,10 +2655,10 @@@ int bnx2x_update_pfc(struct link_param
bnx2x_update_mng(params, vars->link_status);
- /* update NIG params */ + /* Update NIG params */ bnx2x_update_pfc_nig(params, vars, pfc_params);
- /* update BRB params */ + /* Update BRB params */ bnx2x_status = bnx2x_update_pfc_brb(params, vars, pfc_params); if (bnx2x_status) return bnx2x_status; @@@ -2712,7 -2614,7 +2713,7 @@@ static int bnx2x_bmac1_enable(struct li REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_BMAC_XGXS_CONTROL, wb_data, 2);
- /* tx MAC SA */ + /* TX MAC SA */ wb_data[0] = ((params->mac_addr[2] << 24) | (params->mac_addr[3] << 16) | (params->mac_addr[4] << 8) | @@@ -2721,7 -2623,7 +2722,7 @@@ params->mac_addr[1]); REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_TX_SOURCE_ADDR, wb_data, 2);
- /* mac control */ + /* MAC control */ val = 0x3; if (is_lb) { val |= 0x4; @@@ -2731,24 -2633,24 +2732,24 @@@ wb_data[1] = 0; REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_BMAC_CONTROL, wb_data, 2);
- /* set rx mtu */ + /* Set rx mtu */ wb_data[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD; wb_data[1] = 0; REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_RX_MAX_SIZE, wb_data, 2);
bnx2x_update_pfc_bmac1(params, vars);
- /* set tx mtu */ + /* Set tx mtu */ wb_data[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD; wb_data[1] = 0; REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_TX_MAX_SIZE, wb_data, 2);
- /* set cnt max size */ + /* Set cnt max size */ wb_data[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD; wb_data[1] = 0; REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_CNT_MAX_SIZE, wb_data, 2);
- /* configure safc */ + /* Configure SAFC */ wb_data[0] = 0x1000200; wb_data[1] = 0; REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_RX_LLFC_MSG_FLDS, @@@ -2782,7 -2684,7 +2783,7 @@@ static int bnx2x_bmac2_enable(struct li
udelay(30);
- /* tx MAC SA */ + /* TX MAC SA */ wb_data[0] = ((params->mac_addr[2] << 24) | (params->mac_addr[3] << 16) | (params->mac_addr[4] << 8) | @@@ -2801,18 -2703,18 +2802,18 @@@ wb_data, 2); udelay(30);
- /* set rx mtu */ + /* Set RX MTU */ wb_data[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD; wb_data[1] = 0; REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_RX_MAX_SIZE, wb_data, 2); udelay(30);
- /* set tx mtu */ + /* Set TX MTU */ wb_data[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD; wb_data[1] = 0; REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_TX_MAX_SIZE, wb_data, 2); udelay(30); - /* set cnt max size */ + /* Set cnt max size */ wb_data[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD - 2; wb_data[1] = 0; REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_CNT_MAX_SIZE, wb_data, 2); @@@ -2830,15 -2732,15 +2831,15 @@@ static int bnx2x_bmac_enable(struct lin u8 port = params->port; struct bnx2x *bp = params->bp; u32 val; - /* reset and unreset the BigMac */ + /* Reset and unreset the BigMac */ REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port)); - msleep(1); + usleep_range(1000, 2000);
REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
- /* enable access for bmac registers */ + /* Enable access for bmac registers */ REG_WR(bp, NIG_REG_BMAC0_REGS_OUT_EN + port*4, 0x1);
/* Enable BMAC according to BMAC type*/ @@@ -2896,7 -2798,7 +2897,7 @@@ static void bnx2x_bmac_rx_disable(struc BIGMAC_REGISTER_BMAC_CONTROL, wb_data, 2); } - msleep(1); + usleep_range(1000, 2000); } }
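A recurring cleanup in this file is replacing msleep(1) and the degenerate usleep_range(1000, 1000) with usleep_range(1000, 2000). msleep() is jiffy-based, so a 1 ms request can oversleep by an order of magnitude on low-HZ configurations, while usleep_range() uses hrtimers, and a non-zero range lets the kernel coalesce the wakeup with other timers (see Documentation/timers/timers-howto.txt). The pattern:

    /* before: may sleep much longer than 1 ms (jiffy granularity) */
    msleep(1);

    /* after: hrtimer-backed sleep with an explicit 1-2 ms window */
    usleep_range(1000, 2000);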
@@@ -2908,16 -2810,17 +2909,16 @@@ static int bnx2x_pbf_update(struct link u32 init_crd, crd; u32 count = 1000;
- /* disable port */ + /* Disable port */ REG_WR(bp, PBF_REG_DISABLE_NEW_TASK_PROC_P0 + port*4, 0x1);
- /* wait for init credit */ + /* Wait for init credit */ init_crd = REG_RD(bp, PBF_REG_P0_INIT_CRD + port*4); crd = REG_RD(bp, PBF_REG_P0_CREDIT + port*8); DP(NETIF_MSG_LINK, "init_crd 0x%x crd 0x%x\n", init_crd, crd);
while ((init_crd != crd) && count) { - msleep(5); - + usleep_range(5000, 10000); crd = REG_RD(bp, PBF_REG_P0_CREDIT + port*8); count--; } @@@ -2934,18 -2837,18 +2935,18 @@@ line_speed == SPEED_1000 || line_speed == SPEED_2500) { REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 1); - /* update threshold */ + /* Update threshold */ REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, 0); - /* update init credit */ + /* Update init credit */ init_crd = 778; /* (800-18-4) */
} else { u32 thresh = (ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD)/16; REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0); - /* update threshold */ + /* Update threshold */ REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, thresh); - /* update init credit */ + /* Update init credit */ switch (line_speed) { case SPEED_10000: init_crd = thresh + 553 - 22; @@@ -2960,12 -2863,12 +2961,12 @@@ DP(NETIF_MSG_LINK, "PBF updated to speed %d credit %d\n", line_speed, init_crd);
- /* probe the credit changes */ + /* Probe the credit changes */ REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0x1); - msleep(5); + usleep_range(5000, 10000); REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0x0);
- /* enable port */ + /* Enable port */ REG_WR(bp, PBF_REG_DISABLE_NEW_TASK_PROC_P0 + port*4, 0x0); return 0; } @@@ -3032,7 -2935,7 +3033,7 @@@ static int bnx2x_cl22_write(struct bnx2 REG_WR(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE, mode & ~EMAC_MDIO_MODE_CLAUSE_45);
- /* address */ + /* Address */ tmp = ((phy->addr << 21) | (reg << 16) | val | EMAC_MDIO_COMM_COMMAND_WRITE_22 | EMAC_MDIO_COMM_START_BUSY); @@@ -3068,7 -2971,7 +3069,7 @@@ static int bnx2x_cl22_read(struct bnx2 REG_WR(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE, mode & ~EMAC_MDIO_MODE_CLAUSE_45);
- /* address */ + /* Address */ val = ((phy->addr << 21) | (reg << 16) | EMAC_MDIO_COMM_COMMAND_READ_22 | EMAC_MDIO_COMM_START_BUSY); @@@ -3106,7 -3009,7 +3107,7 @@@ static int bnx2x_cl45_read(struct bnx2 if (phy->flags & FLAGS_MDC_MDIO_WA_B0) bnx2x_bits_en(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_STATUS, EMAC_MDIO_STATUS_10MB); - /* address */ + /* Address */ val = ((phy->addr << 21) | (devad << 16) | reg | EMAC_MDIO_COMM_COMMAND_ADDRESS | EMAC_MDIO_COMM_START_BUSY); @@@ -3127,7 -3030,7 +3128,7 @@@ *ret_val = 0; rc = -EFAULT; } else { - /* data */ + /* Data */ val = ((phy->addr << 21) | (devad << 16) | EMAC_MDIO_COMM_COMMAND_READ_45 | EMAC_MDIO_COMM_START_BUSY); @@@ -3175,7 -3078,7 +3176,7 @@@ static int bnx2x_cl45_write(struct bnx2 bnx2x_bits_en(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_STATUS, EMAC_MDIO_STATUS_10MB);
- /* address */ + /* Address */ tmp = ((phy->addr << 21) | (devad << 16) | reg | EMAC_MDIO_COMM_COMMAND_ADDRESS | EMAC_MDIO_COMM_START_BUSY); @@@ -3195,7 -3098,7 +3196,7 @@@ netdev_err(bp->dev, "MDC/MDIO access timeout\n"); rc = -EFAULT; } else { - /* data */ + /* Data */ tmp = ((phy->addr << 21) | (devad << 16) | val | EMAC_MDIO_COMM_COMMAND_WRITE_45 | EMAC_MDIO_COMM_START_BUSY); @@@ -3285,23 -3188,23 +3286,23 @@@ static int bnx2x_bsc_read(struct link_p
xfer_cnt = 16 - lc_addr;
- /* enable the engine */ + /* Enable the engine */ val = REG_RD(bp, MCP_REG_MCPR_IMC_COMMAND); val |= MCPR_IMC_COMMAND_ENABLE; REG_WR(bp, MCP_REG_MCPR_IMC_COMMAND, val);
- /* program slave device ID */ + /* Program slave device ID */ val = (sl_devid << 16) | sl_addr; REG_WR(bp, MCP_REG_MCPR_IMC_SLAVE_CONTROL, val);
- /* start xfer with 0 byte to update the address pointer ???*/ + /* Start xfer with 0 byte to update the address pointer ???*/ val = (MCPR_IMC_COMMAND_ENABLE) | (MCPR_IMC_COMMAND_WRITE_OP << MCPR_IMC_COMMAND_OPERATION_BITSHIFT) | (lc_addr << MCPR_IMC_COMMAND_TRANSFER_ADDRESS_BITSHIFT) | (0); REG_WR(bp, MCP_REG_MCPR_IMC_COMMAND, val);
- /* poll for completion */ + /* Poll for completion */ i = 0; val = REG_RD(bp, MCP_REG_MCPR_IMC_COMMAND); while (((val >> MCPR_IMC_COMMAND_IMC_STATUS_BITSHIFT) & 0x3) != 1) { @@@ -3317,7 -3220,7 +3318,7 @@@ if (rc == -EFAULT) return rc;
- /* start xfer with read op */ + /* Start xfer with read op */ val = (MCPR_IMC_COMMAND_ENABLE) | (MCPR_IMC_COMMAND_READ_OP << MCPR_IMC_COMMAND_OPERATION_BITSHIFT) | @@@ -3325,7 -3228,7 +3326,7 @@@ (xfer_cnt); REG_WR(bp, MCP_REG_MCPR_IMC_COMMAND, val);
- /* poll for completion */ + /* Poll for completion */ i = 0; val = REG_RD(bp, MCP_REG_MCPR_IMC_COMMAND); while (((val >> MCPR_IMC_COMMAND_IMC_STATUS_BITSHIFT) & 0x3) != 1) { @@@ -3428,7 -3331,7 +3429,7 @@@ static u8 bnx2x_get_warpcore_lane(struc port = port ^ 1;
lane = (port<<1) + path; - } else { /* two port mode - no port swap */ + } else { /* Two port mode - no port swap */
/* Figure out path swap value */ path_swap_ovr = @@@ -3506,7 -3409,7 +3507,7 @@@ static void bnx2x_serdes_deassert(struc
val = SERDES_RESET_BITS << (port*16);
- /* reset and unreset the SerDes/XGXS */ + /* Reset and unreset the SerDes/XGXS */ REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_3_CLEAR, val); udelay(500); REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_3_SET, val); @@@ -3527,7 -3430,7 +3528,7 @@@ static void bnx2x_xgxs_deassert(struct
val = XGXS_RESET_BITS << (port*16);
- /* reset and unreset the SerDes/XGXS */ + /* Reset and unreset the SerDes/XGXS */ REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_3_CLEAR, val); udelay(500); REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_3_SET, val); @@@ -3619,7 -3522,7 +3620,7 @@@ static void bnx2x_ext_phy_set_pause(str { u16 val; struct bnx2x *bp = params->bp; - /* read modify write pause advertizing */ + /* Read modify write pause advertizing */ bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_ADV_PAUSE, &val);
val &= ~MDIO_AN_REG_ADV_PAUSE_BOTH; @@@ -3754,35 -3657,44 +3755,35 @@@ static u8 bnx2x_ext_phy_resolve_fc(stru static void bnx2x_warpcore_enable_AN_KR(struct bnx2x_phy *phy, struct link_params *params, struct link_vars *vars) { - u16 val16 = 0, lane, bam37 = 0; - struct bnx2x *bp = params->bp; + u16 val16 = 0, lane, i; + struct bnx2x *bp = params->bp; + static struct bnx2x_reg_set reg_set[] = { + {MDIO_WC_DEVAD, MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2, 0x7}, + {MDIO_AN_DEVAD, MDIO_WC_REG_PAR_DET_10G_CTRL, 0}, + {MDIO_WC_DEVAD, MDIO_WC_REG_CL72_USERB0_CL72_MISC1_CONTROL, 0}, + {MDIO_WC_DEVAD, MDIO_WC_REG_XGXSBLK1_LANECTRL0, 0xff}, + {MDIO_WC_DEVAD, MDIO_WC_REG_XGXSBLK1_LANECTRL1, 0x5555}, + {MDIO_PMA_DEVAD, MDIO_WC_REG_IEEE0BLK_AUTONEGNP, 0x0}, + {MDIO_WC_DEVAD, MDIO_WC_REG_RX66_CONTROL, 0x7415}, + {MDIO_WC_DEVAD, MDIO_WC_REG_SERDESDIGITAL_MISC2, 0x6190}, + /* Disable Autoneg: re-enable it after adv is done. */ + {MDIO_AN_DEVAD, MDIO_WC_REG_IEEE0BLK_MIICNTL, 0} + }; DP(NETIF_MSG_LINK, "Enable Auto Negotiation for KR\n"); /* Set to default registers that may be overriden by 10G force */ - bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, - MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2, 0x7); - bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, - MDIO_WC_REG_PAR_DET_10G_CTRL, 0); - bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, - MDIO_WC_REG_CL72_USERB0_CL72_MISC1_CONTROL, 0); - bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, - MDIO_WC_REG_XGXSBLK1_LANECTRL0, 0xff); - bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, - MDIO_WC_REG_XGXSBLK1_LANECTRL1, 0x5555); - bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, - MDIO_WC_REG_IEEE0BLK_AUTONEGNP, 0x0); - bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, - MDIO_WC_REG_RX66_CONTROL, 0x7415); - bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, - MDIO_WC_REG_SERDESDIGITAL_MISC2, 0x6190); - /* Disable Autoneg: re-enable it after adv is done. */ - bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, - MDIO_WC_REG_IEEE0BLK_MIICNTL, 0); + for (i = 0; i < sizeof(reg_set)/sizeof(struct bnx2x_reg_set); i++) + bnx2x_cl45_write(bp, phy, reg_set[i].devad, reg_set[i].reg, + reg_set[i].val);
/* Check adding advertisement for 1G KX */ if (((vars->line_speed == SPEED_AUTO_NEG) && (phy->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)) || (vars->line_speed == SPEED_1000)) { - u16 sd_digital; + u32 addr = MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2; val16 |= (1<<5);
/* Enable CL37 1G Parallel Detect */ - bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, - MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2, &sd_digital); - bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, - MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2, - (sd_digital | 0x1)); - + bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD, addr, 0x1); DP(NETIF_MSG_LINK, "Advertize 1G\n"); } if (((vars->line_speed == SPEED_AUTO_NEG) && @@@ -3792,7 -3704,7 +3793,7 @@@ val16 |= (1<<7); /* Enable 10G Parallel Detect */ bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, - MDIO_WC_REG_PAR_DET_10G_CTRL, 1); + MDIO_WC_REG_PAR_DET_10G_CTRL, 1);
DP(NETIF_MSG_LINK, "Advertize 10G\n"); } @@@ -3826,9 -3738,10 +3827,9 @@@ offsetof(struct shmem_region, dev_info. port_hw_config[params->port].default_cfg)) & PORT_HW_CFG_ENABLE_BAM_ON_KR_ENABLED) { - bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, - MDIO_WC_REG_DIGITAL6_MP5_NEXTPAGECTRL, &bam37); - bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, - MDIO_WC_REG_DIGITAL6_MP5_NEXTPAGECTRL, bam37 | 1); + bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_DIGITAL6_MP5_NEXTPAGECTRL, + 1); DP(NETIF_MSG_LINK, "Enable CL37 BAM on KR\n"); }
@@@ -3842,8 -3755,11 +3843,8 @@@ DP(NETIF_MSG_LINK, "Enable AN KR work-around\n"); vars->rx_tx_asic_rst = MAX_KR_LINK_RETRY; } - bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, - MDIO_WC_REG_DIGITAL5_MISC7, &val16); - - bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, - MDIO_WC_REG_DIGITAL5_MISC7, val16 | 0x100); + bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_DIGITAL5_MISC7, 0x100);
/* Over 1G - AN local device user page 1 */ bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, @@@ -3860,35 -3776,50 +3861,35 @@@ static void bnx2x_warpcore_set_10G_KR(s struct link_vars *vars) { struct bnx2x *bp = params->bp; - u16 val; - - /* Disable Autoneg */ - bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, - MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2, 0x7); - - bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, - MDIO_WC_REG_PAR_DET_10G_CTRL, 0); - - bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, - MDIO_WC_REG_CL72_USERB0_CL72_MISC1_CONTROL, 0x3f00); - - bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, - MDIO_WC_REG_AN_IEEE1BLK_AN_ADVERTISEMENT1, 0); - - bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, - MDIO_WC_REG_IEEE0BLK_MIICNTL, 0x0); - - bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, - MDIO_WC_REG_DIGITAL3_UP1, 0x1); - - bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, - MDIO_WC_REG_DIGITAL5_MISC7, 0xa); - - /* Disable CL36 PCS Tx */ - bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, - MDIO_WC_REG_XGXSBLK1_LANECTRL0, 0x0); - - /* Double Wide Single Data Rate @ pll rate */ - bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, - MDIO_WC_REG_XGXSBLK1_LANECTRL1, 0xFFFF); - - /* Leave cl72 training enable, needed for KR */ - bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, + u16 i; + static struct bnx2x_reg_set reg_set[] = { + /* Disable Autoneg */ + {MDIO_WC_DEVAD, MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2, 0x7}, + {MDIO_AN_DEVAD, MDIO_WC_REG_PAR_DET_10G_CTRL, 0}, + {MDIO_WC_DEVAD, MDIO_WC_REG_CL72_USERB0_CL72_MISC1_CONTROL, + 0x3f00}, + {MDIO_AN_DEVAD, MDIO_WC_REG_AN_IEEE1BLK_AN_ADVERTISEMENT1, 0}, + {MDIO_AN_DEVAD, MDIO_WC_REG_IEEE0BLK_MIICNTL, 0x0}, + {MDIO_WC_DEVAD, MDIO_WC_REG_DIGITAL3_UP1, 0x1}, + {MDIO_WC_DEVAD, MDIO_WC_REG_DIGITAL5_MISC7, 0xa}, + /* Disable CL36 PCS Tx */ + {MDIO_WC_DEVAD, MDIO_WC_REG_XGXSBLK1_LANECTRL0, 0x0}, + /* Double Wide Single Data Rate @ pll rate */ + {MDIO_WC_DEVAD, MDIO_WC_REG_XGXSBLK1_LANECTRL1, 0xFFFF}, + /* Leave cl72 training enable, needed for KR */ + {MDIO_PMA_DEVAD, MDIO_WC_REG_PMD_IEEE9BLK_TENGBASE_KR_PMD_CONTROL_REGISTER_150, - 0x2); + 0x2} + }; + + for (i = 0; i < sizeof(reg_set)/sizeof(struct bnx2x_reg_set); i++) + bnx2x_cl45_write(bp, phy, reg_set[i].devad, reg_set[i].reg, + reg_set[i].val);
/* Leave CL72 enabled */ - bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, - MDIO_WC_REG_CL72_USERB0_CL72_MISC1_CONTROL, - &val); - bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, - MDIO_WC_REG_CL72_USERB0_CL72_MISC1_CONTROL, - val | 0x3800); + bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_CL72_USERB0_CL72_MISC1_CONTROL, + 0x3800);
/* Set speed via PMA/PMD register */ bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, @@@ -3909,7 -3840,7 +3910,7 @@@ bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD, MDIO_WC_REG_RX66_CONTROL, 0xF9);
- /* set and clear loopback to cause a reset to 64/66 decoder */ + /* Set and clear loopback to cause a reset to 64/66 decoder */ bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, MDIO_WC_REG_IEEE0BLK_MIICNTL, 0x4000); bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, @@@ -3924,12 -3855,16 +3925,12 @@@ static void bnx2x_warpcore_set_10G_XFI( struct bnx2x *bp = params->bp; u16 misc1_val, tap_val, tx_driver_val, lane, val; /* Hold rxSeqStart */ - bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, - MDIO_WC_REG_DSC2B0_DSC_MISC_CTRL0, &val); - bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, - MDIO_WC_REG_DSC2B0_DSC_MISC_CTRL0, (val | 0x8000)); + bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_DSC2B0_DSC_MISC_CTRL0, 0x8000);
/* Hold tx_fifo_reset */ - bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, - MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X3, &val); - bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, - MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X3, (val | 0x1)); + bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X3, 0x1);
/* Disable CL73 AN */ bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_CTRL, 0); @@@ -3941,8 -3876,10 +3942,8 @@@ MDIO_WC_REG_FX100_CTRL1, (val & 0xFFFA));
/* Disable 100FX Idle detect */ - bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, - MDIO_WC_REG_FX100_CTRL3, &val); - bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, - MDIO_WC_REG_FX100_CTRL3, (val | 0x0080)); + bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_FX100_CTRL3, 0x0080);
/* Set Block address to Remote PHY & Clear forced_speed[5] */ bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, @@@ -4003,20 -3940,16 +4004,20 @@@ tx_driver_val);
/* Enable fiber mode, enable and invert sig_det */ - bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, - MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X1, &val); - bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, - MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X1, val | 0xd); + bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X1, 0xd);
/* Set Block address to Remote PHY & Set forced_speed[5], 40bit mode */ - bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, - MDIO_WC_REG_DIGITAL4_MISC3, &val); + bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_DIGITAL4_MISC3, 0x8080); + + /* Enable LPI pass through */ + DP(NETIF_MSG_LINK, "Configure WC for LPI pass through\n"); bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, - MDIO_WC_REG_DIGITAL4_MISC3, val | 0x8080); + MDIO_WC_REG_EEE_COMBO_CONTROL0, + 0x7c); + bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_DIGITAL4_MISC5, 0xc000);
/* 10G XFI Full Duplex */ bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, @@@ -4206,35 -4139,40 +4207,35 @@@ static void bnx2x_warpcore_clear_regs(s u16 lane) { struct bnx2x *bp = params->bp; - u16 val16; - + u16 i; + static struct bnx2x_reg_set wc_regs[] = { + {MDIO_AN_DEVAD, MDIO_AN_REG_CTRL, 0}, + {MDIO_WC_DEVAD, MDIO_WC_REG_FX100_CTRL1, 0x014a}, + {MDIO_WC_DEVAD, MDIO_WC_REG_FX100_CTRL3, 0x0800}, + {MDIO_WC_DEVAD, MDIO_WC_REG_DIGITAL4_MISC3, 0x8008}, + {MDIO_WC_DEVAD, MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X1, + 0x0195}, + {MDIO_WC_DEVAD, MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2, + 0x0007}, + {MDIO_WC_DEVAD, MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X3, + 0x0002}, + {MDIO_WC_DEVAD, MDIO_WC_REG_SERDESDIGITAL_MISC1, 0x6000}, + {MDIO_WC_DEVAD, MDIO_WC_REG_TX_FIR_TAP, 0x0000}, + {MDIO_WC_DEVAD, MDIO_WC_REG_IEEE0BLK_MIICNTL, 0x2040}, + {MDIO_WC_DEVAD, MDIO_WC_REG_COMBO_IEEE0_MIICTRL, 0x0140} + }; /* Set XFI clock comp as default. */ - bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, - MDIO_WC_REG_RX66_CONTROL, &val16); - bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, - MDIO_WC_REG_RX66_CONTROL, val16 | (3<<13)); + bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_RX66_CONTROL, (3<<13)); + + for (i = 0; i < sizeof(wc_regs)/sizeof(struct bnx2x_reg_set); i++) + bnx2x_cl45_write(bp, phy, wc_regs[i].devad, wc_regs[i].reg, + wc_regs[i].val);
- bnx2x_warpcore_reset_lane(bp, phy, 1); - bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_CTRL, 0); - bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, - MDIO_WC_REG_FX100_CTRL1, 0x014a); - bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, - MDIO_WC_REG_FX100_CTRL3, 0x0800); - bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, - MDIO_WC_REG_DIGITAL4_MISC3, 0x8008); - bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, - MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X1, 0x0195); - bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, - MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2, 0x0007); - bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, - MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X3, 0x0002); - bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, - MDIO_WC_REG_SERDESDIGITAL_MISC1, 0x6000); lane = bnx2x_get_warpcore_lane(phy, params); bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, - MDIO_WC_REG_TX_FIR_TAP, 0x0000); - bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, MDIO_WC_REG_TX0_TX_DRIVER + 0x10*lane, 0x0990); - bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, - MDIO_WC_REG_IEEE0BLK_MIICNTL, 0x2040); - bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, - MDIO_WC_REG_COMBO_IEEE0_MIICTRL, 0x0140); - bnx2x_warpcore_reset_lane(bp, phy, 0); + }
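Several warpcore init routines in this patch (bnx2x_warpcore_enable_AN_KR, bnx2x_warpcore_set_10G_KR and bnx2x_warpcore_clear_regs above) are converted from long runs of individual MDIO writes to a static table of {devad, reg, val} entries written in a loop, with a bnx2x_cl45_read_or_write() helper covering the read-modify-OR cases. The shape of the pattern as a sketch (struct layout assumed from its use here; wc_write_table() is a hypothetical wrapper, the patch open-codes the loop in each function):

    struct bnx2x_reg_set {
            u8  devad;
            u16 reg;
            u16 val;
    };

    static void wc_write_table(struct bnx2x *bp, struct bnx2x_phy *phy,
                               const struct bnx2x_reg_set *set, int len)
    {
            int i;

            for (i = 0; i < len; i++)       /* one CL45 MDIO write per entry */
                    bnx2x_cl45_write(bp, phy, set[i].devad, set[i].reg,
                                     set[i].val);
    }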
static int bnx2x_get_mod_abs_int_cfg(struct bnx2x *bp, @@@ -4322,7 -4260,7 +4323,7 @@@ static void bnx2x_warpcore_config_runti if (!vars->turn_to_run_wc_rt) return;
- /* return if there is no link partner */ + /* Return if there is no link partner */ if (!(bnx2x_warpcore_get_sigdet(phy, params))) { DP(NETIF_MSG_LINK, "bnx2x_warpcore_get_sigdet false\n"); return; @@@ -4356,7 -4294,7 +4357,7 @@@ bnx2x_warpcore_reset_lane(bp, phy, 1); bnx2x_warpcore_reset_lane(bp, phy, 0);
- /* restart Autoneg */ + /* Restart Autoneg */ bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, MDIO_WC_REG_IEEE0BLK_MIICNTL, 0x1200);
@@@ -4373,23 -4311,6 +4374,23 @@@ } /*params->rx_tx_asic_rst*/
} +static void bnx2x_warpcore_config_sfi(struct bnx2x_phy *phy, + struct link_params *params) +{ + u16 lane = bnx2x_get_warpcore_lane(phy, params); + struct bnx2x *bp = params->bp; + bnx2x_warpcore_clear_regs(phy, params, lane); + if ((params->req_line_speed[LINK_CONFIG_IDX(INT_PHY)] == + SPEED_10000) && + (phy->media_type != ETH_PHY_SFP_1G_FIBER)) { + DP(NETIF_MSG_LINK, "Setting 10G SFI\n"); + bnx2x_warpcore_set_10G_XFI(phy, params, 0); + } else { + DP(NETIF_MSG_LINK, "Setting 1G Fiber\n"); + bnx2x_warpcore_set_sgmii_speed(phy, params, 1, 0); + } +} + static void bnx2x_warpcore_config_init(struct bnx2x_phy *phy, struct link_params *params, struct link_vars *vars) @@@ -4450,11 -4371,19 +4451,11 @@@ break;
case PORT_HW_CFG_NET_SERDES_IF_SFI: /* Issue Module detection */ if (bnx2x_is_sfp_module_plugged(phy, params)) bnx2x_sfp_module_detection(phy, params); + + bnx2x_warpcore_config_sfi(phy, params); break;
case PORT_HW_CFG_NET_SERDES_IF_DXGXS: @@@ -4571,9 -4500,12 +4572,9 @@@ static void bnx2x_set_warpcore_loopback CL22_WR_OVER_CL45(bp, phy, MDIO_REG_BANK_AER_BLOCK, MDIO_AER_BLOCK_AER_REG, 0); /* Enable 1G MDIO (1-copy) */ - bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, - MDIO_WC_REG_XGXSBLK0_XGXSCONTROL, - &val16); - bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, - MDIO_WC_REG_XGXSBLK0_XGXSCONTROL, - val16 | 0x10); + bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_XGXSBLK0_XGXSCONTROL, + 0x10); /* Set 1G loopback based on lane (1-copy) */ lane = bnx2x_get_warpcore_lane(phy, params); bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, @@@ -4586,19 -4518,22 +4587,19 @@@ bnx2x_set_aer_mmd(params, phy); } else { /* 10G & 20G */ - bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, - MDIO_WC_REG_COMBO_IEEE0_MIICTRL, &val16); - bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, - MDIO_WC_REG_COMBO_IEEE0_MIICTRL, val16 | - 0x4000); + bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_COMBO_IEEE0_MIICTRL, + 0x4000);
- bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, - MDIO_WC_REG_IEEE0BLK_MIICNTL, &val16); - bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, - MDIO_WC_REG_IEEE0BLK_MIICNTL, val16 | 0x1); + bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_IEEE0BLK_MIICNTL, 0x1); } }
-void bnx2x_sync_link(struct link_params *params, - struct link_vars *vars) + +static void bnx2x_sync_link(struct link_params *params, + struct link_vars *vars) { struct bnx2x *bp = params->bp; u8 link_10g_plus; @@@ -4671,7 -4606,7 +4672,7 @@@ USES_WARPCORE(bp) && (vars->line_speed == SPEED_1000)) vars->phy_flags |= PHY_SGMII_FLAG; - /* anything 10 and over uses the bmac */ + /* Anything 10 and over uses the bmac */ link_10g_plus = (vars->line_speed >= SPEED_10000);
if (link_10g_plus) { @@@ -4685,7 -4620,7 +4686,7 @@@ else vars->mac_type = MAC_TYPE_EMAC; } - } else { /* link down */ + } else { /* Link down */ DP(NETIF_MSG_LINK, "phy link down\n");
vars->phy_link_up = 0; @@@ -4694,12 -4629,10 +4695,12 @@@ vars->duplex = DUPLEX_FULL; vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE;
- /* indicate no mac active */ + /* Indicate no mac active */ vars->mac_type = MAC_TYPE_NONE; if (vars->link_status & LINK_STATUS_PHYSICAL_LINK_FLAG) vars->phy_flags |= PHY_HALF_OPEN_CONN_FLAG; + if (vars->link_status & LINK_STATUS_SFP_TX_FAULT) + vars->phy_flags |= PHY_SFP_TX_FAULT_FLAG; } }
@@@ -4765,7 -4698,7 +4766,7 @@@ static void bnx2x_set_master_ln(struct PORT_HW_CFG_LANE_SWAP_CFG_MASTER_MASK) >> PORT_HW_CFG_LANE_SWAP_CFG_MASTER_SHIFT);
- /* set the master_ln for AN */ + /* Set the master_ln for AN */ CL22_RD_OVER_CL45(bp, phy, MDIO_REG_BANK_XGXS_BLOCK2, MDIO_XGXS_BLOCK2_TEST_MODE_LANE, @@@ -4788,7 -4721,7 +4789,7 @@@ static int bnx2x_reset_unicore(struct l MDIO_REG_BANK_COMBO_IEEE0, MDIO_COMBO_IEEE0_MII_CONTROL, &mii_control);
- /* reset the unicore */ + /* Reset the unicore */ CL22_WR_OVER_CL45(bp, phy, MDIO_REG_BANK_COMBO_IEEE0, MDIO_COMBO_IEEE0_MII_CONTROL, @@@ -4797,11 -4730,11 +4798,11 @@@ if (set_serdes) bnx2x_set_serdes_access(bp, params->port);
- /* wait for the reset to self clear */ + /* Wait for the reset to self clear */ for (i = 0; i < MDIO_ACCESS_TIMEOUT; i++) { udelay(5);
- /* the reset erased the previous bank value */ + /* The reset erased the previous bank value */ CL22_RD_OVER_CL45(bp, phy, MDIO_REG_BANK_COMBO_IEEE0, MDIO_COMBO_IEEE0_MII_CONTROL, @@@ -5019,7 -4952,7 +5020,7 @@@ static void bnx2x_set_autoneg(struct bn MDIO_CL73_IEEEB0_CL73_AN_CONTROL, reg_val); }
-/* program SerDes, forced speed */ +/* Program SerDes, forced speed */ static void bnx2x_program_serdes(struct bnx2x_phy *phy, struct link_params *params, struct link_vars *vars) @@@ -5027,7 -4960,7 +5028,7 @@@ struct bnx2x *bp = params->bp; u16 reg_val;
- /* program duplex, disable autoneg and sgmii*/ + /* Program duplex, disable autoneg and sgmii*/ CL22_RD_OVER_CL45(bp, phy, MDIO_REG_BANK_COMBO_IEEE0, MDIO_COMBO_IEEE0_MII_CONTROL, ®_val); @@@ -5046,7 -4979,7 +5047,7 @@@ CL22_RD_OVER_CL45(bp, phy, MDIO_REG_BANK_SERDES_DIGITAL, MDIO_SERDES_DIGITAL_MISC1, ®_val); - /* clearing the speed value before setting the right speed */ + /* Clearing the speed value before setting the right speed */ DP(NETIF_MSG_LINK, "MDIO_REG_BANK_SERDES_DIGITAL = 0x%x\n", reg_val);
reg_val &= ~(MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_MASK | @@@ -5075,7 -5008,7 +5076,7 @@@ static void bnx2x_set_brcm_cl37_adverti struct bnx2x *bp = params->bp; u16 val = 0;
- /* set extended capabilities */ + /* Set extended capabilities */ if (phy->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G) val |= MDIO_OVER_1G_UP1_2_5G; if (phy->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_10G) @@@ -5095,7 -5028,7 +5096,7 @@@ static void bnx2x_set_ieee_aneg_adverti { struct bnx2x *bp = params->bp; u16 val; - /* for AN, we are always publishing full duplex */ + /* For AN, we are always publishing full duplex */
CL22_WR_OVER_CL45(bp, phy, MDIO_REG_BANK_COMBO_IEEE0, @@@ -5157,14 -5090,14 +5158,14 @@@ static void bnx2x_initialize_sgmii_proc struct bnx2x *bp = params->bp; u16 control1;
- /* in SGMII mode, the unicore is always slave */ + /* In SGMII mode, the unicore is always slave */
CL22_RD_OVER_CL45(bp, phy, MDIO_REG_BANK_SERDES_DIGITAL, MDIO_SERDES_DIGITAL_A_1000X_CONTROL1, &control1); control1 |= MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_INVERT_SIGNAL_DETECT; - /* set sgmii mode (and not fiber) */ + /* Set sgmii mode (and not fiber) */ control1 &= ~(MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_FIBER_MODE | MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_AUTODET | MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_MSTR_MODE); @@@ -5173,9 -5106,9 +5174,9 @@@ MDIO_SERDES_DIGITAL_A_1000X_CONTROL1, control1);
- /* if forced speed */ + /* If forced speed */ if (!(vars->line_speed == SPEED_AUTO_NEG)) { - /* set speed, disable autoneg */ + /* Set speed, disable autoneg */ u16 mii_control;
CL22_RD_OVER_CL45(bp, phy, @@@ -5196,16 -5129,16 +5197,16 @@@ MDIO_COMBO_IEEO_MII_CONTROL_MAN_SGMII_SP_1000; break; case SPEED_10: - /* there is nothing to set for 10M */ + /* There is nothing to set for 10M */ break; default: - /* invalid speed for SGMII */ + /* Invalid speed for SGMII */ DP(NETIF_MSG_LINK, "Invalid line_speed 0x%x\n", vars->line_speed); break; }
- /* setting the full duplex */ + /* Setting the full duplex */ if (phy->req_duplex == DUPLEX_FULL) mii_control |= MDIO_COMBO_IEEO_MII_CONTROL_FULL_DUPLEX; @@@ -5215,7 -5148,7 +5216,7 @@@ mii_control);
} else { /* AN mode */ - /* enable and restart AN */ + /* Enable and restart AN */ bnx2x_restart_autoneg(phy, params, 0); } } @@@ -5311,7 -5244,7 +5312,7 @@@ static void bnx2x_flow_ctrl_resolve(str struct bnx2x *bp = params->bp; vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE;
- /* resolve from gp_status in case of AN complete and not sgmii */ + /* Resolve from gp_status in case of AN complete and not sgmii */ if (phy->req_flow_ctrl != BNX2X_FLOW_CTRL_AUTO) { /* Update the advertised flow-controled of LD/LP in AN */ if (phy->req_line_speed == SPEED_AUTO_NEG) @@@ -5535,7 -5468,7 +5536,7 @@@ static int bnx2x_link_settings_status(s bnx2x_xgxs_an_resolve(phy, params, vars, gp_status); } - } else { /* link_down */ + } else { /* Link_down */ if ((phy->req_line_speed == SPEED_AUTO_NEG) && SINGLE_MEDIA_DIRECT(params)) { /* Check signal is detected */ @@@ -5684,12 -5617,12 +5685,12 @@@ static void bnx2x_set_gmii_tx_driver(st u16 tx_driver; u16 bank;
- /* read precomp */ + /* Read precomp */ CL22_RD_OVER_CL45(bp, phy, MDIO_REG_BANK_OVER_1G, MDIO_OVER_1G_LP_UP2, &lp_up2);
- /* bits [10:7] at lp_up2, positioned at [15:12] */ + /* Bits [10:7] at lp_up2, positioned at [15:12] */ lp_up2 = (((lp_up2 & MDIO_OVER_1G_LP_UP2_PREEMPHASIS_MASK) >> MDIO_OVER_1G_LP_UP2_PREEMPHASIS_SHIFT) << MDIO_TX0_TX_DRIVER_PREEMPHASIS_SHIFT); @@@ -5703,7 -5636,7 +5704,7 @@@ bank, MDIO_TX0_TX_DRIVER, &tx_driver);
- /* replace tx_driver bits [15:12] */ + /* Replace tx_driver bits [15:12] */ if (lp_up2 != (tx_driver & MDIO_TX0_TX_DRIVER_PREEMPHASIS_MASK)) { tx_driver &= ~MDIO_TX0_TX_DRIVER_PREEMPHASIS_MASK; @@@ -5799,16 -5732,16 +5800,16 @@@ static void bnx2x_xgxs_config_init(stru FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED)) bnx2x_set_preemphasis(phy, params);
- /* forced speed requested? */ + /* Forced speed requested? */ if (vars->line_speed != SPEED_AUTO_NEG || (SINGLE_MEDIA_DIRECT(params) && params->loopback_mode == LOOPBACK_EXT)) { DP(NETIF_MSG_LINK, "not SGMII, no AN\n");
- /* disable autoneg */ + /* Disable autoneg */ bnx2x_set_autoneg(phy, params, vars, 0);
- /* program speed and duplex */ + /* Program speed and duplex */ bnx2x_program_serdes(phy, params, vars);
} else { /* AN_mode */ @@@ -5817,14 -5750,14 +5818,14 @@@ /* AN enabled */ bnx2x_set_brcm_cl37_advertisement(phy, params);
- /* program duplex & pause advertisement (for aneg) */ + /* Program duplex & pause advertisement (for aneg) */ bnx2x_set_ieee_aneg_advertisement(phy, params, vars->ieee_fc);
- /* enable autoneg */ + /* Enable autoneg */ bnx2x_set_autoneg(phy, params, vars, enable_cl73);
- /* enable and restart AN */ + /* Enable and restart AN */ bnx2x_restart_autoneg(phy, params, enable_cl73); }
@@@ -5860,12 -5793,12 +5861,12 @@@ static int bnx2x_prepare_xgxs(struct bn bnx2x_set_master_ln(params, phy);
rc = bnx2x_reset_unicore(params, phy, 0); - /* reset the SerDes and wait for reset bit return low */ - if (rc != 0) + /* Reset the SerDes and wait for reset bit return low */ + if (rc) return rc;
bnx2x_set_aer_mmd(params, phy); - /* setting the masterLn_def again after the reset */ + /* Setting the masterLn_def again after the reset */ if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT) { bnx2x_set_master_ln(params, phy); bnx2x_set_swap_lanes(params, phy); @@@ -5890,7 -5823,7 +5891,7 @@@ static u16 bnx2x_wait_reset_complete(st MDIO_PMA_REG_CTRL, &ctrl); if (!(ctrl & (1<<15))) break; - msleep(1); + usleep_range(1000, 2000); }
if (cnt == 1000) @@@ -6121,7 -6054,7 +6122,7 @@@ static void bnx2x_set_xgxs_loopback(str DP(NETIF_MSG_LINK, "XGXS 10G loopback enable\n");
if (!CHIP_IS_E3(bp)) { - /* change the uni_phy_addr in the nig */ + /* Change the uni_phy_addr in the nig */ md_devad = REG_RD(bp, (NIG_REG_XGXS0_CTRL_MD_DEVAD + port*0x18));
@@@ -6141,11 -6074,11 +6142,11 @@@ (MDIO_CL73_IEEEB0_CL73_AN_CONTROL & 0xf)), 0x6041); msleep(200); - /* set aer mmd back */ + /* Set aer mmd back */ bnx2x_set_aer_mmd(params, phy);
if (!CHIP_IS_E3(bp)) { - /* and md_devad */ + /* And md_devad */ REG_WR(bp, NIG_REG_XGXS0_CTRL_MD_DEVAD + port*0x18, md_devad); } @@@ -6342,7 -6275,7 +6343,7 @@@ int bnx2x_test_link(struct link_params MDIO_REG_BANK_GP_STATUS, MDIO_GP_STATUS_TOP_AN_STATUS1, &gp_status); - /* link is up only if both local phy and external phy are up */ + /* Link is up only if both local phy and external phy are up */ if (!(gp_status & MDIO_GP_STATUS_TOP_AN_STATUS1_LINK_STATUS)) return -ESRCH; } @@@ -6363,9 -6296,7 +6364,9 @@@ for (phy_index = EXT_PHY1; phy_index < params->num_phys; phy_index++) { serdes_phy_type = ((params->phy[phy_index].media_type == - ETH_PHY_SFP_FIBER) || + ETH_PHY_SFPP_10G_FIBER) || + (params->phy[phy_index].media_type == + ETH_PHY_SFP_1G_FIBER) || (params->phy[phy_index].media_type == ETH_PHY_XFP_FIBER) || (params->phy[phy_index].media_type == @@@ -6466,7 -6397,7 +6467,7 @@@ static int bnx2x_link_initialize(struc static void bnx2x_int_link_reset(struct bnx2x_phy *phy, struct link_params *params) { - /* reset the SerDes/XGXS */ + /* Reset the SerDes/XGXS */ REG_WR(params->bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_3_CLEAR, (0x1ff << (params->port*16))); } @@@ -6499,10 -6430,10 +6500,10 @@@ static int bnx2x_update_link_down(struc DP(NETIF_MSG_LINK, "Port %x: Link is down\n", port); bnx2x_set_led(params, vars, LED_MODE_OFF, 0); vars->phy_flags &= ~PHY_PHYSICAL_LINK_FLAG; - /* indicate no mac active */ + /* Indicate no mac active */ vars->mac_type = MAC_TYPE_NONE;
- /* update shared memory */ + /* Update shared memory */ vars->link_status &= ~(LINK_STATUS_SPEED_AND_DUPLEX_MASK | LINK_STATUS_LINK_UP | LINK_STATUS_PHYSICAL_LINK_FLAG | @@@ -6515,15 -6446,15 +6516,15 @@@ vars->line_speed = 0; bnx2x_update_mng(params, vars->link_status);
- /* activate nig drain */ + /* Activate nig drain */ REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + port*4, 1);
- /* disable emac */ + /* Disable emac */ if (!CHIP_IS_E3(bp)) REG_WR(bp, NIG_REG_NIG_EMAC0_EN + port*4, 0);
- msleep(10); - /* reset BigMac/Xmac */ + usleep_range(10000, 20000); + /* Reset BigMac/Xmac */ if (CHIP_IS_E1x(bp) || CHIP_IS_E2(bp)) { bnx2x_bmac_rx_disable(bp, params->port); @@@ -6532,16 -6463,6 +6533,16 @@@ (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port)); } if (CHIP_IS_E3(bp)) { + /* Prevent LPI Generation by chip */ + REG_WR(bp, MISC_REG_CPMU_LP_FW_ENABLE_P0 + (params->port << 2), + 0); + REG_WR(bp, MISC_REG_CPMU_LP_DR_ENABLE, 0); + REG_WR(bp, MISC_REG_CPMU_LP_MASK_ENT_P0 + (params->port << 2), + 0); + vars->eee_status &= ~(SHMEM_EEE_LP_ADV_STATUS_MASK | + SHMEM_EEE_ACTIVE_BIT); + + bnx2x_update_mng_eee(params, vars->eee_status); bnx2x_xmac_disable(params); bnx2x_umac_disable(params); } @@@ -6581,16 -6502,6 +6582,16 @@@ static int bnx2x_update_link_up(struct bnx2x_umac_enable(params, vars, 0); bnx2x_set_led(params, vars, LED_MODE_OPER, vars->line_speed); + + if ((vars->eee_status & SHMEM_EEE_ACTIVE_BIT) && + (vars->eee_status & SHMEM_EEE_LPI_REQUESTED_BIT)) { + DP(NETIF_MSG_LINK, "Enabling LPI assertion\n"); + REG_WR(bp, MISC_REG_CPMU_LP_FW_ENABLE_P0 + + (params->port << 2), 1); + REG_WR(bp, MISC_REG_CPMU_LP_DR_ENABLE, 1); + REG_WR(bp, MISC_REG_CPMU_LP_MASK_ENT_P0 + + (params->port << 2), 0xfc20); + } } if ((CHIP_IS_E1x(bp) || CHIP_IS_E2(bp))) { @@@ -6623,12 -6534,12 +6624,12 @@@ rc |= bnx2x_pbf_update(params, vars->flow_ctrl, vars->line_speed);
- /* disable drain */ + /* Disable drain */ REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + port*4, 0);
- /* update shared memory */ + /* Update shared memory */ bnx2x_update_mng(params, vars->link_status); - + bnx2x_update_mng_eee(params, vars->eee_status); /* Check remote fault */ for (phy_idx = INT_PHY; phy_idx < MAX_PHYS; phy_idx++) { if (params->phy[phy_idx].flags & FLAGS_TX_ERROR_CHECK) { @@@ -6672,8 -6583,6 +6673,8 @@@ int bnx2x_link_update(struct link_param phy_vars[phy_index].phy_link_up = 0; phy_vars[phy_index].link_up = 0; phy_vars[phy_index].fault_detected = 0; + /* different consideration, since vars holds inner state */ + phy_vars[phy_index].eee_status = vars->eee_status; }
if (USES_WARPCORE(bp)) @@@ -6694,7 -6603,7 +6695,7 @@@ REG_RD(bp, NIG_REG_XGXS0_STATUS_LINK10G + port*0x68), REG_RD(bp, NIG_REG_XGXS0_STATUS_LINK_STATUS + port*0x68));
- /* disable emac */ + /* Disable emac */ if (!CHIP_IS_E3(bp)) REG_WR(bp, NIG_REG_NIG_EMAC0_EN + port*4, 0);
@@@ -6803,9 -6712,6 +6804,9 @@@ vars->link_status |= LINK_STATUS_SERDES_LINK; else vars->link_status &= ~LINK_STATUS_SERDES_LINK; + + vars->eee_status = phy_vars[active_external_phy].eee_status; + DP(NETIF_MSG_LINK, "Active external phy selected: %x\n", active_external_phy); } @@@ -6839,11 -6745,11 +6840,11 @@@ } else if (prev_line_speed != vars->line_speed) { REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4, 0); - msleep(1); + usleep_range(1000, 2000); } }
- /* anything 10 and over uses the bmac */ + /* Anything 10 and over uses the bmac */ link_10g_plus = (vars->line_speed >= SPEED_10000);
bnx2x_link_int_ack(params, vars, link_10g_plus); @@@ -6909,7 -6815,7 +6910,7 @@@ void bnx2x_ext_phy_hw_reset(struct bnx2 { bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1, MISC_REGISTERS_GPIO_OUTPUT_LOW, port); - msleep(1); + usleep_range(1000, 2000); bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1, MISC_REGISTERS_GPIO_OUTPUT_HIGH, port); } @@@ -7006,7 -6912,7 +7007,7 @@@ static int bnx2x_8073_8727_external_rom MDIO_PMA_REG_GEN_CTRL, 0x0001);
- /* ucode reboot and rst */ + /* Ucode reboot and rst */ bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_GEN_CTRL, @@@ -7050,7 -6956,7 +7051,7 @@@ MDIO_PMA_DEVAD, MDIO_PMA_REG_M8051_MSGOUT_REG, &fw_msgout);
- msleep(1); + usleep_range(1000, 2000); } while (fw_ver1 == 0 || fw_ver1 == 0x4321 || ((fw_msgout & 0xff) != 0x03 && (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073))); @@@ -7144,11 -7050,11 +7145,11 @@@ static int bnx2x_8073_xaui_wa(struct bn "XAUI workaround has completed\n"); return 0; } - msleep(3); + usleep_range(3000, 6000); } break; } - msleep(3); + usleep_range(3000, 6000); } DP(NETIF_MSG_LINK, "Warning: XAUI work-around timeout !!!\n"); return -EINVAL; @@@ -7222,7 -7128,7 +7223,7 @@@ static int bnx2x_8073_config_init(struc bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1, MISC_REGISTERS_GPIO_OUTPUT_HIGH, gpio_port);
- /* enable LASI */ + /* Enable LASI */ bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_LASI_RXCTRL, (1<<2)); bnx2x_cl45_write(bp, phy, @@@ -7370,7 -7276,7 +7371,7 @@@ static u8 bnx2x_8073_read_status(struc
DP(NETIF_MSG_LINK, "8703 LASI status 0x%x\n", val1);
- /* clear the interrupt LASI status register */ + /* Clear the interrupt LASI status register */ bnx2x_cl45_read(bp, phy, MDIO_PCS_DEVAD, MDIO_PCS_REG_STATUS, &val2); bnx2x_cl45_read(bp, phy, @@@ -7695,7 -7601,7 +7696,7 @@@ static int bnx2x_8726_read_sfp_module_e struct bnx2x *bp = params->bp; u16 val = 0; u16 i; - if (byte_cnt > 16) { + if (byte_cnt > SFP_EEPROM_PAGE_SIZE) { DP(NETIF_MSG_LINK, "Reading from eeprom is limited to 0xf\n"); return -EINVAL; @@@ -7749,11 -7655,33 +7750,33 @@@ if ((val & MDIO_PMA_REG_SFP_TWO_WIRE_CTRL_STATUS_MASK) == MDIO_PMA_REG_SFP_TWO_WIRE_STATUS_IDLE) return 0; - msleep(1); + usleep_range(1000, 2000); } return -EINVAL; }
+ static void bnx2x_warpcore_power_module(struct link_params *params, + struct bnx2x_phy *phy, + u8 power) + { + u32 pin_cfg; + struct bnx2x *bp = params->bp; + + pin_cfg = (REG_RD(bp, params->shmem_base + + offsetof(struct shmem_region, + dev_info.port_hw_config[params->port].e3_sfp_ctrl)) & + PORT_HW_CFG_E3_PWR_DIS_MASK) >> + PORT_HW_CFG_E3_PWR_DIS_SHIFT; + + if (pin_cfg == PIN_CFG_NA) + return; + DP(NETIF_MSG_LINK, "Setting SFP+ module power to %d using pin cfg %d\n", + power, pin_cfg); + /* Low ==> corresponding SFP+ module is powered + * high ==> the SFP+ module is powered down + */ + bnx2x_set_cfg_pin(bp, pin_cfg, power ^ 1); + } static int bnx2x_warpcore_read_sfp_module_eeprom(struct bnx2x_phy *phy, struct link_params *params, u16 addr, u8 byte_cnt, @@@ -7764,8 -7692,7 +7787,8 @@@ u32 data_array[4]; u16 addr32; struct bnx2x *bp = params->bp; - if (byte_cnt > 16) { + + if (byte_cnt > SFP_EEPROM_PAGE_SIZE) { DP(NETIF_MSG_LINK, "Reading from eeprom is limited to 16 bytes\n"); return -EINVAL; @@@ -7774,6 -7701,12 +7797,12 @@@ /* 4 byte aligned address */ addr32 = addr & (~0x3); do { + if (cnt == I2C_WA_PWR_ITER) { + bnx2x_warpcore_power_module(params, phy, 0); + /* Note that 100us are not enough here */ + usleep_range(1000,1000); + bnx2x_warpcore_power_module(params, phy, 1); + } rc = bnx2x_bsc_read(params, phy, 0xa0, addr32, 0, byte_cnt, data_array); } while ((rc != 0) && (++cnt < I2C_WA_RETRY_CNT)); @@@ -7795,7 -7728,7 +7824,7 @@@ static int bnx2x_8727_read_sfp_module_e struct bnx2x *bp = params->bp; u16 val, i;
- if (byte_cnt > 16) { + if (byte_cnt > SFP_EEPROM_PAGE_SIZE) { DP(NETIF_MSG_LINK, "Reading from eeprom is limited to 0xf\n"); return -EINVAL; @@@ -7832,7 -7765,7 +7861,7 @@@ /* Wait appropriate time for two-wire command to finish before * polling the status register */ - msleep(1); + usleep_range(1000, 2000);
/* Wait up to 500us for command complete status */ for (i = 0; i < 100; i++) { @@@ -7868,7 -7801,7 +7897,7 @@@ if ((val & MDIO_PMA_REG_SFP_TWO_WIRE_CTRL_STATUS_MASK) == MDIO_PMA_REG_SFP_TWO_WIRE_STATUS_IDLE) return 0; - msleep(1); + usleep_range(1000, 2000); }
return -EINVAL; @@@ -7878,7 -7811,7 +7907,7 @@@ int bnx2x_read_sfp_module_eeprom(struc struct link_params *params, u16 addr, u8 byte_cnt, u8 *o_buf) { - int rc = -EINVAL; + int rc = -EOPNOTSUPP; switch (phy->type) { case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726: rc = bnx2x_8726_read_sfp_module_eeprom(phy, params, addr, @@@ -7903,7 -7836,7 +7932,7 @@@ static int bnx2x_get_edc_mode(struct bn { struct bnx2x *bp = params->bp; u32 sync_offset = 0, phy_idx, media_types; - u8 val, check_limiting_mode = 0; + u8 val[2], check_limiting_mode = 0; *edc_mode = EDC_MODE_LIMITING;
phy->media_type = ETH_PHY_UNSPECIFIED; @@@ -7911,13 -7844,13 +7940,13 @@@ if (bnx2x_read_sfp_module_eeprom(phy, params, SFP_EEPROM_CON_TYPE_ADDR, - 1, - &val) != 0) { + 2, + (u8 *)val) != 0) { DP(NETIF_MSG_LINK, "Failed to read from SFP+ module EEPROM\n"); return -EINVAL; }
- switch (val) { + switch (val[0]) { case SFP_EEPROM_CON_TYPE_VAL_COPPER: { u8 copper_module_type; @@@ -7955,29 -7888,13 +7984,29 @@@ break; } case SFP_EEPROM_CON_TYPE_VAL_LC: - phy->media_type = ETH_PHY_SFP_FIBER; - DP(NETIF_MSG_LINK, "Optic module detected\n"); check_limiting_mode = 1; + if ((val[1] & (SFP_EEPROM_COMP_CODE_SR_MASK | + SFP_EEPROM_COMP_CODE_LR_MASK | + SFP_EEPROM_COMP_CODE_LRM_MASK)) == 0) { + DP(NETIF_MSG_LINK, "1G Optic module detected\n"); + phy->media_type = ETH_PHY_SFP_1G_FIBER; + phy->req_line_speed = SPEED_1000; + } else { + int idx, cfg_idx = 0; + DP(NETIF_MSG_LINK, "10G Optic module detected\n"); + for (idx = INT_PHY; idx < MAX_PHYS; idx++) { + if (params->phy[idx].type == phy->type) { + cfg_idx = LINK_CONFIG_IDX(idx); + break; + } + } + phy->media_type = ETH_PHY_SFPP_10G_FIBER; + phy->req_line_speed = params->req_line_speed[cfg_idx]; + } break; default: DP(NETIF_MSG_LINK, "Unable to determine module type 0x%x !!!\n", - val); + val[0]); return -EINVAL; } sync_offset = params->shmem_base + @@@ -8063,7 -7980,7 +8092,7 @@@ static int bnx2x_verify_sfp_module(stru return 0; }
- /* format the warning message */ + /* Format the warning message */ if (bnx2x_read_sfp_module_eeprom(phy, params, SFP_EEPROM_VENDOR_NAME_ADDR, @@@ -8109,7 -8026,7 +8138,7 @@@ static int bnx2x_wait_for_sfp_module_in timeout * 5); return 0; } - msleep(5); + usleep_range(5000, 10000); } return -EINVAL; } @@@ -8312,29 -8229,6 +8341,6 @@@ static void bnx2x_set_sfp_module_fault_ bnx2x_set_e1e2_module_fault_led(params, gpio_mode); }
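The get_edc_mode hunk above widens the SFP+ EEPROM read from one byte to two so that, for LC-connector modules, the 10G Ethernet compliance codes can be inspected: if none of the SR/LR/LRM bits are set the module is treated as a 1G optic and the requested line speed is forced to 1G. A standalone sketch of that classification; the constant values are illustrative stand-ins for the driver's SFP_EEPROM_* defines:

#include <stdint.h>
#include <stdio.h>

/* Illustrative values only; the driver takes these from its own defines. */
#define CON_TYPE_LC      0x07   /* optical module, LC connector */
#define CON_TYPE_COPPER  0x21   /* copper direct-attach cable   */
#define COMP_CODE_SR     (1 << 4)
#define COMP_CODE_LR     (1 << 5)
#define COMP_CODE_LRM    (1 << 6)

enum module_kind { MODULE_UNKNOWN, MODULE_COPPER, MODULE_1G_FIBER, MODULE_10G_FIBER };

/* val[0] = connector type byte, val[1] = 10G compliance code byte */
static enum module_kind classify_sfp(const uint8_t val[2])
{
        if (val[0] == CON_TYPE_COPPER)
                return MODULE_COPPER;
        if (val[0] == CON_TYPE_LC) {
                /* No 10G compliance bit set: treat it as a 1G optic */
                if ((val[1] & (COMP_CODE_SR | COMP_CODE_LR | COMP_CODE_LRM)) == 0)
                        return MODULE_1G_FIBER;
                return MODULE_10G_FIBER;
        }
        return MODULE_UNKNOWN;
}

int main(void)
{
        uint8_t lx_1g[2]  = { CON_TYPE_LC, 0x00 };
        uint8_t sr_10g[2] = { CON_TYPE_LC, COMP_CODE_SR };

        printf("%d %d\n", classify_sfp(lx_1g), classify_sfp(sr_10g));
        return 0;
}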
- static void bnx2x_warpcore_power_module(struct link_params *params, - struct bnx2x_phy *phy, - u8 power) - { - u32 pin_cfg; - struct bnx2x *bp = params->bp; - - pin_cfg = (REG_RD(bp, params->shmem_base + - offsetof(struct shmem_region, - dev_info.port_hw_config[params->port].e3_sfp_ctrl)) & - PORT_HW_CFG_E3_PWR_DIS_MASK) >> - PORT_HW_CFG_E3_PWR_DIS_SHIFT; - - if (pin_cfg == PIN_CFG_NA) - return; - DP(NETIF_MSG_LINK, "Setting SFP+ module power to %d using pin cfg %d\n", - power, pin_cfg); - /* Low ==> corresponding SFP+ module is powered - * high ==> the SFP+ module is powered down - */ - bnx2x_set_cfg_pin(bp, pin_cfg, power ^ 1); - } - static void bnx2x_warpcore_hw_reset(struct bnx2x_phy *phy, struct link_params *params) { @@@ -8444,7 -8338,7 +8450,7 @@@ int bnx2x_sfp_module_detection(struct b DP(NETIF_MSG_LINK, "Failed to get valid module type\n"); return -EINVAL; } else if (bnx2x_verify_sfp_module(phy, params) != 0) { - /* check SFP+ module compatibility */ + /* Check SFP+ module compatibility */ DP(NETIF_MSG_LINK, "Module verification failed!!\n"); rc = -EINVAL; /* Turn on fault module-detected led */ @@@ -8507,34 -8401,14 +8513,34 @@@ void bnx2x_handle_module_detect_int(str
/* Call the handling function in case module is detected */ if (gpio_val == 0) { + bnx2x_set_mdio_clk(bp, params->chip_id, params->port); + bnx2x_set_aer_mmd(params, phy); + bnx2x_power_sfp_module(params, phy, 1); bnx2x_set_gpio_int(bp, gpio_num, MISC_REGISTERS_GPIO_INT_OUTPUT_CLR, gpio_port); - if (bnx2x_wait_for_sfp_module_initialized(phy, params) == 0) + if (bnx2x_wait_for_sfp_module_initialized(phy, params) == 0) { bnx2x_sfp_module_detection(phy, params); - else + if (CHIP_IS_E3(bp)) { + u16 rx_tx_in_reset; + /* In case WC is out of reset, reconfigure the + * link speed while taking into account 1G + * module limitation. + */ + bnx2x_cl45_read(bp, phy, + MDIO_WC_DEVAD, + MDIO_WC_REG_DIGITAL5_MISC6, + &rx_tx_in_reset); + if (!rx_tx_in_reset) { + bnx2x_warpcore_reset_lane(bp, phy, 1); + bnx2x_warpcore_config_sfi(phy, params); + bnx2x_warpcore_reset_lane(bp, phy, 0); + } + } + } else { DP(NETIF_MSG_LINK, "SFP+ module is not initialized\n"); + } } else { u32 val = REG_RD(bp, params->shmem_base + offsetof(struct shmem_region, dev_info. @@@ -8595,7 -8469,7 +8601,7 @@@ static u8 bnx2x_8706_8726_read_status(s bnx2x_sfp_mask_fault(bp, phy, MDIO_PMA_LASI_TXSTAT, MDIO_PMA_LASI_TXCTRL);
- /* clear LASI indication*/ + /* Clear LASI indication*/ bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_LASI_STAT, &val1); bnx2x_cl45_read(bp, phy, @@@ -8663,7 -8537,7 +8669,7 @@@ static u8 bnx2x_8706_config_init(struc MDIO_PMA_DEVAD, MDIO_PMA_REG_ROM_VER1, &val); if (val) break; - msleep(10); + usleep_range(10000, 20000); } DP(NETIF_MSG_LINK, "XGXS 8706 is initialized after %d ms\n", cnt); if ((params->feature_config_flags & @@@ -8792,7 -8666,7 +8798,7 @@@ static void bnx2x_8726_external_rom_boo MDIO_PMA_REG_GEN_CTRL, MDIO_PMA_REG_GEN_CTRL_ROM_RESET_INTERNAL_MP);
- /* wait for 150ms for microcode load */ + /* Wait for 150ms for microcode load */ msleep(150);
/* Disable serial boot control, tristates pins SS_N, SCK, MOSI, MISO */ @@@ -8986,63 -8860,6 +8992,63 @@@ static void bnx2x_8727_hw_reset(struct MISC_REGISTERS_GPIO_OUTPUT_LOW, port); }
+static void bnx2x_8727_config_speed(struct bnx2x_phy *phy, + struct link_params *params) +{ + struct bnx2x *bp = params->bp; + u16 tmp1, val; + /* Set option 1G speed */ + if ((phy->req_line_speed == SPEED_1000) || + (phy->media_type == ETH_PHY_SFP_1G_FIBER)) { + DP(NETIF_MSG_LINK, "Setting 1G force\n"); + bnx2x_cl45_write(bp, phy, + MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 0x40); + bnx2x_cl45_write(bp, phy, + MDIO_PMA_DEVAD, MDIO_PMA_REG_10G_CTRL2, 0xD); + bnx2x_cl45_read(bp, phy, + MDIO_PMA_DEVAD, MDIO_PMA_REG_10G_CTRL2, &tmp1); + DP(NETIF_MSG_LINK, "1.7 = 0x%x\n", tmp1); + /* Power down the XAUI until link is up in case of dual-media + * and 1G + */ + if (DUAL_MEDIA(params)) { + bnx2x_cl45_read(bp, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_8727_PCS_GP, &val); + val |= (3<<10); + bnx2x_cl45_write(bp, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_8727_PCS_GP, val); + } + } else if ((phy->req_line_speed == SPEED_AUTO_NEG) && + ((phy->speed_cap_mask & + PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)) && + ((phy->speed_cap_mask & + PORT_HW_CFG_SPEED_CAPABILITY_D0_10G) != + PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)) { + + DP(NETIF_MSG_LINK, "Setting 1G clause37\n"); + bnx2x_cl45_write(bp, phy, + MDIO_AN_DEVAD, MDIO_AN_REG_8727_MISC_CTRL, 0); + bnx2x_cl45_write(bp, phy, + MDIO_AN_DEVAD, MDIO_AN_REG_CL37_AN, 0x1300); + } else { + /* Since the 8727 has only single reset pin, need to set the 10G + * registers although it is default + */ + bnx2x_cl45_write(bp, phy, + MDIO_AN_DEVAD, MDIO_AN_REG_8727_MISC_CTRL, + 0x0020); + bnx2x_cl45_write(bp, phy, + MDIO_AN_DEVAD, MDIO_AN_REG_CL37_AN, 0x0100); + bnx2x_cl45_write(bp, phy, + MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 0x2040); + bnx2x_cl45_write(bp, phy, + MDIO_PMA_DEVAD, MDIO_PMA_REG_10G_CTRL2, + 0x0008); + } +} + static int bnx2x_8727_config_init(struct bnx2x_phy *phy, struct link_params *params, struct link_vars *vars) @@@ -9060,7 -8877,7 +9066,7 @@@ lasi_ctrl_val = 0x0006;
DP(NETIF_MSG_LINK, "Initializing BCM8727\n"); - /* enable LASI */ + /* Enable LASI */ bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_LASI_RXCTRL, rx_alarm_ctrl_val); @@@ -9112,7 -8929,56 +9118,7 @@@ bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_LASI_RXSTAT, &tmp1);
- /* Set option 1G speed */ - if (phy->req_line_speed == SPEED_1000) { - DP(NETIF_MSG_LINK, "Setting 1G force\n"); - bnx2x_cl45_write(bp, phy, - MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 0x40); - bnx2x_cl45_write(bp, phy, - MDIO_PMA_DEVAD, MDIO_PMA_REG_10G_CTRL2, 0xD); - bnx2x_cl45_read(bp, phy, - MDIO_PMA_DEVAD, MDIO_PMA_REG_10G_CTRL2, &tmp1); - DP(NETIF_MSG_LINK, "1.7 = 0x%x\n", tmp1); - /* Power down the XAUI until link is up in case of dual-media - * and 1G - */ - if (DUAL_MEDIA(params)) { - bnx2x_cl45_read(bp, phy, - MDIO_PMA_DEVAD, - MDIO_PMA_REG_8727_PCS_GP, &val); - val |= (3<<10); - bnx2x_cl45_write(bp, phy, - MDIO_PMA_DEVAD, - MDIO_PMA_REG_8727_PCS_GP, val); - } - } else if ((phy->req_line_speed == SPEED_AUTO_NEG) && - ((phy->speed_cap_mask & - PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)) && - ((phy->speed_cap_mask & - PORT_HW_CFG_SPEED_CAPABILITY_D0_10G) != - PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)) { - - DP(NETIF_MSG_LINK, "Setting 1G clause37\n"); - bnx2x_cl45_write(bp, phy, - MDIO_AN_DEVAD, MDIO_AN_REG_8727_MISC_CTRL, 0); - bnx2x_cl45_write(bp, phy, - MDIO_AN_DEVAD, MDIO_AN_REG_CL37_AN, 0x1300); - } else { - /* Since the 8727 has only single reset pin, need to set the 10G - * registers although it is default - */ - bnx2x_cl45_write(bp, phy, - MDIO_AN_DEVAD, MDIO_AN_REG_8727_MISC_CTRL, - 0x0020); - bnx2x_cl45_write(bp, phy, - MDIO_AN_DEVAD, MDIO_AN_REG_CL37_AN, 0x0100); - bnx2x_cl45_write(bp, phy, - MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 0x2040); - bnx2x_cl45_write(bp, phy, - MDIO_PMA_DEVAD, MDIO_PMA_REG_10G_CTRL2, - 0x0008); - } - + bnx2x_8727_config_speed(phy, params); /* Set 2-wire transfer rate of SFP+ module EEPROM * to 100Khz since some DACs(direct attached cables) do * not work at 400Khz. @@@ -9239,9 -9105,6 +9245,9 @@@ static void bnx2x_8727_handle_mod_abs(s bnx2x_sfp_module_detection(phy, params); else DP(NETIF_MSG_LINK, "SFP+ module is not initialized\n"); + + /* Reconfigure link speed based on module type limitations */ + bnx2x_8727_config_speed(phy, params); }
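Pulling the speed selection out into bnx2x_8727_config_speed() lets the module-insertion handler above rerun exactly the same decision the init path makes, so a freshly plugged 1G module immediately gets the forced-1G register setup. Reduced to the decision alone, with made-up capability masks rather than the driver's constants:

#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-ins for the driver's speed/capability constants. */
#define SPEED_AUTO_NEG 0
#define SPEED_1000     1000
#define CAP_1G         (1u << 0)
#define CAP_10G        (1u << 1)

enum speed_cfg { CFG_FORCE_1G, CFG_CLAUSE37_AN_1G, CFG_DEFAULT_10G };

struct phy_req {
        int      req_line_speed;  /* SPEED_AUTO_NEG or a forced speed */
        int      is_1g_fiber;     /* module classified as a 1G optic  */
        uint32_t speed_cap_mask;  /* advertised capability bits       */
};

static enum speed_cfg pick_8727_speed(const struct phy_req *p)
{
        /* Forced 1G, or a 1G-only module plugged in: force 1G. */
        if (p->req_line_speed == SPEED_1000 || p->is_1g_fiber)
                return CFG_FORCE_1G;

        /* Autoneg with 1G advertised and 10G not advertised:
         * fall back to clause 37 autoneg.
         */
        if (p->req_line_speed == SPEED_AUTO_NEG &&
            (p->speed_cap_mask & CAP_1G) &&
            !(p->speed_cap_mask & CAP_10G))
                return CFG_CLAUSE37_AN_1G;

        /* Otherwise (re)write the default 10G register settings. */
        return CFG_DEFAULT_10G;
}

int main(void)
{
        struct phy_req onegig = { SPEED_AUTO_NEG, 1, CAP_1G | CAP_10G };

        printf("%d\n", pick_8727_speed(&onegig));
        return 0;
}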
DP(NETIF_MSG_LINK, "8727 RX_ALARM_STATUS 0x%x\n", @@@ -9722,9 -9585,9 +9728,9 @@@ static int bnx2x_8481_config_init(struc static int bnx2x_84833_cmd_hdlr(struct bnx2x_phy *phy, struct link_params *params, u16 fw_cmd, - u16 cmd_args[]) + u16 cmd_args[], int argc) { - u32 idx; + int idx; u16 val; struct bnx2x *bp = params->bp; /* Write CMD_OPEN_OVERRIDE to STATUS reg */ @@@ -9736,7 -9599,7 +9742,7 @@@ MDIO_84833_CMD_HDLR_STATUS, &val); if (val == PHY84833_STATUS_CMD_OPEN_FOR_CMDS) break; - msleep(1); + usleep_range(1000, 2000); } if (idx >= PHY84833_CMDHDLR_WAIT) { DP(NETIF_MSG_LINK, "FW cmd: FW not ready.\n"); @@@ -9744,7 -9607,7 +9750,7 @@@ }
/* Prepare argument(s) and issue command */ - for (idx = 0; idx < PHY84833_CMDHDLR_MAX_ARGS; idx++) { + for (idx = 0; idx < argc; idx++) { bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD, MDIO_84833_CMD_HDLR_DATA1 + idx, cmd_args[idx]); @@@ -9757,7 -9620,7 +9763,7 @@@ if ((val == PHY84833_STATUS_CMD_COMPLETE_PASS) || (val == PHY84833_STATUS_CMD_COMPLETE_ERROR)) break; - msleep(1); + usleep_range(1000, 2000); } if ((idx >= PHY84833_CMDHDLR_WAIT) || (val == PHY84833_STATUS_CMD_COMPLETE_ERROR)) { @@@ -9765,7 -9628,7 +9771,7 @@@ return -EINVAL; } /* Gather returning data */ - for (idx = 0; idx < PHY84833_CMDHDLR_MAX_ARGS; idx++) { + for (idx = 0; idx < argc; idx++) { bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD, MDIO_84833_CMD_HDLR_DATA1 + idx, &cmd_args[idx]); @@@ -9799,7 -9662,7 +9805,7 @@@ static int bnx2x_84833_pair_swap_cfg(st data[1] = (u16)pair_swap;
status = bnx2x_84833_cmd_hdlr(phy, params, - PHY84833_CMD_SET_PAIR_SWAP, data); + PHY84833_CMD_SET_PAIR_SWAP, data, PHY84833_CMDHDLR_MAX_ARGS); if (status == 0) DP(NETIF_MSG_LINK, "Pairswap OK, val=0x%x\n", data[1]);
@@@ -9877,95 -9740,6 +9883,95 @@@ static int bnx2x_84833_hw_reset_phy(str return 0; }
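The extra argc parameter added to bnx2x_84833_cmd_hdlr() means callers such as bnx2x_84833_pair_swap_cfg() above and the EEE helpers added below write and read only the mailbox registers a command actually uses. The handshake itself is: wait for the firmware to open the command handler, write the arguments, issue the opcode, poll until the status reports pass or error, then read the results back from the same data registers. A generic user-space sketch of that sequence; the fw_* accessors are placeholders simulating an always-ready firmware, not driver functions:

#include <stdint.h>
#include <stdio.h>

#define FW_READY       0x0001
#define FW_CMD_PASS    0x0002
#define FW_CMD_ERROR   0x0003
#define MAX_WAIT_ITERS 300
#define MAX_ARGS       4

/* Tiny simulated mailbox so the sketch runs stand-alone; in the driver
 * these would be MDIO reads/writes to the PHY's command-handler registers.
 */
static uint16_t status_reg = FW_READY;
static uint16_t data_regs[MAX_ARGS];

static uint16_t fw_read_status(void)            { return status_reg; }
static void     fw_write_arg(int i, uint16_t v) { data_regs[i] = v; }
static uint16_t fw_read_arg(int i)              { return data_regs[i]; }
static void     fw_write_cmd(uint16_t op)       { (void)op; status_reg = FW_CMD_PASS; }
static void     wait_1ms(void)                  { }

/* Issue one firmware command with argc in/out arguments. */
static int fw_cmd(uint16_t opcode, uint16_t *args, int argc)
{
        int i;

        /* 1. Wait until the firmware mailbox is open for commands. */
        for (i = 0; i < MAX_WAIT_ITERS; i++) {
                if (fw_read_status() == FW_READY)
                        break;
                wait_1ms();
        }
        if (i >= MAX_WAIT_ITERS)
                return -1;

        /* 2. Write only the arguments this command actually takes. */
        for (i = 0; i < argc; i++)
                fw_write_arg(i, args[i]);

        /* 3. Issue the command, then poll for pass/error completion. */
        fw_write_cmd(opcode);
        for (i = 0; i < MAX_WAIT_ITERS; i++) {
                uint16_t st = fw_read_status();

                if (st == FW_CMD_PASS || st == FW_CMD_ERROR)
                        break;
                wait_1ms();
        }
        if (i >= MAX_WAIT_ITERS || fw_read_status() == FW_CMD_ERROR)
                return -1;

        /* 4. Results come back through the same data registers. */
        for (i = 0; i < argc; i++)
                args[i] = fw_read_arg(i);
        return 0;
}

int main(void)
{
        uint16_t args[2] = { 0x1234, 0x0001 };

        printf("rc=%d arg0=0x%04x\n", fw_cmd(0x8001, args, 2), args[0]);
        return 0;
}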
+static int bnx2x_8483x_eee_timers(struct link_params *params, + struct link_vars *vars) +{ + u32 eee_idle = 0, eee_mode; + struct bnx2x *bp = params->bp; + + eee_idle = bnx2x_eee_calc_timer(params); + + if (eee_idle) { + REG_WR(bp, MISC_REG_CPMU_LP_IDLE_THR_P0 + (params->port << 2), + eee_idle); + } else if ((params->eee_mode & EEE_MODE_ENABLE_LPI) && + (params->eee_mode & EEE_MODE_OVERRIDE_NVRAM) && + (params->eee_mode & EEE_MODE_OUTPUT_TIME)) { + DP(NETIF_MSG_LINK, "Error: Tx LPI is enabled with timer 0\n"); + return -EINVAL; + } + + vars->eee_status &= ~(SHMEM_EEE_TIMER_MASK | SHMEM_EEE_TIME_OUTPUT_BIT); + if (params->eee_mode & EEE_MODE_OUTPUT_TIME) { + /* eee_idle in 1u --> eee_status in 16u */ + eee_idle >>= 4; + vars->eee_status |= (eee_idle & SHMEM_EEE_TIMER_MASK) | + SHMEM_EEE_TIME_OUTPUT_BIT; + } else { + if (bnx2x_eee_time_to_nvram(eee_idle, &eee_mode)) + return -EINVAL; + vars->eee_status |= eee_mode; + } + + return 0; +} + +static int bnx2x_8483x_disable_eee(struct bnx2x_phy *phy, + struct link_params *params, + struct link_vars *vars) +{ + int rc; + struct bnx2x *bp = params->bp; + u16 cmd_args = 0; + + DP(NETIF_MSG_LINK, "Don't Advertise 10GBase-T EEE\n"); + + /* Make Certain LPI is disabled */ + REG_WR(bp, MISC_REG_CPMU_LP_FW_ENABLE_P0 + (params->port << 2), 0); + REG_WR(bp, MISC_REG_CPMU_LP_DR_ENABLE, 0); + + /* Prevent Phy from working in EEE and advertising it */ + rc = bnx2x_84833_cmd_hdlr(phy, params, + PHY84833_CMD_SET_EEE_MODE, &cmd_args, 1); + if (rc) { + DP(NETIF_MSG_LINK, "EEE disable failed.\n"); + return rc; + } + + bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_EEE_ADV, 0); + vars->eee_status &= ~SHMEM_EEE_ADV_STATUS_MASK; + + return 0; +} + +static int bnx2x_8483x_enable_eee(struct bnx2x_phy *phy, + struct link_params *params, + struct link_vars *vars) +{ + int rc; + struct bnx2x *bp = params->bp; + u16 cmd_args = 1; + + DP(NETIF_MSG_LINK, "Advertise 10GBase-T EEE\n"); + + rc = bnx2x_84833_cmd_hdlr(phy, params, + PHY84833_CMD_SET_EEE_MODE, &cmd_args, 1); + if (rc) { + DP(NETIF_MSG_LINK, "EEE enable failed.\n"); + return rc; + } + + bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_EEE_ADV, 0x8); + + /* Mask events preventing LPI generation */ + REG_WR(bp, MISC_REG_CPMU_LP_MASK_EXT_P0 + (params->port << 2), 0xfc20); + + vars->eee_status &= ~SHMEM_EEE_ADV_STATUS_MASK; + vars->eee_status |= (SHMEM_EEE_10G_ADV << SHMEM_EEE_ADV_STATUS_SHIFT); + + return 0; +} + #define PHY84833_CONSTANT_LATENCY 1193 static int bnx2x_848x3_config_init(struct bnx2x_phy *phy, struct link_params *params, @@@ -9978,9 -9752,9 +9984,9 @@@ u16 cmd_args[PHY84833_CMDHDLR_MAX_ARGS]; int rc = 0;
- msleep(1); + usleep_range(1000, 2000);
- if (!(CHIP_IS_E1(bp))) + if (!(CHIP_IS_E1x(bp))) port = BP_PATH(bp); else port = params->port; @@@ -10065,9 -9839,8 +10071,9 @@@ cmd_args[2] = PHY84833_CONSTANT_LATENCY + 1; cmd_args[3] = PHY84833_CONSTANT_LATENCY; rc = bnx2x_84833_cmd_hdlr(phy, params, - PHY84833_CMD_SET_EEE_MODE, cmd_args); - if (rc != 0) + PHY84833_CMD_SET_EEE_MODE, cmd_args, + PHY84833_CMDHDLR_MAX_ARGS); + if (rc) DP(NETIF_MSG_LINK, "Cfg AutogrEEEn failed.\n"); } if (initialize) @@@ -10091,48 -9864,6 +10097,48 @@@ MDIO_CTL_REG_84823_USER_CTRL_REG, val); }
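bnx2x_8483x_eee_timers() above stores the computed LPI idle timer into vars->eee_status; the "1u --> 16u" comment refers to a unit change, the hardware register being programmed in 1 usec steps while the shared-memory field holds 16 usec steps, hence the shift by four before masking. A small sketch of that packing, with illustrative mask values standing in for the SHMEM_EEE_* defines:

#include <stdint.h>
#include <stdio.h>

#define EEE_TIMER_MASK       0x00ffffff   /* illustrative timer field mask  */
#define EEE_TIME_OUTPUT_BIT  0x80000000   /* "timer value is exported" flag */

/* Pack a timer given in microseconds into a status word that stores the
 * value in 16-usec units plus a flag saying the value is valid.
 */
static uint32_t pack_eee_timer(uint32_t status, uint32_t idle_usec)
{
        status &= ~(EEE_TIMER_MASK | EEE_TIME_OUTPUT_BIT);
        status |= ((idle_usec >> 4) & EEE_TIMER_MASK) | EEE_TIME_OUTPUT_BIT;
        return status;
}

int main(void)
{
        /* 0x400 usec becomes 0x40 units of 16 usec */
        printf("0x%08x\n", pack_eee_timer(0, 0x400));
        return 0;
}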
+ bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD, + MDIO_84833_TOP_CFG_FW_REV, &val); + + /* Configure EEE support */ + if ((val >= MDIO_84833_TOP_CFG_FW_EEE) && bnx2x_eee_has_cap(params)) { + phy->flags |= FLAGS_EEE_10GBT; + vars->eee_status |= SHMEM_EEE_10G_ADV << + SHMEM_EEE_SUPPORTED_SHIFT; + /* Propogate params' bits --> vars (for migration exposure) */ + if (params->eee_mode & EEE_MODE_ENABLE_LPI) + vars->eee_status |= SHMEM_EEE_LPI_REQUESTED_BIT; + else + vars->eee_status &= ~SHMEM_EEE_LPI_REQUESTED_BIT; + + if (params->eee_mode & EEE_MODE_ADV_LPI) + vars->eee_status |= SHMEM_EEE_REQUESTED_BIT; + else + vars->eee_status &= ~SHMEM_EEE_REQUESTED_BIT; + + rc = bnx2x_8483x_eee_timers(params, vars); + if (rc) { + DP(NETIF_MSG_LINK, "Failed to configure EEE timers\n"); + bnx2x_8483x_disable_eee(phy, params, vars); + return rc; + } + + if ((params->req_duplex[actual_phy_selection] == DUPLEX_FULL) && + (params->eee_mode & EEE_MODE_ADV_LPI) && + (bnx2x_eee_calc_timer(params) || + !(params->eee_mode & EEE_MODE_ENABLE_LPI))) + rc = bnx2x_8483x_enable_eee(phy, params, vars); + else + rc = bnx2x_8483x_disable_eee(phy, params, vars); + if (rc) { + DP(NETIF_MSG_LINK, "Failed to set EEE advertisment\n"); + return rc; + } + } else { + phy->flags &= ~FLAGS_EEE_10GBT; + vars->eee_status &= ~SHMEM_EEE_SUPPORTED_MASK; + } + if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) { /* Bring PHY out of super isolate mode as the final step. */ bnx2x_cl45_read(bp, phy, @@@ -10187,19 -9918,17 +10193,19 @@@ static u8 bnx2x_848xx_read_status(struc DP(NETIF_MSG_LINK, "Legacy speed status = 0x%x\n", legacy_status); link_up = ((legacy_status & (1<<11)) == (1<<11)); - if (link_up) { - legacy_speed = (legacy_status & (3<<9)); - if (legacy_speed == (0<<9)) - vars->line_speed = SPEED_10; - else if (legacy_speed == (1<<9)) - vars->line_speed = SPEED_100; - else if (legacy_speed == (2<<9)) - vars->line_speed = SPEED_1000; - else /* Should not happen */ - vars->line_speed = 0; + legacy_speed = (legacy_status & (3<<9)); + if (legacy_speed == (0<<9)) + vars->line_speed = SPEED_10; + else if (legacy_speed == (1<<9)) + vars->line_speed = SPEED_100; + else if (legacy_speed == (2<<9)) + vars->line_speed = SPEED_1000; + else { /* Should not happen: Treat as link down */ + vars->line_speed = 0; + link_up = 0; + }
+ if (link_up) { if (legacy_status & (1<<8)) vars->duplex = DUPLEX_FULL; else @@@ -10227,7 -9956,7 +10233,7 @@@ } } if (link_up) { - DP(NETIF_MSG_LINK, "BCM84823: link speed is %d\n", + DP(NETIF_MSG_LINK, "BCM848x3: link speed is %d\n", vars->line_speed); bnx2x_ext_phy_resolve_fc(phy, params, vars);
@@@ -10266,31 -9995,6 +10272,31 @@@ if (val & (1<<11)) vars->link_status |= LINK_STATUS_LINK_PARTNER_10GXFD_CAPABLE; + + /* Determine if EEE was negotiated */ + if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) { + u32 eee_shmem = 0; + + bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD, + MDIO_AN_REG_EEE_ADV, &val1); + bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD, + MDIO_AN_REG_LP_EEE_ADV, &val2); + if ((val1 & val2) & 0x8) { + DP(NETIF_MSG_LINK, "EEE negotiated\n"); + vars->eee_status |= SHMEM_EEE_ACTIVE_BIT; + } + + if (val2 & 0x12) + eee_shmem |= SHMEM_EEE_100M_ADV; + if (val2 & 0x4) + eee_shmem |= SHMEM_EEE_1G_ADV; + if (val2 & 0x68) + eee_shmem |= SHMEM_EEE_10G_ADV; + + vars->eee_status &= ~SHMEM_EEE_LP_ADV_STATUS_MASK; + vars->eee_status |= (eee_shmem << + SHMEM_EEE_LP_ADV_STATUS_SHIFT); + } }
return link_up; @@@ -10569,7 -10273,7 +10575,7 @@@ static int bnx2x_54618se_config_init(st u32 cfg_pin;
DP(NETIF_MSG_LINK, "54618SE cfg init\n"); - usleep_range(1000, 1000); + usleep_range(1000, 2000);
/* This works with E3 only, no need to check the chip * before determining the port. @@@ -10638,7 -10342,7 +10644,7 @@@ MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH) fc_val |= MDIO_AN_REG_ADV_PAUSE_PAUSE;
- /* read all advertisement */ + /* Read all advertisement */ bnx2x_cl22_read(bp, phy, 0x09, &an_1000_val); @@@ -10675,7 -10379,7 +10681,7 @@@ 0x09, &an_1000_val);
- /* set 100 speed advertisement */ + /* Set 100 speed advertisement */ if (((phy->req_line_speed == SPEED_AUTO_NEG) && (phy->speed_cap_mask & (PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL | @@@ -10689,7 -10393,7 +10695,7 @@@ DP(NETIF_MSG_LINK, "Advertising 100M\n"); }
- /* set 10 speed advertisement */ + /* Set 10 speed advertisement */ if (((phy->req_line_speed == SPEED_AUTO_NEG) && (phy->speed_cap_mask & (PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL | @@@ -10828,7 -10532,7 +10834,7 @@@ static u8 bnx2x_54618se_read_status(str
/* Get speed operation status */ bnx2x_cl22_read(bp, phy, - 0x19, + MDIO_REG_GPHY_AUX_STATUS, &legacy_status); DP(NETIF_MSG_LINK, "54618SE read_status: 0x%x\n", legacy_status);
@@@ -11055,7 -10759,7 +11061,7 @@@ static u8 bnx2x_7101_read_status(struc DP(NETIF_MSG_LINK, "10G-base-T PMA status 0x%x->0x%x\n", val2, val1); link_up = ((val1 & 4) == 4); - /* if link is up print the AN outcome of the SFX7101 PHY */ + /* If link is up print the AN outcome of the SFX7101 PHY */ if (link_up) { bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_MASTER_STATUS, @@@ -11067,7 -10771,7 +11073,7 @@@ bnx2x_ext_phy_10G_an_resolve(bp, phy, vars); bnx2x_ext_phy_resolve_fc(phy, params, vars);
- /* read LP advertised speeds */ + /* Read LP advertised speeds */ if (val2 & (1<<11)) vars->link_status |= LINK_STATUS_LINK_PARTNER_10GXFD_CAPABLE; @@@ -11386,7 -11090,7 +11392,7 @@@ static struct bnx2x_phy phy_8706 = SUPPORTED_FIBRE | SUPPORTED_Pause | SUPPORTED_Asym_Pause), - .media_type = ETH_PHY_SFP_FIBER, + .media_type = ETH_PHY_SFPP_10G_FIBER, .ver_addr = 0, .req_flow_ctrl = 0, .req_line_speed = 0, @@@ -11545,8 -11249,7 +11551,8 @@@ static struct bnx2x_phy phy_84833 = .def_md_devad = 0, .flags = (FLAGS_FAN_FAILURE_DET_REQ | FLAGS_REARM_LATCH_SIGNAL | - FLAGS_TX_ERROR_CHECK), + FLAGS_TX_ERROR_CHECK | + FLAGS_EEE_10GBT), .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, .mdio_ctrl = 0, @@@ -11725,7 -11428,7 +11731,7 @@@ static int bnx2x_populate_int_phy(struc SUPPORTED_FIBRE | SUPPORTED_Pause | SUPPORTED_Asym_Pause); - phy->media_type = ETH_PHY_SFP_FIBER; + phy->media_type = ETH_PHY_SFPP_10G_FIBER; break; case PORT_HW_CFG_NET_SERDES_IF_KR: phy->media_type = ETH_PHY_KR; @@@ -12265,7 -11968,7 +12271,7 @@@ int bnx2x_phy_init(struct link_params * vars->mac_type = MAC_TYPE_NONE; vars->phy_flags = 0;
- /* disable attentions */ + /* Disable attentions */ bnx2x_bits_dis(bp, NIG_REG_MASK_INTERRUPT_PORT0 + params->port*4, (NIG_MASK_XGXS0_LINK_STATUS | NIG_MASK_XGXS0_LINK10G | @@@ -12314,8 -12017,6 +12320,8 @@@ break; } bnx2x_update_mng(params, vars->link_status); + + bnx2x_update_mng_eee(params, vars->eee_status); return 0; }
@@@ -12325,22 -12026,19 +12331,22 @@@ int bnx2x_link_reset(struct link_param struct bnx2x *bp = params->bp; u8 phy_index, port = params->port, clear_latch_ind = 0; DP(NETIF_MSG_LINK, "Resetting the link of port %d\n", port); - /* disable attentions */ + /* Disable attentions */ vars->link_status = 0; bnx2x_update_mng(params, vars->link_status); + vars->eee_status &= ~(SHMEM_EEE_LP_ADV_STATUS_MASK | + SHMEM_EEE_ACTIVE_BIT); + bnx2x_update_mng_eee(params, vars->eee_status); bnx2x_bits_dis(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, (NIG_MASK_XGXS0_LINK_STATUS | NIG_MASK_XGXS0_LINK10G | NIG_MASK_SERDES0_LINK_STATUS | NIG_MASK_MI_INT));
- /* activate nig drain */ + /* Activate nig drain */ REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + port*4, 1);
- /* disable nig egress interface */ + /* Disable nig egress interface */ if (!CHIP_IS_E3(bp)) { REG_WR(bp, NIG_REG_BMAC0_OUT_EN + port*4, 0); REG_WR(bp, NIG_REG_EGRESS_EMAC0_OUT_EN + port*4, 0); @@@ -12353,15 -12051,15 +12359,15 @@@ bnx2x_xmac_disable(params); bnx2x_umac_disable(params); } - /* disable emac */ + /* Disable emac */ if (!CHIP_IS_E3(bp)) REG_WR(bp, NIG_REG_NIG_EMAC0_EN + port*4, 0);
- msleep(10); + usleep_range(10000, 20000); /* The PHY reset is controlled by GPIO 1 * Hold it as vars low */ - /* clear link led */ + /* Clear link led */ bnx2x_set_mdio_clk(bp, params->chip_id, port); bnx2x_set_led(params, vars, LED_MODE_OFF, 0);
@@@ -12391,9 -12089,9 +12397,9 @@@ params->phy[INT_PHY].link_reset( ¶ms->phy[INT_PHY], params);
- /* disable nig ingress interface */ + /* Disable nig ingress interface */ if (!CHIP_IS_E3(bp)) { - /* reset BigMac */ + /* Reset BigMac */ REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port)); REG_WR(bp, NIG_REG_BMAC0_IN_EN + port*4, 0); @@@ -12450,7 -12148,7 +12456,7 @@@ static int bnx2x_8073_common_init_phy(s DP(NETIF_MSG_LINK, "populate_phy failed\n"); return -EINVAL; } - /* disable attentions */ + /* Disable attentions */ bnx2x_bits_dis(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port_of_path*4, (NIG_MASK_XGXS0_LINK_STATUS | @@@ -12524,7 -12222,7 +12530,7 @@@ bnx2x_cl45_write(bp, phy_blk[port], MDIO_PMA_DEVAD, MDIO_PMA_REG_TX_POWER_DOWN, (val & (~(1<<10)))); - msleep(15); + usleep_range(15000, 30000);
/* Read modify write the SPI-ROM version select register */ bnx2x_cl45_read(bp, phy_blk[port], @@@ -12556,7 -12254,7 +12562,7 @@@ static int bnx2x_8726_common_init_phy(s REG_WR(bp, MISC_REG_GPIO_EVENT_EN, val);
bnx2x_ext_phy_hw_reset(bp, 0); - msleep(5); + usleep_range(5000, 10000); for (port = 0; port < PORT_MAX; port++) { u32 shmem_base, shmem2_base;
@@@ -12663,11 -12361,11 +12669,11 @@@ static int bnx2x_8727_common_init_phy(s /* Initiate PHY reset*/ bnx2x_set_gpio(bp, reset_gpio, MISC_REGISTERS_GPIO_OUTPUT_LOW, port); - msleep(1); + usleep_range(1000, 2000); bnx2x_set_gpio(bp, reset_gpio, MISC_REGISTERS_GPIO_OUTPUT_HIGH, port);
- msleep(5); + usleep_range(5000, 10000);
/* PART1 - Reset both phys */ for (port = PORT_MAX - 1; port >= PORT_0; port--) { @@@ -12761,7 -12459,7 +12767,7 @@@ static int bnx2x_84833_pre_init_phy(str MDIO_PMA_REG_CTRL, &val); if (!(val & (1<<15))) break; - msleep(1); + usleep_range(1000, 2000); } if (cnt >= 1500) { DP(NETIF_MSG_LINK, "84833 reset timeout\n"); @@@ -12851,7 -12549,7 +12857,7 @@@ static int bnx2x_ext_phy_common_init(st break; }
- if (rc != 0) + if (rc) netdev_err(bp->dev, "Warning: PHY was not initialized," " Port %d\n", 0); @@@ -12932,41 -12630,30 +12938,41 @@@ static void bnx2x_check_over_curr(struc vars->phy_flags &= ~PHY_OVER_CURRENT_FLAG; }
-static void bnx2x_analyze_link_error(struct link_params *params, - struct link_vars *vars, u32 lss_status, - u8 notify) +/* Returns 0 if no change occurred since last check; 1 otherwise. */ +static u8 bnx2x_analyze_link_error(struct link_params *params, + struct link_vars *vars, u32 status, + u32 phy_flag, u32 link_flag, u8 notify) { struct bnx2x *bp = params->bp; /* Compare new value with previous value */ u8 led_mode; - u32 half_open_conn = (vars->phy_flags & PHY_HALF_OPEN_CONN_FLAG) > 0; + u32 old_status = (vars->phy_flags & phy_flag) ? 1 : 0;
- if ((lss_status ^ half_open_conn) == 0) - return; + if ((status ^ old_status) == 0) + return 0;
/* If values differ */ - DP(NETIF_MSG_LINK, "Link changed:%x %x->%x\n", vars->link_up, - half_open_conn, lss_status); + switch (phy_flag) { + case PHY_HALF_OPEN_CONN_FLAG: + DP(NETIF_MSG_LINK, "Analyze Remote Fault\n"); + break; + case PHY_SFP_TX_FAULT_FLAG: + DP(NETIF_MSG_LINK, "Analyze TX Fault\n"); + break; + default: + DP(NETIF_MSG_LINK, "Analyze UNKNOWN\n"); + } + DP(NETIF_MSG_LINK, "Link changed:[%x %x]->%x\n", vars->link_up, + old_status, status);
/* a. Update shmem->link_status accordingly * b. Update link_vars->link_up */ - if (lss_status) { - DP(NETIF_MSG_LINK, "Remote Fault detected !!!\n"); + if (status) { vars->link_status &= ~LINK_STATUS_LINK_UP; + vars->link_status |= link_flag; vars->link_up = 0; - vars->phy_flags |= PHY_HALF_OPEN_CONN_FLAG; + vars->phy_flags |= phy_flag;
/* activate nig drain */ REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4, 1); @@@ -12975,10 -12662,10 +12981,10 @@@ */ led_mode = LED_MODE_OFF; } else { - DP(NETIF_MSG_LINK, "Remote Fault cleared\n"); vars->link_status |= LINK_STATUS_LINK_UP; + vars->link_status &= ~link_flag; vars->link_up = 1; - vars->phy_flags &= ~PHY_HALF_OPEN_CONN_FLAG; + vars->phy_flags &= ~phy_flag; led_mode = LED_MODE_OPER;
/* Clear nig drain */ @@@ -12995,8 -12682,6 +13001,8 @@@ vars->periodic_flags |= PERIODIC_FLAGS_LINK_EVENT; if (notify) bnx2x_notify_link_changed(bp); + + return 1; }
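bnx2x_analyze_link_error() is now a generic edge detector over per-PHY fault flags: it compares the freshly sampled fault status with the bit cached in phy_flags, and only on a change does it update the link state, shared memory and the cached flag, returning 1 so callers (the half-open-connection check and the new SFP+ TX-fault check) know an LED or status update is due. The core pattern, isolated into a compilable sketch:

#include <stdint.h>
#include <stdio.h>

struct link_state {
        uint32_t phy_flags;   /* cached fault bits          */
        int      link_up;     /* current logical link state */
};

/* Returns 0 if nothing changed since the last check, 1 otherwise. */
static int analyze_fault(struct link_state *ls, uint32_t flag, int fault_now)
{
        int fault_was = (ls->phy_flags & flag) ? 1 : 0;

        if (fault_now == fault_was)
                return 0;               /* no edge, nothing to do */

        if (fault_now) {
                ls->phy_flags |= flag;  /* fault asserted: take link down    */
                ls->link_up = 0;
        } else {
                ls->phy_flags &= ~flag; /* fault cleared: link may come back */
                ls->link_up = 1;
        }
        return 1;                       /* caller updates LEDs / shmem */
}

int main(void)
{
        struct link_state ls = { .phy_flags = 0, .link_up = 1 };

        /* Second call sees the same fault again and reports no change. */
        printf("%d %d\n", analyze_fault(&ls, 1 << 0, 1), analyze_fault(&ls, 1 << 0, 1));
        return 0;
}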
/****************************************************************************** @@@ -13038,9 -12723,7 +13044,9 @@@ int bnx2x_check_half_open_conn(struct l if (REG_RD(bp, mac_base + XMAC_REG_RX_LSS_STATUS)) lss_status = 1;
- bnx2x_analyze_link_error(params, vars, lss_status, notify); + bnx2x_analyze_link_error(params, vars, lss_status, + PHY_HALF_OPEN_CONN_FLAG, + LINK_STATUS_NONE, notify); } else if (REG_RD(bp, MISC_REG_RESET_REG_2) & (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << params->port)) { /* Check E1X / E2 BMAC */ @@@ -13057,55 -12740,11 +13063,55 @@@ REG_RD_DMAE(bp, mac_base + lss_status_reg, wb_data, 2); lss_status = (wb_data[0] > 0);
- bnx2x_analyze_link_error(params, vars, lss_status, notify); + bnx2x_analyze_link_error(params, vars, lss_status, + PHY_HALF_OPEN_CONN_FLAG, + LINK_STATUS_NONE, notify); } return 0; } +static void bnx2x_sfp_tx_fault_detection(struct bnx2x_phy *phy, + struct link_params *params, + struct link_vars *vars) +{ + struct bnx2x *bp = params->bp; + u32 cfg_pin, value = 0; + u8 led_change, port = params->port;
+ /* Get The SFP+ TX_Fault controlling pin ([eg]pio) */ + cfg_pin = (REG_RD(bp, params->shmem_base + offsetof(struct shmem_region, + dev_info.port_hw_config[port].e3_cmn_pin_cfg)) & + PORT_HW_CFG_E3_TX_FAULT_MASK) >> + PORT_HW_CFG_E3_TX_FAULT_SHIFT; + + if (bnx2x_get_cfg_pin(bp, cfg_pin, &value)) { + DP(NETIF_MSG_LINK, "Failed to read pin 0x%02x\n", cfg_pin); + return; + } + + led_change = bnx2x_analyze_link_error(params, vars, value, + PHY_SFP_TX_FAULT_FLAG, + LINK_STATUS_SFP_TX_FAULT, 1); + + if (led_change) { + /* Change TX_Fault led, set link status for further syncs */ + u8 led_mode; + + if (vars->phy_flags & PHY_SFP_TX_FAULT_FLAG) { + led_mode = MISC_REGISTERS_GPIO_HIGH; + vars->link_status |= LINK_STATUS_SFP_TX_FAULT; + } else { + led_mode = MISC_REGISTERS_GPIO_LOW; + vars->link_status &= ~LINK_STATUS_SFP_TX_FAULT; + } + + /* If module is unapproved, led should be on regardless */ + if (!(phy->flags & FLAGS_SFP_NOT_APPROVED)) { + DP(NETIF_MSG_LINK, "Change TX_Fault LED: ->%x\n", + led_mode); + bnx2x_set_e3_module_fault_led(params, led_mode); + } + } +} void bnx2x_period_func(struct link_params *params, struct link_vars *vars) { u16 phy_idx; @@@ -13124,26 -12763,7 +13130,26 @@@ struct bnx2x_phy *phy = ¶ms->phy[INT_PHY]; bnx2x_set_aer_mmd(params, phy); bnx2x_check_over_curr(params, vars); - bnx2x_warpcore_config_runtime(phy, params, vars); + if (vars->rx_tx_asic_rst) + bnx2x_warpcore_config_runtime(phy, params, vars); + + if ((REG_RD(bp, params->shmem_base + + offsetof(struct shmem_region, dev_info. + port_hw_config[params->port].default_cfg)) + & PORT_HW_CFG_NET_SERDES_IF_MASK) == + PORT_HW_CFG_NET_SERDES_IF_SFI) { + if (bnx2x_is_sfp_module_plugged(phy, params)) { + bnx2x_sfp_tx_fault_detection(phy, params, vars); + } else if (vars->link_status & + LINK_STATUS_SFP_TX_FAULT) { + /* Clean trail, interrupt corrects the leds */ + vars->link_status &= ~LINK_STATUS_SFP_TX_FAULT; + vars->phy_flags &= ~PHY_SFP_TX_FAULT_FLAG; + /* Update link status in the shared memory */ + bnx2x_update_mng(params, vars->link_status); + } + } + }
} diff --combined drivers/net/ethernet/emulex/benet/be_cmds.c index f899752,921c208..5eab791 --- a/drivers/net/ethernet/emulex/benet/be_cmds.c +++ b/drivers/net/ethernet/emulex/benet/be_cmds.c @@@ -122,15 -122,15 +122,15 @@@ static int be_mcc_compl_process(struct goto done;
if (compl_status == MCC_STATUS_UNAUTHORIZED_REQUEST) { - dev_warn(&adapter->pdev->dev, "This domain(VM) is not " - "permitted to execute this cmd (opcode %d)\n", - opcode); + dev_warn(&adapter->pdev->dev, + "opcode %d-%d is not permitted\n", + opcode, subsystem); } else { extd_status = (compl->status >> CQE_STATUS_EXTD_SHIFT) & CQE_STATUS_EXTD_MASK; - dev_err(&adapter->pdev->dev, "Cmd (opcode %d) failed:" - "status %d, extd-status %d\n", - opcode, compl_status, extd_status); + dev_err(&adapter->pdev->dev, + "opcode %d-%d failed:status %d-%d\n", + opcode, subsystem, compl_status, extd_status); } } done: @@@ -1132,7 -1132,7 +1132,7 @@@ err * Uses MCCQ */ int be_cmd_if_create(struct be_adapter *adapter, u32 cap_flags, u32 en_flags, - u8 *mac, u32 *if_handle, u32 *pmac_id, u32 domain) + u32 *if_handle, u32 domain) { struct be_mcc_wrb *wrb; struct be_cmd_req_if_create *req; @@@ -1152,13 -1152,17 +1152,13 @@@ req->hdr.domain = domain; req->capability_flags = cpu_to_le32(cap_flags); req->enable_flags = cpu_to_le32(en_flags); - if (mac) - memcpy(req->mac_addr, mac, ETH_ALEN); - else - req->pmac_invalid = true; + + req->pmac_invalid = true;
status = be_mcc_notify_wait(adapter); if (!status) { struct be_cmd_resp_if_create *resp = embedded_payload(wrb); *if_handle = le32_to_cpu(resp->interface_id); - if (mac) - *pmac_id = le32_to_cpu(resp->pmac_id); }
err: @@@ -2326,8 -2330,8 +2326,8 @@@ err }
/* Uses synchronous MCCQ */ -int be_cmd_get_mac_from_list(struct be_adapter *adapter, u32 domain, - bool *pmac_id_active, u32 *pmac_id, u8 *mac) +int be_cmd_get_mac_from_list(struct be_adapter *adapter, u8 *mac, + bool *pmac_id_active, u32 *pmac_id, u8 domain) { struct be_mcc_wrb *wrb; struct be_cmd_req_get_mac_list *req; @@@ -2372,9 -2376,8 +2372,9 @@@ get_mac_list_cmd.va; mac_count = resp->true_mac_count + resp->pseudo_mac_count; /* Mac list returned could contain one or more active mac_ids - * or one or more pseudo permanent mac addresses. If an active - * mac_id is present, return first active mac_id found + * or one or more true or pseudo permanent mac addresses. + * If an active mac_id is present, return first active mac_id + * found. */ for (i = 0; i < mac_count; i++) { struct get_list_macaddr *mac_entry; @@@ -2393,7 -2396,7 +2393,7 @@@ goto out; } } - /* If no active mac_id found, return first pseudo mac addr */ + /* If no active mac_id found, return first mac addr */ *pmac_id_active = false; memcpy(mac, resp->macaddr_list[0].mac_addr_id.macaddr, ETH_ALEN); diff --combined drivers/net/ethernet/emulex/benet/be_cmds.h index 2f6bb06,b3f3fc3..3c938f5 --- a/drivers/net/ethernet/emulex/benet/be_cmds.h +++ b/drivers/net/ethernet/emulex/benet/be_cmds.h @@@ -1566,7 -1566,7 +1566,7 @@@ struct be_hw_stats_v1 u32 rsvd0[BE_TXP_SW_SZ]; struct be_erx_stats_v1 erx; struct be_pmem_stats pmem; - u32 rsvd1[3]; + u32 rsvd1[18]; };
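be_cmd_get_mac_from_list() prefers the first MAC the firmware marks active and only falls back to the first (true or pseudo-permanent) entry when none is active, reporting through *pmac_id_active which case was hit. The selection itself, sketched with an invented entry layout rather than the real response structure:

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

#define ETH_ALEN 6

/* Invented, simplified entry; the firmware response encodes this differently. */
struct mac_entry {
        bool          active;
        unsigned char addr[ETH_ALEN];
};

/* Copy the first active MAC if there is one, else fall back to the first
 * list entry.  Returns true when an active address was found.
 */
static bool pick_mac(const struct mac_entry *list, int count,
                     unsigned char out[ETH_ALEN])
{
        int i;

        for (i = 0; i < count; i++) {
                if (list[i].active) {
                        memcpy(out, list[i].addr, ETH_ALEN);
                        return true;
                }
        }
        memcpy(out, list[0].addr, ETH_ALEN);  /* true/pseudo permanent MAC */
        return false;
}

int main(void)
{
        struct mac_entry list[2] = {
                { false, { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 } },
                { true,  { 0x00, 0x11, 0x22, 0x33, 0x44, 0x66 } },
        };
        unsigned char mac[ETH_ALEN];

        printf("active=%d last-octet=%02x\n", pick_mac(list, 2, mac), mac[5]);
        return 0;
}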
struct be_cmd_req_get_stats_v1 { @@@ -1664,7 -1664,8 +1664,7 @@@ extern int be_cmd_pmac_add(struct be_ad extern int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id, int pmac_id, u32 domain); extern int be_cmd_if_create(struct be_adapter *adapter, u32 cap_flags, - u32 en_flags, u8 *mac, u32 *if_handle, u32 *pmac_id, - u32 domain); + u32 en_flags, u32 *if_handle, u32 domain); extern int be_cmd_if_destroy(struct be_adapter *adapter, int if_handle, u32 domain); extern int be_cmd_eq_create(struct be_adapter *adapter, @@@ -1750,9 -1751,8 +1750,9 @@@ extern int be_cmd_get_cntl_attributes(s extern int be_cmd_req_native_mode(struct be_adapter *adapter); extern int be_cmd_get_reg_len(struct be_adapter *adapter, u32 *log_size); extern void be_cmd_get_regs(struct be_adapter *adapter, u32 buf_len, void *buf); -extern int be_cmd_get_mac_from_list(struct be_adapter *adapter, u32 domain, - bool *pmac_id_active, u32 *pmac_id, u8 *mac); +extern int be_cmd_get_mac_from_list(struct be_adapter *adapter, u8 *mac, + bool *pmac_id_active, u32 *pmac_id, + u8 domain); extern int be_cmd_set_mac_list(struct be_adapter *adapter, u8 *mac_array, u8 mac_count, u32 domain); extern int be_cmd_set_hsw_config(struct be_adapter *adapter, u16 pvid, diff --combined drivers/net/ethernet/emulex/benet/be_main.c index 5a34503,501dfa9..cbd245a --- a/drivers/net/ethernet/emulex/benet/be_main.c +++ b/drivers/net/ethernet/emulex/benet/be_main.c @@@ -719,8 -719,8 +719,8 @@@ static netdev_tx_t be_xmit(struct sk_bu * 60 bytes long. * As a workaround disable TX vlan offloading in such cases. */ - if (unlikely(vlan_tx_tag_present(skb) && - (skb->ip_summed != CHECKSUM_PARTIAL || skb->len <= 60))) { + if (vlan_tx_tag_present(skb) && + (skb->ip_summed != CHECKSUM_PARTIAL || skb->len <= 60)) { skb = skb_share_check(skb, GFP_ATOMIC); if (unlikely(!skb)) goto tx_drop; @@@ -786,12 -786,19 +786,12 @@@ static int be_change_mtu(struct net_dev * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE. * If the user configures more, place BE in vlan promiscuous mode. */ -static int be_vid_config(struct be_adapter *adapter, bool vf, u32 vf_num) +static int be_vid_config(struct be_adapter *adapter) { - struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf_num]; - u16 vtag[BE_NUM_VLANS_SUPPORTED]; - u16 ntags = 0, i; + u16 vids[BE_NUM_VLANS_SUPPORTED]; + u16 num = 0, i; int status = 0;
- if (vf) { - vtag[0] = cpu_to_le16(vf_cfg->vlan_tag); - status = be_cmd_vlan_config(adapter, vf_cfg->if_handle, vtag, - 1, 1, 0); - } - /* No need to further configure vids if in promiscuous mode */ if (adapter->promiscuous) return 0; @@@ -802,10 -809,10 +802,10 @@@ /* Construct VLAN Table to give to HW */ for (i = 0; i < VLAN_N_VID; i++) if (adapter->vlan_tag[i]) - vtag[ntags++] = cpu_to_le16(i); + vids[num++] = cpu_to_le16(i);
status = be_cmd_vlan_config(adapter, adapter->if_handle, - vtag, ntags, 1, 0); + vids, num, 1, 0);
/* Set to VLAN promisc mode as setting VLAN filter failed */ if (status) { @@@ -834,7 -841,7 +834,7 @@@ static int be_vlan_add_vid(struct net_d
adapter->vlan_tag[vid] = 1; if (adapter->vlans_added <= (adapter->max_vlans + 1)) - status = be_vid_config(adapter, false, 0); + status = be_vid_config(adapter);
if (!status) adapter->vlans_added++; @@@ -856,7 -863,7 +856,7 @@@ static int be_vlan_rem_vid(struct net_d
adapter->vlan_tag[vid] = 0; if (adapter->vlans_added <= adapter->max_vlans) - status = be_vid_config(adapter, false, 0); + status = be_vid_config(adapter);
if (!status) adapter->vlans_added--; @@@ -883,7 -890,7 +883,7 @@@ static void be_set_rx_mode(struct net_d be_cmd_rx_filter(adapter, IFF_PROMISC, OFF);
if (adapter->vlans_added) - be_vid_config(adapter, false, 0); + be_vid_config(adapter); }
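With the VF-specific arguments gone, be_vid_config() simply scans the adapter's vlan_tag[] table, builds the list of configured VLAN IDs for the firmware, and drops to VLAN promiscuous mode if programming the filter fails. The table construction is essentially the following (a sketch; the driver additionally converts each ID to little-endian):

#include <stdint.h>
#include <stdio.h>

#define VLAN_N_VID 4096

/* Collect the IDs of all configured VLANs into vids[]; returns how many. */
static int build_vid_table(const uint8_t vlan_tag[VLAN_N_VID],
                           uint16_t *vids, int max_vids)
{
        int i, num = 0;

        for (i = 0; i < VLAN_N_VID && num < max_vids; i++)
                if (vlan_tag[i])
                        vids[num++] = (uint16_t)i;
        return num;
}

int main(void)
{
        static uint8_t vlan_tag[VLAN_N_VID];
        uint16_t vids[64];

        vlan_tag[100] = vlan_tag[200] = 1;
        printf("%d vids, first=%u\n", build_vid_table(vlan_tag, vids, 64), vids[0]);
        return 0;
}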
/* Enable multicast promisc if num configured exceeds what we support */ @@@ -1050,8 -1057,6 +1050,8 @@@ static int be_find_vfs(struct be_adapte u16 offset, stride;
pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV); + if (!pos) + return 0; pci_read_config_word(pdev, pos + PCI_SRIOV_VF_OFFSET, &offset); pci_read_config_word(pdev, pos + PCI_SRIOV_VF_STRIDE, &stride);
@@@ -1893,12 -1898,6 +1893,12 @@@ static int be_rx_cqs_create(struct be_a */ adapter->num_rx_qs = (num_irqs(adapter) > 1) ? num_irqs(adapter) + 1 : 1; + if (adapter->num_rx_qs != MAX_RX_QS) { + rtnl_lock(); + netif_set_real_num_rx_queues(adapter->netdev, + adapter->num_rx_qs); + rtnl_unlock(); + }
adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE; for_all_rx_queues(adapter, rxo, i) { @@@ -2545,6 -2544,7 +2545,6 @@@ static int be_clear(struct be_adapter * be_cmd_fw_clean(adapter);
be_msix_disable(adapter); - pci_write_config_dword(adapter->pdev, PCICFG_CUST_SCRATCHPAD_CSR, 0); return 0; }
@@@ -2602,8 -2602,8 +2602,8 @@@ static int be_vf_setup(struct be_adapte cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST | BE_IF_FLAGS_MULTICAST; for_all_vfs(adapter, vf_cfg, vf) { - status = be_cmd_if_create(adapter, cap_flags, en_flags, NULL, - &vf_cfg->if_handle, NULL, vf + 1); + status = be_cmd_if_create(adapter, cap_flags, en_flags, + &vf_cfg->if_handle, vf + 1); if (status) goto err; } @@@ -2643,43 -2643,29 +2643,43 @@@ static void be_setup_init(struct be_ada adapter->phy.forced_port_speed = -1; }
-static int be_add_mac_from_list(struct be_adapter *adapter, u8 *mac) +static int be_get_mac_addr(struct be_adapter *adapter, u8 *mac, u32 if_handle, + bool *active_mac, u32 *pmac_id) { - u32 pmac_id; - int status; - bool pmac_id_active; + int status = 0;
- status = be_cmd_get_mac_from_list(adapter, 0, &pmac_id_active, - &pmac_id, mac); - if (status != 0) - goto do_none; + if (!is_zero_ether_addr(adapter->netdev->perm_addr)) { + memcpy(mac, adapter->netdev->dev_addr, ETH_ALEN); + if (!lancer_chip(adapter) && !be_physfn(adapter)) + *active_mac = true; + else + *active_mac = false;
- if (pmac_id_active) { - status = be_cmd_mac_addr_query(adapter, mac, - MAC_ADDRESS_TYPE_NETWORK, - false, adapter->if_handle, pmac_id); + return status; + }
- if (!status) - adapter->pmac_id[0] = pmac_id; + if (lancer_chip(adapter)) { + status = be_cmd_get_mac_from_list(adapter, mac, + active_mac, pmac_id, 0); + if (*active_mac) { + status = be_cmd_mac_addr_query(adapter, mac, + MAC_ADDRESS_TYPE_NETWORK, + false, if_handle, + *pmac_id); + } + } else if (be_physfn(adapter)) { + /* For BE3, for PF get permanent MAC */ + status = be_cmd_mac_addr_query(adapter, mac, + MAC_ADDRESS_TYPE_NETWORK, true, + 0, 0); + *active_mac = false; } else { - status = be_cmd_pmac_add(adapter, mac, - adapter->if_handle, &adapter->pmac_id[0], 0); + /* For BE3, for VF get soft MAC assigned by PF*/ + status = be_cmd_mac_addr_query(adapter, mac, + MAC_ADDRESS_TYPE_NETWORK, false, + if_handle, 0); + *active_mac = true; } -do_none: return status; }
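The new be_get_mac_addr() gathers the previously scattered MAC lookup logic into one place: reuse the address already on the netdev when a permanent one is known, consult the firmware MAC list on Lancer, query the permanent MAC for a BE3 PF, and ask for the PF-provisioned address for a BE3 VF. The decision tree, as a hedged sketch with boolean inputs standing in for the adapter checks:

#include <stdbool.h>
#include <stdio.h>

enum mac_source {
        MAC_FROM_NETDEV,      /* permanent address already populated    */
        MAC_FROM_FW_LIST,     /* Lancer: consult the firmware MAC list  */
        MAC_PERMANENT_QUERY,  /* BE3 PF: query its permanent MAC        */
        MAC_FROM_PF,          /* BE3 VF: use the MAC the PF provisioned */
};

static enum mac_source pick_mac_source(bool have_perm_addr,
                                       bool is_lancer, bool is_pf)
{
        if (have_perm_addr)
                return MAC_FROM_NETDEV;
        if (is_lancer)
                return MAC_FROM_FW_LIST;
        return is_pf ? MAC_PERMANENT_QUERY : MAC_FROM_PF;
}

int main(void)
{
        printf("%d\n", pick_mac_source(false, false, true));
        return 0;
}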
@@@ -2700,12 -2686,12 +2700,12 @@@ static int be_get_config(struct be_adap
static int be_setup(struct be_adapter *adapter) { - struct net_device *netdev = adapter->netdev; struct device *dev = &adapter->pdev->dev; u32 cap_flags, en_flags; u32 tx_fc, rx_fc; int status; u8 mac[ETH_ALEN]; + bool active_mac;
be_setup_init(adapter);
@@@ -2731,6 -2717,14 +2731,6 @@@ if (status) goto err;
- memset(mac, 0, ETH_ALEN); - status = be_cmd_mac_addr_query(adapter, mac, MAC_ADDRESS_TYPE_NETWORK, - true /*permanent */, 0, 0); - if (status) - return status; - memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN); - memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN); - en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST | BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS; cap_flags = en_flags | BE_IF_FLAGS_MCAST_PROMISCUOUS | @@@ -2740,29 -2734,27 +2740,29 @@@ cap_flags |= BE_IF_FLAGS_RSS; en_flags |= BE_IF_FLAGS_RSS; } + status = be_cmd_if_create(adapter, cap_flags, en_flags, - netdev->dev_addr, &adapter->if_handle, - &adapter->pmac_id[0], 0); + &adapter->if_handle, 0); if (status != 0) goto err;
- /* The VF's permanent mac queried from card is incorrect. - * For BEx: Query the mac configued by the PF using if_handle - * For Lancer: Get and use mac_list to obtain mac address. - */ - if (!be_physfn(adapter)) { - if (lancer_chip(adapter)) - status = be_add_mac_from_list(adapter, mac); - else - status = be_cmd_mac_addr_query(adapter, mac, - MAC_ADDRESS_TYPE_NETWORK, false, - adapter->if_handle, 0); - if (!status) { - memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN); - memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN); - } + memset(mac, 0, ETH_ALEN); + active_mac = false; + status = be_get_mac_addr(adapter, mac, adapter->if_handle, + &active_mac, &adapter->pmac_id[0]); + if (status != 0) + goto err; + + if (!active_mac) { + status = be_cmd_pmac_add(adapter, mac, adapter->if_handle, + &adapter->pmac_id[0], 0); + if (status != 0) + goto err; + } + + if (is_zero_ether_addr(adapter->netdev->dev_addr)) { + memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN); + memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN); }
status = be_tx_qs_create(adapter); @@@ -2771,8 -2763,7 +2771,8 @@@
be_cmd_get_fw_ver(adapter, adapter->fw_ver, NULL);
- be_vid_config(adapter, false, 0); + if (adapter->vlans_added) + be_vid_config(adapter);
be_set_rx_mode(adapter->netdev);
@@@ -2782,6 -2773,8 +2782,6 @@@ be_cmd_set_flow_control(adapter, adapter->tx_fc, adapter->rx_fc);
- pcie_set_readrq(adapter->pdev, 4096); - if (be_physfn(adapter) && num_vfs) { if (adapter->dev_num_vfs) be_vf_setup(adapter); @@@ -2795,6 -2788,8 +2795,6 @@@
schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000)); adapter->flags |= BE_FLAGS_WORKER_SCHEDULED; - - pci_write_config_dword(adapter->pdev, PCICFG_CUST_SCRATCHPAD_CSR, 1); return 0; err: be_clear(adapter); @@@ -3242,7 -3237,7 +3242,7 @@@ static void be_netdev_init(struct net_d
netdev->flags |= IFF_MULTICAST;
- netif_set_gso_max_size(netdev, 65535); + netif_set_gso_max_size(netdev, 65535 - ETH_HLEN);
netdev->netdev_ops = &be_netdev_ops;
@@@ -3732,7 -3727,10 +3732,7 @@@ reschedule
static bool be_reset_required(struct be_adapter *adapter) { - u32 reg; - - pci_read_config_dword(adapter->pdev, PCICFG_CUST_SCRATCHPAD_CSR, ®); - return reg; + return be_find_vfs(adapter, ENABLED) > 0 ? false : true; }
static int __devinit be_probe(struct pci_dev *pdev, @@@ -3751,7 -3749,7 +3751,7 @@@ goto disable_dev; pci_set_master(pdev);
- netdev = alloc_etherdev_mq(sizeof(struct be_adapter), MAX_TX_QS); + netdev = alloc_etherdev_mqs(sizeof(*adapter), MAX_TX_QS, MAX_RX_QS); if (netdev == NULL) { status = -ENOMEM; goto rel_reg; diff --combined drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index 69a660b,cbb05d6..21e083c --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@@ -790,10 -790,12 +790,10 @@@ static bool ixgbe_clean_tx_irq(struct i total_packets += tx_buffer->gso_segs;
#ifdef CONFIG_IXGBE_PTP - if (unlikely(tx_buffer->tx_flags & - IXGBE_TX_FLAGS_TSTAMP)) - ixgbe_ptp_tx_hwtstamp(q_vector, - tx_buffer->skb); - + if (unlikely(tx_buffer->tx_flags & IXGBE_TX_FLAGS_TSTAMP)) + ixgbe_ptp_tx_hwtstamp(q_vector, tx_buffer->skb); #endif + /* free the skb */ dev_kfree_skb_any(tx_buffer->skb);
@@@ -1146,7 -1148,7 +1146,7 @@@ static bool ixgbe_alloc_mapped_page(str
/* alloc new page for storage */ if (likely(!page)) { - page = alloc_pages(GFP_ATOMIC | __GFP_COLD, + page = alloc_pages(GFP_ATOMIC | __GFP_COLD | __GFP_COMP, ixgbe_rx_pg_order(rx_ring)); if (unlikely(!page)) { rx_ring->rx_stats.alloc_rx_page_failed++; @@@ -1397,7 -1399,8 +1397,7 @@@ static void ixgbe_process_skb_fields(st ixgbe_rx_checksum(rx_ring, rx_desc, skb);
#ifdef CONFIG_IXGBE_PTP - if (ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_STAT_TS)) - ixgbe_ptp_rx_hwtstamp(rx_ring->q_vector, skb); + ixgbe_ptp_rx_hwtstamp(rx_ring->q_vector, rx_desc, skb); #endif
if ((dev->features & NETIF_F_HW_VLAN_RX) && @@@ -6380,12 -6383,17 +6380,12 @@@ static netdev_tx_t ixgbe_xmit_frame(str struct ixgbe_adapter *adapter = netdev_priv(netdev); struct ixgbe_ring *tx_ring;
- if (skb->len <= 0) { - dev_kfree_skb_any(skb); - return NETDEV_TX_OK; - } - /* * The minimum packet size for olinfo paylen is 17 so pad the skb * in order to meet this minimum size requirement. */ - if (skb->len < 17) { - if (skb_padto(skb, 17)) + if (unlikely(skb->len < 17)) { + if (skb_pad(skb, 17 - skb->len)) return NETDEV_TX_OK; skb->len = 17; } diff --combined drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c index cb7d1b2,dcebd12..daddd84 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c @@@ -26,7 -26,6 +26,7 @@@ *******************************************************************************/ #include "ixgbe.h" #include <linux/export.h> +#include <linux/ptp_classify.h>
/* * The 82599 and the X540 do not have true 64bit nanosecond scale @@@ -101,10 -100,6 +101,10 @@@ #define NSECS_PER_SEC 1000000000ULL #endif
+static struct sock_filter ptp_filter[] = { + PTP_FILTER +}; + /** * ixgbe_ptp_read - read raw cycle counter (to be used by time counter) * @cc - the cyclecounter structure @@@ -312,14 -307,13 +312,14 @@@ void ixgbe_ptp_check_pps_event(struct i !(adapter->flags2 & IXGBE_FLAG2_PTP_PPS_ENABLED)) return;
- switch (hw->mac.type) { - case ixgbe_mac_X540: - if (eicr & IXGBE_EICR_TIMESYNC) + if (unlikely(eicr & IXGBE_EICR_TIMESYNC)) { + switch (hw->mac.type) { + case ixgbe_mac_X540: ptp_clock_event(adapter->ptp_clock, &event); - break; - default: - break; + break; + default: + break; + } } }
@@@ -431,68 -425,6 +431,68 @@@ void ixgbe_ptp_overflow_check(struct ix }
/** + * ixgbe_ptp_match - determine if this skb matches a ptp packet + * @skb: pointer to the skb + * @hwtstamp: pointer to the hwtstamp_config to check + * + * Determine whether the skb should have been timestamped, assuming the + * hwtstamp was set via the hwtstamp ioctl. Returns non-zero when the packet + * should have a timestamp waiting in the registers, and 0 otherwise. + * + * V1 packets have to check the version type to determine whether they are + * correct. However, we can't directly access the data because it might be + * fragmented in the SKB, in paged memory. In order to work around this, we + * use skb_copy_bits which will properly copy the data whether it is in the + * paged memory fragments or not. We have to copy the IP header as well as the + * message type. + */ +static int ixgbe_ptp_match(struct sk_buff *skb, int rx_filter) +{ + struct iphdr iph; + u8 msgtype; + unsigned int type, offset; + + if (rx_filter == HWTSTAMP_FILTER_NONE) + return 0; + + type = sk_run_filter(skb, ptp_filter); + + if (likely(rx_filter == HWTSTAMP_FILTER_PTP_V2_EVENT)) + return type & PTP_CLASS_V2; + + /* For the remaining cases actually check message type */ + switch (type) { + case PTP_CLASS_V1_IPV4: + skb_copy_bits(skb, OFF_IHL, &iph, sizeof(iph)); + offset = ETH_HLEN + (iph.ihl << 2) + UDP_HLEN + OFF_PTP_CONTROL; + break; + case PTP_CLASS_V1_IPV6: + offset = OFF_PTP6 + OFF_PTP_CONTROL; + break; + default: + /* other cases invalid or handled above */ + return 0; + } + + /* Make sure our buffer is long enough */ + if (skb->len < offset) + return 0; + + skb_copy_bits(skb, offset, &msgtype, sizeof(msgtype)); + + switch (rx_filter) { + case HWTSTAMP_FILTER_PTP_V1_L4_SYNC: + return (msgtype == IXGBE_RXMTRL_V1_SYNC_MSG); + break; + case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ: + return (msgtype == IXGBE_RXMTRL_V1_DELAY_REQ_MSG); + break; + default: + return 0; + } +} + +/** * ixgbe_ptp_tx_hwtstamp - utility function which checks for TX time stamp * @q_vector: structure containing interrupt and ring information * @skb: particular skb to send timestamp with @@@ -541,7 -473,6 +541,7 @@@ void ixgbe_ptp_tx_hwtstamp(struct ixgbe /** * ixgbe_ptp_rx_hwtstamp - utility function which checks for RX time stamp * @q_vector: structure containing interrupt and ring information + * @rx_desc: the rx descriptor * @skb: particular skb to send timestamp with * * if the timestamp is valid, we convert it into the timecounter ns @@@ -549,7 -480,6 +549,7 @@@ * is passed up the network stack */ void ixgbe_ptp_rx_hwtstamp(struct ixgbe_q_vector *q_vector, + union ixgbe_adv_rx_desc *rx_desc, struct sk_buff *skb) { struct ixgbe_adapter *adapter; @@@ -567,33 -497,21 +567,33 @@@ hw = &adapter->hw;
tsyncrxctl = IXGBE_READ_REG(hw, IXGBE_TSYNCRXCTL); + + /* Check if we have a valid timestamp and make sure the skb should + * have been timestamped */ + if (likely(!(tsyncrxctl & IXGBE_TSYNCRXCTL_VALID) || + !ixgbe_ptp_match(skb, adapter->rx_hwtstamp_filter))) + return; + + /* + * Always read the registers, in order to clear a possible fault + * because of stagnant RX timestamp values for a packet that never + * reached the queue. + */ regval |= (u64)IXGBE_READ_REG(hw, IXGBE_RXSTMPL); regval |= (u64)IXGBE_READ_REG(hw, IXGBE_RXSTMPH) << 32;
/* - * If this bit is set, then the RX registers contain the time stamp. No - * other packet will be time stamped until we read these registers, so - * read the registers to make them available again. Because only one - * packet can be time stamped at a time, we know that the register - * values must belong to this one here and therefore we don't need to - * compare any of the additional attributes stored for it. + * If the timestamp bit is set in the packet's descriptor, we know the + * timestamp belongs to this packet. No other packet can be + * timestamped until the registers for timestamping have been read. + * Therefore only one packet with this bit can be in the queue at a + * time, and the rx timestamp values that were in the registers belong + * to this packet. * * If nothing went wrong, then it should have a skb_shared_tx that we * can turn into a skb_shared_hwtstamps. */ - if (!(tsyncrxctl & IXGBE_TSYNCRXCTL_VALID)) + if (unlikely(!ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_STAT_TS))) return;
spin_lock_irqsave(&adapter->tmreg_lock, flags); @@@ -621,11 -539,6 +621,11 @@@ * type has to be specified. Matching the kind of event packet is * not supported, with the exception of "all V2 events regardless of * level 2 or 4". + * + * Since hardware always timestamps Path delay packets when timestamping V2 + * packets, regardless of the type specified in the register, only use V2 + * Event mode. This more accurately tells the user what the hardware is going + * to do anyways. */ int ixgbe_ptp_hwtstamp_ioctl(struct ixgbe_adapter *adapter, struct ifreq *ifr, int cmd) @@@ -669,30 -582,41 +669,30 @@@ tsync_rx_mtrl = IXGBE_RXMTRL_V1_DELAY_REQ_MSG; is_l4 = true; break; + case HWTSTAMP_FILTER_PTP_V2_EVENT: + case HWTSTAMP_FILTER_PTP_V2_L2_EVENT: + case HWTSTAMP_FILTER_PTP_V2_L4_EVENT: case HWTSTAMP_FILTER_PTP_V2_SYNC: case HWTSTAMP_FILTER_PTP_V2_L2_SYNC: case HWTSTAMP_FILTER_PTP_V2_L4_SYNC: case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ: case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ: case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ: - tsync_rx_ctl |= IXGBE_TSYNCRXCTL_TYPE_L2_L4_V2; - tsync_rx_mtrl = IXGBE_RXMTRL_V2_DELAY_REQ_MSG; - is_l2 = true; - is_l4 = true; - config.rx_filter = HWTSTAMP_FILTER_SOME; - break; - case HWTSTAMP_FILTER_PTP_V2_L4_EVENT: - case HWTSTAMP_FILTER_PTP_V2_L2_EVENT: - case HWTSTAMP_FILTER_PTP_V2_EVENT: tsync_rx_ctl |= IXGBE_TSYNCRXCTL_TYPE_EVENT_V2; - config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT; is_l2 = true; is_l4 = true; + config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT; break; case HWTSTAMP_FILTER_PTP_V1_L4_EVENT: case HWTSTAMP_FILTER_ALL: default: /* - * register RXMTRL must be set, therefore it is not - * possible to time stamp both V1 Sync and Delay_Req messages - * and hardware does not support timestamping all packets - * => return error + * register RXMTRL must be set in order to do V1 packets, + * therefore it is not possible to time stamp both V1 Sync and + * Delay_Req messages and hardware does not support + * timestamping all packets => return error */ + config.rx_filter = HWTSTAMP_FILTER_NONE; return -ERANGE; }
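Two related changes land here: the hwtstamp ioctl now collapses every V2 filter request to V2 event mode (which is what the hardware really does), and the new ixgbe_ptp_match() decides in software whether a received frame should have been latched, which for the V1 filters means digging the PTP control byte out of the packet at an offset that depends on the IPv4 header length. A rough sketch of that offset computation; the numeric offsets and message-type values are illustrative, the driver takes them from ptp_classify.h:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define ETH_HLEN         14
#define UDP_HLEN          8
#define PTP_CONTROL_OFF  32  /* illustrative: control byte within the PTPv1 header */
#define PTP_V1_SYNC       0  /* illustrative message-type values */
#define PTP_V1_DELAY_REQ  1

/* Return 1 if a V1-over-IPv4 frame in buf carries the wanted message type. */
static int v1_ipv4_msg_matches(const uint8_t *buf, size_t len, uint8_t wanted)
{
        size_t off;
        uint8_t ihl;

        if (len < ETH_HLEN + 1)
                return 0;

        /* IP header length: low nibble of the first IP byte, in 32-bit words. */
        ihl = buf[ETH_HLEN] & 0x0f;
        off = ETH_HLEN + (size_t)ihl * 4 + UDP_HLEN + PTP_CONTROL_OFF;

        if (len <= off)
                return 0;
        return buf[off] == wanted;
}

int main(void)
{
        uint8_t frame[128] = { 0 };

        frame[ETH_HLEN] = 0x45;  /* IPv4, IHL = 5 words */
        frame[ETH_HLEN + 20 + UDP_HLEN + PTP_CONTROL_OFF] = PTP_V1_DELAY_REQ;
        printf("%d\n", v1_ipv4_msg_matches(frame, sizeof(frame), PTP_V1_DELAY_REQ));
        return 0;
}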
@@@ -702,9 -626,6 +702,9 @@@ return 0; }
+ /* Store filter value for later use */ + adapter->rx_hwtstamp_filter = config.rx_filter; + /* define ethertype filter for timestamped packets */ if (is_l2) IXGBE_WRITE_REG(hw, IXGBE_ETQF(3), @@@ -787,6 -708,7 +787,7 @@@ void ixgbe_ptp_start_cyclecounter(struc { struct ixgbe_hw *hw = &adapter->hw; u32 incval = 0; + u32 timinca = 0; u32 shift = 0; u32 cycle_speed; unsigned long flags; @@@ -809,8 -731,16 +810,16 @@@ break; }
- /* Bail if the cycle speed didn't change */ - if (adapter->cycle_speed == cycle_speed) + /* + * grab the current TIMINCA value from the register so that it can be + * double checked. If the register value has been cleared, it must be + * reset to the correct value for generating a cyclecounter. If + * TIMINCA is zero, the SYSTIME registers do not increment at all. + */ + timinca = IXGBE_READ_REG(hw, IXGBE_TIMINCA); + + /* Bail if the cycle speed didn't change and TIMINCA is non-zero */ + if (adapter->cycle_speed == cycle_speed && timinca) return;
/* disable the SDP clock out */ @@@ -940,10 -870,6 +949,10 @@@ void ixgbe_ptp_init(struct ixgbe_adapte return; }
+ /* initialize the ptp filter */ + if (ptp_filter_init(ptp_filter, ARRAY_SIZE(ptp_filter))) + e_dev_warn("ptp_filter_init failed\n"); + spin_lock_init(&adapter->tmreg_lock);
ixgbe_ptp_start_cyclecounter(adapter); diff --combined drivers/net/usb/qmi_wwan.c index f1e7791,3767a12..68ca676 --- a/drivers/net/usb/qmi_wwan.c +++ b/drivers/net/usb/qmi_wwan.c @@@ -1,10 -1,6 +1,10 @@@ /* * Copyright (c) 2012 Bjørn Mork bjorn@mork.no * + * The probing code is heavily inspired by cdc_ether, which is: + * Copyright (C) 2003-2005 by David Brownell + * Copyright (C) 2006 by Ole Andre Vadla Ravnas (ActiveSync) + * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * version 2 as published by the Free Software Foundation. @@@ -19,7 -15,11 +19,7 @@@ #include <linux/usb/usbnet.h> #include <linux/usb/cdc-wdm.h>
-/* The name of the CDC Device Management driver */ -#define DM_DRIVER "cdc_wdm" - -/* - * This driver supports wwan (3G/LTE/?) devices using a vendor +/* This driver supports wwan (3G/LTE/?) devices using a vendor * specific management protocol called Qualcomm MSM Interface (QMI) - * in addition to the more common AT commands over serial interface * management @@@ -31,99 -31,33 +31,99 @@@ * management protocol is used in place of the standard CDC * notifications NOTIFY_NETWORK_CONNECTION and NOTIFY_SPEED_CHANGE * + * Alternatively, control and data functions can be combined in a + * single USB interface. + * * Handling a protocol like QMI is out of the scope for any driver. - * It can be exported as a character device using the cdc-wdm driver, - * which will enable userspace applications ("modem managers") to - * handle it. This may be required to use the network interface - * provided by the driver. + * It is exported as a character device using the cdc-wdm driver as + * a subdriver, enabling userspace applications ("modem managers") to + * handle it. * * These devices may alternatively/additionally be configured using AT - * commands on any of the serial interfaces driven by the option driver - * - * This driver binds only to the data ("slave") interface to enable - * the cdc-wdm driver to bind to the control interface. It still - * parses the CDC functional descriptors on the control interface to - * a) verify that this is indeed a handled interface (CDC Union - * header lists it as slave) - * b) get MAC address and other ethernet config from the CDC Ethernet - * header - * c) enable user bind requests against the control interface, which - * is the common way to bind to CDC Ethernet Control Model type - * interfaces - * d) provide a hint to the user about which interface is the - * corresponding management interface + * commands on a serial interface */
+/* driver specific data */ +struct qmi_wwan_state { + struct usb_driver *subdriver; + atomic_t pmcount; + unsigned long unused; + struct usb_interface *control; + struct usb_interface *data; +}; + +/* using a counter to merge subdriver requests with our own into a combined state */ +static int qmi_wwan_manage_power(struct usbnet *dev, int on) +{ + struct qmi_wwan_state *info = (void *)&dev->data; + int rv = 0; + + dev_dbg(&dev->intf->dev, "%s() pmcount=%d, on=%d\n", __func__, atomic_read(&info->pmcount), on); + + if ((on && atomic_add_return(1, &info->pmcount) == 1) || (!on && atomic_dec_and_test(&info->pmcount))) { + /* need autopm_get/put here to ensure the usbcore sees the new value */ + rv = usb_autopm_get_interface(dev->intf); + if (rv < 0) + goto err; + dev->intf->needs_remote_wakeup = on; + usb_autopm_put_interface(dev->intf); + } +err: + return rv; +} + +static int qmi_wwan_cdc_wdm_manage_power(struct usb_interface *intf, int on) +{ + struct usbnet *dev = usb_get_intfdata(intf); + return qmi_wwan_manage_power(dev, on); +} + +/* collect all three endpoints and register subdriver */ +static int qmi_wwan_register_subdriver(struct usbnet *dev) +{ + int rv; + struct usb_driver *subdriver = NULL; + struct qmi_wwan_state *info = (void *)&dev->data; + + /* collect bulk endpoints */ + rv = usbnet_get_endpoints(dev, info->data); + if (rv < 0) + goto err; + + /* update status endpoint if separate control interface */ + if (info->control != info->data) + dev->status = &info->control->cur_altsetting->endpoint[0]; + + /* require interrupt endpoint for subdriver */ + if (!dev->status) { + rv = -EINVAL; + goto err; + } + + /* for subdriver power management */ + atomic_set(&info->pmcount, 0); + + /* register subdriver */ + subdriver = usb_cdc_wdm_register(info->control, &dev->status->desc, 512, &qmi_wwan_cdc_wdm_manage_power); + if (IS_ERR(subdriver)) { + dev_err(&info->control->dev, "subdriver registration failed\n"); + rv = PTR_ERR(subdriver); + goto err; + } + + /* prevent usbnet from using status endpoint */ + dev->status = NULL; + + /* save subdriver struct for suspend/resume wrappers */ + info->subdriver = subdriver; + +err: + return rv; +} + static int qmi_wwan_bind(struct usbnet *dev, struct usb_interface *intf) { int status = -1; - struct usb_interface *control = NULL; u8 *buf = intf->cur_altsetting->extra; int len = intf->cur_altsetting->extralen; struct usb_interface_descriptor *desc = &intf->cur_altsetting->desc; @@@ -131,14 -65,25 +131,14 @@@ struct usb_cdc_ether_desc *cdc_ether = NULL; u32 required = 1 << USB_CDC_HEADER_TYPE | 1 << USB_CDC_UNION_TYPE; u32 found = 0; - atomic_t *pmcount = (void *)&dev->data[1]; + struct usb_driver *driver = driver_of(intf); + struct qmi_wwan_state *info = (void *)&dev->data;
- atomic_set(pmcount, 0); - - /* - * assume a data interface has no additional descriptors and - * that the control and data interface are numbered - * consecutively - this holds for the Huawei device at least - */ - if (len == 0 && desc->bInterfaceNumber > 0) { - control = usb_ifnum_to_if(dev->udev, desc->bInterfaceNumber - 1); - if (!control) - goto err; + BUILD_BUG_ON((sizeof(((struct usbnet *)0)->data) < sizeof(struct qmi_wwan_state)));
- buf = control->cur_altsetting->extra; - len = control->cur_altsetting->extralen; - dev_dbg(&intf->dev, "guessing \"control\" => %s, \"data\" => this\n", - dev_name(&control->dev)); - } + /* require a single interrupt status endpoint for subdriver */ + if (intf->cur_altsetting->desc.bNumEndpoints != 1) + goto err;
while (len > 3) { struct usb_descriptor_header *h = (void *)buf; @@@ -202,17 -147,10 +202,17 @@@ next_desc goto err; }
- /* give the user a helpful hint if trying to bind to the wrong interface */ - if (cdc_union && desc->bInterfaceNumber == cdc_union->bMasterInterface0) { - dev_err(&intf->dev, "leaving \"control\" interface for " DM_DRIVER " - try binding to %s instead!\n", - dev_name(&usb_ifnum_to_if(dev->udev, cdc_union->bSlaveInterface0)->dev)); + /* verify CDC Union */ + if (desc->bInterfaceNumber != cdc_union->bMasterInterface0) { + dev_err(&intf->dev, "bogus CDC Union: master=%u\n", cdc_union->bMasterInterface0); + goto err; + } + + /* need to save these for unbind */ + info->control = intf; + info->data = usb_ifnum_to_if(dev->udev, cdc_union->bSlaveInterface0); + if (!info->data) { + dev_err(&intf->dev, "bogus CDC Union: slave=%u\n", cdc_union->bSlaveInterface0); goto err; }
@@@ -222,29 -160,59 +222,29 @@@ usbnet_get_ethernet_addr(dev, cdc_ether->iMACAddress); }
- /* success! point the user to the management interface */ - if (control) - dev_info(&intf->dev, "Use \"" DM_DRIVER "\" for QMI interface %s\n", - dev_name(&control->dev)); - - /* XXX: add a sysfs symlink somewhere to help management applications find it? */ + /* claim data interface and set it up */ + status = usb_driver_claim_interface(driver, info->data, dev); + if (status < 0) + goto err;
- /* collect bulk endpoints now that we know intf == "data" interface */ - status = usbnet_get_endpoints(dev, intf); + status = qmi_wwan_register_subdriver(dev); + if (status < 0) { + usb_set_intfdata(info->data, NULL); + usb_driver_release_interface(driver, info->data); + }
err: return status; }
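The counter in qmi_wwan_manage_power() above merges the power requests of usbnet and the cdc-wdm subdriver into one remote-wakeup decision: only the 0->1 and 1->0 transitions matter. A minimal stand-alone model of that transition test, with the atomics replaced by a plain counter purely for illustration:

	#include <stdbool.h>
	#include <stdio.h>

	static int pmcount; /* stands in for info->pmcount */

	/* returns true when the remote-wakeup setting must actually change */
	static bool wakeup_needs_update(bool on)
	{
		if (on)
			return ++pmcount == 1; /* first user appeared */
		return --pmcount == 0;         /* last user went away */
	}

	int main(void)
	{
		printf("%d\n", wakeup_needs_update(true));  /* 1: enable wakeup   */
		printf("%d\n", wakeup_needs_update(true));  /* 0: already enabled */
		printf("%d\n", wakeup_needs_update(false)); /* 0: one user left   */
		printf("%d\n", wakeup_needs_update(false)); /* 1: disable wakeup  */
		return 0;
	}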
/* Some devices combine the "control" and "data" functions into a * single interface with all three endpoints: interrupt + bulk in and * out - * - * Setting up cdc-wdm as a subdriver owning the interrupt endpoint - * will let it provide userspace access to the encapsulated QMI - * protocol without interfering with the usbnet operations. - */ + */ static int qmi_wwan_bind_shared(struct usbnet *dev, struct usb_interface *intf) { int rv; - struct usb_driver *subdriver = NULL; - atomic_t *pmcount = (void *)&dev->data[1]; + struct qmi_wwan_state *info = (void *)&dev->data;
/* ZTE makes devices where the interface descriptors and endpoint * configurations of two or more interfaces are identical, even @@@ -260,62 -228,43 +260,39 @@@ goto err; }
- atomic_set(pmcount, 0); - - /* collect all three endpoints */ - rv = usbnet_get_endpoints(dev, intf); - if (rv < 0) - goto err; - - /* require interrupt endpoint for subdriver */ - if (!dev->status) { - rv = -EINVAL; - goto err; - } - - subdriver = usb_cdc_wdm_register(intf, &dev->status->desc, 512, &qmi_wwan_cdc_wdm_manage_power); - if (IS_ERR(subdriver)) { - rv = PTR_ERR(subdriver); - goto err; - } - - /* can't let usbnet use the interrupt endpoint */ - dev->status = NULL; - - /* save subdriver struct for suspend/resume wrappers */ - dev->data[0] = (unsigned long)subdriver; + /* control and data is shared */ + info->control = intf; + info->data = intf; + rv = qmi_wwan_register_subdriver(dev);
err: return rv; }
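The whitelist test that the truncated ZTE comment above refers to is not visible in this hunk; the sketch below shows how such a check of the interface number against the driver_info->data bitmap plausibly looks. The helper name and exact form are assumptions for illustration, not the literal kernel code.

	/* hypothetical helper; the real check lives inline in qmi_wwan_bind_shared() */
	static bool qmi_wwan_intf_whitelisted(struct usbnet *dev,
					      struct usb_interface *intf)
	{
		unsigned long whitelist = dev->driver_info->data;
		u8 ifnum = intf->cur_altsetting->desc.bInterfaceNumber;

		/* an empty bitmap means "accept any interface number" */
		return !whitelist || test_bit(ifnum, &whitelist);
	}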
- /* Gobi devices uses identical class/protocol codes for all interfaces regardless - * of function. Some of these are CDC ACM like and have the exact same endpoints - * we are looking for. This leaves two possible strategies for identifying the - * correct interface: - * a) hardcoding interface number, or - * b) use the fact that the wwan interface is the only one lacking additional - * (CDC functional) descriptors - * - * Let's see if we can get away with the generic b) solution. - */ - static int qmi_wwan_bind_gobi(struct usbnet *dev, struct usb_interface *intf) - { - int rv = -EINVAL; - - /* ignore any interface with additional descriptors */ - if (intf->cur_altsetting->extralen) - goto err; - - rv = qmi_wwan_bind_shared(dev, intf); - err: - return rv; - } - -static void qmi_wwan_unbind_shared(struct usbnet *dev, struct usb_interface *intf) +static void qmi_wwan_unbind(struct usbnet *dev, struct usb_interface *intf) { - struct usb_driver *subdriver = (void *)dev->data[0]; - - if (subdriver && subdriver->disconnect) - subdriver->disconnect(intf); + struct qmi_wwan_state *info = (void *)&dev->data; + struct usb_driver *driver = driver_of(intf); + struct usb_interface *other; + + if (info->subdriver && info->subdriver->disconnect) + info->subdriver->disconnect(info->control); + + /* allow user to unbind using either control or data */ + if (intf == info->control) + other = info->data; + else + other = info->control; + + /* only if not shared */ + if (other && intf != other) { + usb_set_intfdata(other, NULL); + usb_driver_release_interface(driver, other); + }
- dev->data[0] = (unsigned long)NULL; + info->subdriver = NULL; + info->data = NULL; + info->control = NULL; }
/* suspend/resume wrappers calling both usbnet and the cdc-wdm @@@ -327,15 -276,15 +304,15 @@@ static int qmi_wwan_suspend(struct usb_interface *intf, pm_message_t message) { struct usbnet *dev = usb_get_intfdata(intf); - struct usb_driver *subdriver = (void *)dev->data[0]; + struct qmi_wwan_state *info = (void *)&dev->data; int ret;
ret = usbnet_suspend(intf, message); if (ret < 0) goto err;
- if (subdriver && subdriver->suspend) - ret = subdriver->suspend(intf, message); + if (info->subdriver && info->subdriver->suspend) + ret = info->subdriver->suspend(intf, message); if (ret < 0) usbnet_resume(intf); err: @@@ -345,59 -294,68 +322,68 @@@ static int qmi_wwan_resume(struct usb_interface *intf) { struct usbnet *dev = usb_get_intfdata(intf); - struct usb_driver *subdriver = (void *)dev->data[0]; + struct qmi_wwan_state *info = (void *)&dev->data; int ret = 0;
- if (subdriver && subdriver->resume) - ret = subdriver->resume(intf); + if (info->subdriver && info->subdriver->resume) + ret = info->subdriver->resume(intf); if (ret < 0) goto err; ret = usbnet_resume(intf); - if (ret < 0 && subdriver && subdriver->resume && subdriver->suspend) - subdriver->suspend(intf, PMSG_SUSPEND); + if (ret < 0 && info->subdriver && info->subdriver->resume && info->subdriver->suspend) + info->subdriver->suspend(intf, PMSG_SUSPEND); err: return ret; }
- static const struct driver_info qmi_wwan_info = { - .description = "QMI speaking wwan device", + .description = "WWAN/QMI device", .flags = FLAG_WWAN, .bind = qmi_wwan_bind, + .unbind = qmi_wwan_unbind, .manage_power = qmi_wwan_manage_power, };
static const struct driver_info qmi_wwan_shared = { - .description = "QMI speaking wwan device with combined interface", + .description = "WWAN/QMI device", .flags = FLAG_WWAN, .bind = qmi_wwan_bind_shared, - .unbind = qmi_wwan_unbind_shared, + .unbind = qmi_wwan_unbind, .manage_power = qmi_wwan_manage_power, };
- static const struct driver_info qmi_wwan_gobi = { - .description = "Qualcomm Gobi wwan/QMI device", + static const struct driver_info qmi_wwan_force_int0 = { + .description = "Qualcomm WWAN/QMI device", .flags = FLAG_WWAN, - .bind = qmi_wwan_bind_gobi, + .bind = qmi_wwan_bind_shared, - .unbind = qmi_wwan_unbind_shared, + .unbind = qmi_wwan_unbind, .manage_power = qmi_wwan_manage_power, + .data = BIT(0), /* interface whitelist bitmap */ };
- /* ZTE suck at making USB descriptors */ static const struct driver_info qmi_wwan_force_int1 = { .description = "Qualcomm WWAN/QMI device", .flags = FLAG_WWAN, .bind = qmi_wwan_bind_shared, - .unbind = qmi_wwan_unbind_shared, + .unbind = qmi_wwan_unbind, .manage_power = qmi_wwan_manage_power, .data = BIT(1), /* interface whitelist bitmap */ };
+ static const struct driver_info qmi_wwan_force_int3 = { + .description = "Qualcomm WWAN/QMI device", + .flags = FLAG_WWAN, + .bind = qmi_wwan_bind_shared, - .unbind = qmi_wwan_unbind_shared, ++ .unbind = qmi_wwan_unbind, + .manage_power = qmi_wwan_manage_power, + .data = BIT(3), /* interface whitelist bitmap */ + }; + static const struct driver_info qmi_wwan_force_int4 = { .description = "Qualcomm WWAN/QMI device", .flags = FLAG_WWAN, .bind = qmi_wwan_bind_shared, - .unbind = qmi_wwan_unbind_shared, + .unbind = qmi_wwan_unbind, .manage_power = qmi_wwan_manage_power, .data = BIT(4), /* interface whitelist bitmap */ }; @@@ -418,16 -376,23 +404,23 @@@ static const struct driver_info qmi_wwan_sierra = { .description = "Sierra Wireless wwan/QMI device", .flags = FLAG_WWAN, - .bind = qmi_wwan_bind_gobi, + .bind = qmi_wwan_bind_shared, - .unbind = qmi_wwan_unbind_shared, + .unbind = qmi_wwan_unbind, .manage_power = qmi_wwan_manage_power, .data = BIT(8) | BIT(19), /* interface whitelist bitmap */ };
#define HUAWEI_VENDOR_ID 0x12D1 + + /* Gobi 1000 QMI/wwan interface number is 3 according to qcserial */ + #define QMI_GOBI1K_DEVICE(vend, prod) \ + USB_DEVICE(vend, prod), \ + .driver_info = (unsigned long)&qmi_wwan_force_int3 + + /* Gobi 2000 and Gobi 3000 QMI/wwan interface number is 0 according to qcserial */ #define QMI_GOBI_DEVICE(vend, prod) \ USB_DEVICE(vend, prod), \ - .driver_info = (unsigned long)&qmi_wwan_gobi + .driver_info = (unsigned long)&qmi_wwan_force_int0
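For reference, a QMI_GOBI1K_DEVICE() entry in the products[] table that follows is just an ordinary usb_device_id initializer; written out by hand (the same fields USB_DEVICE() fills in), the first Gobi 1000 entry is roughly:

	static const struct usb_device_id gobi1k_expanded_example = {
		.match_flags	= USB_DEVICE_ID_MATCH_DEVICE,
		.idVendor	= 0x05c6,	/* Acer Gobi Modem Device */
		.idProduct	= 0x9212,
		/* qmi_wwan_force_int3 carries the BIT(3) interface whitelist */
		.driver_info	= (unsigned long)&qmi_wwan_force_int3,
	};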
static const struct usb_device_id products[] = { { /* Huawei E392, E398 and possibly others sharing both device id and more... */ @@@ -435,7 -400,7 +428,7 @@@ .idVendor = HUAWEI_VENDOR_ID, .bInterfaceClass = USB_CLASS_VENDOR_SPEC, .bInterfaceSubClass = 1, - .bInterfaceProtocol = 8, /* NOTE: This is the *slave* interface of the CDC Union! */ + .bInterfaceProtocol = 9, /* CDC Ethernet *control* interface */ .driver_info = (unsigned long)&qmi_wwan_info, }, { /* Vodafone/Huawei K5005 (12d1:14c8) and similar modems */ @@@ -443,7 -408,7 +436,7 @@@ .idVendor = HUAWEI_VENDOR_ID, .bInterfaceClass = USB_CLASS_VENDOR_SPEC, .bInterfaceSubClass = 1, - .bInterfaceProtocol = 56, /* NOTE: This is the *slave* interface of the CDC Union! */ + .bInterfaceProtocol = 57, /* CDC Ethernet *control* interface */ .driver_info = (unsigned long)&qmi_wwan_info, }, { /* Huawei E392, E398 and possibly others in "Windows mode" @@@ -538,20 -503,24 +531,24 @@@ .bInterfaceProtocol = 0xff, .driver_info = (unsigned long)&qmi_wwan_sierra, }, - {QMI_GOBI_DEVICE(0x05c6, 0x9212)}, /* Acer Gobi Modem Device */ - {QMI_GOBI_DEVICE(0x03f0, 0x1f1d)}, /* HP un2400 Gobi Modem Device */ - {QMI_GOBI_DEVICE(0x03f0, 0x371d)}, /* HP un2430 Mobile Broadband Module */ - {QMI_GOBI_DEVICE(0x04da, 0x250d)}, /* Panasonic Gobi Modem device */ - {QMI_GOBI_DEVICE(0x413c, 0x8172)}, /* Dell Gobi Modem device */ - {QMI_GOBI_DEVICE(0x1410, 0xa001)}, /* Novatel Gobi Modem device */ - {QMI_GOBI_DEVICE(0x0b05, 0x1776)}, /* Asus Gobi Modem device */ - {QMI_GOBI_DEVICE(0x19d2, 0xfff3)}, /* ONDA Gobi Modem device */ - {QMI_GOBI_DEVICE(0x05c6, 0x9001)}, /* Generic Gobi Modem device */ - {QMI_GOBI_DEVICE(0x05c6, 0x9002)}, /* Generic Gobi Modem device */ - {QMI_GOBI_DEVICE(0x05c6, 0x9202)}, /* Generic Gobi Modem device */ - {QMI_GOBI_DEVICE(0x05c6, 0x9203)}, /* Generic Gobi Modem device */ - {QMI_GOBI_DEVICE(0x05c6, 0x9222)}, /* Generic Gobi Modem device */ - {QMI_GOBI_DEVICE(0x05c6, 0x9009)}, /* Generic Gobi Modem device */ + + /* Gobi 1000 devices */ + {QMI_GOBI1K_DEVICE(0x05c6, 0x9212)}, /* Acer Gobi Modem Device */ + {QMI_GOBI1K_DEVICE(0x03f0, 0x1f1d)}, /* HP un2400 Gobi Modem Device */ + {QMI_GOBI1K_DEVICE(0x03f0, 0x371d)}, /* HP un2430 Mobile Broadband Module */ + {QMI_GOBI1K_DEVICE(0x04da, 0x250d)}, /* Panasonic Gobi Modem device */ + {QMI_GOBI1K_DEVICE(0x413c, 0x8172)}, /* Dell Gobi Modem device */ + {QMI_GOBI1K_DEVICE(0x1410, 0xa001)}, /* Novatel Gobi Modem device */ + {QMI_GOBI1K_DEVICE(0x0b05, 0x1776)}, /* Asus Gobi Modem device */ + {QMI_GOBI1K_DEVICE(0x19d2, 0xfff3)}, /* ONDA Gobi Modem device */ + {QMI_GOBI1K_DEVICE(0x05c6, 0x9001)}, /* Generic Gobi Modem device */ + {QMI_GOBI1K_DEVICE(0x05c6, 0x9002)}, /* Generic Gobi Modem device */ + {QMI_GOBI1K_DEVICE(0x05c6, 0x9202)}, /* Generic Gobi Modem device */ + {QMI_GOBI1K_DEVICE(0x05c6, 0x9203)}, /* Generic Gobi Modem device */ + {QMI_GOBI1K_DEVICE(0x05c6, 0x9222)}, /* Generic Gobi Modem device */ + {QMI_GOBI1K_DEVICE(0x05c6, 0x9009)}, /* Generic Gobi Modem device */ + + /* Gobi 2000 and 3000 devices */ {QMI_GOBI_DEVICE(0x413c, 0x8186)}, /* Dell Gobi 2000 Modem device (N0218, VU936) */ {QMI_GOBI_DEVICE(0x05c6, 0x920b)}, /* Generic Gobi 2000 Modem device */ {QMI_GOBI_DEVICE(0x05c6, 0x9225)}, /* Sony Gobi 2000 Modem device (N0279, VU730) */ @@@ -593,7 -562,17 +590,7 @@@ static struct usb_driver qmi_wwan_drive .disable_hub_initiated_lpm = 1, };
-static int __init qmi_wwan_init(void) -{ - return usb_register(&qmi_wwan_driver); -} -module_init(qmi_wwan_init); - -static void __exit qmi_wwan_exit(void) -{ - usb_deregister(&qmi_wwan_driver); -} -module_exit(qmi_wwan_exit); +module_usb_driver(qmi_wwan_driver);
MODULE_AUTHOR("Bjørn Mork bjorn@mork.no"); MODULE_DESCRIPTION("Qualcomm MSM Interface (QMI) WWAN driver"); diff --combined drivers/net/usb/usbnet.c index ac2e493,aba769d..e92c057 --- a/drivers/net/usb/usbnet.c +++ b/drivers/net/usb/usbnet.c @@@ -180,40 -180,7 +180,40 @@@ int usbnet_get_ethernet_addr(struct usb } EXPORT_SYMBOL_GPL(usbnet_get_ethernet_addr);
-static void intr_complete (struct urb *urb); +static void intr_complete (struct urb *urb) +{ + struct usbnet *dev = urb->context; + int status = urb->status; + + switch (status) { + /* success */ + case 0: + dev->driver_info->status(dev, urb); + break; + + /* software-driven interface shutdown */ + case -ENOENT: /* urb killed */ + case -ESHUTDOWN: /* hardware gone */ + netif_dbg(dev, ifdown, dev->net, + "intr shutdown, code %d\n", status); + return; + + /* NOTE: not throttling like RX/TX, since this endpoint + * already polls infrequently + */ + default: + netdev_dbg(dev->net, "intr status %d\n", status); + break; + } + + if (!netif_running (dev->net)) + return; + + status = usb_submit_urb (urb, GFP_ATOMIC); + if (status != 0) + netif_err(dev, timer, dev->net, + "intr resubmit --> %d\n", status); +}
static int init_status (struct usbnet *dev, struct usb_interface *intf) { @@@ -552,6 -519,42 +552,6 @@@ block netif_dbg(dev, rx_err, dev->net, "no read resubmitted\n"); }
-static void intr_complete (struct urb *urb) -{ - struct usbnet *dev = urb->context; - int status = urb->status; - - switch (status) { - /* success */ - case 0: - dev->driver_info->status(dev, urb); - break; - - /* software-driven interface shutdown */ - case -ENOENT: /* urb killed */ - case -ESHUTDOWN: /* hardware gone */ - netif_dbg(dev, ifdown, dev->net, - "intr shutdown, code %d\n", status); - return; - - /* NOTE: not throttling like RX/TX, since this endpoint - * already polls infrequently - */ - default: - netdev_dbg(dev->net, "intr status %d\n", status); - break; - } - - if (!netif_running (dev->net)) - return; - - memset(urb->transfer_buffer, 0, urb->transfer_buffer_length); - status = usb_submit_urb (urb, GFP_ATOMIC); - if (status != 0) - netif_err(dev, timer, dev->net, - "intr resubmit --> %d\n", status); -} - /*-------------------------------------------------------------------------*/ void usbnet_pause_rx(struct usbnet *dev) { @@@ -793,11 -796,13 +793,13 @@@ int usbnet_open (struct net_device *net if (info->manage_power) { retval = info->manage_power(dev, 1); if (retval < 0) - goto done; + goto done_manage_power_error; usb_autopm_put_interface(dev->intf); } return retval;
+ done_manage_power_error: + clear_bit(EVENT_DEV_OPEN, &dev->flags); done: usb_autopm_put_interface(dev->intf); done_nopm: @@@ -873,9 -878,9 +875,9 @@@ void usbnet_get_drvinfo (struct net_dev { struct usbnet *dev = netdev_priv(net);
- strncpy (info->driver, dev->driver_name, sizeof info->driver); - strncpy (info->version, DRIVER_VERSION, sizeof info->version); - strncpy (info->fw_version, dev->driver_info->description, + strlcpy (info->driver, dev->driver_name, sizeof info->driver); + strlcpy (info->version, DRIVER_VERSION, sizeof info->version); + strlcpy (info->fw_version, dev->driver_info->description, sizeof info->fw_version); usb_make_path (dev->udev, info->bus_info, sizeof info->bus_info); } @@@ -1199,6 -1204,21 +1201,21 @@@ deferred } EXPORT_SYMBOL_GPL(usbnet_start_xmit);
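The ethtool hunk above swaps strncpy() for strlcpy(); the practical difference is that strlcpy() always NUL-terminates, even when the source is longer than the destination. A tiny illustration (the buffer size and string are made up):

	char drv[8];

	/* With strncpy(), an 8+ character source would leave drv without a
	 * terminating NUL; strlcpy() copies at most 7 bytes here and always
	 * appends the NUL, so drv ends up as "qmi_wwa".
	 */
	strlcpy(drv, "qmi_wwan_example", sizeof(drv));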
+ static void rx_alloc_submit(struct usbnet *dev, gfp_t flags) + { + struct urb *urb; + int i; + + /* don't refill the queue all at once */ + for (i = 0; i < 10 && dev->rxq.qlen < RX_QLEN(dev); i++) { + urb = usb_alloc_urb(0, flags); + if (urb != NULL) { + if (rx_submit(dev, urb, flags) == -ENOLINK) + return; + } + } + } + /*-------------------------------------------------------------------------*/
// tasklet (work deferred from completions, in_irq) or timer @@@ -1238,26 -1258,14 +1255,14 @@@ static void usbnet_bh (unsigned long pa !timer_pending (&dev->delay) && !test_bit (EVENT_RX_HALT, &dev->flags)) { int temp = dev->rxq.qlen; - int qlen = RX_QLEN (dev); - - if (temp < qlen) { - struct urb *urb; - int i; - - // don't refill the queue all at once - for (i = 0; i < 10 && dev->rxq.qlen < qlen; i++) { - urb = usb_alloc_urb (0, GFP_ATOMIC); - if (urb != NULL) { - if (rx_submit (dev, urb, GFP_ATOMIC) == - -ENOLINK) - return; - } - } + + if (temp < RX_QLEN(dev)) { + rx_alloc_submit(dev, GFP_ATOMIC); if (temp != dev->rxq.qlen) netif_dbg(dev, link, dev->net, "rxqlen %d --> %d\n", temp, dev->rxq.qlen); - if (dev->rxq.qlen < qlen) + if (dev->rxq.qlen < RX_QLEN(dev)) tasklet_schedule (&dev->bh); } if (dev->txq.qlen < TX_QLEN (dev)) @@@ -1304,6 -1312,7 +1309,6 @@@ void usbnet_disconnect (struct usb_inte usb_free_urb(dev->interrupt);
free_netdev(net); - usb_put_dev (xdev); } EXPORT_SYMBOL_GPL(usbnet_disconnect);
@@@ -1359,6 -1368,8 +1364,6 @@@ usbnet_probe (struct usb_interface *ude xdev = interface_to_usbdev (udev); interface = udev->cur_altsetting;
- usb_get_dev (xdev); - status = -ENOMEM;
// set up our own records @@@ -1487,6 -1498,7 +1492,6 @@@ out3 out1: free_netdev(net); out: - usb_put_dev(xdev); return status; } EXPORT_SYMBOL_GPL(usbnet_probe); @@@ -1506,6 -1518,7 +1511,7 @@@ int usbnet_suspend (struct usb_interfac spin_lock_irq(&dev->txq.lock); /* don't autosuspend while transmitting */ if (dev->txq.qlen && PMSG_IS_AUTO(message)) { + dev->suspend_count--; spin_unlock_irq(&dev->txq.lock); return -EBUSY; } else { @@@ -1562,6 -1575,13 +1568,13 @@@ int usbnet_resume (struct usb_interfac spin_unlock_irq(&dev->txq.lock);
if (test_bit(EVENT_DEV_OPEN, &dev->flags)) { + /* handle remote wakeup ASAP */ + if (!dev->wait && + netif_device_present(dev->net) && + !timer_pending(&dev->delay) && + !test_bit(EVENT_RX_HALT, &dev->flags)) + rx_alloc_submit(dev, GFP_KERNEL); + if (!(dev->txq.qlen >= TX_QLEN(dev))) netif_tx_wake_all_queues(dev->net); tasklet_schedule (&dev->bh); diff --combined drivers/net/wireless/airo.c index 252c2c2,a747c63..f9f15bb --- a/drivers/net/wireless/airo.c +++ b/drivers/net/wireless/airo.c @@@ -1997,7 -1997,7 +1997,7 @@@ static int mpi_send_packet (struct net_ * ------------------------------------------------ */
- memcpy((char *)ai->txfids[0].virtual_host_addr, + memcpy(ai->txfids[0].virtual_host_addr, (char *)&wifictlhdr8023, sizeof(wifictlhdr8023));
payloadLen = (__le16 *)(ai->txfids[0].virtual_host_addr + @@@ -4212,7 -4212,7 +4212,7 @@@ static int PC4500_writerid(struct airo_ airo_print_err(ai->dev->name, "%s: len=%d", __func__, len); rc = -1; } else { - memcpy((char *)ai->config_desc.virtual_host_addr, + memcpy(ai->config_desc.virtual_host_addr, pBuf, len);
rc = issuecommand(ai, &cmd, &rsp); @@@ -7233,8 -7233,8 +7233,8 @@@ static int airo_get_aplist(struct net_d } } else { dwrq->flags = 1; /* Should be define'd */ - memcpy(extra + sizeof(struct sockaddr)*i, - &qual, sizeof(struct iw_quality)*i); + memcpy(extra + sizeof(struct sockaddr) * i, qual, + sizeof(struct iw_quality) * i); } dwrq->length = i;
diff --combined drivers/net/wireless/ath/ath9k/ath9k.h index a8c0500,4866550..f72c4a3 --- a/drivers/net/wireless/ath/ath9k/ath9k.h +++ b/drivers/net/wireless/ath/ath9k/ath9k.h @@@ -214,6 -214,7 +214,7 @@@ struct ath_frame_info enum ath9k_key_type keytype; u8 keyix; u8 retries; + u8 rtscts_rate; };
struct ath_buf_state { @@@ -307,7 -308,6 +308,7 @@@ struct ath_rx u8 defant; u8 rxotherant; u32 *rxlink; + u32 num_pkts; unsigned int rxfilter; spinlock_t rxbuflock; struct list_head rxbuf; @@@ -326,9 -326,6 +327,9 @@@ int ath_rx_init(struct ath_softc *sc, i void ath_rx_cleanup(struct ath_softc *sc); int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp); struct ath_txq *ath_txq_setup(struct ath_softc *sc, int qtype, int subtype); +void ath_txq_lock(struct ath_softc *sc, struct ath_txq *txq); +void ath_txq_unlock(struct ath_softc *sc, struct ath_txq *txq); +void ath_txq_unlock_complete(struct ath_softc *sc, struct ath_txq *txq); void ath_tx_cleanupq(struct ath_softc *sc, struct ath_txq *txq); bool ath_drain_all_txq(struct ath_softc *sc, bool retry_tx); void ath_draintxq(struct ath_softc *sc, @@@ -418,9 -415,9 +419,9 @@@ int ath_beaconq_config(struct ath_soft void ath_set_beacon(struct ath_softc *sc); void ath9k_set_beaconing_status(struct ath_softc *sc, bool status);
-/*******/ -/* ANI */ -/*******/ +/*******************/ +/* Link Monitoring */ +/*******************/
#define ATH_STA_SHORT_CALINTERVAL 1000 /* 1 second */ #define ATH_AP_SHORT_CALINTERVAL 100 /* 100 ms */ @@@ -431,9 -428,7 +432,9 @@@ #define ATH_RESTART_CALINTERVAL 1200000 /* 20 minutes */
#define ATH_PAPRD_TIMEOUT 100 /* msecs */ +#define ATH_PLL_WORK_INTERVAL 100
+void ath_tx_complete_poll_work(struct work_struct *work); void ath_reset_work(struct work_struct *work); void ath_hw_check(struct work_struct *work); void ath_hw_pll_work(struct work_struct *work); @@@ -442,31 -437,22 +443,31 @@@ void ath_start_rx_poll(struct ath_soft void ath_paprd_calibrate(struct work_struct *work); void ath_ani_calibrate(unsigned long data); void ath_start_ani(struct ath_common *common); +int ath_update_survey_stats(struct ath_softc *sc); +void ath_update_survey_nf(struct ath_softc *sc, int channel);
/**********/ /* BTCOEX */ /**********/
+enum bt_op_flags { + BT_OP_PRIORITY_DETECTED, + BT_OP_SCAN, +}; + struct ath_btcoex { bool hw_timer_enabled; spinlock_t btcoex_lock; struct timer_list period_timer; /* Timer for BT period */ u32 bt_priority_cnt; unsigned long bt_priority_time; + unsigned long op_flags; int bt_stomp_type; /* Types of BT stomping */ u32 btcoex_no_stomp; /* in usec */ u32 btcoex_period; /* in usec */ u32 btscan_no_stomp; /* in usec */ u32 duty_cycle; + u32 bt_wait_time; struct ath_gen_timer *no_stomp_timer; /* Timer for no BT stomping */ struct ath_mci_profile mci; }; @@@ -528,10 -514,8 +529,10 @@@ static inline void ath_deinit_leds(stru } #endif
- +/*******************************/ /* Antenna diversity/combining */ +/*******************************/ + #define ATH_ANT_RX_CURRENT_SHIFT 4 #define ATH_ANT_RX_MAIN_SHIFT 2 #define ATH_ANT_RX_MASK 0x3 @@@ -584,9 -568,6 +585,9 @@@ struct ath_ant_comb unsigned long scan_start_time; };
+void ath_ant_comb_scan(struct ath_softc *sc, struct ath_rx_status *rs); +void ath_ant_comb_update(struct ath_softc *sc); + /********************/ /* Main driver core */ /********************/ @@@ -604,15 -585,15 +605,15 @@@ #define ATH_TXPOWER_MAX 100 /* .5 dBm units */ #define ATH_RATE_DUMMY_MARKER 0
-#define SC_OP_INVALID BIT(0) -#define SC_OP_BEACONS BIT(1) -#define SC_OP_OFFCHANNEL BIT(2) -#define SC_OP_RXFLUSH BIT(3) -#define SC_OP_TSF_RESET BIT(4) -#define SC_OP_BT_PRIORITY_DETECTED BIT(5) -#define SC_OP_BT_SCAN BIT(6) -#define SC_OP_ANI_RUN BIT(7) -#define SC_OP_PRIM_STA_VIF BIT(8) +enum sc_op_flags { + SC_OP_INVALID, + SC_OP_BEACONS, + SC_OP_RXFLUSH, + SC_OP_TSF_RESET, + SC_OP_ANI_RUN, + SC_OP_PRIM_STA_VIF, + SC_OP_HW_RESET, +};
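Because the SC_OP_* values become bit numbers in an enum and sc_flags becomes an unsigned long, users switch from mask arithmetic to the atomic bitop helpers, as the xmit.c hunk further down shows. Schematically:

	/* before: u32 sc_flags tested with BIT() masks */
	if (sc->sc_flags & SC_OP_INVALID)
		return true;

	/* after: unsigned long sc_flags tested/modified with atomic bitops */
	if (test_bit(SC_OP_INVALID, &sc->sc_flags))
		return true;
	set_bit(SC_OP_INVALID, &sc->sc_flags);
	clear_bit(SC_OP_INVALID, &sc->sc_flags);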
/* Powersave flags */ #define PS_WAIT_FOR_BEACON BIT(0) @@@ -658,9 -639,9 +659,9 @@@ struct ath_softc struct completion paprd_complete;
unsigned int hw_busy_count; + unsigned long sc_flags;
u32 intrstatus; - u32 sc_flags; /* SC_OP_* */ u16 ps_flags; /* PS_* */ u16 curtxpow; bool ps_enabled; @@@ -698,7 -679,6 +699,7 @@@ #ifdef CONFIG_ATH9K_BTCOEX_SUPPORT struct ath_btcoex btcoex; struct ath_mci_coex mci_coex; + struct work_struct mci_work; #endif
struct ath_descdma txsdma; @@@ -757,4 -737,5 +758,4 @@@ void ath9k_calculate_iter_data(struct i struct ieee80211_vif *vif, struct ath9k_vif_iter_data *iter_data);
- #endif /* ATH9K_H */ diff --combined drivers/net/wireless/ath/ath9k/hw.c index 784baee,1c68e56..a42c26f --- a/drivers/net/wireless/ath/ath9k/hw.c +++ b/drivers/net/wireless/ath/ath9k/hw.c @@@ -390,6 -390,14 +390,6 @@@ static void ath9k_hw_disablepcie(struc REG_WRITE(ah, AR_PCIE_SERDES2, 0x00000000); }
-static void ath9k_hw_aspm_init(struct ath_hw *ah) -{ - struct ath_common *common = ath9k_hw_common(ah); - - if (common->bus_ops->aspm_init) - common->bus_ops->aspm_init(common); -} - /* This should work for all families including legacy */ static bool ath9k_hw_chip_test(struct ath_hw *ah) { @@@ -685,6 -693,9 +685,6 @@@ static int __ath9k_hw_init(struct ath_h if (r) return r;
- if (ah->is_pciexpress) - ath9k_hw_aspm_init(ah); - r = ath9k_hw_init_macaddr(ah); if (r) { ath_err(common, "Failed to initialize MAC address\n"); @@@ -773,13 -784,25 +773,25 @@@ static void ath9k_hw_init_qos(struct at
u32 ar9003_get_pll_sqsum_dvc(struct ath_hw *ah) { + struct ath_common *common = ath9k_hw_common(ah); + int i = 0; + REG_CLR_BIT(ah, PLL3, PLL3_DO_MEAS_MASK); udelay(100); REG_SET_BIT(ah, PLL3, PLL3_DO_MEAS_MASK);
- while ((REG_READ(ah, PLL4) & PLL4_MEAS_DONE) == 0) + while ((REG_READ(ah, PLL4) & PLL4_MEAS_DONE) == 0) { + udelay(100);
+ if (WARN_ON_ONCE(i >= 100)) { + ath_err(common, "PLL4 measurement not done\n"); + break; + } + + i++; + } + return (REG_READ(ah, PLL3) & SQSUM_DVC_MASK) >> 3; } EXPORT_SYMBOL(ar9003_get_pll_sqsum_dvc); @@@ -1348,9 -1371,6 +1360,9 @@@ static bool ath9k_hw_set_reset(struct a } }
+ if (ath9k_hw_mci_is_enabled(ah)) + ar9003_mci_check_gpm_offset(ah); + REG_WRITE(ah, AR_RTC_RC, rst_flags);
REGWRITE_BUFFER_FLUSH(ah); @@@ -1435,6 -1455,9 +1447,6 @@@ static bool ath9k_hw_set_reset_reg(stru break; }
- if (ah->caps.hw_caps & ATH9K_HW_CAP_MCI) - REG_WRITE(ah, AR_RTC_KEEP_AWAKE, 0x2); - return ret; }
@@@ -1710,8 -1733,8 +1722,8 @@@ static int ath9k_hw_do_fastcc(struct at ath9k_hw_loadnf(ah, ah->curchan); ath9k_hw_start_nfcal(ah, true);
- if ((ah->caps.hw_caps & ATH9K_HW_CAP_MCI) && ar9003_mci_is_ready(ah)) - ar9003_mci_2g5g_switch(ah, true); + if (ath9k_hw_mci_is_enabled(ah)) + ar9003_mci_2g5g_switch(ah, false);
if (AR_SREV_9271(ah)) ar9002_hw_load_ani_reg(ah, chan); @@@ -1731,9 -1754,10 +1743,9 @@@ int ath9k_hw_reset(struct ath_hw *ah, s u64 tsf = 0; int i, r; bool start_mci_reset = false; - bool mci = !!(ah->caps.hw_caps & ATH9K_HW_CAP_MCI); bool save_fullsleep = ah->chip_fullsleep;
- if (mci) { + if (ath9k_hw_mci_is_enabled(ah)) { start_mci_reset = ar9003_mci_start_reset(ah, chan); if (start_mci_reset) return 0; @@@ -1762,7 -1786,7 +1774,7 @@@ return r; }
- if (mci) + if (ath9k_hw_mci_is_enabled(ah)) ar9003_mci_stop_bt(ah, save_fullsleep);
saveDefAntenna = REG_READ(ah, AR_DEF_ANTENNA); @@@ -1820,7 -1844,7 +1832,7 @@@ if (r) return r;
- if (mci) + if (ath9k_hw_mci_is_enabled(ah)) ar9003_mci_reset(ah, false, IS_CHAN_2GHZ(chan), save_fullsleep);
/* @@@ -1915,8 -1939,7 +1927,8 @@@
ath9k_hw_set_dma(ah);
- REG_WRITE(ah, AR_OBS, 8); + if (!ath9k_hw_mci_is_enabled(ah)) + REG_WRITE(ah, AR_OBS, 8);
if (ah->config.rx_intr_mitigation) { REG_RMW_FIELD(ah, AR_RIMT, AR_RIMT_LAST, 500); @@@ -1940,7 -1963,7 +1952,7 @@@ ath9k_hw_loadnf(ah, chan); ath9k_hw_start_nfcal(ah, true);
- if (mci && ar9003_mci_end_reset(ah, chan, caldata)) + if (ath9k_hw_mci_is_enabled(ah) && ar9003_mci_end_reset(ah, chan, caldata)) return -EIO;
ENABLE_REGWRITE_BUFFER(ah); @@@ -1985,7 -2008,7 +1997,7 @@@ if (ath9k_hw_btcoex_is_enabled(ah)) ath9k_hw_btcoex_enable(ah);
- if (mci) + if (ath9k_hw_mci_is_enabled(ah)) ar9003_mci_check_bt(ah);
if (AR_SREV_9300_20_OR_LATER(ah)) { @@@ -2008,35 -2031,39 +2020,35 @@@ EXPORT_SYMBOL(ath9k_hw_reset) * Notify Power Mgt is disabled in self-generated frames. * If requested, force chip to sleep. */ -static void ath9k_set_power_sleep(struct ath_hw *ah, int setChip) +static void ath9k_set_power_sleep(struct ath_hw *ah) { REG_SET_BIT(ah, AR_STA_ID1, AR_STA_ID1_PWR_SAV); - if (setChip) { - if (AR_SREV_9462(ah)) { - REG_WRITE(ah, AR_TIMER_MODE, - REG_READ(ah, AR_TIMER_MODE) & 0xFFFFFF00); - REG_WRITE(ah, AR_NDP2_TIMER_MODE, REG_READ(ah, - AR_NDP2_TIMER_MODE) & 0xFFFFFF00); - REG_WRITE(ah, AR_SLP32_INC, - REG_READ(ah, AR_SLP32_INC) & 0xFFF00000); - /* xxx Required for WLAN only case ? */ - REG_WRITE(ah, AR_MCI_INTERRUPT_RX_MSG_EN, 0); - udelay(100); - }
- /* - * Clear the RTC force wake bit to allow the - * mac to go to sleep. - */ - REG_CLR_BIT(ah, AR_RTC_FORCE_WAKE, AR_RTC_FORCE_WAKE_EN); + if (AR_SREV_9462(ah)) { + REG_CLR_BIT(ah, AR_TIMER_MODE, 0xff); + REG_CLR_BIT(ah, AR_NDP2_TIMER_MODE, 0xff); + REG_CLR_BIT(ah, AR_SLP32_INC, 0xfffff); + /* xxx Required for WLAN only case ? */ + REG_WRITE(ah, AR_MCI_INTERRUPT_RX_MSG_EN, 0); + udelay(100); + }
- if (AR_SREV_9462(ah)) - udelay(100); + /* + * Clear the RTC force wake bit to allow the + * mac to go to sleep. + */ + REG_CLR_BIT(ah, AR_RTC_FORCE_WAKE, AR_RTC_FORCE_WAKE_EN);
- if (!AR_SREV_9100(ah) && !AR_SREV_9300_20_OR_LATER(ah)) - REG_WRITE(ah, AR_RC, AR_RC_AHB | AR_RC_HOSTIF); + if (ath9k_hw_mci_is_enabled(ah)) + udelay(100);
- /* Shutdown chip. Active low */ - if (!AR_SREV_5416(ah) && !AR_SREV_9271(ah)) { - REG_CLR_BIT(ah, AR_RTC_RESET, AR_RTC_RESET_EN); - udelay(2); - } + if (!AR_SREV_9100(ah) && !AR_SREV_9300_20_OR_LATER(ah)) + REG_WRITE(ah, AR_RC, AR_RC_AHB | AR_RC_HOSTIF); + + /* Shutdown chip. Active low */ + if (!AR_SREV_5416(ah) && !AR_SREV_9271(ah)) { + REG_CLR_BIT(ah, AR_RTC_RESET, AR_RTC_RESET_EN); + udelay(2); }
/* Clear Bit 14 of AR_WA after putting chip into Full Sleep mode. */ @@@ -2049,38 -2076,44 +2061,38 @@@ * frames. If request, set power mode of chip to * auto/normal. Duration in units of 128us (1/8 TU). */ -static void ath9k_set_power_network_sleep(struct ath_hw *ah, int setChip) +static void ath9k_set_power_network_sleep(struct ath_hw *ah) { - u32 val; + struct ath9k_hw_capabilities *pCap = &ah->caps;
REG_SET_BIT(ah, AR_STA_ID1, AR_STA_ID1_PWR_SAV); - if (setChip) { - struct ath9k_hw_capabilities *pCap = &ah->caps;
- if (!(pCap->hw_caps & ATH9K_HW_CAP_AUTOSLEEP)) { - /* Set WakeOnInterrupt bit; clear ForceWake bit */ - REG_WRITE(ah, AR_RTC_FORCE_WAKE, - AR_RTC_FORCE_WAKE_ON_INT); - } else { + if (!(pCap->hw_caps & ATH9K_HW_CAP_AUTOSLEEP)) { + /* Set WakeOnInterrupt bit; clear ForceWake bit */ + REG_WRITE(ah, AR_RTC_FORCE_WAKE, + AR_RTC_FORCE_WAKE_ON_INT); + } else {
- /* When chip goes into network sleep, it could be waken - * up by MCI_INT interrupt caused by BT's HW messages - * (LNA_xxx, CONT_xxx) which chould be in a very fast - * rate (~100us). This will cause chip to leave and - * re-enter network sleep mode frequently, which in - * consequence will have WLAN MCI HW to generate lots of - * SYS_WAKING and SYS_SLEEPING messages which will make - * BT CPU to busy to process. - */ - if (AR_SREV_9462(ah)) { - val = REG_READ(ah, AR_MCI_INTERRUPT_RX_MSG_EN) & - ~AR_MCI_INTERRUPT_RX_HW_MSG_MASK; - REG_WRITE(ah, AR_MCI_INTERRUPT_RX_MSG_EN, val); - } - /* - * Clear the RTC force wake bit to allow the - * mac to go to sleep. - */ - REG_CLR_BIT(ah, AR_RTC_FORCE_WAKE, - AR_RTC_FORCE_WAKE_EN); - - if (AR_SREV_9462(ah)) - udelay(30); - } + /* When chip goes into network sleep, it could be waken + * up by MCI_INT interrupt caused by BT's HW messages + * (LNA_xxx, CONT_xxx) which chould be in a very fast + * rate (~100us). This will cause chip to leave and + * re-enter network sleep mode frequently, which in + * consequence will have WLAN MCI HW to generate lots of + * SYS_WAKING and SYS_SLEEPING messages which will make + * BT CPU to busy to process. + */ + if (ath9k_hw_mci_is_enabled(ah)) + REG_CLR_BIT(ah, AR_MCI_INTERRUPT_RX_MSG_EN, + AR_MCI_INTERRUPT_RX_HW_MSG_MASK); + /* + * Clear the RTC force wake bit to allow the + * mac to go to sleep. + */ + REG_CLR_BIT(ah, AR_RTC_FORCE_WAKE, AR_RTC_FORCE_WAKE_EN); + + if (ath9k_hw_mci_is_enabled(ah)) + udelay(30); }
/* Clear Bit 14 of AR_WA after putting chip into Net Sleep mode. */ @@@ -2088,7 -2121,7 +2100,7 @@@ REG_WRITE(ah, AR_WA, ah->WARegVal & ~AR_WA_D3_L1_DISABLE); }
-static bool ath9k_hw_set_power_awake(struct ath_hw *ah, int setChip) +static bool ath9k_hw_set_power_awake(struct ath_hw *ah) { u32 val; int i; @@@ -2099,38 -2132,37 +2111,38 @@@ udelay(10); }
- if (setChip) { - if ((REG_READ(ah, AR_RTC_STATUS) & - AR_RTC_STATUS_M) == AR_RTC_STATUS_SHUTDOWN) { - if (!ath9k_hw_set_reset_reg(ah, ATH9K_RESET_POWER_ON)) { - return false; - } - if (!AR_SREV_9300_20_OR_LATER(ah)) - ath9k_hw_init_pll(ah, NULL); + if ((REG_READ(ah, AR_RTC_STATUS) & + AR_RTC_STATUS_M) == AR_RTC_STATUS_SHUTDOWN) { + if (!ath9k_hw_set_reset_reg(ah, ATH9K_RESET_POWER_ON)) { + return false; } - if (AR_SREV_9100(ah)) - REG_SET_BIT(ah, AR_RTC_RESET, - AR_RTC_RESET_EN); + if (!AR_SREV_9300_20_OR_LATER(ah)) + ath9k_hw_init_pll(ah, NULL); + } + if (AR_SREV_9100(ah)) + REG_SET_BIT(ah, AR_RTC_RESET, + AR_RTC_RESET_EN);
+ REG_SET_BIT(ah, AR_RTC_FORCE_WAKE, + AR_RTC_FORCE_WAKE_EN); + udelay(50); + + if (ath9k_hw_mci_is_enabled(ah)) + ar9003_mci_set_power_awake(ah); + + for (i = POWER_UP_TIME / 50; i > 0; i--) { + val = REG_READ(ah, AR_RTC_STATUS) & AR_RTC_STATUS_M; + if (val == AR_RTC_STATUS_ON) + break; + udelay(50); REG_SET_BIT(ah, AR_RTC_FORCE_WAKE, AR_RTC_FORCE_WAKE_EN); - udelay(50); - - for (i = POWER_UP_TIME / 50; i > 0; i--) { - val = REG_READ(ah, AR_RTC_STATUS) & AR_RTC_STATUS_M; - if (val == AR_RTC_STATUS_ON) - break; - udelay(50); - REG_SET_BIT(ah, AR_RTC_FORCE_WAKE, - AR_RTC_FORCE_WAKE_EN); - } - if (i == 0) { - ath_err(ath9k_hw_common(ah), - "Failed to wakeup in %uus\n", - POWER_UP_TIME / 20); - return false; - } + } + if (i == 0) { + ath_err(ath9k_hw_common(ah), + "Failed to wakeup in %uus\n", + POWER_UP_TIME / 20); + return false; }
REG_CLR_BIT(ah, AR_STA_ID1, AR_STA_ID1_PWR_SAV); @@@ -2141,7 -2173,7 +2153,7 @@@ bool ath9k_hw_setpower(struct ath_hw *ah, enum ath9k_power_mode mode) { struct ath_common *common = ath9k_hw_common(ah); - int status = true, setChip = true; + int status = true; static const char *modes[] = { "AWAKE", "FULL-SLEEP", @@@ -2157,17 -2189,25 +2169,17 @@@
switch (mode) { case ATH9K_PM_AWAKE: - status = ath9k_hw_set_power_awake(ah, setChip); - - if (ah->caps.hw_caps & ATH9K_HW_CAP_MCI) - REG_WRITE(ah, AR_RTC_KEEP_AWAKE, 0x2); - + status = ath9k_hw_set_power_awake(ah); break; case ATH9K_PM_FULL_SLEEP: - if (ah->caps.hw_caps & ATH9K_HW_CAP_MCI) + if (ath9k_hw_mci_is_enabled(ah)) ar9003_mci_set_full_sleep(ah);
- ath9k_set_power_sleep(ah, setChip); + ath9k_set_power_sleep(ah); ah->chip_fullsleep = true; break; case ATH9K_PM_NETWORK_SLEEP: - - if (ah->caps.hw_caps & ATH9K_HW_CAP_MCI) - REG_WRITE(ah, AR_RTC_KEEP_AWAKE, 0x2); - - ath9k_set_power_network_sleep(ah, setChip); + ath9k_set_power_network_sleep(ah); break; default: ath_err(common, "Unknown power mode %u\n", mode); @@@ -2737,9 -2777,6 +2749,9 @@@ EXPORT_SYMBOL(ath9k_hw_setrxfilter)
bool ath9k_hw_phy_disable(struct ath_hw *ah) { + if (ath9k_hw_mci_is_enabled(ah)) + ar9003_mci_bt_gain_ctrl(ah); + if (!ath9k_hw_set_reset_reg(ah, ATH9K_RESET_WARM)) return false;
diff --combined drivers/net/wireless/ath/ath9k/xmit.c index f777ddc,4d57139..8d83060 --- a/drivers/net/wireless/ath/ath9k/xmit.c +++ b/drivers/net/wireless/ath/ath9k/xmit.c @@@ -105,19 -105,19 +105,19 @@@ static int ath_max_4ms_framelen[4][32] /* Aggregation logic */ /*********************/
-static void ath_txq_lock(struct ath_softc *sc, struct ath_txq *txq) +void ath_txq_lock(struct ath_softc *sc, struct ath_txq *txq) __acquires(&txq->axq_lock) { spin_lock_bh(&txq->axq_lock); }
-static void ath_txq_unlock(struct ath_softc *sc, struct ath_txq *txq) +void ath_txq_unlock(struct ath_softc *sc, struct ath_txq *txq) __releases(&txq->axq_lock) { spin_unlock_bh(&txq->axq_lock); }
-static void ath_txq_unlock_complete(struct ath_softc *sc, struct ath_txq *txq) +void ath_txq_unlock_complete(struct ath_softc *sc, struct ath_txq *txq) __releases(&txq->axq_lock) { struct sk_buff_head q; @@@ -938,6 -938,7 +938,7 @@@ static void ath_buf_set_rate(struct ath struct ieee80211_tx_rate *rates; const struct ieee80211_rate *rate; struct ieee80211_hdr *hdr; + struct ath_frame_info *fi = get_frame_info(bf->bf_mpdu); int i; u8 rix = 0;
@@@ -948,18 -949,7 +949,7 @@@
/* set dur_update_en for l-sig computation except for PS-Poll frames */ info->dur_update = !ieee80211_is_pspoll(hdr->frame_control); - - /* - * We check if Short Preamble is needed for the CTS rate by - * checking the BSS's global flag. - * But for the rate series, IEEE80211_TX_RC_USE_SHORT_PREAMBLE is used. - */ - rate = ieee80211_get_rts_cts_rate(sc->hw, tx_info); - info->rtscts_rate = rate->hw_value; - - if (tx_info->control.vif && - tx_info->control.vif->bss_conf.use_short_preamble) - info->rtscts_rate |= rate->hw_value_short; + info->rtscts_rate = fi->rtscts_rate;
for (i = 0; i < 4; i++) { bool is_40, is_sgi, is_sp; @@@ -1001,13 -991,13 +991,13 @@@ }
/* legacy rates */ + rate = &sc->sbands[tx_info->band].bitrates[rates[i].idx]; if ((tx_info->band == IEEE80211_BAND_2GHZ) && !(rate->flags & IEEE80211_RATE_ERP_G)) phy = WLAN_RC_PHY_CCK; else phy = WLAN_RC_PHY_OFDM;
- rate = &sc->sbands[tx_info->band].bitrates[rates[i].idx]; info->rates[i].Rate = rate->hw_value; if (rate->hw_value_short) { if (rates[i].flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE) @@@ -1536,7 -1526,7 +1526,7 @@@ bool ath_drain_all_txq(struct ath_soft int i; u32 npend = 0;
- if (sc->sc_flags & SC_OP_INVALID) + if (test_bit(SC_OP_INVALID, &sc->sc_flags)) return true;
ath9k_hw_abort_tx_dma(ah); @@@ -1776,10 -1766,22 +1766,22 @@@ static void setup_frame_info(struct iee struct ieee80211_sta *sta = tx_info->control.sta; struct ieee80211_key_conf *hw_key = tx_info->control.hw_key; struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; + const struct ieee80211_rate *rate; struct ath_frame_info *fi = get_frame_info(skb); struct ath_node *an = NULL; enum ath9k_key_type keytype; + bool short_preamble = false; + + /* + * We check if Short Preamble is needed for the CTS rate by + * checking the BSS's global flag. + * But for the rate series, IEEE80211_TX_RC_USE_SHORT_PREAMBLE is used. + */ + if (tx_info->control.vif && + tx_info->control.vif->bss_conf.use_short_preamble) + short_preamble = true;
+ rate = ieee80211_get_rts_cts_rate(hw, tx_info); keytype = ath9k_cmn_get_hw_crypto_keytype(skb);
if (sta) @@@ -1794,6 -1796,9 +1796,9 @@@ fi->keyix = ATH9K_TXKEYIX_INVALID; fi->keytype = keytype; fi->framelen = framelen; + fi->rtscts_rate = rate->hw_value; + if (short_preamble) + fi->rtscts_rate |= rate->hw_value_short; }
u8 ath_txchainmask_reduction(struct ath_softc *sc, u8 chainmask, u32 rate) @@@ -1994,7 -1999,6 +1999,7 @@@ static void ath_tx_complete(struct ath_ struct ath_common *common = ath9k_hw_common(sc->sc_ah); struct ieee80211_hdr * hdr = (struct ieee80211_hdr *)skb->data; int q, padpos, padsize; + unsigned long flags;
ath_dbg(common, XMIT, "TX complete: skb: %p\n", skb);
@@@ -2013,7 -2017,6 +2018,7 @@@ skb_pull(skb, padsize); }
+ spin_lock_irqsave(&sc->sc_pm_lock, flags); if ((sc->ps_flags & PS_WAIT_FOR_TX_ACK) && !txq->axq_depth) { sc->ps_flags &= ~PS_WAIT_FOR_TX_ACK; ath_dbg(common, PS, @@@ -2023,7 -2026,6 +2028,7 @@@ PS_WAIT_FOR_PSPOLL_DATA | PS_WAIT_FOR_TX_ACK)); } + spin_unlock_irqrestore(&sc->sc_pm_lock, flags);
q = skb_get_queue_mapping(skb); if (txq == sc->tx.txq_map[q]) { @@@ -2234,6 -2236,46 +2239,6 @@@ static void ath_tx_processq(struct ath_ ath_txq_unlock_complete(sc, txq); }
-static void ath_tx_complete_poll_work(struct work_struct *work) -{ - struct ath_softc *sc = container_of(work, struct ath_softc, - tx_complete_work.work); - struct ath_txq *txq; - int i; - bool needreset = false; -#ifdef CONFIG_ATH9K_DEBUGFS - sc->tx_complete_poll_work_seen++; -#endif - - for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) - if (ATH_TXQ_SETUP(sc, i)) { - txq = &sc->tx.txq[i]; - ath_txq_lock(sc, txq); - if (txq->axq_depth) { - if (txq->axq_tx_inprogress) { - needreset = true; - ath_txq_unlock(sc, txq); - break; - } else { - txq->axq_tx_inprogress = true; - } - } - ath_txq_unlock_complete(sc, txq); - } - - if (needreset) { - ath_dbg(ath9k_hw_common(sc->sc_ah), RESET, - "tx hung, resetting the chip\n"); - RESET_STAT_INC(sc, RESET_TYPE_TX_HANG); - ieee80211_queue_work(sc->hw, &sc->hw_reset_work); - } - - ieee80211_queue_delayed_work(sc->hw, &sc->tx_complete_work, - msecs_to_jiffies(ATH_TX_COMPLETE_POLL_INT)); -} - - - void ath_tx_tasklet(struct ath_softc *sc) { struct ath_hw *ah = sc->sc_ah; diff --combined drivers/net/wireless/iwlwifi/dvm/debugfs.c index 8a2d9e6,0000000..b0eff1c mode 100644,000000..100644 --- a/drivers/net/wireless/iwlwifi/dvm/debugfs.c +++ b/drivers/net/wireless/iwlwifi/dvm/debugfs.c @@@ -1,2426 -1,0 +1,2432 @@@ +/****************************************************************************** + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2008 - 2012 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, + * USA + * + * The full GNU General Public License is included in this distribution + * in the file called LICENSE.GPL. + * + * Contact Information: + * Intel Linux Wireless ilw@linux.intel.com + * Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + *****************************************************************************/ + +#include <linux/slab.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/debugfs.h> +#include <linux/ieee80211.h> +#include <net/mac80211.h> +#include "iwl-debug.h" +#include "iwl-io.h" +#include "dev.h" +#include "agn.h" + +/* create and remove of files */ +#define DEBUGFS_ADD_FILE(name, parent, mode) do { \ + if (!debugfs_create_file(#name, mode, parent, priv, \ + &iwl_dbgfs_##name##_ops)) \ + goto err; \ +} while (0) + +#define DEBUGFS_ADD_BOOL(name, parent, ptr) do { \ + struct dentry *__tmp; \ + __tmp = debugfs_create_bool(#name, S_IWUSR | S_IRUSR, \ + parent, ptr); \ + if (IS_ERR(__tmp) || !__tmp) \ + goto err; \ +} while (0) + +#define DEBUGFS_ADD_X32(name, parent, ptr) do { \ + struct dentry *__tmp; \ + __tmp = debugfs_create_x32(#name, S_IWUSR | S_IRUSR, \ + parent, ptr); \ + if (IS_ERR(__tmp) || !__tmp) \ + goto err; \ +} while (0) + +#define DEBUGFS_ADD_U32(name, parent, ptr, mode) do { \ + struct dentry *__tmp; \ + __tmp = debugfs_create_u32(#name, mode, \ + parent, ptr); \ + if (IS_ERR(__tmp) || !__tmp) \ + goto err; \ +} while (0) + +/* file operation */ +#define DEBUGFS_READ_FUNC(name) \ +static ssize_t iwl_dbgfs_##name##_read(struct file *file, \ + char __user *user_buf, \ + size_t count, loff_t *ppos); + +#define DEBUGFS_WRITE_FUNC(name) \ +static ssize_t iwl_dbgfs_##name##_write(struct file *file, \ + const char __user *user_buf, \ + size_t count, loff_t *ppos); + + +#define DEBUGFS_READ_FILE_OPS(name) \ + DEBUGFS_READ_FUNC(name); \ +static const struct file_operations iwl_dbgfs_##name##_ops = { \ + .read = iwl_dbgfs_##name##_read, \ + .open = simple_open, \ + .llseek = generic_file_llseek, \ +}; + +#define DEBUGFS_WRITE_FILE_OPS(name) \ + DEBUGFS_WRITE_FUNC(name); \ +static const struct file_operations iwl_dbgfs_##name##_ops = { \ + .write = iwl_dbgfs_##name##_write, \ + .open = simple_open, \ + .llseek = generic_file_llseek, \ +}; + + +#define DEBUGFS_READ_WRITE_FILE_OPS(name) \ + DEBUGFS_READ_FUNC(name); \ + DEBUGFS_WRITE_FUNC(name); \ +static const struct file_operations iwl_dbgfs_##name##_ops = { \ + .write = iwl_dbgfs_##name##_write, \ + .read = iwl_dbgfs_##name##_read, \ + .open = simple_open, \ + .llseek = generic_file_llseek, \ +}; + +static ssize_t iwl_dbgfs_sram_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) +{ + u32 val = 0; + char *buf; + ssize_t ret; + int i = 0; + bool device_format = false; + int offset = 0; + int len = 0; + int pos = 0; + int sram; + struct iwl_priv *priv = file->private_data; + const struct fw_img *img; + size_t bufsz; + + /* default is to dump the entire data segment */ + if (!priv->dbgfs_sram_offset && !priv->dbgfs_sram_len) { + priv->dbgfs_sram_offset = 0x800000; + if (!priv->ucode_loaded) + return -EINVAL; + img = &priv->fw->img[priv->cur_ucode]; + priv->dbgfs_sram_len = img->sec[IWL_UCODE_SECTION_DATA].len; + } + len = priv->dbgfs_sram_len; + + if (len == -4) { + device_format = true; + len = 4; + } + + bufsz = 50 + len * 4; + buf = kmalloc(bufsz, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + pos += scnprintf(buf + pos, bufsz - pos, "sram_len: 0x%x\n", + len); + pos += scnprintf(buf + pos, bufsz - pos, "sram_offset: 0x%x\n", + priv->dbgfs_sram_offset); + + /* adjust sram address since reads are only on even u32 boundaries */ + offset = priv->dbgfs_sram_offset & 0x3; + sram = priv->dbgfs_sram_offset & ~0x3; + + /* read the first u32 from sram */ + val = 
iwl_read_targ_mem(priv->trans, sram); + + for (; len; len--) { + /* put the address at the start of every line */ + if (i == 0) + pos += scnprintf(buf + pos, bufsz - pos, + "%08X: ", sram + offset); + + if (device_format) + pos += scnprintf(buf + pos, bufsz - pos, + "%02x", (val >> (8 * (3 - offset))) & 0xff); + else + pos += scnprintf(buf + pos, bufsz - pos, + "%02x ", (val >> (8 * offset)) & 0xff); + + /* if all bytes processed, read the next u32 from sram */ + if (++offset == 4) { + sram += 4; + offset = 0; + val = iwl_read_targ_mem(priv->trans, sram); + } + + /* put in extra spaces and split lines for human readability */ + if (++i == 16) { + i = 0; + pos += scnprintf(buf + pos, bufsz - pos, "\n"); + } else if (!(i & 7)) { + pos += scnprintf(buf + pos, bufsz - pos, " "); + } else if (!(i & 3)) { + pos += scnprintf(buf + pos, bufsz - pos, " "); + } + } + if (i) + pos += scnprintf(buf + pos, bufsz - pos, "\n"); + + ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); + kfree(buf); + return ret; +} + +static ssize_t iwl_dbgfs_sram_write(struct file *file, + const char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct iwl_priv *priv = file->private_data; + char buf[64]; + int buf_size; + u32 offset, len; + + memset(buf, 0, sizeof(buf)); + buf_size = min(count, sizeof(buf) - 1); + if (copy_from_user(buf, user_buf, buf_size)) + return -EFAULT; + + if (sscanf(buf, "%x,%x", &offset, &len) == 2) { + priv->dbgfs_sram_offset = offset; + priv->dbgfs_sram_len = len; + } else if (sscanf(buf, "%x", &offset) == 1) { + priv->dbgfs_sram_offset = offset; + priv->dbgfs_sram_len = -4; + } else { + priv->dbgfs_sram_offset = 0; + priv->dbgfs_sram_len = 0; + } + + return count; +} + +static ssize_t iwl_dbgfs_wowlan_sram_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct iwl_priv *priv = file->private_data; + const struct fw_img *img = &priv->fw->img[IWL_UCODE_WOWLAN]; + + if (!priv->wowlan_sram) + return -ENODATA; + + return simple_read_from_buffer(user_buf, count, ppos, + priv->wowlan_sram, + img->sec[IWL_UCODE_SECTION_DATA].len); +} +static ssize_t iwl_dbgfs_stations_read(struct file *file, char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct iwl_priv *priv = file->private_data; + struct iwl_station_entry *station; + struct iwl_tid_data *tid_data; + char *buf; + int i, j, pos = 0; + ssize_t ret; + /* Add 30 for initial string */ + const size_t bufsz = 30 + sizeof(char) * 500 * (priv->num_stations); + + buf = kmalloc(bufsz, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + pos += scnprintf(buf + pos, bufsz - pos, "num of stations: %d\n\n", + priv->num_stations); + + for (i = 0; i < IWLAGN_STATION_COUNT; i++) { + station = &priv->stations[i]; + if (!station->used) + continue; + pos += scnprintf(buf + pos, bufsz - pos, + "station %d - addr: %pM, flags: %#x\n", + i, station->sta.sta.addr, + station->sta.station_flags_msk); + pos += scnprintf(buf + pos, bufsz - pos, + "TID seqno next_rclmd " + "rate_n_flags state txq\n"); + + for (j = 0; j < IWL_MAX_TID_COUNT; j++) { + tid_data = &priv->tid_data[i][j]; + pos += scnprintf(buf + pos, bufsz - pos, + "%d: 0x%.4x 0x%.4x 0x%.8x " + "%d %.2d", + j, tid_data->seq_number, + tid_data->next_reclaimed, + tid_data->agg.rate_n_flags, + tid_data->agg.state, + tid_data->agg.txq_id); + + if (tid_data->agg.wait_for_ba) + pos += scnprintf(buf + pos, bufsz - pos, + " - waitforba"); + pos += scnprintf(buf + pos, bufsz - pos, "\n"); + } + + pos += scnprintf(buf + pos, bufsz - pos, "\n"); + } + + ret = 
simple_read_from_buffer(user_buf, count, ppos, buf, pos); + kfree(buf); + return ret; +} + +static ssize_t iwl_dbgfs_nvm_read(struct file *file, + char __user *user_buf, + size_t count, + loff_t *ppos) +{ + ssize_t ret; + struct iwl_priv *priv = file->private_data; + int pos = 0, ofs = 0, buf_size = 0; + const u8 *ptr; + char *buf; + u16 eeprom_ver; + size_t eeprom_len = priv->eeprom_blob_size; + buf_size = 4 * eeprom_len + 256; + + if (eeprom_len % 16) + return -ENODATA; + + ptr = priv->eeprom_blob; + if (!ptr) + return -ENOMEM; + + /* 4 characters for byte 0xYY */ + buf = kzalloc(buf_size, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + eeprom_ver = priv->eeprom_data->eeprom_version; + pos += scnprintf(buf + pos, buf_size - pos, + "NVM version: 0x%x\n", eeprom_ver); + for (ofs = 0 ; ofs < eeprom_len ; ofs += 16) { + pos += scnprintf(buf + pos, buf_size - pos, "0x%.4x ", ofs); + hex_dump_to_buffer(ptr + ofs, 16 , 16, 2, buf + pos, + buf_size - pos, 0); + pos += strlen(buf + pos); + if (buf_size - pos > 0) + buf[pos++] = '\n'; + } + + ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); + kfree(buf); + return ret; +} + +static ssize_t iwl_dbgfs_channels_read(struct file *file, char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct iwl_priv *priv = file->private_data; + struct ieee80211_channel *channels = NULL; + const struct ieee80211_supported_band *supp_band = NULL; + int pos = 0, i, bufsz = PAGE_SIZE; + char *buf; + ssize_t ret; + + buf = kzalloc(bufsz, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + supp_band = iwl_get_hw_mode(priv, IEEE80211_BAND_2GHZ); + if (supp_band) { + channels = supp_band->channels; + + pos += scnprintf(buf + pos, bufsz - pos, + "Displaying %d channels in 2.4GHz band 802.11bg):\n", + supp_band->n_channels); + + for (i = 0; i < supp_band->n_channels; i++) + pos += scnprintf(buf + pos, bufsz - pos, + "%d: %ddBm: BSS%s%s, %s.\n", + channels[i].hw_value, + channels[i].max_power, + channels[i].flags & IEEE80211_CHAN_RADAR ? + " (IEEE 802.11h required)" : "", + ((channels[i].flags & IEEE80211_CHAN_NO_IBSS) + || (channels[i].flags & + IEEE80211_CHAN_RADAR)) ? "" : + ", IBSS", + channels[i].flags & + IEEE80211_CHAN_PASSIVE_SCAN ? + "passive only" : "active/passive"); + } + supp_band = iwl_get_hw_mode(priv, IEEE80211_BAND_5GHZ); + if (supp_band) { + channels = supp_band->channels; + + pos += scnprintf(buf + pos, bufsz - pos, + "Displaying %d channels in 5.2GHz band (802.11a)\n", + supp_band->n_channels); + + for (i = 0; i < supp_band->n_channels; i++) + pos += scnprintf(buf + pos, bufsz - pos, + "%d: %ddBm: BSS%s%s, %s.\n", + channels[i].hw_value, + channels[i].max_power, + channels[i].flags & IEEE80211_CHAN_RADAR ? + " (IEEE 802.11h required)" : "", + ((channels[i].flags & IEEE80211_CHAN_NO_IBSS) + || (channels[i].flags & + IEEE80211_CHAN_RADAR)) ? "" : + ", IBSS", + channels[i].flags & + IEEE80211_CHAN_PASSIVE_SCAN ? 
+ "passive only" : "active/passive"); + } + ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); + kfree(buf); + return ret; +} + +static ssize_t iwl_dbgfs_status_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) { + + struct iwl_priv *priv = file->private_data; + char buf[512]; + int pos = 0; + const size_t bufsz = sizeof(buf); + + pos += scnprintf(buf + pos, bufsz - pos, "STATUS_RF_KILL_HW:\t %d\n", + test_bit(STATUS_RF_KILL_HW, &priv->status)); + pos += scnprintf(buf + pos, bufsz - pos, "STATUS_CT_KILL:\t\t %d\n", + test_bit(STATUS_CT_KILL, &priv->status)); + pos += scnprintf(buf + pos, bufsz - pos, "STATUS_ALIVE:\t\t %d\n", + test_bit(STATUS_ALIVE, &priv->status)); + pos += scnprintf(buf + pos, bufsz - pos, "STATUS_READY:\t\t %d\n", + test_bit(STATUS_READY, &priv->status)); + pos += scnprintf(buf + pos, bufsz - pos, "STATUS_EXIT_PENDING:\t %d\n", + test_bit(STATUS_EXIT_PENDING, &priv->status)); + pos += scnprintf(buf + pos, bufsz - pos, "STATUS_STATISTICS:\t %d\n", + test_bit(STATUS_STATISTICS, &priv->status)); + pos += scnprintf(buf + pos, bufsz - pos, "STATUS_SCANNING:\t %d\n", + test_bit(STATUS_SCANNING, &priv->status)); + pos += scnprintf(buf + pos, bufsz - pos, "STATUS_SCAN_ABORTING:\t %d\n", + test_bit(STATUS_SCAN_ABORTING, &priv->status)); + pos += scnprintf(buf + pos, bufsz - pos, "STATUS_SCAN_HW:\t\t %d\n", + test_bit(STATUS_SCAN_HW, &priv->status)); + pos += scnprintf(buf + pos, bufsz - pos, "STATUS_POWER_PMI:\t %d\n", + test_bit(STATUS_POWER_PMI, &priv->status)); + pos += scnprintf(buf + pos, bufsz - pos, "STATUS_FW_ERROR:\t %d\n", + test_bit(STATUS_FW_ERROR, &priv->status)); + return simple_read_from_buffer(user_buf, count, ppos, buf, pos); +} + +static ssize_t iwl_dbgfs_rx_handlers_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) { + + struct iwl_priv *priv = file->private_data; + + int pos = 0; + int cnt = 0; + char *buf; + int bufsz = 24 * 64; /* 24 items * 64 char per item */ + ssize_t ret; + + buf = kzalloc(bufsz, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + for (cnt = 0; cnt < REPLY_MAX; cnt++) { + if (priv->rx_handlers_stats[cnt] > 0) + pos += scnprintf(buf + pos, bufsz - pos, + "\tRx handler[%36s]:\t\t %u\n", + iwl_dvm_get_cmd_string(cnt), + priv->rx_handlers_stats[cnt]); + } + + ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); + kfree(buf); + return ret; +} + +static ssize_t iwl_dbgfs_rx_handlers_write(struct file *file, + const char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct iwl_priv *priv = file->private_data; + + char buf[8]; + int buf_size; + u32 reset_flag; + + memset(buf, 0, sizeof(buf)); + buf_size = min(count, sizeof(buf) - 1); + if (copy_from_user(buf, user_buf, buf_size)) + return -EFAULT; + if (sscanf(buf, "%x", &reset_flag) != 1) + return -EFAULT; + if (reset_flag == 0) + memset(&priv->rx_handlers_stats[0], 0, + sizeof(priv->rx_handlers_stats)); + + return count; +} + +static ssize_t iwl_dbgfs_qos_read(struct file *file, char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct iwl_priv *priv = file->private_data; + struct iwl_rxon_context *ctx; + int pos = 0, i; + char buf[256 * NUM_IWL_RXON_CTX]; + const size_t bufsz = sizeof(buf); + + for_each_context(priv, ctx) { + pos += scnprintf(buf + pos, bufsz - pos, "context %d:\n", + ctx->ctxid); + for (i = 0; i < AC_NUM; i++) { + pos += scnprintf(buf + pos, bufsz - pos, + "\tcw_min\tcw_max\taifsn\ttxop\n"); + pos += scnprintf(buf + pos, bufsz - pos, + "AC[%d]\t%u\t%u\t%u\t%u\n", i, + 
ctx->qos_data.def_qos_parm.ac[i].cw_min, + ctx->qos_data.def_qos_parm.ac[i].cw_max, + ctx->qos_data.def_qos_parm.ac[i].aifsn, + ctx->qos_data.def_qos_parm.ac[i].edca_txop); + } + pos += scnprintf(buf + pos, bufsz - pos, "\n"); + } + return simple_read_from_buffer(user_buf, count, ppos, buf, pos); +} + +static ssize_t iwl_dbgfs_thermal_throttling_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct iwl_priv *priv = file->private_data; + struct iwl_tt_mgmt *tt = &priv->thermal_throttle; + struct iwl_tt_restriction *restriction; + char buf[100]; + int pos = 0; + const size_t bufsz = sizeof(buf); + + pos += scnprintf(buf + pos, bufsz - pos, + "Thermal Throttling Mode: %s\n", + tt->advanced_tt ? "Advance" : "Legacy"); + pos += scnprintf(buf + pos, bufsz - pos, + "Thermal Throttling State: %d\n", + tt->state); + if (tt->advanced_tt) { + restriction = tt->restriction + tt->state; + pos += scnprintf(buf + pos, bufsz - pos, + "Tx mode: %d\n", + restriction->tx_stream); + pos += scnprintf(buf + pos, bufsz - pos, + "Rx mode: %d\n", + restriction->rx_stream); + pos += scnprintf(buf + pos, bufsz - pos, + "HT mode: %d\n", + restriction->is_ht); + } + return simple_read_from_buffer(user_buf, count, ppos, buf, pos); +} + +static ssize_t iwl_dbgfs_disable_ht40_write(struct file *file, + const char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct iwl_priv *priv = file->private_data; + char buf[8]; + int buf_size; + int ht40; + + memset(buf, 0, sizeof(buf)); + buf_size = min(count, sizeof(buf) - 1); + if (copy_from_user(buf, user_buf, buf_size)) + return -EFAULT; + if (sscanf(buf, "%d", &ht40) != 1) + return -EFAULT; + if (!iwl_is_any_associated(priv)) + priv->disable_ht40 = ht40 ? true : false; + else + return -EINVAL; + + return count; +} + +static ssize_t iwl_dbgfs_disable_ht40_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct iwl_priv *priv = file->private_data; + char buf[100]; + int pos = 0; + const size_t bufsz = sizeof(buf); + + pos += scnprintf(buf + pos, bufsz - pos, + "11n 40MHz Mode: %s\n", + priv->disable_ht40 ? "Disabled" : "Enabled"); + return simple_read_from_buffer(user_buf, count, ppos, buf, pos); +} + +static ssize_t iwl_dbgfs_temperature_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct iwl_priv *priv = file->private_data; + char buf[8]; + int pos = 0; + const size_t bufsz = sizeof(buf); + + pos += scnprintf(buf + pos, bufsz - pos, "%d\n", priv->temperature); + return simple_read_from_buffer(user_buf, count, ppos, buf, pos); +} + + +static ssize_t iwl_dbgfs_sleep_level_override_write(struct file *file, + const char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct iwl_priv *priv = file->private_data; + char buf[8]; + int buf_size; + int value; + + memset(buf, 0, sizeof(buf)); + buf_size = min(count, sizeof(buf) - 1); + if (copy_from_user(buf, user_buf, buf_size)) + return -EFAULT; + + if (sscanf(buf, "%d", &value) != 1) + return -EINVAL; + + /* + * Our users expect 0 to be "CAM", but 0 isn't actually + * valid here. However, let's not confuse them and present + * IWL_POWER_INDEX_1 as "1", not "0". 
+ */ + if (value == 0) + return -EINVAL; + else if (value > 0) + value -= 1; + + if (value != -1 && (value < 0 || value >= IWL_POWER_NUM)) + return -EINVAL; + + if (!iwl_is_ready_rf(priv)) + return -EAGAIN; + + priv->power_data.debug_sleep_level_override = value; + + mutex_lock(&priv->mutex); + iwl_power_update_mode(priv, true); + mutex_unlock(&priv->mutex); + + return count; +} + +static ssize_t iwl_dbgfs_sleep_level_override_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct iwl_priv *priv = file->private_data; + char buf[10]; + int pos, value; + const size_t bufsz = sizeof(buf); + + /* see the write function */ + value = priv->power_data.debug_sleep_level_override; + if (value >= 0) + value += 1; + + pos = scnprintf(buf, bufsz, "%d\n", value); + return simple_read_from_buffer(user_buf, count, ppos, buf, pos); +} + +static ssize_t iwl_dbgfs_current_sleep_command_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct iwl_priv *priv = file->private_data; + char buf[200]; + int pos = 0, i; + const size_t bufsz = sizeof(buf); + struct iwl_powertable_cmd *cmd = &priv->power_data.sleep_cmd; + + pos += scnprintf(buf + pos, bufsz - pos, + "flags: %#.2x\n", le16_to_cpu(cmd->flags)); + pos += scnprintf(buf + pos, bufsz - pos, + "RX/TX timeout: %d/%d usec\n", + le32_to_cpu(cmd->rx_data_timeout), + le32_to_cpu(cmd->tx_data_timeout)); + for (i = 0; i < IWL_POWER_VEC_SIZE; i++) + pos += scnprintf(buf + pos, bufsz - pos, + "sleep_interval[%d]: %d\n", i, + le32_to_cpu(cmd->sleep_interval[i])); + + return simple_read_from_buffer(user_buf, count, ppos, buf, pos); +} + +DEBUGFS_READ_WRITE_FILE_OPS(sram); +DEBUGFS_READ_FILE_OPS(wowlan_sram); +DEBUGFS_READ_FILE_OPS(nvm); +DEBUGFS_READ_FILE_OPS(stations); +DEBUGFS_READ_FILE_OPS(channels); +DEBUGFS_READ_FILE_OPS(status); +DEBUGFS_READ_WRITE_FILE_OPS(rx_handlers); +DEBUGFS_READ_FILE_OPS(qos); +DEBUGFS_READ_FILE_OPS(thermal_throttling); +DEBUGFS_READ_WRITE_FILE_OPS(disable_ht40); +DEBUGFS_READ_FILE_OPS(temperature); +DEBUGFS_READ_WRITE_FILE_OPS(sleep_level_override); +DEBUGFS_READ_FILE_OPS(current_sleep_command); + +static const char *fmt_value = " %-30s %10u\n"; +static const char *fmt_hex = " %-30s 0x%02X\n"; +static const char *fmt_table = " %-30s %10u %10u %10u %10u\n"; +static const char *fmt_header = + "%-32s current cumulative delta max\n"; + +static int iwl_statistics_flag(struct iwl_priv *priv, char *buf, int bufsz) +{ + int p = 0; + u32 flag; + + lockdep_assert_held(&priv->statistics.lock); + + flag = le32_to_cpu(priv->statistics.flag); + + p += scnprintf(buf + p, bufsz - p, "Statistics Flag(0x%X):\n", flag); + if (flag & UCODE_STATISTICS_CLEAR_MSK) + p += scnprintf(buf + p, bufsz - p, + "\tStatistics have been cleared\n"); + p += scnprintf(buf + p, bufsz - p, "\tOperational Frequency: %s\n", + (flag & UCODE_STATISTICS_FREQUENCY_MSK) + ? "2.4 GHz" : "5.2 GHz"); + p += scnprintf(buf + p, bufsz - p, "\tTGj Narrow Band: %s\n", + (flag & UCODE_STATISTICS_NARROW_BAND_MSK) + ? 
"enabled" : "disabled"); + + return p; +} + +static ssize_t iwl_dbgfs_ucode_rx_stats_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct iwl_priv *priv = file->private_data; + int pos = 0; + char *buf; + int bufsz = sizeof(struct statistics_rx_phy) * 40 + + sizeof(struct statistics_rx_non_phy) * 40 + + sizeof(struct statistics_rx_ht_phy) * 40 + 400; + ssize_t ret; + struct statistics_rx_phy *ofdm, *accum_ofdm, *delta_ofdm, *max_ofdm; + struct statistics_rx_phy *cck, *accum_cck, *delta_cck, *max_cck; + struct statistics_rx_non_phy *general, *accum_general; + struct statistics_rx_non_phy *delta_general, *max_general; + struct statistics_rx_ht_phy *ht, *accum_ht, *delta_ht, *max_ht; + + if (!iwl_is_alive(priv)) + return -EAGAIN; + + buf = kzalloc(bufsz, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + /* + * the statistic information display here is based on + * the last statistics notification from uCode + * might not reflect the current uCode activity + */ + spin_lock_bh(&priv->statistics.lock); + ofdm = &priv->statistics.rx_ofdm; + cck = &priv->statistics.rx_cck; + general = &priv->statistics.rx_non_phy; + ht = &priv->statistics.rx_ofdm_ht; + accum_ofdm = &priv->accum_stats.rx_ofdm; + accum_cck = &priv->accum_stats.rx_cck; + accum_general = &priv->accum_stats.rx_non_phy; + accum_ht = &priv->accum_stats.rx_ofdm_ht; + delta_ofdm = &priv->delta_stats.rx_ofdm; + delta_cck = &priv->delta_stats.rx_cck; + delta_general = &priv->delta_stats.rx_non_phy; + delta_ht = &priv->delta_stats.rx_ofdm_ht; + max_ofdm = &priv->max_delta_stats.rx_ofdm; + max_cck = &priv->max_delta_stats.rx_cck; + max_general = &priv->max_delta_stats.rx_non_phy; + max_ht = &priv->max_delta_stats.rx_ofdm_ht; + + pos += iwl_statistics_flag(priv, buf, bufsz); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_header, "Statistics_Rx - OFDM:"); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "ina_cnt:", + le32_to_cpu(ofdm->ina_cnt), + accum_ofdm->ina_cnt, + delta_ofdm->ina_cnt, max_ofdm->ina_cnt); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "fina_cnt:", + le32_to_cpu(ofdm->fina_cnt), accum_ofdm->fina_cnt, + delta_ofdm->fina_cnt, max_ofdm->fina_cnt); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "plcp_err:", + le32_to_cpu(ofdm->plcp_err), accum_ofdm->plcp_err, + delta_ofdm->plcp_err, max_ofdm->plcp_err); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "crc32_err:", + le32_to_cpu(ofdm->crc32_err), accum_ofdm->crc32_err, + delta_ofdm->crc32_err, max_ofdm->crc32_err); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "overrun_err:", + le32_to_cpu(ofdm->overrun_err), + accum_ofdm->overrun_err, delta_ofdm->overrun_err, + max_ofdm->overrun_err); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "early_overrun_err:", + le32_to_cpu(ofdm->early_overrun_err), + accum_ofdm->early_overrun_err, + delta_ofdm->early_overrun_err, + max_ofdm->early_overrun_err); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "crc32_good:", + le32_to_cpu(ofdm->crc32_good), + accum_ofdm->crc32_good, delta_ofdm->crc32_good, + max_ofdm->crc32_good); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "false_alarm_cnt:", + le32_to_cpu(ofdm->false_alarm_cnt), + accum_ofdm->false_alarm_cnt, + delta_ofdm->false_alarm_cnt, + max_ofdm->false_alarm_cnt); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "fina_sync_err_cnt:", + le32_to_cpu(ofdm->fina_sync_err_cnt), + accum_ofdm->fina_sync_err_cnt, + delta_ofdm->fina_sync_err_cnt, + max_ofdm->fina_sync_err_cnt); + pos += 
scnprintf(buf + pos, bufsz - pos, + fmt_table, "sfd_timeout:", + le32_to_cpu(ofdm->sfd_timeout), + accum_ofdm->sfd_timeout, delta_ofdm->sfd_timeout, + max_ofdm->sfd_timeout); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "fina_timeout:", + le32_to_cpu(ofdm->fina_timeout), + accum_ofdm->fina_timeout, delta_ofdm->fina_timeout, + max_ofdm->fina_timeout); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "unresponded_rts:", + le32_to_cpu(ofdm->unresponded_rts), + accum_ofdm->unresponded_rts, + delta_ofdm->unresponded_rts, + max_ofdm->unresponded_rts); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "rxe_frame_lmt_ovrun:", + le32_to_cpu(ofdm->rxe_frame_limit_overrun), + accum_ofdm->rxe_frame_limit_overrun, + delta_ofdm->rxe_frame_limit_overrun, + max_ofdm->rxe_frame_limit_overrun); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "sent_ack_cnt:", + le32_to_cpu(ofdm->sent_ack_cnt), + accum_ofdm->sent_ack_cnt, delta_ofdm->sent_ack_cnt, + max_ofdm->sent_ack_cnt); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "sent_cts_cnt:", + le32_to_cpu(ofdm->sent_cts_cnt), + accum_ofdm->sent_cts_cnt, delta_ofdm->sent_cts_cnt, + max_ofdm->sent_cts_cnt); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "sent_ba_rsp_cnt:", + le32_to_cpu(ofdm->sent_ba_rsp_cnt), + accum_ofdm->sent_ba_rsp_cnt, + delta_ofdm->sent_ba_rsp_cnt, + max_ofdm->sent_ba_rsp_cnt); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "dsp_self_kill:", + le32_to_cpu(ofdm->dsp_self_kill), + accum_ofdm->dsp_self_kill, + delta_ofdm->dsp_self_kill, + max_ofdm->dsp_self_kill); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "mh_format_err:", + le32_to_cpu(ofdm->mh_format_err), + accum_ofdm->mh_format_err, + delta_ofdm->mh_format_err, + max_ofdm->mh_format_err); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "re_acq_main_rssi_sum:", + le32_to_cpu(ofdm->re_acq_main_rssi_sum), + accum_ofdm->re_acq_main_rssi_sum, + delta_ofdm->re_acq_main_rssi_sum, + max_ofdm->re_acq_main_rssi_sum); + + pos += scnprintf(buf + pos, bufsz - pos, + fmt_header, "Statistics_Rx - CCK:"); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "ina_cnt:", + le32_to_cpu(cck->ina_cnt), accum_cck->ina_cnt, + delta_cck->ina_cnt, max_cck->ina_cnt); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "fina_cnt:", + le32_to_cpu(cck->fina_cnt), accum_cck->fina_cnt, + delta_cck->fina_cnt, max_cck->fina_cnt); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "plcp_err:", + le32_to_cpu(cck->plcp_err), accum_cck->plcp_err, + delta_cck->plcp_err, max_cck->plcp_err); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "crc32_err:", + le32_to_cpu(cck->crc32_err), accum_cck->crc32_err, + delta_cck->crc32_err, max_cck->crc32_err); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "overrun_err:", + le32_to_cpu(cck->overrun_err), + accum_cck->overrun_err, delta_cck->overrun_err, + max_cck->overrun_err); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "early_overrun_err:", + le32_to_cpu(cck->early_overrun_err), + accum_cck->early_overrun_err, + delta_cck->early_overrun_err, + max_cck->early_overrun_err); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "crc32_good:", + le32_to_cpu(cck->crc32_good), accum_cck->crc32_good, + delta_cck->crc32_good, max_cck->crc32_good); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "false_alarm_cnt:", + le32_to_cpu(cck->false_alarm_cnt), + accum_cck->false_alarm_cnt, + delta_cck->false_alarm_cnt, max_cck->false_alarm_cnt); + pos += scnprintf(buf + pos, 
bufsz - pos, + fmt_table, "fina_sync_err_cnt:", + le32_to_cpu(cck->fina_sync_err_cnt), + accum_cck->fina_sync_err_cnt, + delta_cck->fina_sync_err_cnt, + max_cck->fina_sync_err_cnt); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "sfd_timeout:", + le32_to_cpu(cck->sfd_timeout), + accum_cck->sfd_timeout, delta_cck->sfd_timeout, + max_cck->sfd_timeout); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "fina_timeout:", + le32_to_cpu(cck->fina_timeout), + accum_cck->fina_timeout, delta_cck->fina_timeout, + max_cck->fina_timeout); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "unresponded_rts:", + le32_to_cpu(cck->unresponded_rts), + accum_cck->unresponded_rts, delta_cck->unresponded_rts, + max_cck->unresponded_rts); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "rxe_frame_lmt_ovrun:", + le32_to_cpu(cck->rxe_frame_limit_overrun), + accum_cck->rxe_frame_limit_overrun, + delta_cck->rxe_frame_limit_overrun, + max_cck->rxe_frame_limit_overrun); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "sent_ack_cnt:", + le32_to_cpu(cck->sent_ack_cnt), + accum_cck->sent_ack_cnt, delta_cck->sent_ack_cnt, + max_cck->sent_ack_cnt); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "sent_cts_cnt:", + le32_to_cpu(cck->sent_cts_cnt), + accum_cck->sent_cts_cnt, delta_cck->sent_cts_cnt, + max_cck->sent_cts_cnt); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "sent_ba_rsp_cnt:", + le32_to_cpu(cck->sent_ba_rsp_cnt), + accum_cck->sent_ba_rsp_cnt, + delta_cck->sent_ba_rsp_cnt, + max_cck->sent_ba_rsp_cnt); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "dsp_self_kill:", + le32_to_cpu(cck->dsp_self_kill), + accum_cck->dsp_self_kill, delta_cck->dsp_self_kill, + max_cck->dsp_self_kill); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "mh_format_err:", + le32_to_cpu(cck->mh_format_err), + accum_cck->mh_format_err, delta_cck->mh_format_err, + max_cck->mh_format_err); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "re_acq_main_rssi_sum:", + le32_to_cpu(cck->re_acq_main_rssi_sum), + accum_cck->re_acq_main_rssi_sum, + delta_cck->re_acq_main_rssi_sum, + max_cck->re_acq_main_rssi_sum); + + pos += scnprintf(buf + pos, bufsz - pos, + fmt_header, "Statistics_Rx - GENERAL:"); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "bogus_cts:", + le32_to_cpu(general->bogus_cts), + accum_general->bogus_cts, delta_general->bogus_cts, + max_general->bogus_cts); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "bogus_ack:", + le32_to_cpu(general->bogus_ack), + accum_general->bogus_ack, delta_general->bogus_ack, + max_general->bogus_ack); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "non_bssid_frames:", + le32_to_cpu(general->non_bssid_frames), + accum_general->non_bssid_frames, + delta_general->non_bssid_frames, + max_general->non_bssid_frames); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "filtered_frames:", + le32_to_cpu(general->filtered_frames), + accum_general->filtered_frames, + delta_general->filtered_frames, + max_general->filtered_frames); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "non_channel_beacons:", + le32_to_cpu(general->non_channel_beacons), + accum_general->non_channel_beacons, + delta_general->non_channel_beacons, + max_general->non_channel_beacons); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "channel_beacons:", + le32_to_cpu(general->channel_beacons), + accum_general->channel_beacons, + delta_general->channel_beacons, + max_general->channel_beacons); + pos += scnprintf(buf + pos, 
bufsz - pos, + fmt_table, "num_missed_bcon:", + le32_to_cpu(general->num_missed_bcon), + accum_general->num_missed_bcon, + delta_general->num_missed_bcon, + max_general->num_missed_bcon); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "adc_rx_saturation_time:", + le32_to_cpu(general->adc_rx_saturation_time), + accum_general->adc_rx_saturation_time, + delta_general->adc_rx_saturation_time, + max_general->adc_rx_saturation_time); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "ina_detect_search_tm:", + le32_to_cpu(general->ina_detection_search_time), + accum_general->ina_detection_search_time, + delta_general->ina_detection_search_time, + max_general->ina_detection_search_time); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "beacon_silence_rssi_a:", + le32_to_cpu(general->beacon_silence_rssi_a), + accum_general->beacon_silence_rssi_a, + delta_general->beacon_silence_rssi_a, + max_general->beacon_silence_rssi_a); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "beacon_silence_rssi_b:", + le32_to_cpu(general->beacon_silence_rssi_b), + accum_general->beacon_silence_rssi_b, + delta_general->beacon_silence_rssi_b, + max_general->beacon_silence_rssi_b); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "beacon_silence_rssi_c:", + le32_to_cpu(general->beacon_silence_rssi_c), + accum_general->beacon_silence_rssi_c, + delta_general->beacon_silence_rssi_c, + max_general->beacon_silence_rssi_c); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "interference_data_flag:", + le32_to_cpu(general->interference_data_flag), + accum_general->interference_data_flag, + delta_general->interference_data_flag, + max_general->interference_data_flag); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "channel_load:", + le32_to_cpu(general->channel_load), + accum_general->channel_load, + delta_general->channel_load, + max_general->channel_load); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "dsp_false_alarms:", + le32_to_cpu(general->dsp_false_alarms), + accum_general->dsp_false_alarms, + delta_general->dsp_false_alarms, + max_general->dsp_false_alarms); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "beacon_rssi_a:", + le32_to_cpu(general->beacon_rssi_a), + accum_general->beacon_rssi_a, + delta_general->beacon_rssi_a, + max_general->beacon_rssi_a); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "beacon_rssi_b:", + le32_to_cpu(general->beacon_rssi_b), + accum_general->beacon_rssi_b, + delta_general->beacon_rssi_b, + max_general->beacon_rssi_b); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "beacon_rssi_c:", + le32_to_cpu(general->beacon_rssi_c), + accum_general->beacon_rssi_c, + delta_general->beacon_rssi_c, + max_general->beacon_rssi_c); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "beacon_energy_a:", + le32_to_cpu(general->beacon_energy_a), + accum_general->beacon_energy_a, + delta_general->beacon_energy_a, + max_general->beacon_energy_a); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "beacon_energy_b:", + le32_to_cpu(general->beacon_energy_b), + accum_general->beacon_energy_b, + delta_general->beacon_energy_b, + max_general->beacon_energy_b); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "beacon_energy_c:", + le32_to_cpu(general->beacon_energy_c), + accum_general->beacon_energy_c, + delta_general->beacon_energy_c, + max_general->beacon_energy_c); + + pos += scnprintf(buf + pos, bufsz - pos, + fmt_header, "Statistics_Rx - OFDM_HT:"); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, 
"plcp_err:", + le32_to_cpu(ht->plcp_err), accum_ht->plcp_err, + delta_ht->plcp_err, max_ht->plcp_err); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "overrun_err:", + le32_to_cpu(ht->overrun_err), accum_ht->overrun_err, + delta_ht->overrun_err, max_ht->overrun_err); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "early_overrun_err:", + le32_to_cpu(ht->early_overrun_err), + accum_ht->early_overrun_err, + delta_ht->early_overrun_err, + max_ht->early_overrun_err); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "crc32_good:", + le32_to_cpu(ht->crc32_good), accum_ht->crc32_good, + delta_ht->crc32_good, max_ht->crc32_good); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "crc32_err:", + le32_to_cpu(ht->crc32_err), accum_ht->crc32_err, + delta_ht->crc32_err, max_ht->crc32_err); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "mh_format_err:", + le32_to_cpu(ht->mh_format_err), + accum_ht->mh_format_err, + delta_ht->mh_format_err, max_ht->mh_format_err); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "agg_crc32_good:", + le32_to_cpu(ht->agg_crc32_good), + accum_ht->agg_crc32_good, + delta_ht->agg_crc32_good, max_ht->agg_crc32_good); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "agg_mpdu_cnt:", + le32_to_cpu(ht->agg_mpdu_cnt), + accum_ht->agg_mpdu_cnt, + delta_ht->agg_mpdu_cnt, max_ht->agg_mpdu_cnt); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "agg_cnt:", + le32_to_cpu(ht->agg_cnt), accum_ht->agg_cnt, + delta_ht->agg_cnt, max_ht->agg_cnt); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "unsupport_mcs:", + le32_to_cpu(ht->unsupport_mcs), + accum_ht->unsupport_mcs, + delta_ht->unsupport_mcs, max_ht->unsupport_mcs); + + spin_unlock_bh(&priv->statistics.lock); + + ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); + kfree(buf); + return ret; +} + +static ssize_t iwl_dbgfs_ucode_tx_stats_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct iwl_priv *priv = file->private_data; + int pos = 0; + char *buf; + int bufsz = (sizeof(struct statistics_tx) * 48) + 250; + ssize_t ret; + struct statistics_tx *tx, *accum_tx, *delta_tx, *max_tx; + + if (!iwl_is_alive(priv)) + return -EAGAIN; + + buf = kzalloc(bufsz, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + /* the statistic information display here is based on + * the last statistics notification from uCode + * might not reflect the current uCode activity + */ + spin_lock_bh(&priv->statistics.lock); + + tx = &priv->statistics.tx; + accum_tx = &priv->accum_stats.tx; + delta_tx = &priv->delta_stats.tx; + max_tx = &priv->max_delta_stats.tx; + + pos += iwl_statistics_flag(priv, buf, bufsz); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_header, "Statistics_Tx:"); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "preamble:", + le32_to_cpu(tx->preamble_cnt), + accum_tx->preamble_cnt, + delta_tx->preamble_cnt, max_tx->preamble_cnt); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "rx_detected_cnt:", + le32_to_cpu(tx->rx_detected_cnt), + accum_tx->rx_detected_cnt, + delta_tx->rx_detected_cnt, max_tx->rx_detected_cnt); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "bt_prio_defer_cnt:", + le32_to_cpu(tx->bt_prio_defer_cnt), + accum_tx->bt_prio_defer_cnt, + delta_tx->bt_prio_defer_cnt, + max_tx->bt_prio_defer_cnt); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "bt_prio_kill_cnt:", + le32_to_cpu(tx->bt_prio_kill_cnt), + accum_tx->bt_prio_kill_cnt, + delta_tx->bt_prio_kill_cnt, + max_tx->bt_prio_kill_cnt); 
+ pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "few_bytes_cnt:", + le32_to_cpu(tx->few_bytes_cnt), + accum_tx->few_bytes_cnt, + delta_tx->few_bytes_cnt, max_tx->few_bytes_cnt); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "cts_timeout:", + le32_to_cpu(tx->cts_timeout), accum_tx->cts_timeout, + delta_tx->cts_timeout, max_tx->cts_timeout); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "ack_timeout:", + le32_to_cpu(tx->ack_timeout), + accum_tx->ack_timeout, + delta_tx->ack_timeout, max_tx->ack_timeout); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "expected_ack_cnt:", + le32_to_cpu(tx->expected_ack_cnt), + accum_tx->expected_ack_cnt, + delta_tx->expected_ack_cnt, + max_tx->expected_ack_cnt); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "actual_ack_cnt:", + le32_to_cpu(tx->actual_ack_cnt), + accum_tx->actual_ack_cnt, + delta_tx->actual_ack_cnt, + max_tx->actual_ack_cnt); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "dump_msdu_cnt:", + le32_to_cpu(tx->dump_msdu_cnt), + accum_tx->dump_msdu_cnt, + delta_tx->dump_msdu_cnt, + max_tx->dump_msdu_cnt); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "abort_nxt_frame_mismatch:", + le32_to_cpu(tx->burst_abort_next_frame_mismatch_cnt), + accum_tx->burst_abort_next_frame_mismatch_cnt, + delta_tx->burst_abort_next_frame_mismatch_cnt, + max_tx->burst_abort_next_frame_mismatch_cnt); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "abort_missing_nxt_frame:", + le32_to_cpu(tx->burst_abort_missing_next_frame_cnt), + accum_tx->burst_abort_missing_next_frame_cnt, + delta_tx->burst_abort_missing_next_frame_cnt, + max_tx->burst_abort_missing_next_frame_cnt); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "cts_timeout_collision:", + le32_to_cpu(tx->cts_timeout_collision), + accum_tx->cts_timeout_collision, + delta_tx->cts_timeout_collision, + max_tx->cts_timeout_collision); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "ack_ba_timeout_collision:", + le32_to_cpu(tx->ack_or_ba_timeout_collision), + accum_tx->ack_or_ba_timeout_collision, + delta_tx->ack_or_ba_timeout_collision, + max_tx->ack_or_ba_timeout_collision); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "agg ba_timeout:", + le32_to_cpu(tx->agg.ba_timeout), + accum_tx->agg.ba_timeout, + delta_tx->agg.ba_timeout, + max_tx->agg.ba_timeout); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "agg ba_resched_frames:", + le32_to_cpu(tx->agg.ba_reschedule_frames), + accum_tx->agg.ba_reschedule_frames, + delta_tx->agg.ba_reschedule_frames, + max_tx->agg.ba_reschedule_frames); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "agg scd_query_agg_frame:", + le32_to_cpu(tx->agg.scd_query_agg_frame_cnt), + accum_tx->agg.scd_query_agg_frame_cnt, + delta_tx->agg.scd_query_agg_frame_cnt, + max_tx->agg.scd_query_agg_frame_cnt); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "agg scd_query_no_agg:", + le32_to_cpu(tx->agg.scd_query_no_agg), + accum_tx->agg.scd_query_no_agg, + delta_tx->agg.scd_query_no_agg, + max_tx->agg.scd_query_no_agg); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "agg scd_query_agg:", + le32_to_cpu(tx->agg.scd_query_agg), + accum_tx->agg.scd_query_agg, + delta_tx->agg.scd_query_agg, + max_tx->agg.scd_query_agg); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "agg scd_query_mismatch:", + le32_to_cpu(tx->agg.scd_query_mismatch), + accum_tx->agg.scd_query_mismatch, + delta_tx->agg.scd_query_mismatch, + max_tx->agg.scd_query_mismatch); + pos += scnprintf(buf + pos, 
bufsz - pos, + fmt_table, "agg frame_not_ready:", + le32_to_cpu(tx->agg.frame_not_ready), + accum_tx->agg.frame_not_ready, + delta_tx->agg.frame_not_ready, + max_tx->agg.frame_not_ready); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "agg underrun:", + le32_to_cpu(tx->agg.underrun), + accum_tx->agg.underrun, + delta_tx->agg.underrun, max_tx->agg.underrun); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "agg bt_prio_kill:", + le32_to_cpu(tx->agg.bt_prio_kill), + accum_tx->agg.bt_prio_kill, + delta_tx->agg.bt_prio_kill, + max_tx->agg.bt_prio_kill); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "agg rx_ba_rsp_cnt:", + le32_to_cpu(tx->agg.rx_ba_rsp_cnt), + accum_tx->agg.rx_ba_rsp_cnt, + delta_tx->agg.rx_ba_rsp_cnt, + max_tx->agg.rx_ba_rsp_cnt); + + if (tx->tx_power.ant_a || tx->tx_power.ant_b || tx->tx_power.ant_c) { + pos += scnprintf(buf + pos, bufsz - pos, + "tx power: (1/2 dB step)\n"); + if ((priv->eeprom_data->valid_tx_ant & ANT_A) && + tx->tx_power.ant_a) + pos += scnprintf(buf + pos, bufsz - pos, + fmt_hex, "antenna A:", + tx->tx_power.ant_a); + if ((priv->eeprom_data->valid_tx_ant & ANT_B) && + tx->tx_power.ant_b) + pos += scnprintf(buf + pos, bufsz - pos, + fmt_hex, "antenna B:", + tx->tx_power.ant_b); + if ((priv->eeprom_data->valid_tx_ant & ANT_C) && + tx->tx_power.ant_c) + pos += scnprintf(buf + pos, bufsz - pos, + fmt_hex, "antenna C:", + tx->tx_power.ant_c); + } + + spin_unlock_bh(&priv->statistics.lock); + + ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); + kfree(buf); + return ret; +} + +static ssize_t iwl_dbgfs_ucode_general_stats_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct iwl_priv *priv = file->private_data; + int pos = 0; + char *buf; + int bufsz = sizeof(struct statistics_general) * 10 + 300; + ssize_t ret; + struct statistics_general_common *general, *accum_general; + struct statistics_general_common *delta_general, *max_general; + struct statistics_dbg *dbg, *accum_dbg, *delta_dbg, *max_dbg; + struct statistics_div *div, *accum_div, *delta_div, *max_div; + + if (!iwl_is_alive(priv)) + return -EAGAIN; + + buf = kzalloc(bufsz, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + /* the statistic information display here is based on + * the last statistics notification from uCode + * might not reflect the current uCode activity + */ + + spin_lock_bh(&priv->statistics.lock); + + general = &priv->statistics.common; + dbg = &priv->statistics.common.dbg; + div = &priv->statistics.common.div; + accum_general = &priv->accum_stats.common; + accum_dbg = &priv->accum_stats.common.dbg; + accum_div = &priv->accum_stats.common.div; + delta_general = &priv->delta_stats.common; + max_general = &priv->max_delta_stats.common; + delta_dbg = &priv->delta_stats.common.dbg; + max_dbg = &priv->max_delta_stats.common.dbg; + delta_div = &priv->delta_stats.common.div; + max_div = &priv->max_delta_stats.common.div; + + pos += iwl_statistics_flag(priv, buf, bufsz); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_header, "Statistics_General:"); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_value, "temperature:", + le32_to_cpu(general->temperature)); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_value, "temperature_m:", + le32_to_cpu(general->temperature_m)); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_value, "ttl_timestamp:", + le32_to_cpu(general->ttl_timestamp)); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "burst_check:", + le32_to_cpu(dbg->burst_check), + accum_dbg->burst_check, + 
delta_dbg->burst_check, max_dbg->burst_check); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "burst_count:", + le32_to_cpu(dbg->burst_count), + accum_dbg->burst_count, + delta_dbg->burst_count, max_dbg->burst_count); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "wait_for_silence_timeout_count:", + le32_to_cpu(dbg->wait_for_silence_timeout_cnt), + accum_dbg->wait_for_silence_timeout_cnt, + delta_dbg->wait_for_silence_timeout_cnt, + max_dbg->wait_for_silence_timeout_cnt); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "sleep_time:", + le32_to_cpu(general->sleep_time), + accum_general->sleep_time, + delta_general->sleep_time, max_general->sleep_time); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "slots_out:", + le32_to_cpu(general->slots_out), + accum_general->slots_out, + delta_general->slots_out, max_general->slots_out); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "slots_idle:", + le32_to_cpu(general->slots_idle), + accum_general->slots_idle, + delta_general->slots_idle, max_general->slots_idle); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "tx_on_a:", + le32_to_cpu(div->tx_on_a), accum_div->tx_on_a, + delta_div->tx_on_a, max_div->tx_on_a); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "tx_on_b:", + le32_to_cpu(div->tx_on_b), accum_div->tx_on_b, + delta_div->tx_on_b, max_div->tx_on_b); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "exec_time:", + le32_to_cpu(div->exec_time), accum_div->exec_time, + delta_div->exec_time, max_div->exec_time); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "probe_time:", + le32_to_cpu(div->probe_time), accum_div->probe_time, + delta_div->probe_time, max_div->probe_time); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "rx_enable_counter:", + le32_to_cpu(general->rx_enable_counter), + accum_general->rx_enable_counter, + delta_general->rx_enable_counter, + max_general->rx_enable_counter); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "num_of_sos_states:", + le32_to_cpu(general->num_of_sos_states), + accum_general->num_of_sos_states, + delta_general->num_of_sos_states, + max_general->num_of_sos_states); + + spin_unlock_bh(&priv->statistics.lock); + + ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); + kfree(buf); + return ret; +} + +static ssize_t iwl_dbgfs_ucode_bt_stats_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct iwl_priv *priv = (struct iwl_priv *)file->private_data; + int pos = 0; + char *buf; + int bufsz = (sizeof(struct statistics_bt_activity) * 24) + 200; + ssize_t ret; + struct statistics_bt_activity *bt, *accum_bt; + + if (!iwl_is_alive(priv)) + return -EAGAIN; + + if (!priv->bt_enable_flag) + return -EINVAL; + + /* make request to uCode to retrieve statistics information */ + mutex_lock(&priv->mutex); + ret = iwl_send_statistics_request(priv, CMD_SYNC, false); + mutex_unlock(&priv->mutex); + + if (ret) + return -EAGAIN; + buf = kzalloc(bufsz, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + /* + * the statistic information display here is based on + * the last statistics notification from uCode + * might not reflect the current uCode activity + */ + + spin_lock_bh(&priv->statistics.lock); + + bt = &priv->statistics.bt_activity; + accum_bt = &priv->accum_stats.bt_activity; + + pos += iwl_statistics_flag(priv, buf, bufsz); + pos += scnprintf(buf + pos, bufsz - pos, "Statistics_BT:\n"); + pos += scnprintf(buf + pos, bufsz - pos, + "\t\t\tcurrent\t\t\taccumulative\n"); + pos += 
scnprintf(buf + pos, bufsz - pos, + "hi_priority_tx_req_cnt:\t\t%u\t\t\t%u\n", + le32_to_cpu(bt->hi_priority_tx_req_cnt), + accum_bt->hi_priority_tx_req_cnt); + pos += scnprintf(buf + pos, bufsz - pos, + "hi_priority_tx_denied_cnt:\t%u\t\t\t%u\n", + le32_to_cpu(bt->hi_priority_tx_denied_cnt), + accum_bt->hi_priority_tx_denied_cnt); + pos += scnprintf(buf + pos, bufsz - pos, + "lo_priority_tx_req_cnt:\t\t%u\t\t\t%u\n", + le32_to_cpu(bt->lo_priority_tx_req_cnt), + accum_bt->lo_priority_tx_req_cnt); + pos += scnprintf(buf + pos, bufsz - pos, + "lo_priority_tx_denied_cnt:\t%u\t\t\t%u\n", + le32_to_cpu(bt->lo_priority_tx_denied_cnt), + accum_bt->lo_priority_tx_denied_cnt); + pos += scnprintf(buf + pos, bufsz - pos, + "hi_priority_rx_req_cnt:\t\t%u\t\t\t%u\n", + le32_to_cpu(bt->hi_priority_rx_req_cnt), + accum_bt->hi_priority_rx_req_cnt); + pos += scnprintf(buf + pos, bufsz - pos, + "hi_priority_rx_denied_cnt:\t%u\t\t\t%u\n", + le32_to_cpu(bt->hi_priority_rx_denied_cnt), + accum_bt->hi_priority_rx_denied_cnt); + pos += scnprintf(buf + pos, bufsz - pos, + "lo_priority_rx_req_cnt:\t\t%u\t\t\t%u\n", + le32_to_cpu(bt->lo_priority_rx_req_cnt), + accum_bt->lo_priority_rx_req_cnt); + pos += scnprintf(buf + pos, bufsz - pos, + "lo_priority_rx_denied_cnt:\t%u\t\t\t%u\n", + le32_to_cpu(bt->lo_priority_rx_denied_cnt), + accum_bt->lo_priority_rx_denied_cnt); + + pos += scnprintf(buf + pos, bufsz - pos, + "(rx)num_bt_kills:\t\t%u\t\t\t%u\n", + le32_to_cpu(priv->statistics.num_bt_kills), + priv->statistics.accum_num_bt_kills); + + spin_unlock_bh(&priv->statistics.lock); + + ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); + kfree(buf); + return ret; +} + +static ssize_t iwl_dbgfs_reply_tx_error_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct iwl_priv *priv = (struct iwl_priv *)file->private_data; + int pos = 0; + char *buf; + int bufsz = (sizeof(struct reply_tx_error_statistics) * 24) + + (sizeof(struct reply_agg_tx_error_statistics) * 24) + 200; + ssize_t ret; + + if (!iwl_is_alive(priv)) + return -EAGAIN; + + buf = kzalloc(bufsz, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + pos += scnprintf(buf + pos, bufsz - pos, "Statistics_TX_Error:\n"); + pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t\t%u\n", + iwl_get_tx_fail_reason(TX_STATUS_POSTPONE_DELAY), + priv->reply_tx_stats.pp_delay); + pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n", + iwl_get_tx_fail_reason(TX_STATUS_POSTPONE_FEW_BYTES), + priv->reply_tx_stats.pp_few_bytes); + pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n", + iwl_get_tx_fail_reason(TX_STATUS_POSTPONE_BT_PRIO), + priv->reply_tx_stats.pp_bt_prio); + pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n", + iwl_get_tx_fail_reason(TX_STATUS_POSTPONE_QUIET_PERIOD), + priv->reply_tx_stats.pp_quiet_period); + pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n", + iwl_get_tx_fail_reason(TX_STATUS_POSTPONE_CALC_TTAK), + priv->reply_tx_stats.pp_calc_ttak); + pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t%u\n", + iwl_get_tx_fail_reason( + TX_STATUS_FAIL_INTERNAL_CROSSED_RETRY), + priv->reply_tx_stats.int_crossed_retry); + pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n", + iwl_get_tx_fail_reason(TX_STATUS_FAIL_SHORT_LIMIT), + priv->reply_tx_stats.short_limit); + pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n", + iwl_get_tx_fail_reason(TX_STATUS_FAIL_LONG_LIMIT), + priv->reply_tx_stats.long_limit); + pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n", + 
iwl_get_tx_fail_reason(TX_STATUS_FAIL_FIFO_UNDERRUN), + priv->reply_tx_stats.fifo_underrun); + pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n", + iwl_get_tx_fail_reason(TX_STATUS_FAIL_DRAIN_FLOW), + priv->reply_tx_stats.drain_flow); + pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n", + iwl_get_tx_fail_reason(TX_STATUS_FAIL_RFKILL_FLUSH), + priv->reply_tx_stats.rfkill_flush); + pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n", + iwl_get_tx_fail_reason(TX_STATUS_FAIL_LIFE_EXPIRE), + priv->reply_tx_stats.life_expire); + pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n", + iwl_get_tx_fail_reason(TX_STATUS_FAIL_DEST_PS), + priv->reply_tx_stats.dest_ps); + pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n", + iwl_get_tx_fail_reason(TX_STATUS_FAIL_HOST_ABORTED), + priv->reply_tx_stats.host_abort); + pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n", + iwl_get_tx_fail_reason(TX_STATUS_FAIL_BT_RETRY), + priv->reply_tx_stats.pp_delay); + pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n", + iwl_get_tx_fail_reason(TX_STATUS_FAIL_STA_INVALID), + priv->reply_tx_stats.sta_invalid); + pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n", + iwl_get_tx_fail_reason(TX_STATUS_FAIL_FRAG_DROPPED), + priv->reply_tx_stats.frag_drop); + pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n", + iwl_get_tx_fail_reason(TX_STATUS_FAIL_TID_DISABLE), + priv->reply_tx_stats.tid_disable); + pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n", + iwl_get_tx_fail_reason(TX_STATUS_FAIL_FIFO_FLUSHED), + priv->reply_tx_stats.fifo_flush); + pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t%u\n", + iwl_get_tx_fail_reason( + TX_STATUS_FAIL_INSUFFICIENT_CF_POLL), + priv->reply_tx_stats.insuff_cf_poll); + pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n", + iwl_get_tx_fail_reason(TX_STATUS_FAIL_PASSIVE_NO_RX), + priv->reply_tx_stats.fail_hw_drop); + pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t%u\n", + iwl_get_tx_fail_reason( + TX_STATUS_FAIL_NO_BEACON_ON_RADAR), + priv->reply_tx_stats.sta_color_mismatch); + pos += scnprintf(buf + pos, bufsz - pos, "UNKNOWN:\t\t\t%u\n", + priv->reply_tx_stats.unknown); + + pos += scnprintf(buf + pos, bufsz - pos, + "\nStatistics_Agg_TX_Error:\n"); + + pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n", + iwl_get_agg_tx_fail_reason(AGG_TX_STATE_UNDERRUN_MSK), + priv->reply_agg_tx_stats.underrun); + pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n", + iwl_get_agg_tx_fail_reason(AGG_TX_STATE_BT_PRIO_MSK), + priv->reply_agg_tx_stats.bt_prio); + pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n", + iwl_get_agg_tx_fail_reason(AGG_TX_STATE_FEW_BYTES_MSK), + priv->reply_agg_tx_stats.few_bytes); + pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n", + iwl_get_agg_tx_fail_reason(AGG_TX_STATE_ABORT_MSK), + priv->reply_agg_tx_stats.abort); + pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t%u\n", + iwl_get_agg_tx_fail_reason( + AGG_TX_STATE_LAST_SENT_TTL_MSK), + priv->reply_agg_tx_stats.last_sent_ttl); + pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t%u\n", + iwl_get_agg_tx_fail_reason( + AGG_TX_STATE_LAST_SENT_TRY_CNT_MSK), + priv->reply_agg_tx_stats.last_sent_try); + pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t%u\n", + iwl_get_agg_tx_fail_reason( + AGG_TX_STATE_LAST_SENT_BT_KILL_MSK), + priv->reply_agg_tx_stats.last_sent_bt_kill); + pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n", + iwl_get_agg_tx_fail_reason(AGG_TX_STATE_SCD_QUERY_MSK), + priv->reply_agg_tx_stats.scd_query); + pos += 
scnprintf(buf + pos, bufsz - pos, "%s:\t\t%u\n", + iwl_get_agg_tx_fail_reason( + AGG_TX_STATE_TEST_BAD_CRC32_MSK), + priv->reply_agg_tx_stats.bad_crc32); + pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n", + iwl_get_agg_tx_fail_reason(AGG_TX_STATE_RESPONSE_MSK), + priv->reply_agg_tx_stats.response); + pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n", + iwl_get_agg_tx_fail_reason(AGG_TX_STATE_DUMP_TX_MSK), + priv->reply_agg_tx_stats.dump_tx); + pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n", + iwl_get_agg_tx_fail_reason(AGG_TX_STATE_DELAY_TX_MSK), + priv->reply_agg_tx_stats.delay_tx); + pos += scnprintf(buf + pos, bufsz - pos, "UNKNOWN:\t\t\t%u\n", + priv->reply_agg_tx_stats.unknown); + + ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); + kfree(buf); + return ret; +} + +static ssize_t iwl_dbgfs_sensitivity_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) { + + struct iwl_priv *priv = file->private_data; + int pos = 0; + int cnt = 0; + char *buf; + int bufsz = sizeof(struct iwl_sensitivity_data) * 4 + 100; + ssize_t ret; + struct iwl_sensitivity_data *data; + + data = &priv->sensitivity_data; + buf = kzalloc(bufsz, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + pos += scnprintf(buf + pos, bufsz - pos, "auto_corr_ofdm:\t\t\t %u\n", + data->auto_corr_ofdm); + pos += scnprintf(buf + pos, bufsz - pos, + "auto_corr_ofdm_mrc:\t\t %u\n", + data->auto_corr_ofdm_mrc); + pos += scnprintf(buf + pos, bufsz - pos, "auto_corr_ofdm_x1:\t\t %u\n", + data->auto_corr_ofdm_x1); + pos += scnprintf(buf + pos, bufsz - pos, + "auto_corr_ofdm_mrc_x1:\t\t %u\n", + data->auto_corr_ofdm_mrc_x1); + pos += scnprintf(buf + pos, bufsz - pos, "auto_corr_cck:\t\t\t %u\n", + data->auto_corr_cck); + pos += scnprintf(buf + pos, bufsz - pos, "auto_corr_cck_mrc:\t\t %u\n", + data->auto_corr_cck_mrc); + pos += scnprintf(buf + pos, bufsz - pos, + "last_bad_plcp_cnt_ofdm:\t\t %u\n", + data->last_bad_plcp_cnt_ofdm); + pos += scnprintf(buf + pos, bufsz - pos, "last_fa_cnt_ofdm:\t\t %u\n", + data->last_fa_cnt_ofdm); + pos += scnprintf(buf + pos, bufsz - pos, + "last_bad_plcp_cnt_cck:\t\t %u\n", + data->last_bad_plcp_cnt_cck); + pos += scnprintf(buf + pos, bufsz - pos, "last_fa_cnt_cck:\t\t %u\n", + data->last_fa_cnt_cck); + pos += scnprintf(buf + pos, bufsz - pos, "nrg_curr_state:\t\t\t %u\n", + data->nrg_curr_state); + pos += scnprintf(buf + pos, bufsz - pos, "nrg_prev_state:\t\t\t %u\n", + data->nrg_prev_state); + pos += scnprintf(buf + pos, bufsz - pos, "nrg_value:\t\t\t"); + for (cnt = 0; cnt < 10; cnt++) { + pos += scnprintf(buf + pos, bufsz - pos, " %u", + data->nrg_value[cnt]); + } + pos += scnprintf(buf + pos, bufsz - pos, "\n"); + pos += scnprintf(buf + pos, bufsz - pos, "nrg_silence_rssi:\t\t"); + for (cnt = 0; cnt < NRG_NUM_PREV_STAT_L; cnt++) { + pos += scnprintf(buf + pos, bufsz - pos, " %u", + data->nrg_silence_rssi[cnt]); + } + pos += scnprintf(buf + pos, bufsz - pos, "\n"); + pos += scnprintf(buf + pos, bufsz - pos, "nrg_silence_ref:\t\t %u\n", + data->nrg_silence_ref); + pos += scnprintf(buf + pos, bufsz - pos, "nrg_energy_idx:\t\t\t %u\n", + data->nrg_energy_idx); + pos += scnprintf(buf + pos, bufsz - pos, "nrg_silence_idx:\t\t %u\n", + data->nrg_silence_idx); + pos += scnprintf(buf + pos, bufsz - pos, "nrg_th_cck:\t\t\t %u\n", + data->nrg_th_cck); + pos += scnprintf(buf + pos, bufsz - pos, + "nrg_auto_corr_silence_diff:\t %u\n", + data->nrg_auto_corr_silence_diff); + pos += scnprintf(buf + pos, bufsz - pos, "num_in_cck_no_fa:\t\t %u\n", + 
data->num_in_cck_no_fa); + pos += scnprintf(buf + pos, bufsz - pos, "nrg_th_ofdm:\t\t\t %u\n", + data->nrg_th_ofdm); + + ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); + kfree(buf); + return ret; +} + + +static ssize_t iwl_dbgfs_chain_noise_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) { + + struct iwl_priv *priv = file->private_data; + int pos = 0; + int cnt = 0; + char *buf; + int bufsz = sizeof(struct iwl_chain_noise_data) * 4 + 100; + ssize_t ret; + struct iwl_chain_noise_data *data; + + data = &priv->chain_noise_data; + buf = kzalloc(bufsz, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + pos += scnprintf(buf + pos, bufsz - pos, "active_chains:\t\t\t %u\n", + data->active_chains); + pos += scnprintf(buf + pos, bufsz - pos, "chain_noise_a:\t\t\t %u\n", + data->chain_noise_a); + pos += scnprintf(buf + pos, bufsz - pos, "chain_noise_b:\t\t\t %u\n", + data->chain_noise_b); + pos += scnprintf(buf + pos, bufsz - pos, "chain_noise_c:\t\t\t %u\n", + data->chain_noise_c); + pos += scnprintf(buf + pos, bufsz - pos, "chain_signal_a:\t\t\t %u\n", + data->chain_signal_a); + pos += scnprintf(buf + pos, bufsz - pos, "chain_signal_b:\t\t\t %u\n", + data->chain_signal_b); + pos += scnprintf(buf + pos, bufsz - pos, "chain_signal_c:\t\t\t %u\n", + data->chain_signal_c); + pos += scnprintf(buf + pos, bufsz - pos, "beacon_count:\t\t\t %u\n", + data->beacon_count); + + pos += scnprintf(buf + pos, bufsz - pos, "disconn_array:\t\t\t"); + for (cnt = 0; cnt < NUM_RX_CHAINS; cnt++) { + pos += scnprintf(buf + pos, bufsz - pos, " %u", + data->disconn_array[cnt]); + } + pos += scnprintf(buf + pos, bufsz - pos, "\n"); + pos += scnprintf(buf + pos, bufsz - pos, "delta_gain_code:\t\t"); + for (cnt = 0; cnt < NUM_RX_CHAINS; cnt++) { + pos += scnprintf(buf + pos, bufsz - pos, " %u", + data->delta_gain_code[cnt]); + } + pos += scnprintf(buf + pos, bufsz - pos, "\n"); + pos += scnprintf(buf + pos, bufsz - pos, "radio_write:\t\t\t %u\n", + data->radio_write); + pos += scnprintf(buf + pos, bufsz - pos, "state:\t\t\t\t %u\n", + data->state); + + ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); + kfree(buf); + return ret; +} + +static ssize_t iwl_dbgfs_power_save_status_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct iwl_priv *priv = file->private_data; + char buf[60]; + int pos = 0; + const size_t bufsz = sizeof(buf); + u32 pwrsave_status; + + pwrsave_status = iwl_read32(priv->trans, CSR_GP_CNTRL) & + CSR_GP_REG_POWER_SAVE_STATUS_MSK; + + pos += scnprintf(buf + pos, bufsz - pos, "Power Save Status: "); + pos += scnprintf(buf + pos, bufsz - pos, "%s\n", + (pwrsave_status == CSR_GP_REG_NO_POWER_SAVE) ? "none" : + (pwrsave_status == CSR_GP_REG_MAC_POWER_SAVE) ? "MAC" : + (pwrsave_status == CSR_GP_REG_PHY_POWER_SAVE) ? 
"PHY" : + "error"); + + return simple_read_from_buffer(user_buf, count, ppos, buf, pos); +} + +static ssize_t iwl_dbgfs_clear_ucode_statistics_write(struct file *file, + const char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct iwl_priv *priv = file->private_data; + char buf[8]; + int buf_size; + int clear; + + memset(buf, 0, sizeof(buf)); + buf_size = min(count, sizeof(buf) - 1); + if (copy_from_user(buf, user_buf, buf_size)) + return -EFAULT; + if (sscanf(buf, "%d", &clear) != 1) + return -EFAULT; + + /* make request to uCode to retrieve statistics information */ + mutex_lock(&priv->mutex); + iwl_send_statistics_request(priv, CMD_SYNC, true); + mutex_unlock(&priv->mutex); + + return count; +} + +static ssize_t iwl_dbgfs_ucode_tracing_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) { + + struct iwl_priv *priv = file->private_data; + int pos = 0; + char buf[128]; + const size_t bufsz = sizeof(buf); + + pos += scnprintf(buf + pos, bufsz - pos, "ucode trace timer is %s\n", + priv->event_log.ucode_trace ? "On" : "Off"); + pos += scnprintf(buf + pos, bufsz - pos, "non_wraps_count:\t\t %u\n", + priv->event_log.non_wraps_count); + pos += scnprintf(buf + pos, bufsz - pos, "wraps_once_count:\t\t %u\n", + priv->event_log.wraps_once_count); + pos += scnprintf(buf + pos, bufsz - pos, "wraps_more_count:\t\t %u\n", + priv->event_log.wraps_more_count); + + return simple_read_from_buffer(user_buf, count, ppos, buf, pos); +} + +static ssize_t iwl_dbgfs_ucode_tracing_write(struct file *file, + const char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct iwl_priv *priv = file->private_data; + char buf[8]; + int buf_size; + int trace; + + memset(buf, 0, sizeof(buf)); + buf_size = min(count, sizeof(buf) - 1); + if (copy_from_user(buf, user_buf, buf_size)) + return -EFAULT; + if (sscanf(buf, "%d", &trace) != 1) + return -EFAULT; + + if (trace) { + priv->event_log.ucode_trace = true; + if (iwl_is_alive(priv)) { + /* start collecting data now */ + mod_timer(&priv->ucode_trace, jiffies); + } + } else { + priv->event_log.ucode_trace = false; + del_timer_sync(&priv->ucode_trace); + } + + return count; +} + +static ssize_t iwl_dbgfs_rxon_flags_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) { + + struct iwl_priv *priv = file->private_data; + int len = 0; + char buf[20]; + + len = sprintf(buf, "0x%04X\n", + le32_to_cpu(priv->contexts[IWL_RXON_CTX_BSS].active.flags)); + return simple_read_from_buffer(user_buf, count, ppos, buf, len); +} + +static ssize_t iwl_dbgfs_rxon_filter_flags_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) { + + struct iwl_priv *priv = file->private_data; + int len = 0; + char buf[20]; + + len = sprintf(buf, "0x%04X\n", + le32_to_cpu(priv->contexts[IWL_RXON_CTX_BSS].active.filter_flags)); + return simple_read_from_buffer(user_buf, count, ppos, buf, len); +} + +static ssize_t iwl_dbgfs_missed_beacon_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) { + + struct iwl_priv *priv = file->private_data; + int pos = 0; + char buf[12]; + const size_t bufsz = sizeof(buf); + + pos += scnprintf(buf + pos, bufsz - pos, "%d\n", + priv->missed_beacon_threshold); + + return simple_read_from_buffer(user_buf, count, ppos, buf, pos); +} + +static ssize_t iwl_dbgfs_missed_beacon_write(struct file *file, + const char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct iwl_priv *priv = file->private_data; + char buf[8]; + int buf_size; + int missed; + + memset(buf, 
0, sizeof(buf)); + buf_size = min(count, sizeof(buf) - 1); + if (copy_from_user(buf, user_buf, buf_size)) + return -EFAULT; + if (sscanf(buf, "%d", &missed) != 1) + return -EINVAL; + + if (missed < IWL_MISSED_BEACON_THRESHOLD_MIN || + missed > IWL_MISSED_BEACON_THRESHOLD_MAX) + priv->missed_beacon_threshold = + IWL_MISSED_BEACON_THRESHOLD_DEF; + else + priv->missed_beacon_threshold = missed; + + return count; +} + +static ssize_t iwl_dbgfs_plcp_delta_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) { + + struct iwl_priv *priv = file->private_data; + int pos = 0; + char buf[12]; + const size_t bufsz = sizeof(buf); + + pos += scnprintf(buf + pos, bufsz - pos, "%u\n", + priv->plcp_delta_threshold); + + return simple_read_from_buffer(user_buf, count, ppos, buf, pos); +} + +static ssize_t iwl_dbgfs_plcp_delta_write(struct file *file, + const char __user *user_buf, + size_t count, loff_t *ppos) { + + struct iwl_priv *priv = file->private_data; + char buf[8]; + int buf_size; + int plcp; + + memset(buf, 0, sizeof(buf)); + buf_size = min(count, sizeof(buf) - 1); + if (copy_from_user(buf, user_buf, buf_size)) + return -EFAULT; + if (sscanf(buf, "%d", &plcp) != 1) + return -EINVAL; + if ((plcp < IWL_MAX_PLCP_ERR_THRESHOLD_MIN) || + (plcp > IWL_MAX_PLCP_ERR_THRESHOLD_MAX)) + priv->plcp_delta_threshold = + IWL_MAX_PLCP_ERR_THRESHOLD_DISABLE; + else + priv->plcp_delta_threshold = plcp; + return count; +} + +static ssize_t iwl_dbgfs_rf_reset_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct iwl_priv *priv = file->private_data; + int pos = 0; + char buf[300]; + const size_t bufsz = sizeof(buf); + struct iwl_rf_reset *rf_reset = &priv->rf_reset; + + pos += scnprintf(buf + pos, bufsz - pos, + "RF reset statistics\n"); + pos += scnprintf(buf + pos, bufsz - pos, + "\tnumber of reset request: %d\n", + rf_reset->reset_request_count); + pos += scnprintf(buf + pos, bufsz - pos, + "\tnumber of reset request success: %d\n", + rf_reset->reset_success_count); + pos += scnprintf(buf + pos, bufsz - pos, + "\tnumber of reset request reject: %d\n", + rf_reset->reset_reject_count); + + return simple_read_from_buffer(user_buf, count, ppos, buf, pos); +} + +static ssize_t iwl_dbgfs_rf_reset_write(struct file *file, + const char __user *user_buf, + size_t count, loff_t *ppos) { + + struct iwl_priv *priv = file->private_data; + int ret; + + ret = iwl_force_rf_reset(priv, true); + return ret ? 
ret : count; +} + +static ssize_t iwl_dbgfs_txfifo_flush_write(struct file *file, + const char __user *user_buf, + size_t count, loff_t *ppos) { + + struct iwl_priv *priv = file->private_data; + char buf[8]; + int buf_size; + int flush; + + memset(buf, 0, sizeof(buf)); + buf_size = min(count, sizeof(buf) - 1); + if (copy_from_user(buf, user_buf, buf_size)) + return -EFAULT; + if (sscanf(buf, "%d", &flush) != 1) + return -EINVAL; + + if (iwl_is_rfkill(priv)) + return -EFAULT; + + iwlagn_dev_txfifo_flush(priv, IWL_DROP_ALL); + + return count; +} + +static ssize_t iwl_dbgfs_bt_traffic_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) { + + struct iwl_priv *priv = (struct iwl_priv *)file->private_data; + int pos = 0; + char buf[200]; + const size_t bufsz = sizeof(buf); + + if (!priv->bt_enable_flag) { + pos += scnprintf(buf + pos, bufsz - pos, "BT coex disabled\n"); + return simple_read_from_buffer(user_buf, count, ppos, buf, pos); + } + pos += scnprintf(buf + pos, bufsz - pos, "BT enable flag: 0x%x\n", + priv->bt_enable_flag); + pos += scnprintf(buf + pos, bufsz - pos, "BT in %s mode\n", + priv->bt_full_concurrent ? "full concurrency" : "3-wire"); + pos += scnprintf(buf + pos, bufsz - pos, "BT status: %s, " + "last traffic notif: %d\n", + priv->bt_status ? "On" : "Off", priv->last_bt_traffic_load); + pos += scnprintf(buf + pos, bufsz - pos, "ch_announcement: %d, " + "kill_ack_mask: %x, kill_cts_mask: %x\n", + priv->bt_ch_announce, priv->kill_ack_mask, + priv->kill_cts_mask); + + pos += scnprintf(buf + pos, bufsz - pos, "bluetooth traffic load: "); + switch (priv->bt_traffic_load) { + case IWL_BT_COEX_TRAFFIC_LOAD_CONTINUOUS: + pos += scnprintf(buf + pos, bufsz - pos, "Continuous\n"); + break; + case IWL_BT_COEX_TRAFFIC_LOAD_HIGH: + pos += scnprintf(buf + pos, bufsz - pos, "High\n"); + break; + case IWL_BT_COEX_TRAFFIC_LOAD_LOW: + pos += scnprintf(buf + pos, bufsz - pos, "Low\n"); + break; + case IWL_BT_COEX_TRAFFIC_LOAD_NONE: + default: + pos += scnprintf(buf + pos, bufsz - pos, "None\n"); + break; + } + + return simple_read_from_buffer(user_buf, count, ppos, buf, pos); +} + +static ssize_t iwl_dbgfs_protection_mode_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct iwl_priv *priv = (struct iwl_priv *)file->private_data; + + int pos = 0; + char buf[40]; + const size_t bufsz = sizeof(buf); + + if (priv->cfg->ht_params) + pos += scnprintf(buf + pos, bufsz - pos, + "use %s for aggregation\n", + (priv->hw_params.use_rts_for_aggregation) ? 
+ "rts/cts" : "cts-to-self"); + else + pos += scnprintf(buf + pos, bufsz - pos, "N/A"); + + return simple_read_from_buffer(user_buf, count, ppos, buf, pos); +} + +static ssize_t iwl_dbgfs_protection_mode_write(struct file *file, + const char __user *user_buf, + size_t count, loff_t *ppos) { + + struct iwl_priv *priv = file->private_data; + char buf[8]; + int buf_size; + int rts; + + if (!priv->cfg->ht_params) + return -EINVAL; + + memset(buf, 0, sizeof(buf)); + buf_size = min(count, sizeof(buf) - 1); + if (copy_from_user(buf, user_buf, buf_size)) + return -EFAULT; + if (sscanf(buf, "%d", &rts) != 1) + return -EINVAL; + if (rts) + priv->hw_params.use_rts_for_aggregation = true; + else + priv->hw_params.use_rts_for_aggregation = false; + return count; +} + +static int iwl_cmd_echo_test(struct iwl_priv *priv) +{ + int ret; + struct iwl_host_cmd cmd = { + .id = REPLY_ECHO, + .len = { 0 }, + .flags = CMD_SYNC, + }; + + ret = iwl_dvm_send_cmd(priv, &cmd); + if (ret) + IWL_ERR(priv, "echo testing fail: 0X%x\n", ret); + else + IWL_DEBUG_INFO(priv, "echo testing pass\n"); + return ret; +} + +static ssize_t iwl_dbgfs_echo_test_write(struct file *file, + const char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct iwl_priv *priv = file->private_data; + char buf[8]; + int buf_size; + + memset(buf, 0, sizeof(buf)); + buf_size = min(count, sizeof(buf) - 1); + if (copy_from_user(buf, user_buf, buf_size)) + return -EFAULT; + + iwl_cmd_echo_test(priv); + return count; +} + ++#ifdef CONFIG_IWLWIFI_DEBUG +static ssize_t iwl_dbgfs_log_event_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct iwl_priv *priv = file->private_data; + char *buf; + int pos = 0; + ssize_t ret = -ENOMEM; + + ret = pos = iwl_dump_nic_event_log(priv, true, &buf, true); + if (buf) { + ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); + kfree(buf); + } + return ret; +} + +static ssize_t iwl_dbgfs_log_event_write(struct file *file, + const char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct iwl_priv *priv = file->private_data; + u32 event_log_flag; + char buf[8]; + int buf_size; + + memset(buf, 0, sizeof(buf)); + buf_size = min(count, sizeof(buf) - 1); + if (copy_from_user(buf, user_buf, buf_size)) + return -EFAULT; + if (sscanf(buf, "%d", &event_log_flag) != 1) + return -EFAULT; + if (event_log_flag == 1) + iwl_dump_nic_event_log(priv, true, NULL, false); + + return count; +} ++#endif + +static ssize_t iwl_dbgfs_calib_disabled_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct iwl_priv *priv = file->private_data; + char buf[120]; + int pos = 0; + const size_t bufsz = sizeof(buf); + + pos += scnprintf(buf + pos, bufsz - pos, + "Sensitivity calibrations %s\n", + (priv->calib_disabled & + IWL_SENSITIVITY_CALIB_DISABLED) ? + "DISABLED" : "ENABLED"); + pos += scnprintf(buf + pos, bufsz - pos, + "Chain noise calibrations %s\n", + (priv->calib_disabled & + IWL_CHAIN_NOISE_CALIB_DISABLED) ? + "DISABLED" : "ENABLED"); + pos += scnprintf(buf + pos, bufsz - pos, + "Tx power calibrations %s\n", + (priv->calib_disabled & + IWL_TX_POWER_CALIB_DISABLED) ? 
+ "DISABLED" : "ENABLED"); + + return simple_read_from_buffer(user_buf, count, ppos, buf, pos); +} + +static ssize_t iwl_dbgfs_calib_disabled_write(struct file *file, + const char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct iwl_priv *priv = file->private_data; + char buf[8]; + u32 calib_disabled; + int buf_size; + + memset(buf, 0, sizeof(buf)); + buf_size = min(count, sizeof(buf) - 1); + if (copy_from_user(buf, user_buf, buf_size)) + return -EFAULT; + if (sscanf(buf, "%x", &calib_disabled) != 1) + return -EFAULT; + + priv->calib_disabled = calib_disabled; + + return count; +} + +DEBUGFS_READ_FILE_OPS(ucode_rx_stats); +DEBUGFS_READ_FILE_OPS(ucode_tx_stats); +DEBUGFS_READ_FILE_OPS(ucode_general_stats); +DEBUGFS_READ_FILE_OPS(sensitivity); +DEBUGFS_READ_FILE_OPS(chain_noise); +DEBUGFS_READ_FILE_OPS(power_save_status); +DEBUGFS_WRITE_FILE_OPS(clear_ucode_statistics); +DEBUGFS_READ_WRITE_FILE_OPS(ucode_tracing); +DEBUGFS_READ_WRITE_FILE_OPS(missed_beacon); +DEBUGFS_READ_WRITE_FILE_OPS(plcp_delta); +DEBUGFS_READ_WRITE_FILE_OPS(rf_reset); +DEBUGFS_READ_FILE_OPS(rxon_flags); +DEBUGFS_READ_FILE_OPS(rxon_filter_flags); +DEBUGFS_WRITE_FILE_OPS(txfifo_flush); +DEBUGFS_READ_FILE_OPS(ucode_bt_stats); +DEBUGFS_READ_FILE_OPS(bt_traffic); +DEBUGFS_READ_WRITE_FILE_OPS(protection_mode); +DEBUGFS_READ_FILE_OPS(reply_tx_error); +DEBUGFS_WRITE_FILE_OPS(echo_test); ++#ifdef CONFIG_IWLWIFI_DEBUG +DEBUGFS_READ_WRITE_FILE_OPS(log_event); ++#endif +DEBUGFS_READ_WRITE_FILE_OPS(calib_disabled); + +/* + * Create the debugfs files and directories + * + */ +int iwl_dbgfs_register(struct iwl_priv *priv, const char *name) +{ + struct dentry *phyd = priv->hw->wiphy->debugfsdir; + struct dentry *dir_drv, *dir_data, *dir_rf, *dir_debug; + + dir_drv = debugfs_create_dir(name, phyd); + if (!dir_drv) + return -ENOMEM; + + priv->debugfs_dir = dir_drv; + + dir_data = debugfs_create_dir("data", dir_drv); + if (!dir_data) + goto err; + dir_rf = debugfs_create_dir("rf", dir_drv); + if (!dir_rf) + goto err; + dir_debug = debugfs_create_dir("debug", dir_drv); + if (!dir_debug) + goto err; + + DEBUGFS_ADD_FILE(nvm, dir_data, S_IRUSR); + DEBUGFS_ADD_FILE(sram, dir_data, S_IWUSR | S_IRUSR); + DEBUGFS_ADD_FILE(wowlan_sram, dir_data, S_IRUSR); + DEBUGFS_ADD_FILE(stations, dir_data, S_IRUSR); + DEBUGFS_ADD_FILE(channels, dir_data, S_IRUSR); + DEBUGFS_ADD_FILE(status, dir_data, S_IRUSR); + DEBUGFS_ADD_FILE(rx_handlers, dir_data, S_IWUSR | S_IRUSR); + DEBUGFS_ADD_FILE(qos, dir_data, S_IRUSR); + DEBUGFS_ADD_FILE(sleep_level_override, dir_data, S_IWUSR | S_IRUSR); + DEBUGFS_ADD_FILE(current_sleep_command, dir_data, S_IRUSR); + DEBUGFS_ADD_FILE(thermal_throttling, dir_data, S_IRUSR); + DEBUGFS_ADD_FILE(disable_ht40, dir_data, S_IWUSR | S_IRUSR); + DEBUGFS_ADD_FILE(temperature, dir_data, S_IRUSR); + + DEBUGFS_ADD_FILE(power_save_status, dir_debug, S_IRUSR); + DEBUGFS_ADD_FILE(clear_ucode_statistics, dir_debug, S_IWUSR); + DEBUGFS_ADD_FILE(missed_beacon, dir_debug, S_IWUSR); + DEBUGFS_ADD_FILE(plcp_delta, dir_debug, S_IWUSR | S_IRUSR); + DEBUGFS_ADD_FILE(rf_reset, dir_debug, S_IWUSR | S_IRUSR); + DEBUGFS_ADD_FILE(ucode_rx_stats, dir_debug, S_IRUSR); + DEBUGFS_ADD_FILE(ucode_tx_stats, dir_debug, S_IRUSR); + DEBUGFS_ADD_FILE(ucode_general_stats, dir_debug, S_IRUSR); + DEBUGFS_ADD_FILE(txfifo_flush, dir_debug, S_IWUSR); + DEBUGFS_ADD_FILE(protection_mode, dir_debug, S_IWUSR | S_IRUSR); + DEBUGFS_ADD_FILE(sensitivity, dir_debug, S_IRUSR); + DEBUGFS_ADD_FILE(chain_noise, dir_debug, S_IRUSR); + DEBUGFS_ADD_FILE(ucode_tracing, dir_debug, 
S_IWUSR | S_IRUSR); + DEBUGFS_ADD_FILE(ucode_bt_stats, dir_debug, S_IRUSR); + DEBUGFS_ADD_FILE(reply_tx_error, dir_debug, S_IRUSR); + DEBUGFS_ADD_FILE(rxon_flags, dir_debug, S_IWUSR); + DEBUGFS_ADD_FILE(rxon_filter_flags, dir_debug, S_IWUSR); + DEBUGFS_ADD_FILE(echo_test, dir_debug, S_IWUSR); ++#ifdef CONFIG_IWLWIFI_DEBUG + DEBUGFS_ADD_FILE(log_event, dir_debug, S_IWUSR | S_IRUSR); ++#endif + + if (iwl_advanced_bt_coexist(priv)) + DEBUGFS_ADD_FILE(bt_traffic, dir_debug, S_IRUSR); + + /* Calibrations disabled/enabled status*/ + DEBUGFS_ADD_FILE(calib_disabled, dir_rf, S_IWUSR | S_IRUSR); + + if (iwl_trans_dbgfs_register(priv->trans, dir_debug)) + goto err; + return 0; + +err: + IWL_ERR(priv, "Can't create the debugfs directory\n"); + iwl_dbgfs_unregister(priv); + return -ENOMEM; +} + +/** + * Remove the debugfs files and directories + * + */ +void iwl_dbgfs_unregister(struct iwl_priv *priv) +{ + if (!priv->debugfs_dir) + return; + + debugfs_remove_recursive(priv->debugfs_dir); + priv->debugfs_dir = NULL; +} diff --combined drivers/net/wireless/mwifiex/cfg80211.c index 510397b,ce61b6f..80e9b2a --- a/drivers/net/wireless/mwifiex/cfg80211.c +++ b/drivers/net/wireless/mwifiex/cfg80211.c @@@ -1484,7 -1484,7 +1484,7 @@@ struct net_device *mwifiex_add_virtual_ struct wireless_dev *wdev;
if (!adapter) - return NULL; + return ERR_PTR(-EFAULT);
switch (type) { case NL80211_IFTYPE_UNSPECIFIED: @@@ -1494,12 -1494,12 +1494,12 @@@ if (priv->bss_mode) { wiphy_err(wiphy, "cannot create multiple sta/adhoc ifaces\n"); - return NULL; + return ERR_PTR(-EINVAL); }
wdev = kzalloc(sizeof(struct wireless_dev), GFP_KERNEL); if (!wdev) - return NULL; + return ERR_PTR(-ENOMEM);
wdev->wiphy = wiphy; priv->wdev = wdev; @@@ -1522,12 -1522,12 +1522,12 @@@
if (priv->bss_mode) { wiphy_err(wiphy, "Can't create multiple AP interfaces"); - return NULL; + return ERR_PTR(-EINVAL); }
wdev = kzalloc(sizeof(struct wireless_dev), GFP_KERNEL); if (!wdev) - return NULL; + return ERR_PTR(-ENOMEM);
priv->wdev = wdev; wdev->wiphy = wiphy; @@@ -1544,14 -1544,15 +1544,15 @@@ break; default: wiphy_err(wiphy, "type not supported\n"); - return NULL; + return ERR_PTR(-EINVAL); }
dev = alloc_netdev_mq(sizeof(struct mwifiex_private *), name, ether_setup, 1); if (!dev) { wiphy_err(wiphy, "no memory available for netdevice\n"); - goto error; + priv->bss_mode = NL80211_IFTYPE_UNSPECIFIED; + return ERR_PTR(-ENOMEM); }
mwifiex_init_priv_params(priv, dev); @@@ -1582,7 -1583,9 +1583,9 @@@ /* Register network device */ if (register_netdevice(dev)) { wiphy_err(wiphy, "cannot register virtual network device\n"); - goto error; + free_netdev(dev); + priv->bss_mode = NL80211_IFTYPE_UNSPECIFIED; + return ERR_PTR(-EFAULT); }
sema_init(&priv->async_sem, 1); @@@ -1594,12 -1597,6 +1597,6 @@@ mwifiex_dev_debugfs_init(priv); #endif return dev; - error: - if (dev && (dev->reg_state == NETREG_UNREGISTERED)) - free_netdev(dev); - priv->bss_mode = NL80211_IFTYPE_UNSPECIFIED; - - return NULL; } EXPORT_SYMBOL_GPL(mwifiex_add_virtual_intf);
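For context on the error-handling change in mwifiex_add_virtual_intf() above: instead of returning bare NULL, failures are now encoded with ERR_PTR() so the cfg80211 caller can recover the errno via IS_ERR()/PTR_ERR(). A minimal, self-contained sketch of that convention (the demo_* names are hypothetical and not part of the patch):

#include <linux/err.h>
#include <linux/errno.h>
#include <linux/slab.h>

struct demo_obj {
	int id;
};

/* Hypothetical allocator: on failure the errno is encoded in the
 * returned pointer instead of returning NULL.
 */
static struct demo_obj *demo_create(int id)
{
	struct demo_obj *obj;

	if (id < 0)
		return ERR_PTR(-EINVAL);	/* bad request */

	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
	if (!obj)
		return ERR_PTR(-ENOMEM);	/* allocation failed */

	obj->id = id;
	return obj;
}

/* Hypothetical caller: decode the failure the same way cfg80211 decodes
 * an add_virtual_intf() return value.
 */
static int demo_caller(void)
{
	struct demo_obj *obj = demo_create(42);

	if (IS_ERR(obj))
		return PTR_ERR(obj);	/* -EINVAL, -ENOMEM, ... */

	kfree(obj);
	return 0;
}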
@@@ -1716,7 -1713,7 +1713,7 @@@ int mwifiex_register_cfg80211(struct mw wdev_priv = wiphy_priv(wiphy); *(unsigned long *)wdev_priv = (unsigned long)adapter;
- set_wiphy_dev(wiphy, (struct device *)priv->adapter->dev); + set_wiphy_dev(wiphy, priv->adapter->dev);
ret = wiphy_register(wiphy); if (ret < 0) { diff --combined net/batman-adv/translation-table.c index 5180d50,2ab83d7..3806d9b --- a/net/batman-adv/translation-table.c +++ b/net/batman-adv/translation-table.c @@@ -1,4 -1,5 +1,4 @@@ -/* - * Copyright (C) 2007-2012 B.A.T.M.A.N. contributors: +/* Copyright (C) 2007-2012 B.A.T.M.A.N. contributors: * * Marek Lindner, Simon Wunderlich, Antonio Quartulli * @@@ -15,6 -16,7 +15,6 @@@ * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA * 02110-1301, USA - * */
#include "main.h" @@@ -46,7 -48,7 +46,7 @@@ static int compare_tt(const struct hlis static void tt_start_timer(struct bat_priv *bat_priv) { INIT_DELAYED_WORK(&bat_priv->tt_work, tt_purge); - queue_delayed_work(bat_event_workqueue, &bat_priv->tt_work, + queue_delayed_work(batadv_event_workqueue, &bat_priv->tt_work, msecs_to_jiffies(5000)); }
@@@ -139,13 -141,14 +139,14 @@@ static void tt_orig_list_entry_free_rcu struct tt_orig_list_entry *orig_entry;
orig_entry = container_of(rcu, struct tt_orig_list_entry, rcu); - atomic_dec(&orig_entry->orig_node->tt_size); - orig_node_free_ref(orig_entry->orig_node); + batadv_orig_node_free_ref(orig_entry->orig_node); kfree(orig_entry); }
static void tt_orig_list_entry_free_ref(struct tt_orig_list_entry *orig_entry) { + /* to avoid race conditions, immediately decrease the tt counter */ + atomic_dec(&orig_entry->orig_node->tt_size); call_rcu(&orig_entry->rcu, tt_orig_list_entry_free_rcu); }
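The two hunks above move the tt_size decrement out of the RCU callback and into tt_orig_list_entry_free_ref(), so the per-originator counter drops immediately instead of one grace period later. For reference, a generic sketch of the call_rcu() deferred-free pattern involved (demo_* names are hypothetical):

#include <linux/kernel.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct demo_entry {
	int data;
	struct rcu_head rcu;
};

/* RCU callback: runs after a grace period, when no reader can still hold
 * a pointer to the entry, so it is safe to kfree() it.
 */
static void demo_entry_free_rcu(struct rcu_head *rcu)
{
	struct demo_entry *entry = container_of(rcu, struct demo_entry, rcu);

	kfree(entry);
}

/* Drop an entry: bookkeeping (like the tt_size decrement above) happens
 * immediately, while the memory itself is reclaimed only via call_rcu().
 */
static void demo_entry_free(struct demo_entry *entry)
{
	/* update counters/lists here, before the grace period */
	call_rcu(&entry->rcu, demo_entry_free_rcu);
}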
@@@ -171,7 -174,7 +172,7 @@@ static void tt_local_event(struct bat_p atomic_set(&bat_priv->tt_ogm_append_cnt, 0); }
-int tt_len(int changes_num) +int batadv_tt_len(int changes_num) { return changes_num * sizeof(struct tt_change); } @@@ -179,18 -182,18 +180,18 @@@ static int tt_local_init(struct bat_priv *bat_priv) { if (bat_priv->tt_local_hash) - return 1; + return 0;
- bat_priv->tt_local_hash = hash_new(1024); + bat_priv->tt_local_hash = batadv_hash_new(1024);
if (!bat_priv->tt_local_hash) - return 0; + return -ENOMEM;
- return 1; + return 0; }
-void tt_local_add(struct net_device *soft_iface, const uint8_t *addr, - int ifindex) +void batadv_tt_local_add(struct net_device *soft_iface, const uint8_t *addr, + int ifindex) { struct bat_priv *bat_priv = netdev_priv(soft_iface); struct tt_local_entry *tt_local_entry = NULL; @@@ -219,7 -222,7 +220,7 @@@
memcpy(tt_local_entry->common.addr, addr, ETH_ALEN); tt_local_entry->common.flags = NO_FLAGS; - if (is_wifi_iface(ifindex)) + if (batadv_is_wifi_iface(ifindex)) tt_local_entry->common.flags |= TT_CLIENT_WIFI; atomic_set(&tt_local_entry->common.refcount, 2); tt_local_entry->last_seen = jiffies; @@@ -230,8 -233,7 +231,8 @@@
/* The local entry has to be marked as NEW to avoid to send it in * a full table response going out before the next ttvn increment - * (consistency check) */ + * (consistency check) + */ tt_local_entry->common.flags |= TT_CLIENT_NEW;
hash_added = hash_add(bat_priv->tt_local_hash, compare_tt, choose_orig, @@@ -274,64 -276,14 +275,64 @@@ out tt_global_entry_free_ref(tt_global_entry); }
-int tt_changes_fill_buffer(struct bat_priv *bat_priv, - unsigned char *buff, int buff_len) +static void tt_realloc_packet_buff(unsigned char **packet_buff, + int *packet_buff_len, int min_packet_len, + int new_packet_len) +{ + unsigned char *new_buff; + + new_buff = kmalloc(new_packet_len, GFP_ATOMIC); + + /* keep old buffer if kmalloc should fail */ + if (new_buff) { + memcpy(new_buff, *packet_buff, min_packet_len); + kfree(*packet_buff); + *packet_buff = new_buff; + *packet_buff_len = new_packet_len; + } +} + +static void tt_prepare_packet_buff(struct bat_priv *bat_priv, + unsigned char **packet_buff, + int *packet_buff_len, int min_packet_len) +{ + struct hard_iface *primary_if; + int req_len; + + primary_if = primary_if_get_selected(bat_priv); + + req_len = min_packet_len; + req_len += batadv_tt_len(atomic_read(&bat_priv->tt_local_changes)); + + /* if we have too many changes for one packet don't send any + * and wait for the tt table request which will be fragmented + */ + if ((!primary_if) || (req_len > primary_if->soft_iface->mtu)) + req_len = min_packet_len; + + tt_realloc_packet_buff(packet_buff, packet_buff_len, + min_packet_len, req_len); + + if (primary_if) + hardif_free_ref(primary_if); +} + +static int tt_changes_fill_buff(struct bat_priv *bat_priv, + unsigned char **packet_buff, + int *packet_buff_len, int min_packet_len) { - int count = 0, tot_changes = 0; struct tt_change_node *entry, *safe; + int count = 0, tot_changes = 0, new_len; + unsigned char *tt_buff; + + tt_prepare_packet_buff(bat_priv, packet_buff, + packet_buff_len, min_packet_len); + + new_len = *packet_buff_len - min_packet_len; + tt_buff = *packet_buff + min_packet_len;
- if (buff_len > 0) - tot_changes = buff_len / tt_len(1); + if (new_len > 0) + tot_changes = new_len / batadv_tt_len(1);
spin_lock_bh(&bat_priv->tt_changes_list_lock); atomic_set(&bat_priv->tt_local_changes, 0); @@@ -339,7 -291,7 +340,7 @@@ list_for_each_entry_safe(entry, safe, &bat_priv->tt_changes_list, list) { if (count < tot_changes) { - memcpy(buff + tt_len(count), + memcpy(tt_buff + batadv_tt_len(count), &entry->change, sizeof(struct tt_change)); count++; } @@@ -353,23 -305,25 +354,23 @@@ kfree(bat_priv->tt_buff); bat_priv->tt_buff_len = 0; bat_priv->tt_buff = NULL; - /* We check whether this new OGM has no changes due to size - * problems */ - if (buff_len > 0) { - /** - * if kmalloc() fails we will reply with the full table + /* check whether this new OGM has no changes due to size problems */ + if (new_len > 0) { + /* if kmalloc() fails we will reply with the full table * instead of providing the diff */ - bat_priv->tt_buff = kmalloc(buff_len, GFP_ATOMIC); + bat_priv->tt_buff = kmalloc(new_len, GFP_ATOMIC); if (bat_priv->tt_buff) { - memcpy(bat_priv->tt_buff, buff, buff_len); - bat_priv->tt_buff_len = buff_len; + memcpy(bat_priv->tt_buff, tt_buff, new_len); + bat_priv->tt_buff_len = new_len; } } spin_unlock_bh(&bat_priv->tt_buff_lock);
- return tot_changes; + return count; }
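tt_realloc_packet_buff(), introduced above, follows the usual allocate-then-swap rule: the new buffer is allocated first, the old contents copied across and freed, and if the allocation fails the old buffer is kept untouched. The same pattern in isolation (demo_* name is hypothetical):

#include <linux/slab.h>
#include <linux/string.h>

/* Grow *buf to new_len bytes, preserving the first keep_len bytes.
 * If the allocation fails, *buf and *buf_len are left untouched, so the
 * caller never loses data it already had.
 */
static void demo_realloc_keep_old(unsigned char **buf, int *buf_len,
				  int keep_len, int new_len)
{
	unsigned char *new_buf;

	new_buf = kmalloc(new_len, GFP_ATOMIC);
	if (!new_buf)
		return;		/* keep the old buffer on failure */

	memcpy(new_buf, *buf, keep_len);
	kfree(*buf);
	*buf = new_buf;
	*buf_len = new_len;
}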
-int tt_local_seq_print_text(struct seq_file *seq, void *offset) +int batadv_tt_local_seq_print_text(struct seq_file *seq, void *offset) { struct net_device *net_dev = (struct net_device *)seq->private; struct bat_priv *bat_priv = netdev_priv(net_dev); @@@ -436,8 -390,7 +437,8 @@@ static void tt_local_set_pending(struc
/* The local client has to be marked as "pending to be removed" but has * to be kept in the table in order to send it in a full table - * response issued before the net ttvn increment (consistency check) */ + * response issued before the net ttvn increment (consistency check) + */ tt_local_entry->common.flags |= TT_CLIENT_PENDING;
bat_dbg(DBG_TT, bat_priv, @@@ -445,8 -398,8 +446,8 @@@ tt_local_entry->common.addr, message); }
-void tt_local_remove(struct bat_priv *bat_priv, const uint8_t *addr, - const char *message, bool roaming) +void batadv_tt_local_remove(struct bat_priv *bat_priv, const uint8_t *addr, + const char *message, bool roaming) { struct tt_local_entry *tt_local_entry = NULL;
@@@ -531,7 -484,7 +532,7 @@@ static void tt_local_table_free(struct spin_unlock_bh(list_lock); }
- hash_destroy(hash); + batadv_hash_destroy(hash);
bat_priv->tt_local_hash = NULL; } @@@ -539,14 -492,14 +540,14 @@@ static int tt_global_init(struct bat_priv *bat_priv) { if (bat_priv->tt_global_hash) - return 1; + return 0;
- bat_priv->tt_global_hash = hash_new(1024); + bat_priv->tt_global_hash = batadv_hash_new(1024);
if (!bat_priv->tt_global_hash) - return 0; + return -ENOMEM;
- return 1; + return 0; }
static void tt_changes_list_free(struct bat_priv *bat_priv) @@@ -611,9 -564,9 +612,9 @@@ static void tt_global_add_orig_entry(st }
/* caller must hold orig_node refcount */ -int tt_global_add(struct bat_priv *bat_priv, struct orig_node *orig_node, - const unsigned char *tt_addr, uint8_t ttvn, bool roaming, - bool wifi) +int batadv_tt_global_add(struct bat_priv *bat_priv, struct orig_node *orig_node, + const unsigned char *tt_addr, uint8_t ttvn, + bool roaming, bool wifi) { struct tt_global_entry *tt_global_entry = NULL; int ret = 0; @@@ -677,8 -630,8 +678,8 @@@
out_remove: /* remove address from local hash if present */ - tt_local_remove(bat_priv, tt_global_entry->common.addr, - "global tt received", roaming); + batadv_tt_local_remove(bat_priv, tt_global_entry->common.addr, + "global tt received", roaming); ret = 1; out: if (tt_global_entry) @@@ -714,7 -667,7 +715,7 @@@ static void tt_global_print_entry(struc } }
-int tt_global_seq_print_text(struct seq_file *seq, void *offset) +int batadv_tt_global_seq_print_text(struct seq_file *seq, void *offset) { struct net_device *net_dev = (struct net_device *)seq->private; struct bat_priv *bat_priv = netdev_priv(net_dev); @@@ -898,8 -851,8 +899,8 @@@ static void tt_global_del(struct bat_pr * If there are other originators left, we directly delete * the originator. * 2) the client roamed to us => we can directly delete - * the global entry, since it is useless now. */ - + * the global entry, since it is useless now. + */ tt_local_entry = tt_local_hash_find(bat_priv, tt_global_entry->common.addr); if (tt_local_entry) { @@@ -919,8 -872,8 +920,8 @@@ out tt_local_entry_free_ref(tt_local_entry); }
-void tt_global_del_orig(struct bat_priv *bat_priv, - struct orig_node *orig_node, const char *message) +void batadv_tt_global_del_orig(struct bat_priv *bat_priv, + struct orig_node *orig_node, const char *message) { struct tt_global_entry *tt_global_entry; struct tt_common_entry *tt_common_entry; @@@ -958,7 -911,6 +959,6 @@@ } spin_unlock_bh(list_lock); } - atomic_set(&orig_node->tt_size, 0); orig_node->tt_initialised = false; }
@@@ -1031,7 -983,7 +1031,7 @@@ static void tt_global_table_free(struc spin_unlock_bh(list_lock); }
- hash_destroy(hash); + batadv_hash_destroy(hash);
bat_priv->tt_global_hash = NULL; } @@@ -1048,9 -1000,8 +1048,9 @@@ static bool _is_ap_isolated(struct tt_l return ret; }
-struct orig_node *transtable_search(struct bat_priv *bat_priv, - const uint8_t *src, const uint8_t *addr) +struct orig_node *batadv_transtable_search(struct bat_priv *bat_priv, + const uint8_t *src, + const uint8_t *addr) { struct tt_local_entry *tt_local_entry = NULL; struct tt_global_entry *tt_global_entry = NULL; @@@ -1072,8 -1023,7 +1072,8 @@@ goto out;
/* check whether the clients should not communicate due to AP - * isolation */ + * isolation + */ if (tt_local_entry && _is_ap_isolated(tt_local_entry, tt_global_entry)) goto out;
@@@ -1082,7 -1032,7 +1082,7 @@@ rcu_read_lock(); head = &tt_global_entry->orig_list; hlist_for_each_entry_rcu(orig_entry, node, head, list) { - router = orig_node_get_router(orig_entry->orig_node); + router = batadv_orig_node_get_router(orig_entry->orig_node); if (!router) continue;
@@@ -1090,7 -1040,7 +1090,7 @@@ orig_node = orig_entry->orig_node; best_tq = router->tq_avg; } - neigh_node_free_ref(router); + batadv_neigh_node_free_ref(router); } /* found anything? */ if (orig_node && !atomic_inc_not_zero(&orig_node->refcount)) @@@ -1155,7 -1105,7 +1155,7 @@@ static uint16_t tt_global_crc(struct ba }
/* Calculates the checksum of the local table */ -uint16_t tt_local_crc(struct bat_priv *bat_priv) +static uint16_t batadv_tt_local_crc(struct bat_priv *bat_priv) { uint16_t total = 0, total_one; struct hashtable_t *hash = bat_priv->tt_local_hash; @@@ -1172,8 -1122,7 +1172,8 @@@ hlist_for_each_entry_rcu(tt_common_entry, node, head, hash_entry) { /* not yet committed clients have not to be taken into - * account while computing the CRC */ + * account while computing the CRC + */ if (tt_common_entry->flags & TT_CLIENT_NEW) continue; total_one = 0; @@@ -1207,11 -1156,10 +1207,11 @@@ static void tt_save_orig_buffer(struct const unsigned char *tt_buff, uint8_t tt_num_changes) { - uint16_t tt_buff_len = tt_len(tt_num_changes); + uint16_t tt_buff_len = batadv_tt_len(tt_num_changes);
/* Replace the old buffer only if I received something in the - * last OGM (the OGM could carry no changes) */ + * last OGM (the OGM could carry no changes) + */ spin_lock_bh(&orig_node->tt_buff_lock); if (tt_buff_len > 0) { kfree(orig_node->tt_buff); @@@ -1240,8 -1188,7 +1240,8 @@@ static void tt_req_purge(struct bat_pri }
/* returns the pointer to the new tt_req_node struct if no request - * has already been issued for this orig_node, NULL otherwise */ + * has already been issued for this orig_node, NULL otherwise + */ static struct tt_req_node *new_tt_req_node(struct bat_priv *bat_priv, struct orig_node *orig_node) { @@@ -1351,8 -1298,7 +1351,8 @@@ static struct sk_buff *tt_response_fill rcu_read_unlock();
/* store in the message the number of entries we have successfully - * copied */ + * copied + */ tt_response->tt_data = htons(tt_count);
out: @@@ -1375,8 -1321,7 +1375,8 @@@ static int send_tt_request(struct bat_p goto out;
/* The new tt_req will be issued only if I'm not waiting for a - * reply from the same orig_node yet */ + * reply from the same orig_node yet + */ tt_req_node = new_tt_req_node(bat_priv, dst_orig_node); if (!tt_req_node) goto out; @@@ -1402,7 -1347,7 +1402,7 @@@ if (full_table) tt_request->flags |= TT_FULL_TABLE;
- neigh_node = orig_node_get_router(dst_orig_node); + neigh_node = batadv_orig_node_get_router(dst_orig_node); if (!neigh_node) goto out;
@@@ -1411,14 -1356,12 +1411,14 @@@ dst_orig_node->orig, neigh_node->addr, (full_table ? 'F' : '.'));
- send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr); + batadv_inc_counter(bat_priv, BAT_CNT_TT_REQUEST_TX); + + batadv_send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr); ret = 0;
out: if (neigh_node) - neigh_node_free_ref(neigh_node); + batadv_neigh_node_free_ref(neigh_node); if (primary_if) hardif_free_ref(primary_if); if (ret) @@@ -1460,7 -1403,7 +1460,7 @@@ static bool send_other_tt_response(stru if (!res_dst_orig_node) goto out;
- neigh_node = orig_node_get_router(res_dst_orig_node); + neigh_node = batadv_orig_node_get_router(res_dst_orig_node); if (!neigh_node) goto out;
@@@ -1473,7 -1416,7 +1473,7 @@@
/* I don't have the requested data */ if (orig_ttvn != req_ttvn || - tt_request->tt_data != req_dst_orig_node->tt_crc) + tt_request->tt_data != htons(req_dst_orig_node->tt_crc)) goto out;
/* If the full table has been explicitly requested */ @@@ -1484,8 -1427,7 +1484,8 @@@ full_table = false;
/* In this version, fragmentation is not implemented, then - * I'll send only one packet with as much TT entries as I can */ + * I'll send only one packet with as much TT entries as I can + */ if (!full_table) { spin_lock_bh(&req_dst_orig_node->tt_buff_lock); tt_len = req_dst_orig_node->tt_buff_len; @@@ -1538,9 -1480,7 +1538,9 @@@ res_dst_orig_node->orig, neigh_node->addr, req_dst_orig_node->orig, req_ttvn);
- send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr); + batadv_inc_counter(bat_priv, BAT_CNT_TT_RESPONSE_TX); + + batadv_send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr); ret = true; goto out;
@@@ -1549,11 -1489,11 +1549,11 @@@ unlock
out: if (res_dst_orig_node) - orig_node_free_ref(res_dst_orig_node); + batadv_orig_node_free_ref(res_dst_orig_node); if (req_dst_orig_node) - orig_node_free_ref(req_dst_orig_node); + batadv_orig_node_free_ref(req_dst_orig_node); if (neigh_node) - neigh_node_free_ref(neigh_node); + batadv_neigh_node_free_ref(neigh_node); if (primary_if) hardif_free_ref(primary_if); if (!ret) @@@ -1588,7 -1528,7 +1588,7 @@@ static bool send_my_tt_response(struct if (!orig_node) goto out;
- neigh_node = orig_node_get_router(orig_node); + neigh_node = batadv_orig_node_get_router(orig_node); if (!neigh_node) goto out;
@@@ -1597,8 -1537,7 +1597,8 @@@ goto out;
/* If the full table has been explicitly requested or the gap - * is too big send the whole local translation table */ + * is too big send the whole local translation table + */ if (tt_request->flags & TT_FULL_TABLE || my_ttvn != req_ttvn || !bat_priv->tt_buff) full_table = true; @@@ -1606,8 -1545,7 +1606,8 @@@ full_table = false;
/* In this version, fragmentation is not implemented, then - * I'll send only one packet with as much TT entries as I can */ + * I'll send only one packet with as much TT entries as I can + */ if (!full_table) { spin_lock_bh(&bat_priv->tt_buff_lock); tt_len = bat_priv->tt_buff_len; @@@ -1658,9 -1596,7 +1658,9 @@@ orig_node->orig, neigh_node->addr, (tt_response->flags & TT_FULL_TABLE ? 'F' : '.'));
- send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr); + batadv_inc_counter(bat_priv, BAT_CNT_TT_RESPONSE_TX); + + batadv_send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr); ret = true; goto out;
@@@ -1668,9 -1604,9 +1668,9 @@@ unlock spin_unlock_bh(&bat_priv->tt_buff_lock); out: if (orig_node) - orig_node_free_ref(orig_node); + batadv_orig_node_free_ref(orig_node); if (neigh_node) - neigh_node_free_ref(neigh_node); + batadv_neigh_node_free_ref(neigh_node); if (primary_if) hardif_free_ref(primary_if); if (!ret) @@@ -1679,12 -1615,12 +1679,12 @@@ return true; }
-bool send_tt_response(struct bat_priv *bat_priv, - struct tt_query_packet *tt_request) +bool batadv_send_tt_response(struct bat_priv *bat_priv, + struct tt_query_packet *tt_request) { - if (is_my_mac(tt_request->dst)) { + if (batadv_is_my_mac(tt_request->dst)) { /* don't answer backbone gws! */ - if (bla_is_backbone_gw_orig(bat_priv, tt_request->src)) + if (batadv_bla_is_backbone_gw_orig(bat_priv, tt_request->src)) return true;
return send_my_tt_response(bat_priv, tt_request); @@@ -1699,19 -1635,18 +1699,19 @@@ static void _tt_update_changes(struct b uint16_t tt_num_changes, uint8_t ttvn) { int i; + int is_wifi;
for (i = 0; i < tt_num_changes; i++) { - if ((tt_change + i)->flags & TT_CLIENT_DEL) + if ((tt_change + i)->flags & TT_CLIENT_DEL) { tt_global_del(bat_priv, orig_node, (tt_change + i)->addr, "tt removed by changes", (tt_change + i)->flags & TT_CLIENT_ROAM); - else - if (!tt_global_add(bat_priv, orig_node, - (tt_change + i)->addr, ttvn, false, - (tt_change + i)->flags & - TT_CLIENT_WIFI)) + } else { + is_wifi = (tt_change + i)->flags & TT_CLIENT_WIFI; + if (!batadv_tt_global_add(bat_priv, orig_node, + (tt_change + i)->addr, ttvn, + false, is_wifi)) /* In case of problem while storing a * global_entry, we stop the updating * procedure without committing the @@@ -1719,7 -1654,6 +1719,7 @@@ * corrupted data on tt_request */ return; + } } orig_node->tt_initialised = true; } @@@ -1734,11 -1668,11 +1734,11 @@@ static void tt_fill_gtable(struct bat_p goto out;
/* Purge the old table first.. */ - tt_global_del_orig(bat_priv, orig_node, "Received full table"); + batadv_tt_global_del_orig(bat_priv, orig_node, "Received full table");
_tt_update_changes(bat_priv, orig_node, (struct tt_change *)(tt_response + 1), - tt_response->tt_data, tt_response->ttvn); + ntohs(tt_response->tt_data), tt_response->ttvn);
spin_lock_bh(&orig_node->tt_buff_lock); kfree(orig_node->tt_buff); @@@ -1750,7 -1684,7 +1750,7 @@@
out: if (orig_node) - orig_node_free_ref(orig_node); + batadv_orig_node_free_ref(orig_node); }
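Several hunks in this file (the tt_crc comparison, tt_fill_gtable() and batadv_handle_tt_response()) add byte-order conversions that were missing for the 16-bit fields carried in TT query packets. The rule being enforced, as a minimal sketch (demo_* names are hypothetical):

#include <linux/types.h>
#include <asm/byteorder.h>

/* Hypothetical packet layout, only to illustrate the htons()/ntohs()
 * discipline applied to 16-bit on-wire fields in the hunks above.
 */
struct demo_pkt {
	__be16 tt_data;		/* big-endian on the wire */
};

static void demo_fill(struct demo_pkt *pkt, u16 host_count)
{
	/* host order -> network (big-endian) order before sending */
	pkt->tt_data = htons(host_count);
}

static u16 demo_parse(const struct demo_pkt *pkt)
{
	/* network order -> host order before comparing or printing */
	return ntohs(pkt->tt_data);
}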
static void tt_update_changes(struct bat_priv *bat_priv, @@@ -1766,7 -1700,7 +1766,7 @@@ atomic_set(&orig_node->last_ttvn, ttvn); }
-bool is_my_client(struct bat_priv *bat_priv, const uint8_t *addr) +bool batadv_is_my_client(struct bat_priv *bat_priv, const uint8_t *addr) { struct tt_local_entry *tt_local_entry = NULL; bool ret = false; @@@ -1775,8 -1709,7 +1775,8 @@@ if (!tt_local_entry) goto out; /* Check if the client has been logically deleted (but is kept for - * consistency purpose) */ + * consistency purpose) + */ if (tt_local_entry->common.flags & TT_CLIENT_PENDING) goto out; ret = true; @@@ -1786,20 -1719,19 +1786,20 @@@ out return ret; }
-void handle_tt_response(struct bat_priv *bat_priv, - struct tt_query_packet *tt_response) +void batadv_handle_tt_response(struct bat_priv *bat_priv, + struct tt_query_packet *tt_response) { struct tt_req_node *node, *safe; struct orig_node *orig_node = NULL;
bat_dbg(DBG_TT, bat_priv, "Received TT_RESPONSE from %pM for ttvn %d t_size: %d [%c]\n", - tt_response->src, tt_response->ttvn, tt_response->tt_data, + tt_response->src, tt_response->ttvn, + ntohs(tt_response->tt_data), (tt_response->flags & TT_FULL_TABLE ? 'F' : '.'));
/* we should have never asked a backbone gw */ - if (bla_is_backbone_gw_orig(bat_priv, tt_response->src)) + if (batadv_bla_is_backbone_gw_orig(bat_priv, tt_response->src)) goto out;
orig_node = orig_hash_find(bat_priv, tt_response->src); @@@ -1809,8 -1741,7 +1809,8 @@@ if (tt_response->flags & TT_FULL_TABLE) tt_fill_gtable(bat_priv, tt_response); else - tt_update_changes(bat_priv, orig_node, tt_response->tt_data, + tt_update_changes(bat_priv, orig_node, + ntohs(tt_response->tt_data), tt_response->ttvn, (struct tt_change *)(tt_response + 1));
@@@ -1827,25 -1758,20 +1827,25 @@@ /* Recalculate the CRC for this orig_node and store it */ orig_node->tt_crc = tt_global_crc(bat_priv, orig_node); /* Roaming phase is over: tables are in sync again. I can - * unset the flag */ + * unset the flag + */ orig_node->tt_poss_change = false; out: if (orig_node) - orig_node_free_ref(orig_node); + batadv_orig_node_free_ref(orig_node); }
-int tt_init(struct bat_priv *bat_priv) +int batadv_tt_init(struct bat_priv *bat_priv) { - if (!tt_local_init(bat_priv)) - return 0; + int ret;
- if (!tt_global_init(bat_priv)) - return 0; + ret = tt_local_init(bat_priv); + if (ret < 0) + return ret; + + ret = tt_global_init(bat_priv); + if (ret < 0) + return ret;
tt_start_timer(bat_priv);
@@@ -1885,8 -1811,7 +1885,8 @@@ static void tt_roam_purge(struct bat_pr * maximum number of possible roaming phases. In this case the ROAMING_ADV * will not be sent. * - * returns true if the ROAMING_ADV can be sent, false otherwise */ + * returns true if the ROAMING_ADV can be sent, false otherwise + */ static bool tt_check_roam_count(struct bat_priv *bat_priv, uint8_t *client) { @@@ -1895,8 -1820,7 +1895,8 @@@
spin_lock_bh(&bat_priv->tt_roam_list_lock); /* The new tt_req will be issued only if I'm not waiting for a - * reply from the same orig_node yet */ + * reply from the same orig_node yet + */ list_for_each_entry(tt_roam_node, &bat_priv->tt_roam_list, list) { if (!compare_eth(tt_roam_node->addr, client)) continue; @@@ -1939,8 -1863,7 +1939,8 @@@ static void send_roam_adv(struct bat_pr struct hard_iface *primary_if;
/* before going on we have to check whether the client has - * already roamed to us too many times */ + * already roamed to us too many times + */ if (!tt_check_roam_count(bat_priv, client)) goto out;
@@@ -1964,7 -1887,7 +1964,7 @@@ memcpy(roam_adv_packet->dst, orig_node->orig, ETH_ALEN); memcpy(roam_adv_packet->client, client, ETH_ALEN);
- neigh_node = orig_node_get_router(orig_node); + neigh_node = batadv_orig_node_get_router(orig_node); if (!neigh_node) goto out;
@@@ -1972,14 -1895,12 +1972,14 @@@ "Sending ROAMING_ADV to %pM (client %pM) via %pM\n", orig_node->orig, client, neigh_node->addr);
- send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr); + batadv_inc_counter(bat_priv, BAT_CNT_TT_ROAM_ADV_TX); + + batadv_send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr); ret = 0;
out: if (neigh_node) - neigh_node_free_ref(neigh_node); + batadv_neigh_node_free_ref(neigh_node); if (ret) kfree_skb(skb); return; @@@ -2000,7 -1921,7 +2000,7 @@@ static void tt_purge(struct work_struc tt_start_timer(bat_priv); }
-void tt_free(struct bat_priv *bat_priv) +void batadv_tt_free(struct bat_priv *bat_priv) { cancel_delayed_work_sync(&bat_priv->tt_work);
@@@ -2014,8 -1935,7 +2014,8 @@@ }
/* This function will enable or disable the specified flags for all the entries - * in the given hash table and returns the number of modified entries */ + * in the given hash table and returns the number of modified entries + */ static uint16_t tt_set_flags(struct hashtable_t *hash, uint16_t flags, bool enable) { @@@ -2091,67 -2011,30 +2091,67 @@@ static void tt_local_purge_pending_clie
}
-void tt_commit_changes(struct bat_priv *bat_priv) +static int tt_commit_changes(struct bat_priv *bat_priv, + unsigned char **packet_buff, int *packet_buff_len, + int packet_min_len) { - uint16_t changed_num = tt_set_flags(bat_priv->tt_local_hash, - TT_CLIENT_NEW, false); - /* all the reset entries have now to be effectively counted as local - * entries */ + uint16_t changed_num = 0; + + if (atomic_read(&bat_priv->tt_local_changes) < 1) + return -ENOENT; + + changed_num = tt_set_flags(bat_priv->tt_local_hash, + TT_CLIENT_NEW, false); + + /* all reset entries have to be counted as local entries */ atomic_add(changed_num, &bat_priv->num_local_tt); tt_local_purge_pending_clients(bat_priv); + bat_priv->tt_crc = batadv_tt_local_crc(bat_priv);
/* Increment the TTVN only once per OGM interval */ atomic_inc(&bat_priv->ttvn); bat_dbg(DBG_TT, bat_priv, "Local changes committed, updating to ttvn %u\n", (uint8_t)atomic_read(&bat_priv->ttvn)); bat_priv->tt_poss_change = false; + + /* reset the sending counter */ + atomic_set(&bat_priv->tt_ogm_append_cnt, TT_OGM_APPEND_MAX); + + return tt_changes_fill_buff(bat_priv, packet_buff, + packet_buff_len, packet_min_len); }
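tt_commit_changes() now re-arms tt_ogm_append_cnt to TT_OGM_APPEND_MAX whenever local changes are committed, and batadv_tt_append_diff() just below keeps attaching the same diff to outgoing OGMs only while that counter can still be decremented. A rough sketch of the counter logic, assuming atomic_add_unless() semantics for the decrement (demo_* names and the constant value are hypothetical):

#include <linux/atomic.h>
#include <linux/types.h>

#define DEMO_APPEND_MAX 3	/* stand-in for TT_OGM_APPEND_MAX */

/* Called when new local TT changes have been committed: the freshly
 * built diff may ride along the next DEMO_APPEND_MAX OGMs.
 */
static void demo_diff_committed(atomic_t *append_cnt)
{
	atomic_set(append_cnt, DEMO_APPEND_MAX);
}

/* Called when an OGM is built but nothing new was committed: the old
 * diff is re-sent only while the counter can still be decremented;
 * atomic_add_unless(cnt, -1, 0) refuses to go below zero and returns 0
 * once the limit has been reached.
 */
static bool demo_may_resend_old_diff(atomic_t *append_cnt)
{
	return atomic_add_unless(append_cnt, -1, 0) != 0;
}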
-bool is_ap_isolated(struct bat_priv *bat_priv, uint8_t *src, uint8_t *dst) +/* when calling this function (hard_iface == primary_if) has to be true */ +int batadv_tt_append_diff(struct bat_priv *bat_priv, + unsigned char **packet_buff, int *packet_buff_len, + int packet_min_len) +{ + int tt_num_changes; + + /* if at least one change happened */ + tt_num_changes = tt_commit_changes(bat_priv, packet_buff, + packet_buff_len, packet_min_len); + + /* if the changes have been sent often enough */ + if ((tt_num_changes < 0) && + (!atomic_dec_not_zero(&bat_priv->tt_ogm_append_cnt))) { + tt_realloc_packet_buff(packet_buff, packet_buff_len, + packet_min_len, packet_min_len); + tt_num_changes = 0; + } + + return tt_num_changes; +} + +bool batadv_is_ap_isolated(struct bat_priv *bat_priv, uint8_t *src, + uint8_t *dst) { struct tt_local_entry *tt_local_entry = NULL; struct tt_global_entry *tt_global_entry = NULL; - bool ret = true; + bool ret = false;
if (!atomic_read(&bat_priv->ap_isolation)) - return false; + goto out;
tt_local_entry = tt_local_hash_find(bat_priv, dst); if (!tt_local_entry) @@@ -2161,10 -2044,10 +2161,10 @@@ if (!tt_global_entry) goto out;
- if (_is_ap_isolated(tt_local_entry, tt_global_entry)) + if (!_is_ap_isolated(tt_local_entry, tt_global_entry)) goto out;
- ret = false; + ret = true;
out: if (tt_global_entry) @@@ -2174,27 -2057,24 +2174,27 @@@ return ret; }
-void tt_update_orig(struct bat_priv *bat_priv, struct orig_node *orig_node, - const unsigned char *tt_buff, uint8_t tt_num_changes, - uint8_t ttvn, uint16_t tt_crc) +void batadv_tt_update_orig(struct bat_priv *bat_priv, + struct orig_node *orig_node, + const unsigned char *tt_buff, uint8_t tt_num_changes, + uint8_t ttvn, uint16_t tt_crc) { uint8_t orig_ttvn = (uint8_t)atomic_read(&orig_node->last_ttvn); bool full_table = true;
/* don't care about a backbone gateways updates. */ - if (bla_is_backbone_gw_orig(bat_priv, orig_node->orig)) + if (batadv_bla_is_backbone_gw_orig(bat_priv, orig_node->orig)) return;
/* orig table not initialised AND first diff is in the OGM OR the ttvn - * increased by one -> we can apply the attached changes */ + * increased by one -> we can apply the attached changes + */ if ((!orig_node->tt_initialised && ttvn == 1) || ttvn - orig_ttvn == 1) { /* the OGM could not contain the changes due to their size or * because they have already been sent TT_OGM_APPEND_MAX times. - * In this case send a tt request */ + * In this case send a tt request + */ if (!tt_num_changes) { full_table = false; goto request_table; @@@ -2205,8 -2085,7 +2205,8 @@@
/* Even if we received the precomputed crc with the OGM, we * prefer to recompute it to spot any possible inconsistency - * in the global table */ + * in the global table + */ orig_node->tt_crc = tt_global_crc(bat_priv, orig_node);
/* The ttvn alone is not enough to guarantee consistency @@@ -2216,19 -2095,17 +2216,19 @@@ * consistent or not. E.g. a node could disconnect while its * ttvn is X and reconnect on ttvn = X + TTVN_MAX: in this case * checking the CRC value is mandatory to detect the - * inconsistency */ + * inconsistency + */ if (orig_node->tt_crc != tt_crc) goto request_table;
/* Roaming phase is over: tables are in sync again. I can - * unset the flag */ + * unset the flag + */ orig_node->tt_poss_change = false; } else { /* if we missed more than one change or our tables are not - * in sync anymore -> request fresh tt data */ - + * in sync anymore -> request fresh tt data + */ if (!orig_node->tt_initialised || ttvn != orig_ttvn || orig_node->tt_crc != tt_crc) { request_table: @@@ -2247,8 -2124,7 +2247,8 @@@ * originator to another one. This entry is kept is still kept for consistency * purposes */ -bool tt_global_client_is_roaming(struct bat_priv *bat_priv, uint8_t *addr) +bool batadv_tt_global_client_is_roaming(struct bat_priv *bat_priv, + uint8_t *addr) { struct tt_global_entry *tt_global_entry; bool ret = false; diff --combined net/bluetooth/l2cap_core.c index 4ca8824,4554e80..d42dfdc --- a/net/bluetooth/l2cap_core.c +++ b/net/bluetooth/l2cap_core.c @@@ -30,14 -30,32 +30,14 @@@
#include <linux/module.h>
-#include <linux/types.h> -#include <linux/capability.h> -#include <linux/errno.h> -#include <linux/kernel.h> -#include <linux/sched.h> -#include <linux/slab.h> -#include <linux/poll.h> -#include <linux/fcntl.h> -#include <linux/init.h> -#include <linux/interrupt.h> -#include <linux/socket.h> -#include <linux/skbuff.h> -#include <linux/list.h> -#include <linux/device.h> #include <linux/debugfs.h> -#include <linux/seq_file.h> -#include <linux/uaccess.h> #include <linux/crc16.h> -#include <net/sock.h> - -#include <asm/unaligned.h>
#include <net/bluetooth/bluetooth.h> #include <net/bluetooth/hci_core.h> #include <net/bluetooth/l2cap.h> #include <net/bluetooth/smp.h> +#include <net/bluetooth/a2mp.h>
bool disable_ertm;
@@@ -55,9 -73,6 +55,9 @@@ static int l2cap_build_conf_req(struct static void l2cap_send_disconn_req(struct l2cap_conn *conn, struct l2cap_chan *chan, int err);
+static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control, + struct sk_buff_head *skbs, u8 event); + /* ---- L2CAP channels ---- */
static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn, u16 cid) @@@ -181,7 -196,7 +181,7 @@@ static void __l2cap_state_change(struc state_to_string(state));
chan->state = state; - chan->ops->state_change(chan->data, state); + chan->ops->state_change(chan, state); }
static void l2cap_state_change(struct l2cap_chan *chan, int state) @@@ -209,37 -224,6 +209,37 @@@ static inline void l2cap_chan_set_err(s release_sock(sk); }
+static void __set_retrans_timer(struct l2cap_chan *chan) +{ + if (!delayed_work_pending(&chan->monitor_timer) && + chan->retrans_timeout) { + l2cap_set_timer(chan, &chan->retrans_timer, + msecs_to_jiffies(chan->retrans_timeout)); + } +} + +static void __set_monitor_timer(struct l2cap_chan *chan) +{ + __clear_retrans_timer(chan); + if (chan->monitor_timeout) { + l2cap_set_timer(chan, &chan->monitor_timer, + msecs_to_jiffies(chan->monitor_timeout)); + } +} + +static struct sk_buff *l2cap_ertm_seq_in_queue(struct sk_buff_head *head, + u16 seq) +{ + struct sk_buff *skb; + + skb_queue_walk(head, skb) { + if (bt_cb(skb)->control.txseq == seq) + return skb; + } + + return NULL; +} + /* ---- L2CAP sequence number lists ---- */
/* For ERTM, ordered lists of sequence numbers must be tracked for @@@ -382,7 -366,7 +382,7 @@@ static void l2cap_chan_timeout(struct w
l2cap_chan_unlock(chan);
- chan->ops->close(chan->data); + chan->ops->close(chan); mutex_unlock(&conn->chan_lock);
l2cap_chan_put(chan); @@@ -408,9 -392,6 +408,9 @@@ struct l2cap_chan *l2cap_chan_create(vo
atomic_set(&chan->refcnt, 1);
+ /* This flag is cleared in l2cap_chan_ready() */ + set_bit(CONF_NOT_COMPLETE, &chan->conf_state); + BT_DBG("chan %p", chan);
return chan; @@@ -449,7 -430,7 +449,7 @@@ static void __l2cap_chan_add(struct l2c case L2CAP_CHAN_CONN_ORIENTED: if (conn->hcon->type == LE_LINK) { /* LE connection */ - chan->omtu = L2CAP_LE_DEFAULT_MTU; + chan->omtu = L2CAP_DEFAULT_MTU; chan->scid = L2CAP_CID_LE_DATA; chan->dcid = L2CAP_CID_LE_DATA; } else { @@@ -466,13 -447,6 +466,13 @@@ chan->omtu = L2CAP_DEFAULT_MTU; break;
+ case L2CAP_CHAN_CONN_FIX_A2MP: + chan->scid = L2CAP_CID_A2MP; + chan->dcid = L2CAP_CID_A2MP; + chan->omtu = L2CAP_A2MP_DEFAULT_MTU; + chan->imtu = L2CAP_A2MP_DEFAULT_MTU; + break; + default: /* Raw socket can send/recv signalling messages only */ chan->scid = L2CAP_CID_SIGNALING; @@@ -492,16 -466,18 +492,16 @@@ list_add(&chan->list, &conn->chan_l); }
-static void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan) +void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan) { mutex_lock(&conn->chan_lock); __l2cap_chan_add(conn, chan); mutex_unlock(&conn->chan_lock); }
-static void l2cap_chan_del(struct l2cap_chan *chan, int err) +void l2cap_chan_del(struct l2cap_chan *chan, int err) { - struct sock *sk = chan->sk; struct l2cap_conn *conn = chan->conn; - struct sock *parent = bt_sk(sk)->parent;
__clear_chan_timer(chan);
@@@ -514,22 -490,34 +514,22 @@@ l2cap_chan_put(chan);
chan->conn = NULL; - hci_conn_put(conn->hcon); - } - - lock_sock(sk); - - __l2cap_state_change(chan, BT_CLOSED); - sock_set_flag(sk, SOCK_ZAPPED);
- if (err) - __l2cap_chan_set_err(chan, err); - - if (parent) { - bt_accept_unlink(sk); - parent->sk_data_ready(parent, 0); - } else - sk->sk_state_change(sk); + if (chan->chan_type != L2CAP_CHAN_CONN_FIX_A2MP) + hci_conn_put(conn->hcon); + }
- release_sock(sk); + if (chan->ops->teardown) + chan->ops->teardown(chan, err);
- if (!(test_bit(CONF_OUTPUT_DONE, &chan->conf_state) && - test_bit(CONF_INPUT_DONE, &chan->conf_state))) + if (test_bit(CONF_NOT_COMPLETE, &chan->conf_state)) return;
- skb_queue_purge(&chan->tx_q); - - if (chan->mode == L2CAP_MODE_ERTM) { - struct srej_list *l, *tmp; + switch(chan->mode) { + case L2CAP_MODE_BASIC: + break;
+ case L2CAP_MODE_ERTM: __clear_retrans_timer(chan); __clear_monitor_timer(chan); __clear_ack_timer(chan); @@@ -538,15 -526,30 +538,15 @@@
l2cap_seq_list_free(&chan->srej_list); l2cap_seq_list_free(&chan->retrans_list); - list_for_each_entry_safe(l, tmp, &chan->srej_l, list) { - list_del(&l->list); - kfree(l); - } - } -} - -static void l2cap_chan_cleanup_listen(struct sock *parent) -{ - struct sock *sk;
- BT_DBG("parent %p", parent); - - /* Close not yet accepted channels */ - while ((sk = bt_accept_dequeue(parent, NULL))) { - struct l2cap_chan *chan = l2cap_pi(sk)->chan; - - l2cap_chan_lock(chan); - __clear_chan_timer(chan); - l2cap_chan_close(chan, ECONNRESET); - l2cap_chan_unlock(chan); + /* fall through */
- chan->ops->close(chan->data); + case L2CAP_MODE_STREAMING: + skb_queue_purge(&chan->tx_q); + break; } + + return; }
void l2cap_chan_close(struct l2cap_chan *chan, int reason) @@@ -559,8 -562,12 +559,8 @@@
switch (chan->state) { case BT_LISTEN: - lock_sock(sk); - l2cap_chan_cleanup_listen(sk); - - __l2cap_state_change(chan, BT_CLOSED); - sock_set_flag(sk, SOCK_ZAPPED); - release_sock(sk); + if (chan->ops->teardown) + chan->ops->teardown(chan, 0); break;
case BT_CONNECTED: @@@ -588,7 -595,7 +588,7 @@@ rsp.scid = cpu_to_le16(chan->dcid); rsp.dcid = cpu_to_le16(chan->scid); rsp.result = cpu_to_le16(result); - rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO); + rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO); l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp); } @@@ -602,8 -609,9 +602,8 @@@ break;
default: - lock_sock(sk); - sock_set_flag(sk, SOCK_ZAPPED); - release_sock(sk); + if (chan->ops->teardown) + chan->ops->teardown(chan, 0); break; } } @@@ -619,7 -627,7 +619,7 @@@ static inline u8 l2cap_get_auth_type(st default: return HCI_AT_NO_BONDING; } - } else if (chan->psm == cpu_to_le16(0x0001)) { + } else if (chan->psm == __constant_cpu_to_le16(L2CAP_PSM_SDP)) { if (chan->sec_level == BT_SECURITY_LOW) chan->sec_level = BT_SECURITY_SDP;
@@@ -765,11 -773,9 +765,11 @@@ static inline void __unpack_control(str if (test_bit(FLAG_EXT_CTRL, &chan->flags)) { __unpack_extended_control(get_unaligned_le32(skb->data), &bt_cb(skb)->control); + skb_pull(skb, L2CAP_EXT_CTRL_SIZE); } else { __unpack_enhanced_control(get_unaligned_le16(skb->data), &bt_cb(skb)->control); + skb_pull(skb, L2CAP_ENH_CTRL_SIZE); } }
@@@ -824,102 -830,66 +824,102 @@@ static inline void __pack_control(struc } }
-static inline void l2cap_send_sframe(struct l2cap_chan *chan, u32 control) +static inline unsigned int __ertm_hdr_size(struct l2cap_chan *chan) { - struct sk_buff *skb; - struct l2cap_hdr *lh; - struct l2cap_conn *conn = chan->conn; - int count, hlen; - - if (chan->state != BT_CONNECTED) - return; - if (test_bit(FLAG_EXT_CTRL, &chan->flags)) - hlen = L2CAP_EXT_HDR_SIZE; + return L2CAP_EXT_HDR_SIZE; else - hlen = L2CAP_ENH_HDR_SIZE; + return L2CAP_ENH_HDR_SIZE; +} + +static struct sk_buff *l2cap_create_sframe_pdu(struct l2cap_chan *chan, + u32 control) +{ + struct sk_buff *skb; + struct l2cap_hdr *lh; + int hlen = __ertm_hdr_size(chan);
if (chan->fcs == L2CAP_FCS_CRC16) hlen += L2CAP_FCS_SIZE;
- BT_DBG("chan %p, control 0x%8.8x", chan, control); - - count = min_t(unsigned int, conn->mtu, hlen); - - control |= __set_sframe(chan); - - if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state)) - control |= __set_ctrl_final(chan); + skb = bt_skb_alloc(hlen, GFP_KERNEL);
- if (test_and_clear_bit(CONN_SEND_PBIT, &chan->conn_state)) - control |= __set_ctrl_poll(chan); - - skb = bt_skb_alloc(count, GFP_ATOMIC); if (!skb) - return; + return ERR_PTR(-ENOMEM);
lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE); lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE); lh->cid = cpu_to_le16(chan->dcid);
- __put_control(chan, control, skb_put(skb, __ctrl_size(chan))); + if (test_bit(FLAG_EXT_CTRL, &chan->flags)) + put_unaligned_le32(control, skb_put(skb, L2CAP_EXT_CTRL_SIZE)); + else + put_unaligned_le16(control, skb_put(skb, L2CAP_ENH_CTRL_SIZE));
if (chan->fcs == L2CAP_FCS_CRC16) { - u16 fcs = crc16(0, (u8 *)lh, count - L2CAP_FCS_SIZE); + u16 fcs = crc16(0, (u8 *)skb->data, skb->len); put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE)); }
skb->priority = HCI_PRIO_MAX; - l2cap_do_send(chan, skb); + return skb; }
-static inline void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, u32 control) +static void l2cap_send_sframe(struct l2cap_chan *chan, + struct l2cap_ctrl *control) { - if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) { - control |= __set_ctrl_super(chan, L2CAP_SUPER_RNR); + struct sk_buff *skb; + u32 control_field; + + BT_DBG("chan %p, control %p", chan, control); + + if (!control->sframe) + return; + + if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state) && + !control->poll) + control->final = 1; + + if (control->super == L2CAP_SUPER_RR) + clear_bit(CONN_RNR_SENT, &chan->conn_state); + else if (control->super == L2CAP_SUPER_RNR) set_bit(CONN_RNR_SENT, &chan->conn_state); - } else - control |= __set_ctrl_super(chan, L2CAP_SUPER_RR);
- control |= __set_reqseq(chan, chan->buffer_seq); + if (control->super != L2CAP_SUPER_SREJ) { + chan->last_acked_seq = control->reqseq; + __clear_ack_timer(chan); + } + + BT_DBG("reqseq %d, final %d, poll %d, super %d", control->reqseq, + control->final, control->poll, control->super); + + if (test_bit(FLAG_EXT_CTRL, &chan->flags)) + control_field = __pack_extended_control(control); + else + control_field = __pack_enhanced_control(control); + + skb = l2cap_create_sframe_pdu(chan, control_field); + if (!IS_ERR(skb)) + l2cap_do_send(chan, skb); +} + +static void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, bool poll) +{ + struct l2cap_ctrl control; + + BT_DBG("chan %p, poll %d", chan, poll); + + memset(&control, 0, sizeof(control)); + control.sframe = 1; + control.poll = poll; + + if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) + control.super = L2CAP_SUPER_RNR; + else + control.super = L2CAP_SUPER_RR;
- l2cap_send_sframe(chan, control); + control.reqseq = chan->buffer_seq; + l2cap_send_sframe(chan, &control); }
static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan) @@@ -944,13 -914,25 +944,13 @@@ static void l2cap_send_conn_req(struct
static void l2cap_chan_ready(struct l2cap_chan *chan) { - struct sock *sk = chan->sk; - struct sock *parent; - - lock_sock(sk); - - parent = bt_sk(sk)->parent; - - BT_DBG("sk %p, parent %p", sk, parent); - + /* This clears all conf flags, including CONF_NOT_COMPLETE */ chan->conf_state = 0; __clear_chan_timer(chan);
- __l2cap_state_change(chan, BT_CONNECTED); - sk->sk_state_change(sk); - - if (parent) - parent->sk_data_ready(parent, 0); + chan->state = BT_CONNECTED;
- release_sock(sk); + chan->ops->ready(chan); }
static void l2cap_do_start(struct l2cap_chan *chan) @@@ -971,7 -953,7 +971,7 @@@ l2cap_send_conn_req(chan); } else { struct l2cap_info_req req; - req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK); + req.type = __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK);
conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT; conn->info_ident = l2cap_get_ident(conn); @@@ -1013,11 -995,6 +1013,11 @@@ static void l2cap_send_disconn_req(stru __clear_ack_timer(chan); }
+ if (chan->chan_type == L2CAP_CHAN_CONN_FIX_A2MP) { + __l2cap_state_change(chan, BT_DISCONN); + return; + } + req.dcid = cpu_to_le16(chan->dcid); req.scid = cpu_to_le16(chan->scid); l2cap_send_cmd(conn, l2cap_get_ident(conn), @@@ -1076,20 -1053,20 +1076,20 @@@ static void l2cap_conn_start(struct l2c if (test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags)) { struct sock *parent = bt_sk(sk)->parent; - rsp.result = cpu_to_le16(L2CAP_CR_PEND); - rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND); + rsp.result = __constant_cpu_to_le16(L2CAP_CR_PEND); + rsp.status = __constant_cpu_to_le16(L2CAP_CS_AUTHOR_PEND); if (parent) parent->sk_data_ready(parent, 0);
} else { __l2cap_state_change(chan, BT_CONFIG); - rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS); - rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO); + rsp.result = __constant_cpu_to_le16(L2CAP_CR_SUCCESS); + rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO); } release_sock(sk); } else { - rsp.result = cpu_to_le16(L2CAP_CR_PEND); - rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND); + rsp.result = __constant_cpu_to_le16(L2CAP_CR_PEND); + rsp.status = __constant_cpu_to_le16(L2CAP_CS_AUTHEN_PEND); }
l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP, @@@ -1173,7 -1150,13 +1173,7 @@@ static void l2cap_le_conn_ready(struct
lock_sock(parent);
- /* Check for backlog size */ - if (sk_acceptq_is_full(parent)) { - BT_DBG("backlog full %d", parent->sk_ack_backlog); - goto clean; - } - - chan = pchan->ops->new_connection(pchan->data); + chan = pchan->ops->new_connection(pchan); if (!chan) goto clean;
@@@ -1188,7 -1171,10 +1188,7 @@@
l2cap_chan_add(conn, chan);
- __set_chan_timer(chan, sk->sk_sndtimeo); - - __l2cap_state_change(chan, BT_CONNECTED); - parent->sk_data_ready(parent, 0); + l2cap_chan_ready(chan);
clean: release_sock(parent); @@@ -1212,11 -1198,6 +1212,11 @@@ static void l2cap_conn_ready(struct l2c
l2cap_chan_lock(chan);
+ if (chan->chan_type == L2CAP_CHAN_CONN_FIX_A2MP) { + l2cap_chan_unlock(chan); + continue; + } + if (conn->hcon->type == LE_LINK) { if (smp_conn_security(conn, chan->sec_level)) l2cap_chan_ready(chan); @@@ -1289,7 -1270,7 +1289,7 @@@ static void l2cap_conn_del(struct hci_c
l2cap_chan_unlock(chan);
- chan->ops->close(chan->data); + chan->ops->close(chan); l2cap_chan_put(chan); }
@@@ -1463,17 -1444,21 +1463,17 @@@ int l2cap_chan_connect(struct l2cap_cha goto done; }
- lock_sock(sk); - - switch (sk->sk_state) { + switch (chan->state) { case BT_CONNECT: case BT_CONNECT2: case BT_CONFIG: /* Already connecting */ err = 0; - release_sock(sk); goto done;
case BT_CONNECTED: /* Already connected */ err = -EISCONN; - release_sock(sk); goto done;
case BT_OPEN: @@@ -1483,12 -1468,13 +1483,12 @@@
default: err = -EBADFD; - release_sock(sk); goto done; }
/* Set destination address and psm */ + lock_sock(sk); bacpy(&bt_sk(sk)->dst, dst); - release_sock(sk);
chan->psm = psm; @@@ -1590,20 -1576,23 +1590,20 @@@ int __l2cap_wait_ack(struct sock *sk static void l2cap_monitor_timeout(struct work_struct *work) { struct l2cap_chan *chan = container_of(work, struct l2cap_chan, - monitor_timer.work); + monitor_timer.work);
BT_DBG("chan %p", chan);
l2cap_chan_lock(chan);
- if (chan->retry_count >= chan->remote_max_tx) { - l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED); + if (!chan->conn) { l2cap_chan_unlock(chan); l2cap_chan_put(chan); return; }
- chan->retry_count++; - __set_monitor_timer(chan); + l2cap_tx(chan, NULL, NULL, L2CAP_EV_MONITOR_TO);
- l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_POLL); l2cap_chan_unlock(chan); l2cap_chan_put(chan); } @@@ -1611,293 -1600,234 +1611,293 @@@ static void l2cap_retrans_timeout(struct work_struct *work) { struct l2cap_chan *chan = container_of(work, struct l2cap_chan, - retrans_timer.work); + retrans_timer.work);
BT_DBG("chan %p", chan);
l2cap_chan_lock(chan);
- chan->retry_count = 1; - __set_monitor_timer(chan); - - set_bit(CONN_WAIT_F, &chan->conn_state); - - l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_POLL); + if (!chan->conn) { + l2cap_chan_unlock(chan); + l2cap_chan_put(chan); + return; + }
+ l2cap_tx(chan, NULL, NULL, L2CAP_EV_RETRANS_TO); l2cap_chan_unlock(chan); l2cap_chan_put(chan); }
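The reworked monitor and retransmission timeout handlers above no longer retransmit directly; they bail out if the channel has lost its connection and otherwise feed L2CAP_EV_MONITOR_TO / L2CAP_EV_RETRANS_TO into the l2cap_tx() state machine. The timers themselves are plain delayed work armed with msecs_to_jiffies(), roughly as in this generic sketch (demo_* names are hypothetical):

#include <linux/kernel.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>

struct demo_chan {
	struct delayed_work retrans_timer;
	unsigned int retrans_timeout;	/* milliseconds */
};

static void demo_retrans_timeout(struct work_struct *work)
{
	struct demo_chan *chan = container_of(work, struct demo_chan,
					      retrans_timer.work);

	/* check that the channel is still connected, then feed a
	 * "retransmission timeout" event into the tx state machine
	 */
	(void)chan;
}

static void demo_set_retrans_timer(struct demo_chan *chan)
{
	/* arm (or re-arm) the timer; timeouts are configured in ms */
	schedule_delayed_work(&chan->retrans_timer,
			      msecs_to_jiffies(chan->retrans_timeout));
}

static void demo_init(struct demo_chan *chan)
{
	INIT_DELAYED_WORK(&chan->retrans_timer, demo_retrans_timeout);
}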
-static void l2cap_drop_acked_frames(struct l2cap_chan *chan) +static void l2cap_streaming_send(struct l2cap_chan *chan, + struct sk_buff_head *skbs) { struct sk_buff *skb; + struct l2cap_ctrl *control;
- while ((skb = skb_peek(&chan->tx_q)) && - chan->unacked_frames) { - if (bt_cb(skb)->control.txseq == chan->expected_ack_seq) - break; + BT_DBG("chan %p, skbs %p", chan, skbs);
- skb = skb_dequeue(&chan->tx_q); - kfree_skb(skb); + skb_queue_splice_tail_init(skbs, &chan->tx_q);
- chan->unacked_frames--; - } + while (!skb_queue_empty(&chan->tx_q)) {
- if (!chan->unacked_frames) - __clear_retrans_timer(chan); -} + skb = skb_dequeue(&chan->tx_q);
-static void l2cap_streaming_send(struct l2cap_chan *chan) -{ - struct sk_buff *skb; - u32 control; - u16 fcs; + bt_cb(skb)->control.retries = 1; + control = &bt_cb(skb)->control;
- while ((skb = skb_dequeue(&chan->tx_q))) { - control = __get_control(chan, skb->data + L2CAP_HDR_SIZE); - control |= __set_txseq(chan, chan->next_tx_seq); - control |= __set_ctrl_sar(chan, bt_cb(skb)->control.sar); - __put_control(chan, control, skb->data + L2CAP_HDR_SIZE); + control->reqseq = 0; + control->txseq = chan->next_tx_seq; + + __pack_control(chan, control, skb);
if (chan->fcs == L2CAP_FCS_CRC16) { - fcs = crc16(0, (u8 *)skb->data, - skb->len - L2CAP_FCS_SIZE); - put_unaligned_le16(fcs, - skb->data + skb->len - L2CAP_FCS_SIZE); + u16 fcs = crc16(0, (u8 *) skb->data, skb->len); + put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE)); }
l2cap_do_send(chan, skb);
+ BT_DBG("Sent txseq %d", (int)control->txseq); + chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq); + chan->frames_sent++; } }
-static void l2cap_retransmit_one_frame(struct l2cap_chan *chan, u16 tx_seq) +static int l2cap_ertm_send(struct l2cap_chan *chan) { struct sk_buff *skb, *tx_skb; - u16 fcs; - u32 control; + struct l2cap_ctrl *control; + int sent = 0;
- skb = skb_peek(&chan->tx_q); - if (!skb) - return; + BT_DBG("chan %p", chan);
- while (bt_cb(skb)->control.txseq != tx_seq) { - if (skb_queue_is_last(&chan->tx_q, skb)) - return; + if (chan->state != BT_CONNECTED) + return -ENOTCONN;
- skb = skb_queue_next(&chan->tx_q, skb); - } + if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state)) + return 0;
- if (bt_cb(skb)->control.retries == chan->remote_max_tx && - chan->remote_max_tx) { - l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED); - return; - } + while (chan->tx_send_head && + chan->unacked_frames < chan->remote_tx_win && + chan->tx_state == L2CAP_TX_STATE_XMIT) {
- tx_skb = skb_clone(skb, GFP_ATOMIC); - bt_cb(skb)->control.retries++; + skb = chan->tx_send_head;
- control = __get_control(chan, tx_skb->data + L2CAP_HDR_SIZE); - control &= __get_sar_mask(chan); + bt_cb(skb)->control.retries = 1; + control = &bt_cb(skb)->control;
- if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state)) - control |= __set_ctrl_final(chan); + if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state)) + control->final = 1;
- control |= __set_reqseq(chan, chan->buffer_seq); - control |= __set_txseq(chan, tx_seq); + control->reqseq = chan->buffer_seq; + chan->last_acked_seq = chan->buffer_seq; + control->txseq = chan->next_tx_seq;
- __put_control(chan, control, tx_skb->data + L2CAP_HDR_SIZE); + __pack_control(chan, control, skb);
- if (chan->fcs == L2CAP_FCS_CRC16) { - fcs = crc16(0, (u8 *)tx_skb->data, - tx_skb->len - L2CAP_FCS_SIZE); - put_unaligned_le16(fcs, - tx_skb->data + tx_skb->len - L2CAP_FCS_SIZE); + if (chan->fcs == L2CAP_FCS_CRC16) { + u16 fcs = crc16(0, (u8 *) skb->data, skb->len); + put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE)); + } + + /* Clone after data has been modified. Data is assumed to be + read-only (for locking purposes) on cloned sk_buffs. + */ + tx_skb = skb_clone(skb, GFP_KERNEL); + + if (!tx_skb) + break; + + __set_retrans_timer(chan); + + chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq); + chan->unacked_frames++; + chan->frames_sent++; + sent++; + + if (skb_queue_is_last(&chan->tx_q, skb)) + chan->tx_send_head = NULL; + else + chan->tx_send_head = skb_queue_next(&chan->tx_q, skb); + + l2cap_do_send(chan, tx_skb); + BT_DBG("Sent txseq %d", (int)control->txseq); }
- l2cap_do_send(chan, tx_skb); + BT_DBG("Sent %d, %d unacked, %d in ERTM queue", sent, + (int) chan->unacked_frames, skb_queue_len(&chan->tx_q)); + + return sent; }
-static int l2cap_ertm_send(struct l2cap_chan *chan) +static void l2cap_ertm_resend(struct l2cap_chan *chan) { - struct sk_buff *skb, *tx_skb; - u16 fcs; - u32 control; - int nsent = 0; + struct l2cap_ctrl control; + struct sk_buff *skb; + struct sk_buff *tx_skb; + u16 seq;
- if (chan->state != BT_CONNECTED) - return -ENOTCONN; + BT_DBG("chan %p", chan);
if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state)) - return 0; + return;
- while ((skb = chan->tx_send_head) && (!l2cap_tx_window_full(chan))) { + while (chan->retrans_list.head != L2CAP_SEQ_LIST_CLEAR) { + seq = l2cap_seq_list_pop(&chan->retrans_list);
- if (bt_cb(skb)->control.retries == chan->remote_max_tx && - chan->remote_max_tx) { - l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED); - break; + skb = l2cap_ertm_seq_in_queue(&chan->tx_q, seq); + if (!skb) { + BT_DBG("Error: Can't retransmit seq %d, frame missing", + seq); + continue; }
- tx_skb = skb_clone(skb, GFP_ATOMIC); - bt_cb(skb)->control.retries++; + control = bt_cb(skb)->control;
- control = __get_control(chan, tx_skb->data + L2CAP_HDR_SIZE); - control &= __get_sar_mask(chan); + if (chan->max_tx != 0 && + bt_cb(skb)->control.retries > chan->max_tx) { + BT_DBG("Retry limit exceeded (%d)", chan->max_tx); + l2cap_send_disconn_req(chan->conn, chan, ECONNRESET); + l2cap_seq_list_clear(&chan->retrans_list); + break; + }
+ control.reqseq = chan->buffer_seq; if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state)) - control |= __set_ctrl_final(chan); + control.final = 1; + else + control.final = 0; + + if (skb_cloned(skb)) { + /* Cloned sk_buffs are read-only, so we need a + * writeable copy + */ + tx_skb = skb_copy(skb, GFP_ATOMIC); + } else { + tx_skb = skb_clone(skb, GFP_ATOMIC); + }
- control |= __set_reqseq(chan, chan->buffer_seq); - control |= __set_txseq(chan, chan->next_tx_seq); - control |= __set_ctrl_sar(chan, bt_cb(skb)->control.sar); + if (!tx_skb) { + l2cap_seq_list_clear(&chan->retrans_list); + break; + }
- __put_control(chan, control, tx_skb->data + L2CAP_HDR_SIZE); + /* Update skb contents */ + if (test_bit(FLAG_EXT_CTRL, &chan->flags)) { + put_unaligned_le32(__pack_extended_control(&control), + tx_skb->data + L2CAP_HDR_SIZE); + } else { + put_unaligned_le16(__pack_enhanced_control(&control), + tx_skb->data + L2CAP_HDR_SIZE); + }
if (chan->fcs == L2CAP_FCS_CRC16) { - fcs = crc16(0, (u8 *)skb->data, - tx_skb->len - L2CAP_FCS_SIZE); - put_unaligned_le16(fcs, skb->data + - tx_skb->len - L2CAP_FCS_SIZE); + u16 fcs = crc16(0, (u8 *) tx_skb->data, tx_skb->len); + put_unaligned_le16(fcs, skb_put(tx_skb, + L2CAP_FCS_SIZE)); }
l2cap_do_send(chan, tx_skb);
- __set_retrans_timer(chan); - - bt_cb(skb)->control.txseq = chan->next_tx_seq; + BT_DBG("Resent txseq %d", control.txseq);
- chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq); - - if (bt_cb(skb)->control.retries == 1) { - chan->unacked_frames++; - - if (!nsent++) - __clear_ack_timer(chan); - } - - chan->frames_sent++; - - if (skb_queue_is_last(&chan->tx_q, skb)) - chan->tx_send_head = NULL; - else - chan->tx_send_head = skb_queue_next(&chan->tx_q, skb); + chan->last_acked_seq = chan->buffer_seq; } - - return nsent; }
-static int l2cap_retransmit_frames(struct l2cap_chan *chan) +static void l2cap_retransmit(struct l2cap_chan *chan, + struct l2cap_ctrl *control) { - int ret; + BT_DBG("chan %p, control %p", chan, control);
- if (!skb_queue_empty(&chan->tx_q)) - chan->tx_send_head = chan->tx_q.next; - - chan->next_tx_seq = chan->expected_ack_seq; - ret = l2cap_ertm_send(chan); - return ret; + l2cap_seq_list_append(&chan->retrans_list, control->reqseq); + l2cap_ertm_resend(chan); }
-static void __l2cap_send_ack(struct l2cap_chan *chan) +static void l2cap_retransmit_all(struct l2cap_chan *chan, + struct l2cap_ctrl *control) { - u32 control = 0; + struct sk_buff *skb;
- control |= __set_reqseq(chan, chan->buffer_seq); + BT_DBG("chan %p, control %p", chan, control);
- if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) { - control |= __set_ctrl_super(chan, L2CAP_SUPER_RNR); - set_bit(CONN_RNR_SENT, &chan->conn_state); - l2cap_send_sframe(chan, control); - return; - } + if (control->poll) + set_bit(CONN_SEND_FBIT, &chan->conn_state); + + l2cap_seq_list_clear(&chan->retrans_list);
- if (l2cap_ertm_send(chan) > 0) + if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state)) return;
- control |= __set_ctrl_super(chan, L2CAP_SUPER_RR); - l2cap_send_sframe(chan, control); + if (chan->unacked_frames) { + skb_queue_walk(&chan->tx_q, skb) { + if (bt_cb(skb)->control.txseq == control->reqseq || + skb == chan->tx_send_head) + break; + } + + skb_queue_walk_from(&chan->tx_q, skb) { + if (skb == chan->tx_send_head) + break; + + l2cap_seq_list_append(&chan->retrans_list, + bt_cb(skb)->control.txseq); + } + + l2cap_ertm_resend(chan); + } }
static void l2cap_send_ack(struct l2cap_chan *chan) { - __clear_ack_timer(chan); - __l2cap_send_ack(chan); -} + struct l2cap_ctrl control; + u16 frames_to_ack = __seq_offset(chan, chan->buffer_seq, + chan->last_acked_seq); + int threshold;
-static void l2cap_send_srejtail(struct l2cap_chan *chan) -{ - struct srej_list *tail; - u32 control; + BT_DBG("chan %p last_acked_seq %d buffer_seq %d", + chan, chan->last_acked_seq, chan->buffer_seq); + + memset(&control, 0, sizeof(control)); + control.sframe = 1;
- control = __set_ctrl_super(chan, L2CAP_SUPER_SREJ); - control |= __set_ctrl_final(chan); + if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state) && + chan->rx_state == L2CAP_RX_STATE_RECV) { + __clear_ack_timer(chan); + control.super = L2CAP_SUPER_RNR; + control.reqseq = chan->buffer_seq; + l2cap_send_sframe(chan, &control); + } else { + if (!test_bit(CONN_REMOTE_BUSY, &chan->conn_state)) { + l2cap_ertm_send(chan); + /* If any i-frames were sent, they included an ack */ + if (chan->buffer_seq == chan->last_acked_seq) + frames_to_ack = 0; + }
- tail = list_entry((&chan->srej_l)->prev, struct srej_list, list); - control |= __set_reqseq(chan, tail->tx_seq); + /* Ack now if the tx window is 3/4ths full. + * Calculate without mul or div + */ + threshold = chan->tx_win; + threshold += threshold << 1; + threshold >>= 2; + + BT_DBG("frames_to_ack %d, threshold %d", (int)frames_to_ack, + threshold); + + if (frames_to_ack >= threshold) { + __clear_ack_timer(chan); + control.super = L2CAP_SUPER_RR; + control.reqseq = chan->buffer_seq; + l2cap_send_sframe(chan, &control); + frames_to_ack = 0; + }
- l2cap_send_sframe(chan, control); + if (frames_to_ack) + __set_ack_timer(chan); + } }
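The ack threshold in l2cap_send_ack() is three quarters of the transmit window computed with shifts only: starting from w, adding w << 1 gives 3*w, and the final >> 2 divides by four, rounding down. A trivial standalone check of that identity (plain C, assuming only the arithmetic and the typical 63-frame enhanced window):

#include <assert.h>
#include <stdio.h>

int main(void)
{
    unsigned int w;

    /* threshold = w + (w << 1) is 3*w; >> 2 divides by four,
     * rounding down -- the same result as (w * 3) / 4.
     */
    for (w = 1; w <= 63; w++) {
        unsigned int threshold = w;

        threshold += threshold << 1;
        threshold >>= 2;
        assert(threshold == w * 3 / 4);
    }

    printf("tx_win 63 -> ack threshold %u\n", 63u * 3 / 4);
    return 0;
}

So with a tx_win of 63 the channel sends an explicit ack once 47 or more unacked I-frames have accumulated, unless the ack timer fires first.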
static inline int l2cap_skbuff_fromiovec(struct l2cap_chan *chan, @@@ -2026,7 -1956,10 +2026,7 @@@ static struct sk_buff *l2cap_create_ifr if (!conn) return ERR_PTR(-ENOTCONN);
- if (test_bit(FLAG_EXT_CTRL, &chan->flags)) - hlen = L2CAP_EXT_HDR_SIZE; - else - hlen = L2CAP_ENH_HDR_SIZE; + hlen = __ertm_hdr_size(chan);
if (sdulen) hlen += L2CAP_SDULEN_SIZE; @@@ -2046,11 -1979,7 +2046,11 @@@ lh->cid = cpu_to_le16(chan->dcid); lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
- __put_control(chan, 0, skb_put(skb, __ctrl_size(chan))); + /* Control header is populated later */ + if (test_bit(FLAG_EXT_CTRL, &chan->flags)) + put_unaligned_le32(0, skb_put(skb, L2CAP_EXT_CTRL_SIZE)); + else + put_unaligned_le16(0, skb_put(skb, L2CAP_ENH_CTRL_SIZE));
if (sdulen) put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE)); @@@ -2061,7 -1990,9 +2061,7 @@@ return ERR_PTR(err); }
- if (chan->fcs == L2CAP_FCS_CRC16) - put_unaligned_le16(0, skb_put(skb, L2CAP_FCS_SIZE)); - + bt_cb(skb)->control.fcs = chan->fcs; bt_cb(skb)->control.retries = 0; return skb; } @@@ -2073,6 -2004,7 +2073,6 @@@ static int l2cap_segment_sdu(struct l2c struct sk_buff *skb; u16 sdu_len; size_t pdu_len; - int err = 0; u8 sar;
BT_DBG("chan %p, msg %p, len %d", chan, msg, (int)len); @@@ -2088,10 -2020,7 +2088,10 @@@ pdu_len = min_t(size_t, pdu_len, L2CAP_BREDR_MAX_PAYLOAD);
/* Adjust for largest possible L2CAP overhead. */ - pdu_len -= L2CAP_EXT_HDR_SIZE + L2CAP_FCS_SIZE; + if (chan->fcs) + pdu_len -= L2CAP_FCS_SIZE; + + pdu_len -= __ertm_hdr_size(chan);
/* Remote device may have requested smaller PDUs */ pdu_len = min_t(size_t, pdu_len, chan->remote_mps); @@@ -2131,7 -2060,7 +2131,7 @@@ } }
- return err; + return 0; }
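When segmenting, the usable payload per PDU is whatever the connection MTU leaves after the per-frame overhead, and the rewritten code subtracts the FCS only when FCS is actually enabled, plus the enhanced or extended ERTM header chosen by __ertm_hdr_size(); the first PDU of a segmented SDU additionally gives up two bytes for the SDU length field. A rough worked example (standalone C; the sizes are the usual L2CAP constants — 4-byte basic header, 2- or 4-byte control field, 2-byte FCS — and the MTU value is invented for illustration):

#include <stdio.h>

/* Illustrative sizes only (usual L2CAP constants). */
#define HDR_SIZE        4   /* basic L2CAP header     */
#define ENH_CTRL_SIZE   2   /* enhanced control field */
#define EXT_CTRL_SIZE   4   /* extended control field */
#define FCS_SIZE        2

/* How much SDU payload one PDU can carry, before the remote MPS
 * and BR/EDR maximum-payload caps are applied.
 */
static size_t max_payload(size_t conn_mtu, int ext_ctrl, int fcs)
{
    size_t pdu_len = conn_mtu;

    if (fcs)
        pdu_len -= FCS_SIZE;
    pdu_len -= HDR_SIZE + (ext_ctrl ? EXT_CTRL_SIZE : ENH_CTRL_SIZE);

    return pdu_len;
}

int main(void)
{
    /* e.g. a 672-byte connection MTU, enhanced control, FCS on */
    printf("%zu payload bytes per PDU\n", max_payload(672, 0, 1));
    return 0;
}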
int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len, @@@ -2193,12 -2122,17 +2193,12 @@@ if (err) break;
- if (chan->mode == L2CAP_MODE_ERTM && chan->tx_send_head == NULL) - chan->tx_send_head = seg_queue.next; - skb_queue_splice_tail_init(&seg_queue, &chan->tx_q); - if (chan->mode == L2CAP_MODE_ERTM) - err = l2cap_ertm_send(chan); + l2cap_tx(chan, NULL, &seg_queue, L2CAP_EV_DATA_REQUEST); else - l2cap_streaming_send(chan); + l2cap_streaming_send(chan, &seg_queue);
- if (err >= 0) - err = len; + err = len;
/* If the skbs were not queued for sending, they'll still be in * seg_queue and need to be purged. @@@ -2214,296 -2148,6 +2214,296 @@@ return err; }
+static void l2cap_send_srej(struct l2cap_chan *chan, u16 txseq) +{ + struct l2cap_ctrl control; + u16 seq; + + BT_DBG("chan %p, txseq %d", chan, txseq); + + memset(&control, 0, sizeof(control)); + control.sframe = 1; + control.super = L2CAP_SUPER_SREJ; + + for (seq = chan->expected_tx_seq; seq != txseq; + seq = __next_seq(chan, seq)) { + if (!l2cap_ertm_seq_in_queue(&chan->srej_q, seq)) { + control.reqseq = seq; + l2cap_send_sframe(chan, &control); + l2cap_seq_list_append(&chan->srej_list, seq); + } + } + + chan->expected_tx_seq = __next_seq(chan, txseq); +} + +static void l2cap_send_srej_tail(struct l2cap_chan *chan) +{ + struct l2cap_ctrl control; + + BT_DBG("chan %p", chan); + + if (chan->srej_list.tail == L2CAP_SEQ_LIST_CLEAR) + return; + + memset(&control, 0, sizeof(control)); + control.sframe = 1; + control.super = L2CAP_SUPER_SREJ; + control.reqseq = chan->srej_list.tail; + l2cap_send_sframe(chan, &control); +} + +static void l2cap_send_srej_list(struct l2cap_chan *chan, u16 txseq) +{ + struct l2cap_ctrl control; + u16 initial_head; + u16 seq; + + BT_DBG("chan %p, txseq %d", chan, txseq); + + memset(&control, 0, sizeof(control)); + control.sframe = 1; + control.super = L2CAP_SUPER_SREJ; + + /* Capture initial list head to allow only one pass through the list. */ + initial_head = chan->srej_list.head; + + do { + seq = l2cap_seq_list_pop(&chan->srej_list); + if (seq == txseq || seq == L2CAP_SEQ_LIST_CLEAR) + break; + + control.reqseq = seq; + l2cap_send_sframe(chan, &control); + l2cap_seq_list_append(&chan->srej_list, seq); + } while (chan->srej_list.head != initial_head); +} + +static void l2cap_process_reqseq(struct l2cap_chan *chan, u16 reqseq) +{ + struct sk_buff *acked_skb; + u16 ackseq; + + BT_DBG("chan %p, reqseq %d", chan, reqseq); + + if (chan->unacked_frames == 0 || reqseq == chan->expected_ack_seq) + return; + + BT_DBG("expected_ack_seq %d, unacked_frames %d", + chan->expected_ack_seq, chan->unacked_frames); + + for (ackseq = chan->expected_ack_seq; ackseq != reqseq; + ackseq = __next_seq(chan, ackseq)) { + + acked_skb = l2cap_ertm_seq_in_queue(&chan->tx_q, ackseq); + if (acked_skb) { + skb_unlink(acked_skb, &chan->tx_q); + kfree_skb(acked_skb); + chan->unacked_frames--; + } + } + + chan->expected_ack_seq = reqseq; + + if (chan->unacked_frames == 0) + __clear_retrans_timer(chan); + + BT_DBG("unacked_frames %d", (int) chan->unacked_frames); +} + +static void l2cap_abort_rx_srej_sent(struct l2cap_chan *chan) +{ + BT_DBG("chan %p", chan); + + chan->expected_tx_seq = chan->buffer_seq; + l2cap_seq_list_clear(&chan->srej_list); + skb_queue_purge(&chan->srej_q); + chan->rx_state = L2CAP_RX_STATE_RECV; +} + +static void l2cap_tx_state_xmit(struct l2cap_chan *chan, + struct l2cap_ctrl *control, + struct sk_buff_head *skbs, u8 event) +{ + BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs, + event); + + switch (event) { + case L2CAP_EV_DATA_REQUEST: + if (chan->tx_send_head == NULL) + chan->tx_send_head = skb_peek(skbs); + + skb_queue_splice_tail_init(skbs, &chan->tx_q); + l2cap_ertm_send(chan); + break; + case L2CAP_EV_LOCAL_BUSY_DETECTED: + BT_DBG("Enter LOCAL_BUSY"); + set_bit(CONN_LOCAL_BUSY, &chan->conn_state); + + if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) { + /* The SREJ_SENT state must be aborted if we are to + * enter the LOCAL_BUSY state. 
+ */ + l2cap_abort_rx_srej_sent(chan); + } + + l2cap_send_ack(chan); + + break; + case L2CAP_EV_LOCAL_BUSY_CLEAR: + BT_DBG("Exit LOCAL_BUSY"); + clear_bit(CONN_LOCAL_BUSY, &chan->conn_state); + + if (test_bit(CONN_RNR_SENT, &chan->conn_state)) { + struct l2cap_ctrl local_control; + + memset(&local_control, 0, sizeof(local_control)); + local_control.sframe = 1; + local_control.super = L2CAP_SUPER_RR; + local_control.poll = 1; + local_control.reqseq = chan->buffer_seq; + l2cap_send_sframe(chan, &local_control); + + chan->retry_count = 1; + __set_monitor_timer(chan); + chan->tx_state = L2CAP_TX_STATE_WAIT_F; + } + break; + case L2CAP_EV_RECV_REQSEQ_AND_FBIT: + l2cap_process_reqseq(chan, control->reqseq); + break; + case L2CAP_EV_EXPLICIT_POLL: + l2cap_send_rr_or_rnr(chan, 1); + chan->retry_count = 1; + __set_monitor_timer(chan); + __clear_ack_timer(chan); + chan->tx_state = L2CAP_TX_STATE_WAIT_F; + break; + case L2CAP_EV_RETRANS_TO: + l2cap_send_rr_or_rnr(chan, 1); + chan->retry_count = 1; + __set_monitor_timer(chan); + chan->tx_state = L2CAP_TX_STATE_WAIT_F; + break; + case L2CAP_EV_RECV_FBIT: + /* Nothing to process */ + break; + default: + break; + } +} + +static void l2cap_tx_state_wait_f(struct l2cap_chan *chan, + struct l2cap_ctrl *control, + struct sk_buff_head *skbs, u8 event) +{ + BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs, + event); + + switch (event) { + case L2CAP_EV_DATA_REQUEST: + if (chan->tx_send_head == NULL) + chan->tx_send_head = skb_peek(skbs); + /* Queue data, but don't send. */ + skb_queue_splice_tail_init(skbs, &chan->tx_q); + break; + case L2CAP_EV_LOCAL_BUSY_DETECTED: + BT_DBG("Enter LOCAL_BUSY"); + set_bit(CONN_LOCAL_BUSY, &chan->conn_state); + + if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) { + /* The SREJ_SENT state must be aborted if we are to + * enter the LOCAL_BUSY state. 
+ */ + l2cap_abort_rx_srej_sent(chan); + } + + l2cap_send_ack(chan); + + break; + case L2CAP_EV_LOCAL_BUSY_CLEAR: + BT_DBG("Exit LOCAL_BUSY"); + clear_bit(CONN_LOCAL_BUSY, &chan->conn_state); + + if (test_bit(CONN_RNR_SENT, &chan->conn_state)) { + struct l2cap_ctrl local_control; + memset(&local_control, 0, sizeof(local_control)); + local_control.sframe = 1; + local_control.super = L2CAP_SUPER_RR; + local_control.poll = 1; + local_control.reqseq = chan->buffer_seq; + l2cap_send_sframe(chan, &local_control); + + chan->retry_count = 1; + __set_monitor_timer(chan); + chan->tx_state = L2CAP_TX_STATE_WAIT_F; + } + break; + case L2CAP_EV_RECV_REQSEQ_AND_FBIT: + l2cap_process_reqseq(chan, control->reqseq); + + /* Fall through */ + + case L2CAP_EV_RECV_FBIT: + if (control && control->final) { + __clear_monitor_timer(chan); + if (chan->unacked_frames > 0) + __set_retrans_timer(chan); + chan->retry_count = 0; + chan->tx_state = L2CAP_TX_STATE_XMIT; + BT_DBG("recv fbit tx_state 0x2.2%x", chan->tx_state); + } + break; + case L2CAP_EV_EXPLICIT_POLL: + /* Ignore */ + break; + case L2CAP_EV_MONITOR_TO: + if (chan->max_tx == 0 || chan->retry_count < chan->max_tx) { + l2cap_send_rr_or_rnr(chan, 1); + __set_monitor_timer(chan); + chan->retry_count++; + } else { + l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED); + } + break; + default: + break; + } +} + +static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control, + struct sk_buff_head *skbs, u8 event) +{ + BT_DBG("chan %p, control %p, skbs %p, event %d, state %d", + chan, control, skbs, event, chan->tx_state); + + switch (chan->tx_state) { + case L2CAP_TX_STATE_XMIT: + l2cap_tx_state_xmit(chan, control, skbs, event); + break; + case L2CAP_TX_STATE_WAIT_F: + l2cap_tx_state_wait_f(chan, control, skbs, event); + break; + default: + /* Ignore event */ + break; + } +} + +static void l2cap_pass_to_tx(struct l2cap_chan *chan, + struct l2cap_ctrl *control) +{ + BT_DBG("chan %p, control %p", chan, control); + l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_REQSEQ_AND_FBIT); +} + +static void l2cap_pass_to_tx_fbit(struct l2cap_chan *chan, + struct l2cap_ctrl *control) +{ + BT_DBG("chan %p, control %p", chan, control); + l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_FBIT); +} + /* Copy frame to all raw sockets on that connection */ static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb) { @@@ -2526,7 -2170,7 +2526,7 @@@ if (!nskb) continue;
- if (chan->ops->recv(chan->data, nskb)) + if (chan->ops->recv(chan, nskb)) kfree_skb(nskb); }
@@@ -2556,9 -2200,9 +2556,9 @@@ static struct sk_buff *l2cap_build_cmd( lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
if (conn->hcon->type == LE_LINK) - lh->cid = cpu_to_le16(L2CAP_CID_LE_SIGNALING); + lh->cid = __constant_cpu_to_le16(L2CAP_CID_LE_SIGNALING); else - lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING); + lh->cid = __constant_cpu_to_le16(L2CAP_CID_SIGNALING);
cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE); cmd->code = code; @@@ -2670,8 -2314,8 +2670,8 @@@ static void l2cap_add_opt_efs(void **pt efs.stype = chan->local_stype; efs.msdu = cpu_to_le16(chan->local_msdu); efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime); - efs.acc_lat = cpu_to_le32(L2CAP_DEFAULT_ACC_LAT); - efs.flush_to = cpu_to_le32(L2CAP_DEFAULT_FLUSH_TO); + efs.acc_lat = __constant_cpu_to_le32(L2CAP_DEFAULT_ACC_LAT); + efs.flush_to = __constant_cpu_to_le32(L2CAP_DEFAULT_FLUSH_TO); break;
case L2CAP_MODE_STREAMING: @@@ -2694,24 -2338,20 +2694,24 @@@ static void l2cap_ack_timeout(struct work_struct *work) { struct l2cap_chan *chan = container_of(work, struct l2cap_chan, - ack_timer.work); + ack_timer.work); + u16 frames_to_ack;
BT_DBG("chan %p", chan);
l2cap_chan_lock(chan);
- __l2cap_send_ack(chan); + frames_to_ack = __seq_offset(chan, chan->buffer_seq, + chan->last_acked_seq);
- l2cap_chan_unlock(chan); + if (frames_to_ack) + l2cap_send_rr_or_rnr(chan, 0);
+ l2cap_chan_unlock(chan); l2cap_chan_put(chan); }
-static inline int l2cap_ertm_init(struct l2cap_chan *chan) +int l2cap_ertm_init(struct l2cap_chan *chan) { int err;
@@@ -2720,6 -2360,7 +2720,6 @@@ chan->expected_ack_seq = 0; chan->unacked_frames = 0; chan->buffer_seq = 0; - chan->num_acked = 0; chan->frames_sent = 0; chan->last_acked_seq = 0; chan->sdu = NULL; @@@ -2740,15 -2381,12 +2740,15 @@@
skb_queue_head_init(&chan->srej_q);
- INIT_LIST_HEAD(&chan->srej_l); err = l2cap_seq_list_init(&chan->srej_list, chan->tx_win); if (err < 0) return err;
- return l2cap_seq_list_init(&chan->retrans_list, chan->remote_tx_win); + err = l2cap_seq_list_init(&chan->retrans_list, chan->remote_tx_win); + if (err < 0) + l2cap_seq_list_free(&chan->srej_list); + + return err; }
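Both lists initialised here — srej_list for sequence numbers we have SREJ'd, retrans_list for sequence numbers the peer asked us to resend — behave like small FIFOs of sequence numbers with constant-time append, pop and membership test. A simplified standalone model of such a structure follows (an array indexed by sequence number that threads a singly linked list through itself; the sentinel values, sizing and names are assumptions for illustration, not the kernel's exact l2cap_seq_list implementation):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define SEQ_SPACE   64          /* 6-bit ERTM sequence numbers     */
#define SEQ_CLEAR   0xffff      /* "not on the list" sentinel      */
#define SEQ_TAIL    0x8000      /* "last entry on the list" marker */

struct seq_list {
    uint16_t head, tail;
    uint16_t next[SEQ_SPACE];   /* next[seq] threads the FIFO      */
};

static void seq_list_init(struct seq_list *l)
{
    l->head = l->tail = SEQ_CLEAR;
    memset(l->next, 0xff, sizeof(l->next));     /* every entry SEQ_CLEAR */
}

static int seq_list_contains(const struct seq_list *l, uint16_t seq)
{
    return l->next[seq] != SEQ_CLEAR;
}

static void seq_list_append(struct seq_list *l, uint16_t seq)
{
    if (seq_list_contains(l, seq))
        return;                                 /* each seq at most once */
    if (l->head == SEQ_CLEAR)
        l->head = seq;
    else
        l->next[l->tail] = seq;
    l->tail = seq;
    l->next[seq] = SEQ_TAIL;
}

static uint16_t seq_list_pop(struct seq_list *l)
{
    uint16_t seq = l->head;

    if (seq == SEQ_CLEAR)
        return SEQ_CLEAR;
    l->head = (l->next[seq] == SEQ_TAIL) ? SEQ_CLEAR : l->next[seq];
    if (l->head == SEQ_CLEAR)
        l->tail = SEQ_CLEAR;
    l->next[seq] = SEQ_CLEAR;
    return seq;
}

int main(void)
{
    struct seq_list srej;
    unsigned a, b;

    seq_list_init(&srej);
    seq_list_append(&srej, 3);
    seq_list_append(&srej, 4);
    a = seq_list_pop(&srej);
    b = seq_list_pop(&srej);
    printf("popped %u then %u\n", a, b);
    return 0;
}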
static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask) @@@ -2874,7 -2512,6 +2874,7 @@@ done break;
case L2CAP_MODE_STREAMING: + l2cap_txwin_setup(chan); rfc.mode = L2CAP_MODE_STREAMING; rfc.txwin_size = 0; rfc.max_transmit = 0; @@@ -2905,7 -2542,7 +2905,7 @@@ }
req->dcid = cpu_to_le16(chan->dcid); - req->flags = cpu_to_le16(0); + req->flags = __constant_cpu_to_le16(0);
return ptr - data; } @@@ -3125,7 -2762,7 +3125,7 @@@ done } rsp->scid = cpu_to_le16(chan->dcid); rsp->result = cpu_to_le16(result); - rsp->flags = cpu_to_le16(0x0000); + rsp->flags = __constant_cpu_to_le16(0);
return ptr - data; } @@@ -3224,7 -2861,7 +3224,7 @@@ static int l2cap_parse_conf_rsp(struct }
req->dcid = cpu_to_le16(chan->dcid); - req->flags = cpu_to_le16(0x0000); + req->flags = __constant_cpu_to_le16(0);
return ptr - data; } @@@ -3251,8 -2888,8 +3251,8 @@@ void __l2cap_connect_rsp_defer(struct l
rsp.scid = cpu_to_le16(chan->dcid); rsp.dcid = cpu_to_le16(chan->scid); - rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS); - rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO); + rsp.result = __constant_cpu_to_le16(L2CAP_CR_SUCCESS); + rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO); l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
@@@ -3278,20 -2915,22 +3278,22 @@@ static void l2cap_conf_rfc_get(struct l while (len >= L2CAP_CONF_OPT_SIZE) { len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
- switch (type) { - case L2CAP_CONF_RFC: - if (olen == sizeof(rfc)) - memcpy(&rfc, (void *)val, olen); - goto done; - } + if (type != L2CAP_CONF_RFC) + continue; + + if (olen != sizeof(rfc)) + break; + + memcpy(&rfc, (void *)val, olen); + goto done; }
/* Use sane default values in case a misbehaving remote device * did not send an RFC option. */ rfc.mode = chan->mode; - rfc.retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO); - rfc.monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO); + rfc.retrans_timeout = __constant_cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO); + rfc.monitor_timeout = __constant_cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO); rfc.max_pdu_size = cpu_to_le16(chan->imtu);
BT_ERR("Expected RFC option was not found, using defaults"); @@@ -3354,7 -2993,7 +3356,7 @@@ static inline int l2cap_connect_req(str lock_sock(parent);
/* Check if the ACL is secure enough (if not SDP) */ - if (psm != cpu_to_le16(0x0001) && + if (psm != __constant_cpu_to_le16(L2CAP_PSM_SDP) && !hci_conn_check_link_mode(conn->hcon)) { conn->disc_reason = HCI_ERROR_AUTH_FAILURE; result = L2CAP_CR_SEC_BLOCK; @@@ -3363,16 -3002,25 +3365,16 @@@
result = L2CAP_CR_NO_MEM;
- /* Check for backlog size */ - if (sk_acceptq_is_full(parent)) { - BT_DBG("backlog full %d", parent->sk_ack_backlog); + /* Check if we already have channel with that dcid */ + if (__l2cap_get_chan_by_dcid(conn, scid)) goto response; - }
- chan = pchan->ops->new_connection(pchan->data); + chan = pchan->ops->new_connection(pchan); if (!chan) goto response;
sk = chan->sk;
- /* Check if we already have channel with that dcid */ - if (__l2cap_get_chan_by_dcid(conn, scid)) { - sock_set_flag(sk, SOCK_ZAPPED); - chan->ops->close(chan->data); - goto response; - } - hci_conn_hold(conn->hcon);
bacpy(&bt_sk(sk)->src, conn->src); @@@ -3426,7 -3074,7 +3428,7 @@@ sendresp
if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) { struct l2cap_info_req info; - info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK); + info.type = __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK);
conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT; conn->info_ident = l2cap_get_ident(conn); @@@ -3548,7 -3196,7 +3550,7 @@@ static inline int l2cap_config_req(stru if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2) { struct l2cap_cmd_rej_cid rej;
- rej.reason = cpu_to_le16(L2CAP_REJ_INVALID_CID); + rej.reason = __constant_cpu_to_le16(L2CAP_REJ_INVALID_CID); rej.scid = cpu_to_le16(chan->scid); rej.dcid = cpu_to_le16(chan->dcid);
@@@ -3570,11 -3218,11 +3572,11 @@@ memcpy(chan->conf_req + chan->conf_len, req->data, len); chan->conf_len += len;
- if (flags & 0x0001) { + if (flags & L2CAP_CONF_FLAG_CONTINUATION) { /* Incomplete config. Send empty response. */ l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, l2cap_build_conf_rsp(chan, rsp, - L2CAP_CONF_SUCCESS, 0x0001), rsp); + L2CAP_CONF_SUCCESS, flags), rsp); goto unlock; }
@@@ -3597,6 -3245,8 +3599,6 @@@ if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) { set_default_fcs(chan);
- l2cap_state_change(chan, BT_CONNECTED); - if (chan->mode == L2CAP_MODE_ERTM || chan->mode == L2CAP_MODE_STREAMING) err = l2cap_ertm_init(chan); @@@ -3628,7 -3278,7 +3630,7 @@@
l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, l2cap_build_conf_rsp(chan, rsp, - L2CAP_CONF_SUCCESS, 0x0000), rsp); + L2CAP_CONF_SUCCESS, flags), rsp); }
unlock: @@@ -3719,7 -3369,7 +3721,7 @@@ static inline int l2cap_config_rsp(stru goto done; }
- if (flags & 0x01) + if (flags & L2CAP_CONF_FLAG_CONTINUATION) goto done;
set_bit(CONF_INPUT_DONE, &chan->conf_state); @@@ -3727,6 -3377,7 +3729,6 @@@ if (test_bit(CONF_OUTPUT_DONE, &chan->conf_state)) { set_default_fcs(chan);
- l2cap_state_change(chan, BT_CONNECTED); if (chan->mode == L2CAP_MODE_ERTM || chan->mode == L2CAP_MODE_STREAMING) err = l2cap_ertm_init(chan); @@@ -3780,7 -3431,7 +3782,7 @@@ static inline int l2cap_disconnect_req(
l2cap_chan_unlock(chan);
- chan->ops->close(chan->data); + chan->ops->close(chan); l2cap_chan_put(chan);
mutex_unlock(&conn->chan_lock); @@@ -3814,7 -3465,7 +3816,7 @@@ static inline int l2cap_disconnect_rsp(
l2cap_chan_unlock(chan);
- chan->ops->close(chan->data); + chan->ops->close(chan); l2cap_chan_put(chan);
mutex_unlock(&conn->chan_lock); @@@ -3835,8 -3486,8 +3837,8 @@@ static inline int l2cap_information_req u8 buf[8]; u32 feat_mask = l2cap_feat_mask; struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf; - rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK); - rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS); + rsp->type = __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK); + rsp->result = __constant_cpu_to_le16(L2CAP_IR_SUCCESS); if (!disable_ertm) feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING | L2CAP_FEAT_FCS; @@@ -3856,15 -3507,15 +3858,15 @@@ else l2cap_fixed_chan[0] &= ~L2CAP_FC_A2MP;
- rsp->type = cpu_to_le16(L2CAP_IT_FIXED_CHAN); - rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS); + rsp->type = __constant_cpu_to_le16(L2CAP_IT_FIXED_CHAN); + rsp->result = __constant_cpu_to_le16(L2CAP_IR_SUCCESS); memcpy(rsp->data, l2cap_fixed_chan, sizeof(l2cap_fixed_chan)); l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf), buf); } else { struct l2cap_info_rsp rsp; rsp.type = cpu_to_le16(type); - rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP); + rsp.result = __constant_cpu_to_le16(L2CAP_IR_NOTSUPP); l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(rsp), &rsp); } @@@ -3904,7 -3555,7 +3906,7 @@@ static inline int l2cap_information_rsp
if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) { struct l2cap_info_req req; - req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN); + req.type = __constant_cpu_to_le16(L2CAP_IT_FIXED_CHAN);
conn->info_ident = l2cap_get_ident(conn);
@@@ -4139,9 -3790,9 +4141,9 @@@ static inline int l2cap_conn_param_upda
err = l2cap_check_conn_param(min, max, latency, to_multiplier); if (err) - rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_REJECTED); + rsp.result = __constant_cpu_to_le16(L2CAP_CONN_PARAM_REJECTED); else - rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED); + rsp.result = __constant_cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);
l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP, sizeof(rsp), &rsp); @@@ -4289,7 -3940,7 +4291,7 @@@ static inline void l2cap_sig_channel(st BT_ERR("Wrong link type (%d)", err);
/* FIXME: Map err to a valid reason */ - rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD); + rej.reason = __constant_cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD); l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej); }
@@@ -4321,38 -3972,65 +4323,38 @@@ static int l2cap_check_fcs(struct l2cap return 0; }
-static inline void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan) +static void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan) { - u32 control = 0; + struct l2cap_ctrl control;
- chan->frames_sent = 0; + BT_DBG("chan %p", chan);
- control |= __set_reqseq(chan, chan->buffer_seq); + memset(&control, 0, sizeof(control)); + control.sframe = 1; + control.final = 1; + control.reqseq = chan->buffer_seq; + set_bit(CONN_SEND_FBIT, &chan->conn_state);
if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) { - control |= __set_ctrl_super(chan, L2CAP_SUPER_RNR); - l2cap_send_sframe(chan, control); - set_bit(CONN_RNR_SENT, &chan->conn_state); + control.super = L2CAP_SUPER_RNR; + l2cap_send_sframe(chan, &control); }
- if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state)) - l2cap_retransmit_frames(chan); + if (test_and_clear_bit(CONN_REMOTE_BUSY, &chan->conn_state) && + chan->unacked_frames > 0) + __set_retrans_timer(chan);
+ /* Send pending iframes */ l2cap_ertm_send(chan);
if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) && - chan->frames_sent == 0) { - control |= __set_ctrl_super(chan, L2CAP_SUPER_RR); - l2cap_send_sframe(chan, control); - } -} - -static int l2cap_add_to_srej_queue(struct l2cap_chan *chan, struct sk_buff *skb, u16 tx_seq, u8 sar) -{ - struct sk_buff *next_skb; - int tx_seq_offset, next_tx_seq_offset; - - bt_cb(skb)->control.txseq = tx_seq; - bt_cb(skb)->control.sar = sar; - - next_skb = skb_peek(&chan->srej_q); - - tx_seq_offset = __seq_offset(chan, tx_seq, chan->buffer_seq); - - while (next_skb) { - if (bt_cb(next_skb)->control.txseq == tx_seq) - return -EINVAL; - - next_tx_seq_offset = __seq_offset(chan, - bt_cb(next_skb)->control.txseq, chan->buffer_seq); - - if (next_tx_seq_offset > tx_seq_offset) { - __skb_queue_before(&chan->srej_q, next_skb, skb); - return 0; - } - - if (skb_queue_is_last(&chan->srej_q, next_skb)) - next_skb = NULL; - else - next_skb = skb_queue_next(&chan->srej_q, next_skb); + test_bit(CONN_SEND_FBIT, &chan->conn_state)) { + /* F-bit wasn't sent in an s-frame or i-frame yet, so + * send it now. + */ + control.super = L2CAP_SUPER_RR; + l2cap_send_sframe(chan, &control); } - - __skb_queue_tail(&chan->srej_q, skb); - - return 0; }
static void append_skb_frag(struct sk_buff *skb, @@@ -4374,17 -4052,16 +4376,17 @@@ skb->truesize += new_frag->truesize; }
-static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb, u32 control) +static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb, + struct l2cap_ctrl *control) { int err = -EINVAL;
- switch (__get_ctrl_sar(chan, control)) { + switch (control->sar) { case L2CAP_SAR_UNSEGMENTED: if (chan->sdu) break;
- err = chan->ops->recv(chan->data, skb); + err = chan->ops->recv(chan, skb); break;
case L2CAP_SAR_START: @@@ -4434,7 -4111,7 +4436,7 @@@ if (chan->sdu->len != chan->sdu_len) break;
- err = chan->ops->recv(chan->data, chan->sdu); + err = chan->ops->recv(chan, chan->sdu);
if (!err) { /* Reassembly complete */ @@@ -4456,609 -4133,448 +4458,609 @@@ return err; }
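l2cap_reassemble_sdu() now receives the decoded control structure and branches on control->sar: UNSEGMENTED frames are delivered directly, a START frame records the announced SDU length, CONTINUE frames append, and the END frame must bring the accumulated length to exactly that announced size or the SDU is rejected. A compact standalone model of that bookkeeping (plain C with a fixed buffer; error handling and size limits are simplified assumptions):

#include <stdio.h>
#include <string.h>

enum sar_type { SAR_UNSEG, SAR_START, SAR_CONT, SAR_END };

struct reasm {
    char buf[1024];
    size_t have, want;      /* bytes collected / announced SDU length */
    int in_progress;
};

/* Returns 1 when a complete SDU sits in r->buf, 0 while still
 * collecting fragments, -1 on a sequencing or length error.
 */
static int reassemble(struct reasm *r, enum sar_type sar,
                      const char *frag, size_t len, size_t sdu_len)
{
    switch (sar) {
    case SAR_UNSEG:
        if (r->in_progress || len > sizeof(r->buf))
            return -1;
        memcpy(r->buf, frag, len);
        r->have = len;
        return 1;
    case SAR_START:
        if (r->in_progress || sdu_len > sizeof(r->buf))
            return -1;
        r->want = sdu_len;
        r->have = 0;
        r->in_progress = 1;
        /* fall through: the start frame carries the first fragment */
    case SAR_CONT:
    case SAR_END:
        if (!r->in_progress || r->have + len > r->want)
            return -1;
        memcpy(r->buf + r->have, frag, len);
        r->have += len;
        if (sar != SAR_END)
            return 0;
        r->in_progress = 0;
        return r->have == r->want ? 1 : -1;
    }
    return -1;
}

int main(void)
{
    struct reasm r = { .in_progress = 0 };

    reassemble(&r, SAR_START, "Hel", 3, 5);
    reassemble(&r, SAR_CONT,  "l",   1, 0);
    if (reassemble(&r, SAR_END, "o", 1, 0) == 1)
        printf("SDU: %.*s\n", (int)r.have, r.buf);
    return 0;
}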
-static void l2cap_ertm_enter_local_busy(struct l2cap_chan *chan) +void l2cap_chan_busy(struct l2cap_chan *chan, int busy) { - BT_DBG("chan %p, Enter local busy", chan); + u8 event;
- set_bit(CONN_LOCAL_BUSY, &chan->conn_state); - l2cap_seq_list_clear(&chan->srej_list); + if (chan->mode != L2CAP_MODE_ERTM) + return;
- __set_ack_timer(chan); + event = busy ? L2CAP_EV_LOCAL_BUSY_DETECTED : L2CAP_EV_LOCAL_BUSY_CLEAR; + l2cap_tx(chan, NULL, NULL, event); }
-static void l2cap_ertm_exit_local_busy(struct l2cap_chan *chan) +static int l2cap_rx_queued_iframes(struct l2cap_chan *chan) { - u32 control; - - if (!test_bit(CONN_RNR_SENT, &chan->conn_state)) - goto done; + int err = 0; + /* Pass sequential frames to l2cap_reassemble_sdu() + * until a gap is encountered. + */
- control = __set_reqseq(chan, chan->buffer_seq); - control |= __set_ctrl_poll(chan); - control |= __set_ctrl_super(chan, L2CAP_SUPER_RR); - l2cap_send_sframe(chan, control); - chan->retry_count = 1; + BT_DBG("chan %p", chan);
- __clear_retrans_timer(chan); - __set_monitor_timer(chan); + while (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) { + struct sk_buff *skb; + BT_DBG("Searching for skb with txseq %d (queue len %d)", + chan->buffer_seq, skb_queue_len(&chan->srej_q));
- set_bit(CONN_WAIT_F, &chan->conn_state); + skb = l2cap_ertm_seq_in_queue(&chan->srej_q, chan->buffer_seq);
-done: - clear_bit(CONN_LOCAL_BUSY, &chan->conn_state); - clear_bit(CONN_RNR_SENT, &chan->conn_state); + if (!skb) + break;
- BT_DBG("chan %p, Exit local busy", chan); -} + skb_unlink(skb, &chan->srej_q); + chan->buffer_seq = __next_seq(chan, chan->buffer_seq); + err = l2cap_reassemble_sdu(chan, skb, &bt_cb(skb)->control); + if (err) + break; + }
-void l2cap_chan_busy(struct l2cap_chan *chan, int busy) -{ - if (chan->mode == L2CAP_MODE_ERTM) { - if (busy) - l2cap_ertm_enter_local_busy(chan); - else - l2cap_ertm_exit_local_busy(chan); + if (skb_queue_empty(&chan->srej_q)) { + chan->rx_state = L2CAP_RX_STATE_RECV; + l2cap_send_ack(chan); } + + return err; }
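l2cap_rx_queued_iframes() drains the SREJ holding queue strictly in order: it repeatedly pulls the frame whose txseq matches buffer_seq, advances buffer_seq, and stops at the first gap or when local busy is asserted; once the queue empties, the channel drops back to the RECV state and acks. A minimal numeric sketch of that drain, assuming the 6-bit sequence space:

#include <stdio.h>

#define SEQ_MOD 64                      /* 6-bit ERTM sequence space */

int main(void)
{
    int have[SEQ_MOD] = { 0 };          /* frames held in srej_q, by txseq */
    unsigned buffer_seq = 10;

    have[10] = have[11] = have[13] = 1; /* txseq 12 is still missing */

    while (have[buffer_seq]) {
        printf("deliver txseq %u\n", buffer_seq);
        have[buffer_seq] = 0;
        buffer_seq = (buffer_seq + 1) % SEQ_MOD;
    }
    printf("stop at the gap, buffer_seq now %u\n", buffer_seq);
    return 0;
}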
-static void l2cap_check_srej_gap(struct l2cap_chan *chan, u16 tx_seq) +static void l2cap_handle_srej(struct l2cap_chan *chan, + struct l2cap_ctrl *control) { struct sk_buff *skb; - u32 control;
- while ((skb = skb_peek(&chan->srej_q)) && - !test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) { - int err; + BT_DBG("chan %p, control %p", chan, control);
- if (bt_cb(skb)->control.txseq != tx_seq) - break; + if (control->reqseq == chan->next_tx_seq) { + BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq); + l2cap_send_disconn_req(chan->conn, chan, ECONNRESET); + return; + }
- skb = skb_dequeue(&chan->srej_q); - control = __set_ctrl_sar(chan, bt_cb(skb)->control.sar); - err = l2cap_reassemble_sdu(chan, skb, control); + skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);
- if (err < 0) { - l2cap_send_disconn_req(chan->conn, chan, ECONNRESET); - break; - } + if (skb == NULL) { + BT_DBG("Seq %d not available for retransmission", + control->reqseq); + return; + }
- chan->buffer_seq_srej = __next_seq(chan, chan->buffer_seq_srej); - tx_seq = __next_seq(chan, tx_seq); + if (chan->max_tx != 0 && bt_cb(skb)->control.retries >= chan->max_tx) { + BT_DBG("Retry limit exceeded (%d)", chan->max_tx); + l2cap_send_disconn_req(chan->conn, chan, ECONNRESET); + return; } -}
-static void l2cap_resend_srejframe(struct l2cap_chan *chan, u16 tx_seq) -{ - struct srej_list *l, *tmp; - u32 control; + clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
- list_for_each_entry_safe(l, tmp, &chan->srej_l, list) { - if (l->tx_seq == tx_seq) { - list_del(&l->list); - kfree(l); - return; + if (control->poll) { + l2cap_pass_to_tx(chan, control); + + set_bit(CONN_SEND_FBIT, &chan->conn_state); + l2cap_retransmit(chan, control); + l2cap_ertm_send(chan); + + if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) { + set_bit(CONN_SREJ_ACT, &chan->conn_state); + chan->srej_save_reqseq = control->reqseq; + } + } else { + l2cap_pass_to_tx_fbit(chan, control); + + if (control->final) { + if (chan->srej_save_reqseq != control->reqseq || + !test_and_clear_bit(CONN_SREJ_ACT, + &chan->conn_state)) + l2cap_retransmit(chan, control); + } else { + l2cap_retransmit(chan, control); + if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) { + set_bit(CONN_SREJ_ACT, &chan->conn_state); + chan->srej_save_reqseq = control->reqseq; + } } } }
-static int l2cap_send_srejframe(struct l2cap_chan *chan, u16 tx_seq) +static void l2cap_handle_rej(struct l2cap_chan *chan, + struct l2cap_ctrl *control) { - struct srej_list *new; - u32 control; - - while (tx_seq != chan->expected_tx_seq) { - control = __set_ctrl_super(chan, L2CAP_SUPER_SREJ); - control |= __set_reqseq(chan, chan->expected_tx_seq); - l2cap_seq_list_append(&chan->srej_list, chan->expected_tx_seq); - l2cap_send_sframe(chan, control); + struct sk_buff *skb;
- new = kzalloc(sizeof(struct srej_list), GFP_ATOMIC); - if (!new) - return -ENOMEM; + BT_DBG("chan %p, control %p", chan, control);
- new->tx_seq = chan->expected_tx_seq; + if (control->reqseq == chan->next_tx_seq) { + BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq); + l2cap_send_disconn_req(chan->conn, chan, ECONNRESET); + return; + }
- chan->expected_tx_seq = __next_seq(chan, chan->expected_tx_seq); + skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);
- list_add_tail(&new->list, &chan->srej_l); + if (chan->max_tx && skb && + bt_cb(skb)->control.retries >= chan->max_tx) { + BT_DBG("Retry limit exceeded (%d)", chan->max_tx); + l2cap_send_disconn_req(chan->conn, chan, ECONNRESET); + return; }
- chan->expected_tx_seq = __next_seq(chan, chan->expected_tx_seq); + clear_bit(CONN_REMOTE_BUSY, &chan->conn_state); + + l2cap_pass_to_tx(chan, control);
- return 0; + if (control->final) { + if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state)) + l2cap_retransmit_all(chan, control); + } else { + l2cap_retransmit_all(chan, control); + l2cap_ertm_send(chan); + if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) + set_bit(CONN_REJ_ACT, &chan->conn_state); + } }
-static inline int l2cap_data_channel_iframe(struct l2cap_chan *chan, u32 rx_control, struct sk_buff *skb) +static u8 l2cap_classify_txseq(struct l2cap_chan *chan, u16 txseq) { - u16 tx_seq = __get_txseq(chan, rx_control); - u16 req_seq = __get_reqseq(chan, rx_control); - u8 sar = __get_ctrl_sar(chan, rx_control); - int tx_seq_offset, expected_tx_seq_offset; - int num_to_ack = (chan->tx_win/6) + 1; - int err = 0; + BT_DBG("chan %p, txseq %d", chan, txseq);
- BT_DBG("chan %p len %d tx_seq %d rx_control 0x%8.8x", chan, skb->len, - tx_seq, rx_control); + BT_DBG("last_acked_seq %d, expected_tx_seq %d", chan->last_acked_seq, + chan->expected_tx_seq);
- if (__is_ctrl_final(chan, rx_control) && - test_bit(CONN_WAIT_F, &chan->conn_state)) { - __clear_monitor_timer(chan); - if (chan->unacked_frames > 0) - __set_retrans_timer(chan); - clear_bit(CONN_WAIT_F, &chan->conn_state); - } + if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) { + if (__seq_offset(chan, txseq, chan->last_acked_seq) >= + chan->tx_win) { + /* See notes below regarding "double poll" and + * invalid packets. + */ + if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) { + BT_DBG("Invalid/Ignore - after SREJ"); + return L2CAP_TXSEQ_INVALID_IGNORE; + } else { + BT_DBG("Invalid - in window after SREJ sent"); + return L2CAP_TXSEQ_INVALID; + } + }
- chan->expected_ack_seq = req_seq; - l2cap_drop_acked_frames(chan); + if (chan->srej_list.head == txseq) { + BT_DBG("Expected SREJ"); + return L2CAP_TXSEQ_EXPECTED_SREJ; + }
- tx_seq_offset = __seq_offset(chan, tx_seq, chan->buffer_seq); + if (l2cap_ertm_seq_in_queue(&chan->srej_q, txseq)) { + BT_DBG("Duplicate SREJ - txseq already stored"); + return L2CAP_TXSEQ_DUPLICATE_SREJ; + }
- /* invalid tx_seq */ - if (tx_seq_offset >= chan->tx_win) { - l2cap_send_disconn_req(chan->conn, chan, ECONNRESET); - goto drop; + if (l2cap_seq_list_contains(&chan->srej_list, txseq)) { + BT_DBG("Unexpected SREJ - not requested"); + return L2CAP_TXSEQ_UNEXPECTED_SREJ; + } }
- if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) { - if (!test_bit(CONN_RNR_SENT, &chan->conn_state)) - l2cap_send_ack(chan); - goto drop; + if (chan->expected_tx_seq == txseq) { + if (__seq_offset(chan, txseq, chan->last_acked_seq) >= + chan->tx_win) { + BT_DBG("Invalid - txseq outside tx window"); + return L2CAP_TXSEQ_INVALID; + } else { + BT_DBG("Expected"); + return L2CAP_TXSEQ_EXPECTED; + } }
- if (tx_seq == chan->expected_tx_seq) - goto expected; + if (__seq_offset(chan, txseq, chan->last_acked_seq) < + __seq_offset(chan, chan->expected_tx_seq, + chan->last_acked_seq)){ + BT_DBG("Duplicate - expected_tx_seq later than txseq"); + return L2CAP_TXSEQ_DUPLICATE; + } + + if (__seq_offset(chan, txseq, chan->last_acked_seq) >= chan->tx_win) { + /* A source of invalid packets is a "double poll" condition, + * where delays cause us to send multiple poll packets. If + * the remote stack receives and processes both polls, + * sequence numbers can wrap around in such a way that a + * resent frame has a sequence number that looks like new data + * with a sequence gap. This would trigger an erroneous SREJ + * request. + * + * Fortunately, this is impossible with a tx window that's + * less than half of the maximum sequence number, which allows + * invalid frames to be safely ignored. + * + * With tx window sizes greater than half of the tx window + * maximum, the frame is invalid and cannot be ignored. This + * causes a disconnect. + */
- if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) { - struct srej_list *first; + if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) { + BT_DBG("Invalid/Ignore - txseq outside tx window"); + return L2CAP_TXSEQ_INVALID_IGNORE; + } else { + BT_DBG("Invalid - txseq outside tx window"); + return L2CAP_TXSEQ_INVALID; + } + } else { + BT_DBG("Unexpected - txseq indicates missing frames"); + return L2CAP_TXSEQ_UNEXPECTED; + } +}
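l2cap_classify_txseq() reduces everything to modular distances from last_acked_seq, where __seq_offset(chan, a, b) is (a - b) modulo the sequence space. A txseq equal to expected_tx_seq and inside the window is expected, a smaller offset than expected_tx_seq's marks a duplicate, and an offset at or past tx_win is invalid — silently ignorable only when the window is at most half the sequence space, which is exactly the property the "double poll" comment relies on. A small standalone check of those regions (6-bit, 64-value sequence space assumed; window and sequence values are invented):

#include <stdio.h>

#define SEQ_MOD 64                          /* tx_win_max + 1 */

static unsigned seq_offset(unsigned a, unsigned b)
{
    return (a + SEQ_MOD - b) % SEQ_MOD;     /* (a - b) mod 64 */
}

int main(void)
{
    unsigned last_acked = 60, expected = 62, tx_win = 10;
    unsigned txseq;

    for (txseq = 58; txseq != 8; txseq = (txseq + 1) % SEQ_MOD) {
        unsigned off = seq_offset(txseq, last_acked);

        if (off >= tx_win)
            printf("txseq %2u: invalid (offset %u)\n", txseq, off);
        else if (txseq == expected)
            printf("txseq %2u: expected\n", txseq);
        else if (off < seq_offset(expected, last_acked))
            printf("txseq %2u: duplicate\n", txseq);
        else
            printf("txseq %2u: unexpected, frames missing\n", txseq);
    }
    return 0;
}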
- first = list_first_entry(&chan->srej_l, - struct srej_list, list); - if (tx_seq == first->tx_seq) { - l2cap_add_to_srej_queue(chan, skb, tx_seq, sar); - l2cap_check_srej_gap(chan, tx_seq); +static int l2cap_rx_state_recv(struct l2cap_chan *chan, + struct l2cap_ctrl *control, + struct sk_buff *skb, u8 event) +{ + int err = 0; + bool skb_in_use = 0;
- list_del(&first->list); - kfree(first); + BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb, + event);
- if (list_empty(&chan->srej_l)) { - chan->buffer_seq = chan->buffer_seq_srej; - clear_bit(CONN_SREJ_SENT, &chan->conn_state); - l2cap_send_ack(chan); - BT_DBG("chan %p, Exit SREJ_SENT", chan); + switch (event) { + case L2CAP_EV_RECV_IFRAME: + switch (l2cap_classify_txseq(chan, control->txseq)) { + case L2CAP_TXSEQ_EXPECTED: + l2cap_pass_to_tx(chan, control); + + if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) { + BT_DBG("Busy, discarding expected seq %d", + control->txseq); + break; } - } else { - struct srej_list *l;
- /* duplicated tx_seq */ - if (l2cap_add_to_srej_queue(chan, skb, tx_seq, sar) < 0) - goto drop; + chan->expected_tx_seq = __next_seq(chan, + control->txseq); + + chan->buffer_seq = chan->expected_tx_seq; + skb_in_use = 1;
- list_for_each_entry(l, &chan->srej_l, list) { - if (l->tx_seq == tx_seq) { - l2cap_resend_srejframe(chan, tx_seq); - return 0; + err = l2cap_reassemble_sdu(chan, skb, control); + if (err) + break; + + if (control->final) { + if (!test_and_clear_bit(CONN_REJ_ACT, + &chan->conn_state)) { + control->final = 0; + l2cap_retransmit_all(chan, control); + l2cap_ertm_send(chan); } }
- err = l2cap_send_srejframe(chan, tx_seq); - if (err < 0) { - l2cap_send_disconn_req(chan->conn, chan, -err); - return err; + if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) + l2cap_send_ack(chan); + break; + case L2CAP_TXSEQ_UNEXPECTED: + l2cap_pass_to_tx(chan, control); + + /* Can't issue SREJ frames in the local busy state. + * Drop this frame, it will be seen as missing + * when local busy is exited. + */ + if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) { + BT_DBG("Busy, discarding unexpected seq %d", + control->txseq); + break; } - } - } else { - expected_tx_seq_offset = __seq_offset(chan, - chan->expected_tx_seq, chan->buffer_seq);
- /* duplicated tx_seq */ - if (tx_seq_offset < expected_tx_seq_offset) - goto drop; - - set_bit(CONN_SREJ_SENT, &chan->conn_state); + /* There was a gap in the sequence, so an SREJ + * must be sent for each missing frame. The + * current frame is stored for later use. + */ + skb_queue_tail(&chan->srej_q, skb); + skb_in_use = 1; + BT_DBG("Queued %p (queue len %d)", skb, + skb_queue_len(&chan->srej_q));
- BT_DBG("chan %p, Enter SREJ", chan); + clear_bit(CONN_SREJ_ACT, &chan->conn_state); + l2cap_seq_list_clear(&chan->srej_list); + l2cap_send_srej(chan, control->txseq);
- INIT_LIST_HEAD(&chan->srej_l); - chan->buffer_seq_srej = chan->buffer_seq; + chan->rx_state = L2CAP_RX_STATE_SREJ_SENT; + break; + case L2CAP_TXSEQ_DUPLICATE: + l2cap_pass_to_tx(chan, control); + break; + case L2CAP_TXSEQ_INVALID_IGNORE: + break; + case L2CAP_TXSEQ_INVALID: + default: + l2cap_send_disconn_req(chan->conn, chan, + ECONNRESET); + break; + } + break; + case L2CAP_EV_RECV_RR: + l2cap_pass_to_tx(chan, control); + if (control->final) { + clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
- __skb_queue_head_init(&chan->srej_q); - l2cap_add_to_srej_queue(chan, skb, tx_seq, sar); + if (!test_and_clear_bit(CONN_REJ_ACT, + &chan->conn_state)) { + control->final = 0; + l2cap_retransmit_all(chan, control); + }
- /* Set P-bit only if there are some I-frames to ack. */ - if (__clear_ack_timer(chan)) - set_bit(CONN_SEND_PBIT, &chan->conn_state); + l2cap_ertm_send(chan); + } else if (control->poll) { + l2cap_send_i_or_rr_or_rnr(chan); + } else { + if (test_and_clear_bit(CONN_REMOTE_BUSY, + &chan->conn_state) && + chan->unacked_frames) + __set_retrans_timer(chan);
- err = l2cap_send_srejframe(chan, tx_seq); - if (err < 0) { - l2cap_send_disconn_req(chan->conn, chan, -err); - return err; + l2cap_ertm_send(chan); } - } - return 0; - -expected: - chan->expected_tx_seq = __next_seq(chan, chan->expected_tx_seq); - - if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) { - bt_cb(skb)->control.txseq = tx_seq; - bt_cb(skb)->control.sar = sar; - __skb_queue_tail(&chan->srej_q, skb); - return 0; + break; + case L2CAP_EV_RECV_RNR: + set_bit(CONN_REMOTE_BUSY, &chan->conn_state); + l2cap_pass_to_tx(chan, control); + if (control && control->poll) { + set_bit(CONN_SEND_FBIT, &chan->conn_state); + l2cap_send_rr_or_rnr(chan, 0); + } + __clear_retrans_timer(chan); + l2cap_seq_list_clear(&chan->retrans_list); + break; + case L2CAP_EV_RECV_REJ: + l2cap_handle_rej(chan, control); + break; + case L2CAP_EV_RECV_SREJ: + l2cap_handle_srej(chan, control); + break; + default: + break; }
- err = l2cap_reassemble_sdu(chan, skb, rx_control); - chan->buffer_seq = __next_seq(chan, chan->buffer_seq); - - if (err < 0) { - l2cap_send_disconn_req(chan->conn, chan, ECONNRESET); - return err; + if (skb && !skb_in_use) { + BT_DBG("Freeing %p", skb); + kfree_skb(skb); }
- if (__is_ctrl_final(chan, rx_control)) { - if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state)) - l2cap_retransmit_frames(chan); - } + return err; +}
+static int l2cap_rx_state_srej_sent(struct l2cap_chan *chan, + struct l2cap_ctrl *control, + struct sk_buff *skb, u8 event) +{ + int err = 0; + u16 txseq = control->txseq; + bool skb_in_use = 0; + + BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb, + event); + + switch (event) { + case L2CAP_EV_RECV_IFRAME: + switch (l2cap_classify_txseq(chan, txseq)) { + case L2CAP_TXSEQ_EXPECTED: + /* Keep frame for reassembly later */ + l2cap_pass_to_tx(chan, control); + skb_queue_tail(&chan->srej_q, skb); + skb_in_use = 1; + BT_DBG("Queued %p (queue len %d)", skb, + skb_queue_len(&chan->srej_q)); + + chan->expected_tx_seq = __next_seq(chan, txseq); + break; + case L2CAP_TXSEQ_EXPECTED_SREJ: + l2cap_seq_list_pop(&chan->srej_list);
- chan->num_acked = (chan->num_acked + 1) % num_to_ack; - if (chan->num_acked == num_to_ack - 1) - l2cap_send_ack(chan); - else - __set_ack_timer(chan); + l2cap_pass_to_tx(chan, control); + skb_queue_tail(&chan->srej_q, skb); + skb_in_use = 1; + BT_DBG("Queued %p (queue len %d)", skb, + skb_queue_len(&chan->srej_q));
- return 0; + err = l2cap_rx_queued_iframes(chan); + if (err) + break;
-drop: - kfree_skb(skb); - return 0; -} + break; + case L2CAP_TXSEQ_UNEXPECTED: + /* Got a frame that can't be reassembled yet. + * Save it for later, and send SREJs to cover + * the missing frames. + */ + skb_queue_tail(&chan->srej_q, skb); + skb_in_use = 1; + BT_DBG("Queued %p (queue len %d)", skb, + skb_queue_len(&chan->srej_q)); + + l2cap_pass_to_tx(chan, control); + l2cap_send_srej(chan, control->txseq); + break; + case L2CAP_TXSEQ_UNEXPECTED_SREJ: + /* This frame was requested with an SREJ, but + * some expected retransmitted frames are + * missing. Request retransmission of missing + * SREJ'd frames. + */ + skb_queue_tail(&chan->srej_q, skb); + skb_in_use = 1; + BT_DBG("Queued %p (queue len %d)", skb, + skb_queue_len(&chan->srej_q)); + + l2cap_pass_to_tx(chan, control); + l2cap_send_srej_list(chan, control->txseq); + break; + case L2CAP_TXSEQ_DUPLICATE_SREJ: + /* We've already queued this frame. Drop this copy. */ + l2cap_pass_to_tx(chan, control); + break; + case L2CAP_TXSEQ_DUPLICATE: + /* Expecting a later sequence number, so this frame + * was already received. Ignore it completely. + */ + break; + case L2CAP_TXSEQ_INVALID_IGNORE: + break; + case L2CAP_TXSEQ_INVALID: + default: + l2cap_send_disconn_req(chan->conn, chan, + ECONNRESET); + break; + } + break; + case L2CAP_EV_RECV_RR: + l2cap_pass_to_tx(chan, control); + if (control->final) { + clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
-static inline void l2cap_data_channel_rrframe(struct l2cap_chan *chan, u32 rx_control) -{ - BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan, - __get_reqseq(chan, rx_control), rx_control); + if (!test_and_clear_bit(CONN_REJ_ACT, + &chan->conn_state)) { + control->final = 0; + l2cap_retransmit_all(chan, control); + }
- chan->expected_ack_seq = __get_reqseq(chan, rx_control); - l2cap_drop_acked_frames(chan); + l2cap_ertm_send(chan); + } else if (control->poll) { + if (test_and_clear_bit(CONN_REMOTE_BUSY, + &chan->conn_state) && + chan->unacked_frames) { + __set_retrans_timer(chan); + }
- if (__is_ctrl_poll(chan, rx_control)) { - set_bit(CONN_SEND_FBIT, &chan->conn_state); - if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) { - if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state) && - (chan->unacked_frames > 0)) + set_bit(CONN_SEND_FBIT, &chan->conn_state); + l2cap_send_srej_tail(chan); + } else { + if (test_and_clear_bit(CONN_REMOTE_BUSY, + &chan->conn_state) && + chan->unacked_frames) __set_retrans_timer(chan);
- clear_bit(CONN_REMOTE_BUSY, &chan->conn_state); - l2cap_send_srejtail(chan); + l2cap_send_ack(chan); + } + break; + case L2CAP_EV_RECV_RNR: + set_bit(CONN_REMOTE_BUSY, &chan->conn_state); + l2cap_pass_to_tx(chan, control); + if (control->poll) { + l2cap_send_srej_tail(chan); } else { - l2cap_send_i_or_rr_or_rnr(chan); + struct l2cap_ctrl rr_control; + memset(&rr_control, 0, sizeof(rr_control)); + rr_control.sframe = 1; + rr_control.super = L2CAP_SUPER_RR; + rr_control.reqseq = chan->buffer_seq; + l2cap_send_sframe(chan, &rr_control); }
- } else if (__is_ctrl_final(chan, rx_control)) { - clear_bit(CONN_REMOTE_BUSY, &chan->conn_state); - - if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state)) - l2cap_retransmit_frames(chan); - - } else { - if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state) && - (chan->unacked_frames > 0)) - __set_retrans_timer(chan); + break; + case L2CAP_EV_RECV_REJ: + l2cap_handle_rej(chan, control); + break; + case L2CAP_EV_RECV_SREJ: + l2cap_handle_srej(chan, control); + break; + }
- clear_bit(CONN_REMOTE_BUSY, &chan->conn_state); - if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) - l2cap_send_ack(chan); - else - l2cap_ertm_send(chan); + if (skb && !skb_in_use) { + BT_DBG("Freeing %p", skb); + kfree_skb(skb); } + + return err; }
-static inline void l2cap_data_channel_rejframe(struct l2cap_chan *chan, u32 rx_control) +static bool __valid_reqseq(struct l2cap_chan *chan, u16 reqseq) { - u16 tx_seq = __get_reqseq(chan, rx_control); - - BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan, tx_seq, rx_control); - - clear_bit(CONN_REMOTE_BUSY, &chan->conn_state); - - chan->expected_ack_seq = tx_seq; - l2cap_drop_acked_frames(chan); + /* Make sure reqseq is for a packet that has been sent but not acked */ + u16 unacked;
- if (__is_ctrl_final(chan, rx_control)) { - if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state)) - l2cap_retransmit_frames(chan); - } else { - l2cap_retransmit_frames(chan); - - if (test_bit(CONN_WAIT_F, &chan->conn_state)) - set_bit(CONN_REJ_ACT, &chan->conn_state); - } + unacked = __seq_offset(chan, chan->next_tx_seq, chan->expected_ack_seq); + return __seq_offset(chan, chan->next_tx_seq, reqseq) <= unacked; } -static inline void l2cap_data_channel_srejframe(struct l2cap_chan *chan, u32 rx_control) -{ - u16 tx_seq = __get_reqseq(chan, rx_control);
- BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan, tx_seq, rx_control); - - clear_bit(CONN_REMOTE_BUSY, &chan->conn_state); - - if (__is_ctrl_poll(chan, rx_control)) { - chan->expected_ack_seq = tx_seq; - l2cap_drop_acked_frames(chan); - - set_bit(CONN_SEND_FBIT, &chan->conn_state); - l2cap_retransmit_one_frame(chan, tx_seq); +static int l2cap_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control, + struct sk_buff *skb, u8 event) +{ + int err = 0;
- l2cap_ertm_send(chan); + BT_DBG("chan %p, control %p, skb %p, event %d, state %d", chan, + control, skb, event, chan->rx_state);
- if (test_bit(CONN_WAIT_F, &chan->conn_state)) { - chan->srej_save_reqseq = tx_seq; - set_bit(CONN_SREJ_ACT, &chan->conn_state); + if (__valid_reqseq(chan, control->reqseq)) { + switch (chan->rx_state) { + case L2CAP_RX_STATE_RECV: + err = l2cap_rx_state_recv(chan, control, skb, event); + break; + case L2CAP_RX_STATE_SREJ_SENT: + err = l2cap_rx_state_srej_sent(chan, control, skb, + event); + break; + default: + /* shut it down */ + break; } - } else if (__is_ctrl_final(chan, rx_control)) { - if (test_bit(CONN_SREJ_ACT, &chan->conn_state) && - chan->srej_save_reqseq == tx_seq) - clear_bit(CONN_SREJ_ACT, &chan->conn_state); - else - l2cap_retransmit_one_frame(chan, tx_seq); } else { - l2cap_retransmit_one_frame(chan, tx_seq); - if (test_bit(CONN_WAIT_F, &chan->conn_state)) { - chan->srej_save_reqseq = tx_seq; - set_bit(CONN_SREJ_ACT, &chan->conn_state); - } + BT_DBG("Invalid reqseq %d (next_tx_seq %d, expected_ack_seq %d", + control->reqseq, chan->next_tx_seq, + chan->expected_ack_seq); + l2cap_send_disconn_req(chan->conn, chan, ECONNRESET); } + + return err; }
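The reqseq guard added to l2cap_rx() accepts only acknowledgements for frames that are currently in flight: the distance from next_tx_seq back to reqseq must not exceed the distance back to expected_ack_seq. A quick standalone check of __valid_reqseq()'s arithmetic (6-bit sequence space assumed, values invented):

#include <stdio.h>

#define SEQ_MOD 64

static unsigned seq_offset(unsigned a, unsigned b)
{
    return (a + SEQ_MOD - b) % SEQ_MOD;
}

/* Mirrors the check: reqseq must fall between expected_ack_seq and
 * next_tx_seq (inclusive), i.e. acknowledge only in-flight frames.
 */
static int valid_reqseq(unsigned next_tx, unsigned expected_ack,
                        unsigned reqseq)
{
    unsigned unacked = seq_offset(next_tx, expected_ack);

    return seq_offset(next_tx, reqseq) <= unacked;
}

int main(void)
{
    /* txseq 5..9 sent (next_tx_seq 10), nothing acked beyond 5 */
    printf("%d %d %d\n",
           valid_reqseq(10, 5, 7),      /* acks 5 and 6  -> valid   */
           valid_reqseq(10, 5, 10),     /* acks them all -> valid   */
           valid_reqseq(10, 5, 12));    /* never sent    -> invalid */
    return 0;
}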
-static inline void l2cap_data_channel_rnrframe(struct l2cap_chan *chan, u32 rx_control) +static int l2cap_stream_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control, + struct sk_buff *skb) { - u16 tx_seq = __get_reqseq(chan, rx_control); + int err = 0;
- BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan, tx_seq, rx_control); + BT_DBG("chan %p, control %p, skb %p, state %d", chan, control, skb, + chan->rx_state);
- set_bit(CONN_REMOTE_BUSY, &chan->conn_state); - chan->expected_ack_seq = tx_seq; - l2cap_drop_acked_frames(chan); + if (l2cap_classify_txseq(chan, control->txseq) == + L2CAP_TXSEQ_EXPECTED) { + l2cap_pass_to_tx(chan, control);
- if (__is_ctrl_poll(chan, rx_control)) - set_bit(CONN_SEND_FBIT, &chan->conn_state); + BT_DBG("buffer_seq %d->%d", chan->buffer_seq, + __next_seq(chan, chan->buffer_seq));
- if (!test_bit(CONN_SREJ_SENT, &chan->conn_state)) { - __clear_retrans_timer(chan); - if (__is_ctrl_poll(chan, rx_control)) - l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_FINAL); - return; - } + chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
- if (__is_ctrl_poll(chan, rx_control)) { - l2cap_send_srejtail(chan); + l2cap_reassemble_sdu(chan, skb, control); } else { - rx_control = __set_ctrl_super(chan, L2CAP_SUPER_RR); - l2cap_send_sframe(chan, rx_control); - } -} - -static inline int l2cap_data_channel_sframe(struct l2cap_chan *chan, u32 rx_control, struct sk_buff *skb) -{ - BT_DBG("chan %p rx_control 0x%8.8x len %d", chan, rx_control, skb->len); + if (chan->sdu) { + kfree_skb(chan->sdu); + chan->sdu = NULL; + } + chan->sdu_last_frag = NULL; + chan->sdu_len = 0;
- if (__is_ctrl_final(chan, rx_control) && - test_bit(CONN_WAIT_F, &chan->conn_state)) { - __clear_monitor_timer(chan); - if (chan->unacked_frames > 0) - __set_retrans_timer(chan); - clear_bit(CONN_WAIT_F, &chan->conn_state); + if (skb) { + BT_DBG("Freeing %p", skb); + kfree_skb(skb); + } }
- switch (__get_ctrl_super(chan, rx_control)) { - case L2CAP_SUPER_RR: - l2cap_data_channel_rrframe(chan, rx_control); - break; - - case L2CAP_SUPER_REJ: - l2cap_data_channel_rejframe(chan, rx_control); - break; - - case L2CAP_SUPER_SREJ: - l2cap_data_channel_srejframe(chan, rx_control); - break; - - case L2CAP_SUPER_RNR: - l2cap_data_channel_rnrframe(chan, rx_control); - break; - } + chan->last_acked_seq = control->txseq; + chan->expected_tx_seq = __next_seq(chan, control->txseq);
- kfree_skb(skb); - return 0; + return err; }
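Streaming mode keeps none of the ERTM recovery machinery: l2cap_stream_rx() delivers a frame only when its txseq is the expected one, and on any gap it throws away both the partially reassembled SDU and the stray frame, then resynchronises expected_tx_seq from the frame just seen. A minimal sketch of that policy (6-bit sequence space assumed):

#include <stdio.h>

#define SEQ_MOD 64

int main(void)
{
    unsigned expected_tx_seq = 4;
    unsigned rx[] = { 4, 5, 7, 8 };     /* txseq 6 was lost on the air */
    unsigned i;

    for (i = 0; i < 4; i++) {
        if (rx[i] == expected_tx_seq)
            printf("deliver txseq %u\n", rx[i]);
        else
            printf("gap before txseq %u: drop partial SDU and frame\n",
                   rx[i]);
        /* in both cases resync from the frame just seen */
        expected_tx_seq = (rx[i] + 1) % SEQ_MOD;
    }
    return 0;
}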
-static int l2cap_ertm_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb) +static int l2cap_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb) { - u32 control; - u16 req_seq; - int len, next_tx_seq_offset, req_seq_offset; + struct l2cap_ctrl *control = &bt_cb(skb)->control; + u16 len; + u8 event;
__unpack_control(chan, skb);
- control = __get_control(chan, skb->data); - skb_pull(skb, __ctrl_size(chan)); len = skb->len;
/* * We can just drop the corrupted I-frame here. * Receiver will miss it and start proper recovery - * procedures and ask retransmission. + * procedures and ask for retransmission. */ if (l2cap_check_fcs(chan, skb)) goto drop;
- if (__is_sar_start(chan, control) && !__is_sframe(chan, control)) + if (!control->sframe && control->sar == L2CAP_SAR_START) len -= L2CAP_SDULEN_SIZE;
if (chan->fcs == L2CAP_FCS_CRC16) @@@ -5069,57 -4585,34 +5071,57 @@@ goto drop; }
- req_seq = __get_reqseq(chan, control); - - req_seq_offset = __seq_offset(chan, req_seq, chan->expected_ack_seq); - - next_tx_seq_offset = __seq_offset(chan, chan->next_tx_seq, - chan->expected_ack_seq); + if (!control->sframe) { + int err;
- /* check for invalid req-seq */ - if (req_seq_offset > next_tx_seq_offset) { - l2cap_send_disconn_req(chan->conn, chan, ECONNRESET); - goto drop; - } + BT_DBG("iframe sar %d, reqseq %d, final %d, txseq %d", + control->sar, control->reqseq, control->final, + control->txseq);
- if (!__is_sframe(chan, control)) { - if (len < 0) { - l2cap_send_disconn_req(chan->conn, chan, ECONNRESET); + /* Validate F-bit - F=0 always valid, F=1 only + * valid in TX WAIT_F + */ + if (control->final && chan->tx_state != L2CAP_TX_STATE_WAIT_F) goto drop; + + if (chan->mode != L2CAP_MODE_STREAMING) { + event = L2CAP_EV_RECV_IFRAME; + err = l2cap_rx(chan, control, skb, event); + } else { + err = l2cap_stream_rx(chan, control, skb); }
- l2cap_data_channel_iframe(chan, control, skb); + if (err) + l2cap_send_disconn_req(chan->conn, chan, + ECONNRESET); } else { + const u8 rx_func_to_event[4] = { + L2CAP_EV_RECV_RR, L2CAP_EV_RECV_REJ, + L2CAP_EV_RECV_RNR, L2CAP_EV_RECV_SREJ + }; + + /* Only I-frames are expected in streaming mode */ + if (chan->mode == L2CAP_MODE_STREAMING) + goto drop; + + BT_DBG("sframe reqseq %d, final %d, poll %d, super %d", + control->reqseq, control->final, control->poll, + control->super); + if (len != 0) { BT_ERR("%d", len); l2cap_send_disconn_req(chan->conn, chan, ECONNRESET); goto drop; }
- l2cap_data_channel_sframe(chan, control, skb); + /* Validate F and P bits */ + if (control->final && (control->poll || + chan->tx_state != L2CAP_TX_STATE_WAIT_F)) + goto drop; + + event = rx_func_to_event[control->super]; + if (l2cap_rx(chan, control, skb, event)) + l2cap_send_disconn_req(chan->conn, chan, ECONNRESET); }
return 0; @@@ -5129,27 -4622,19 +5131,27 @@@ drop return 0; }
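For S-frames, l2cap_data_rcv() maps the 2-bit supervisory function straight to a state-machine event through rx_func_to_event[], which works because RR, REJ, RNR and SREJ are encoded as 0 through 3 in the L2CAP control field. A standalone sketch of the same table-driven dispatch (event and constant names here are illustrative, not the kernel's enum values):

#include <stdio.h>

/* Supervisory encodings per the L2CAP control field: 0..3. */
enum { SUPER_RR = 0, SUPER_REJ = 1, SUPER_RNR = 2, SUPER_SREJ = 3 };
enum { EV_RECV_RR, EV_RECV_REJ, EV_RECV_RNR, EV_RECV_SREJ };

int main(void)
{
    static const int rx_func_to_event[4] = {
        EV_RECV_RR, EV_RECV_REJ, EV_RECV_RNR, EV_RECV_SREJ
    };
    static const char *name[4] = { "RR", "REJ", "RNR", "SREJ" };
    int super;

    for (super = SUPER_RR; super <= SUPER_SREJ; super++)
        printf("s-frame %-4s -> event %d\n",
               name[super], rx_func_to_event[super]);
    return 0;
}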
-static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk_buff *skb) +static void l2cap_data_channel(struct l2cap_conn *conn, u16 cid, + struct sk_buff *skb) { struct l2cap_chan *chan;
chan = l2cap_get_chan_by_scid(conn, cid); if (!chan) { - BT_DBG("unknown cid 0x%4.4x", cid); - /* Drop packet and return */ - kfree_skb(skb); - return 0; + if (cid == L2CAP_CID_A2MP) { + chan = a2mp_channel_create(conn, skb); + if (!chan) { + kfree_skb(skb); + return; + } + + l2cap_chan_lock(chan); + } else { + BT_DBG("unknown cid 0x%4.4x", cid); + /* Drop packet and return */ + kfree_skb(skb); + return; + } }
BT_DBG("chan %p, len %d", chan, skb->len); @@@ -5167,13 -4652,49 +5169,13 @@@ if (chan->imtu < skb->len) goto drop;
- if (!chan->ops->recv(chan->data, skb)) + if (!chan->ops->recv(chan, skb)) goto done; break;
case L2CAP_MODE_ERTM: - l2cap_ertm_data_rcv(chan, skb); - - goto done; - case L2CAP_MODE_STREAMING: - control = __get_control(chan, skb->data); - skb_pull(skb, __ctrl_size(chan)); - len = skb->len; - - if (l2cap_check_fcs(chan, skb)) - goto drop; - - if (__is_sar_start(chan, control)) - len -= L2CAP_SDULEN_SIZE; - - if (chan->fcs == L2CAP_FCS_CRC16) - len -= L2CAP_FCS_SIZE; - - if (len > chan->mps || len < 0 || __is_sframe(chan, control)) - goto drop; - - tx_seq = __get_txseq(chan, control); - - if (chan->expected_tx_seq != tx_seq) { - /* Frame(s) missing - must discard partial SDU */ - kfree_skb(chan->sdu); - chan->sdu = NULL; - chan->sdu_last_frag = NULL; - chan->sdu_len = 0; - - /* TODO: Notify userland of missing data */ - } - - chan->expected_tx_seq = __next_seq(chan, tx_seq); - - if (l2cap_reassemble_sdu(chan, skb, control) == -EMSGSIZE) - l2cap_send_disconn_req(chan->conn, chan, ECONNRESET); - + l2cap_data_rcv(chan, skb); goto done;
default: @@@ -5186,10 -4707,11 +5188,10 @@@ drop
done: l2cap_chan_unlock(chan); - - return 0; }
-static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, struct sk_buff *skb) +static void l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, + struct sk_buff *skb) { struct l2cap_chan *chan;
@@@ -5205,15 -4727,17 +5207,15 @@@ if (chan->imtu < skb->len) goto drop;
- if (!chan->ops->recv(chan->data, skb)) - return 0; + if (!chan->ops->recv(chan, skb)) + return;
drop: kfree_skb(skb); - - return 0; }
-static inline int l2cap_att_channel(struct l2cap_conn *conn, u16 cid, - struct sk_buff *skb) +static void l2cap_att_channel(struct l2cap_conn *conn, u16 cid, + struct sk_buff *skb) { struct l2cap_chan *chan;
@@@ -5229,11 -4753,13 +5231,11 @@@ if (chan->imtu < skb->len) goto drop;
- if (!chan->ops->recv(chan->data, skb)) - return 0; + if (!chan->ops->recv(chan, skb)) + return;
drop: kfree_skb(skb); - - return 0; }
static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb) @@@ -5261,7 -4787,7 +5263,7 @@@
case L2CAP_CID_CONN_LESS: psm = get_unaligned((__le16 *) skb->data); - skb_pull(skb, 2); + skb_pull(skb, L2CAP_PSMLEN_SIZE); l2cap_conless_channel(conn, psm, skb); break;
@@@ -5455,17 -4981,6 +5457,17 @@@ int l2cap_security_cfm(struct hci_conn rsp.status = cpu_to_le16(stat); l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp); + + if (!test_bit(CONF_REQ_SENT, &chan->conf_state) && + res == L2CAP_CR_SUCCESS) { + char buf[128]; + set_bit(CONF_REQ_SENT, &chan->conf_state); + l2cap_send_cmd(conn, l2cap_get_ident(conn), + L2CAP_CONF_REQ, + l2cap_build_conf_req(chan, buf), + buf); + chan->num_conf_req++; + } }
l2cap_chan_unlock(chan); diff --combined net/bluetooth/mgmt.c index c72307c,3e5e336..a6e0f3d --- a/net/bluetooth/mgmt.c +++ b/net/bluetooth/mgmt.c @@@ -24,6 -24,8 +24,6 @@@
 /* Bluetooth HCI Management interface */

-#include <linux/kernel.h>
-#include <linux/uaccess.h>
 #include <linux/module.h>
 #include <asm/unaligned.h>

@@@ -712,8 -714,7 +712,8 @@@ static struct pending_cmd *mgmt_pending
 }
 static void mgmt_pending_foreach(u16 opcode, struct hci_dev *hdev,
-				 void (*cb)(struct pending_cmd *cmd, void *data),
+				 void (*cb)(struct pending_cmd *cmd,
+					    void *data),
 				 void *data)
 {
 	struct list_head *p, *n;
@@@ -870,7 -871,7 +870,7 @@@ static int set_discoverable(struct soc
 	}

 	if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
-	    mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
+	    mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
 		err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
 				 MGMT_STATUS_BUSY);
 		goto failed;
@@@ -977,7 -978,7 +977,7 @@@ static int set_connectable(struct sock 
 	}

 	if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
-	    mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
+	    mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
 		err = cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
 				 MGMT_STATUS_BUSY);
 		goto failed;
@@@ -1000,7 -1001,7 +1000,7 @@@
 			scan = 0;

 		if (test_bit(HCI_ISCAN, &hdev->flags) &&
-		    hdev->discov_timeout > 0)
+		    hdev->discov_timeout > 0)
 			cancel_delayed_work(&hdev->discov_off);
 	}

@@@ -1055,7 -1056,7 +1055,7 @@@ static int set_link_security(struct soc
 	bool changed = false;

 	if (!!cp->val != test_bit(HCI_LINK_SECURITY,
-				  &hdev->dev_flags)) {
+				  &hdev->dev_flags)) {
 		change_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
 		changed = true;
 	}
@@@ -1316,7 -1317,7 +1316,7 @@@ static bool enable_service_cache(struc
 }

 static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
-		       u16 len)
+		       u16 len)
 {
 	struct mgmt_cp_remove_uuid *cp = data;
 	struct pending_cmd *cmd;
@@@ -1441,7 -1442,7 +1441,7 @@@ unlock
 }

 static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
-			  u16 len)
+			  u16 len)
 {
 	struct mgmt_cp_load_link_keys *cp = data;
 	u16 key_count, expected_len;
@@@ -1453,13 -1454,13 +1453,13 @@@
 		       sizeof(struct mgmt_link_key_info);
 	if (expected_len != len) {
 		BT_ERR("load_link_keys: expected %u bytes, got %u bytes",
-			len, expected_len);
+		       len, expected_len);
 		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
 				  MGMT_STATUS_INVALID_PARAMS);
 	}

 	BT_DBG("%s debug_keys %u key_count %u", hdev->name, cp->debug_keys,
-		key_count);
+	       key_count);

 	hci_dev_lock(hdev);

@@@ -1534,10 -1535,10 +1534,10 @@@ static int unpair_device(struct sock *s
 	if (cp->disconnect) {
 		if (cp->addr.type == BDADDR_BREDR)
 			conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
-						       &cp->addr.bdaddr);
+						       &cp->addr.bdaddr);
 		else
 			conn = hci_conn_hash_lookup_ba(hdev, LE_LINK,
-						       &cp->addr.bdaddr);
+						       &cp->addr.bdaddr);
 	} else {
 		conn = NULL;
 	}
@@@ -1593,12 -1594,11 +1593,12 @@@ static int disconnect(struct sock *sk, 
 	}
 	if (cp->addr.type == BDADDR_BREDR)
-		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
+		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
+					       &cp->addr.bdaddr);
 	else
 		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);

-	if (!conn) {
+	if (!conn || conn->state == BT_OPEN || conn->state == BT_CLOSED) {
 		err = cmd_status(sk, hdev->id, MGMT_OP_DISCONNECT,
 				 MGMT_STATUS_NOT_CONNECTED);
 		goto failed;
@@@ -1813,7 -1813,7 +1813,7 @@@ static int set_io_capability(struct soc
 	hdev->io_capability = cp->io_capability;

 	BT_DBG("%s IO capability set to 0x%02x", hdev->name,
-	       hdev->io_capability);
+	       hdev->io_capability);

 	hci_dev_unlock(hdev);

@@@ -1821,7 -1821,7 +1821,7 @@@
 			 0);
 }

-static inline struct pending_cmd *find_pairing(struct hci_conn *conn)
+static struct pending_cmd *find_pairing(struct hci_conn *conn)
 {
 	struct hci_dev *hdev = conn->hdev;
 	struct pending_cmd *cmd;
@@@ -1927,15 -1927,8 +1927,15 @@@ static int pair_device(struct sock *sk 
 	rp.addr.type = cp->addr.type;
 	if (IS_ERR(conn)) {
+		int status;
+
+		if (PTR_ERR(conn) == -EBUSY)
+			status = MGMT_STATUS_BUSY;
+		else
+			status = MGMT_STATUS_CONNECT_FAILED;
+
 		err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
-				   MGMT_STATUS_CONNECT_FAILED, &rp,
+				   status, &rp,
 				   sizeof(rp));
 		goto unlock;
 	}
@@@ -1966,7 -1959,7 +1966,7 @@@
 	cmd->user_data = conn;

 	if (conn->state == BT_CONNECTED &&
-	    hci_conn_security(conn, sec_level, auth_type))
+	    hci_conn_security(conn, sec_level, auth_type))
 		pairing_complete(cmd, 0);

 	err = 0;
@@@ -2263,7 -2256,7 +2263,7 @@@ unlock
 }

 static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
-				  void *data, u16 len)
+				  void *data, u16 len)
 {
 	struct mgmt_cp_remove_remote_oob_data *cp = data;
 	u8 status;
@@@ -2432,7 -2425,7 +2432,7 @@@ static int stop_discovery(struct sock *

 	case DISCOVERY_RESOLVING:
 		e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY,
-						     NAME_PENDING);
+						     NAME_PENDING);
 		if (!e) {
 			mgmt_pending_remove(cmd);
 			err = cmd_complete(sk, hdev->id,
@@@ -2654,7 -2647,7 +2654,7 @@@ static int load_long_term_keys(struct s
 		       sizeof(struct mgmt_ltk_info);
 	if (expected_len != len) {
 		BT_ERR("load_keys: expected %u bytes, got %u bytes",
-			len, expected_len);
+		       len, expected_len);
 		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
 				  EINVAL);
 	}
@@@ -2779,7 -2772,7 +2779,7 @@@ int mgmt_control(struct sock *sk, struc
 	}

 	if (opcode >= ARRAY_SIZE(mgmt_handlers) ||
-	    mgmt_handlers[opcode].func == NULL) {
+	    mgmt_handlers[opcode].func == NULL) {
 		BT_DBG("Unknown op %u", opcode);
 		err = cmd_status(sk, index, opcode,
 				 MGMT_STATUS_UNKNOWN_COMMAND);
@@@ -2787,7 -2780,7 +2787,7 @@@
 	}

 	if ((hdev && opcode < MGMT_OP_READ_INFO) ||
-	    (!hdev && opcode >= MGMT_OP_READ_INFO)) {
+	    (!hdev && opcode >= MGMT_OP_READ_INFO)) {
 		err = cmd_status(sk, index, opcode,
 				 MGMT_STATUS_INVALID_INDEX);
 		goto done;
@@@ -2796,7 -2789,7 +2796,7 @@@
 	handler = &mgmt_handlers[opcode];

 	if ((handler->var_len && len < handler->data_len) ||
-	    (!handler->var_len && len != handler->data_len)) {
+	    (!handler->var_len && len != handler->data_len)) {
 		err = cmd_status(sk, index, opcode,
 				 MGMT_STATUS_INVALID_PARAMS);
 		goto done;
@@@ -2980,7 -2973,7 +2980,7 @@@ int mgmt_new_link_key(struct hci_dev *h
 	bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
 	ev.key.addr.type = BDADDR_BREDR;
 	ev.key.type = key->type;
-	memcpy(ev.key.val, key->val, 16);
+	memcpy(ev.key.val, key->val, HCI_LINK_KEY_SIZE);
 	ev.key.pin_len = key->pin_len;
 	return mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL);
@@@ -3115,7 -3108,7 +3115,7 @@@ int mgmt_disconnect_failed(struct hci_d
 	mgmt_pending_remove(cmd);

 	mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
-			     hdev);
+			     hdev);
 	return err;
 }

@@@ -3205,7 -3198,7 +3205,7 @@@ int mgmt_user_confirm_request(struct hc
 }

 int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
-			      u8 link_type, u8 addr_type)
+			      u8 link_type, u8 addr_type)
 {
 	struct mgmt_ev_user_passkey_request ev;

@@@ -3219,8 -3212,8 +3219,8 @@@
 }

 static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
-				      u8 link_type, u8 addr_type, u8 status,
-				      u8 opcode)
+				      u8 link_type, u8 addr_type, u8 status,
+				      u8 opcode)
 {
 	struct pending_cmd *cmd;
 	struct mgmt_rp_user_confirm_reply rp;
@@@ -3251,8 -3244,7 +3251,8 @@@ int mgmt_user_confirm_neg_reply_complet
 					 u8 link_type, u8 addr_type, u8 status)
 {
 	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
-					  status, MGMT_OP_USER_CONFIRM_NEG_REPLY);
+					  status,
+					  MGMT_OP_USER_CONFIRM_NEG_REPLY);
 }

 int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
@@@ -3266,8 -3258,7 +3266,8 @@@ int mgmt_user_passkey_neg_reply_complet
 				     u8 link_type, u8 addr_type, u8 status)
 {
 	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
-					  status, MGMT_OP_USER_PASSKEY_NEG_REPLY);
+					  status,
+					  MGMT_OP_USER_PASSKEY_NEG_REPLY);
 }

 int mgmt_auth_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
diff --combined net/ipv6/ip6_fib.c
index 215afc7,6083276..13690d6
--- a/net/ipv6/ip6_fib.c
+++ b/net/ipv6/ip6_fib.c
@@@ -197,7 -197,6 +197,7 @@@ static struct fib6_table *fib6_alloc_ta
 		table->tb6_id = id;
 		table->tb6_root.leaf = net->ipv6.ip6_null_entry;
 		table->tb6_root.fn_flags = RTN_ROOT | RTN_TL_ROOT | RTN_RTINFO;
+		inet_peer_base_init(&table->tb6_peers);
 	}
 	return table;
@@@ -1350,8 -1349,8 +1350,8 @@@ static int fib6_walk_continue(struct fi
 			if (w->leaf && fn->fn_flags & RTN_RTINFO) {
 				int err;

-				if (w->count < w->skip) {
-					w->count++;
+				if (w->skip) {
+					w->skip--;
 					continue;
 				}

@@@ -1634,7 -1633,6 +1634,7 @@@ static int __net_init fib6_net_init(str
 	net->ipv6.fib6_main_tbl->tb6_root.leaf = net->ipv6.ip6_null_entry;
 	net->ipv6.fib6_main_tbl->tb6_root.fn_flags =
 		RTN_ROOT | RTN_TL_ROOT | RTN_RTINFO;
+	inet_peer_base_init(&net->ipv6.fib6_main_tbl->tb6_peers);

 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
 	net->ipv6.fib6_local_tbl = kzalloc(sizeof(*net->ipv6.fib6_local_tbl),
@@@ -1645,7 -1643,6 +1645,7 @@@
 	net->ipv6.fib6_local_tbl->tb6_root.leaf = net->ipv6.ip6_null_entry;
 	net->ipv6.fib6_local_tbl->tb6_root.fn_flags =
 		RTN_ROOT | RTN_TL_ROOT | RTN_RTINFO;
+	inet_peer_base_init(&net->ipv6.fib6_local_tbl->tb6_peers);
 #endif
 	fib6_tables_init(net);

@@@ -1669,10 -1666,8 +1669,10 @@@ static void fib6_net_exit(struct net *n
 	del_timer_sync(&net->ipv6.ip6_fib_timer);

 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
+	inetpeer_invalidate_tree(&net->ipv6.fib6_local_tbl->tb6_peers);
 	kfree(net->ipv6.fib6_local_tbl);
 #endif
+	inetpeer_invalidate_tree(&net->ipv6.fib6_main_tbl->tb6_peers);
 	kfree(net->ipv6.fib6_main_tbl);
 	kfree(net->ipv6.fib_table_hash);
 	kfree(net->ipv6.rt6_stats);
diff --combined net/ipv6/route.c
index e649cd7,becb048..c5bbece
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@@ -99,7 -99,10 +99,7 @@@ static u32 *ipv6_cow_metrics(struct dst
 	if (!(rt->dst.flags & DST_HOST))
 		return NULL;
-	if (!rt->rt6i_peer)
-		rt6_bind_peer(rt, 1);
-
-	peer = rt->rt6i_peer;
+	peer = rt6_get_peer_create(rt);
 	if (peer) {
 		u32 *old_p = __DST_METRICS_PTR(old);
 		unsigned long prev, new;
@@@ -258,19 -261,16 +258,19 @@@ static struct rt6_info ip6_blk_hole_ent
 #endif

 /* allocate dst with ip6_dst_ops */
-static inline struct rt6_info *ip6_dst_alloc(struct dst_ops *ops,
+static inline struct rt6_info *ip6_dst_alloc(struct net *net,
					     struct net_device *dev,
-					     int flags)
+					     int flags,
+					     struct fib6_table *table)
 {
-	struct rt6_info *rt = dst_alloc(ops, dev, 0, 0, flags);
+	struct rt6_info *rt = dst_alloc(&net->ipv6.ip6_dst_ops, dev,
+					0, 0, flags);

-	if (rt)
+	if (rt) {
 		memset(&rt->rt6i_table, 0,
 			sizeof(*rt) - sizeof(struct dst_entry));
-
+		rt6_init_peer(rt, table ? &table->tb6_peers : net->ipv6.peers);
+	}
 	return rt;
 }

@@@ -278,6 -278,7 +278,6 @@@ static void ip6_dst_destroy(struct dst_
 {
 	struct rt6_info *rt = (struct rt6_info *)dst;
 	struct inet6_dev *idev = rt->rt6i_idev;
-	struct inet_peer *peer = rt->rt6i_peer;

 	if (!(rt->dst.flags & DST_HOST))
 		dst_destroy_metrics_generic(dst);
@@@ -290,8 -291,8 +290,8 @@@
 	if (!(rt->rt6i_flags & RTF_EXPIRES) && dst->from)
 		dst_release(dst->from);

-	if (peer) {
-		rt->rt6i_peer = NULL;
+	if (rt6_has_peer(rt)) {
+		struct inet_peer *peer = rt6_peer_ptr(rt);
 		inet_putpeer(peer);
 	}
 }
@@@ -305,20 -306,13 +305,20 @@@ static u32 rt6_peer_genid(void

 void rt6_bind_peer(struct rt6_info *rt, int create)
 {
+	struct inet_peer_base *base;
 	struct inet_peer *peer;

-	peer = inet_getpeer_v6(&rt->rt6i_dst.addr, create);
-	if (peer && cmpxchg(&rt->rt6i_peer, NULL, peer) != NULL)
-		inet_putpeer(peer);
-	else
-		rt->rt6i_peer_genid = rt6_peer_genid();
+	base = inetpeer_base_ptr(rt->_rt6i_peer);
+	if (!base)
+		return;
+
+	peer = inet_getpeer_v6(base, &rt->rt6i_dst.addr, create);
+	if (peer) {
+		if (!rt6_set_peer(rt, peer))
+			inet_putpeer(peer);
+		else
+			rt->rt6i_peer_genid = rt6_peer_genid();
+	}
 }
 static void ip6_dst_ifdown(struct dst_entry *dst, struct net_device *dev,
@@@ -958,7 -952,6 +958,7 @@@ struct dst_entry *ip6_blackhole_route(s
 	rt = dst_alloc(&ip6_dst_blackhole_ops, ort->dst.dev, 1, 0, 0);
 	if (rt) {
 		memset(&rt->rt6i_table, 0,
 			sizeof(*rt) - sizeof(struct dst_entry));
+		rt6_init_peer(rt, net->ipv6.peers);

 		new = &rt->dst;

@@@ -1003,7 -996,7 +1003,7 @@@ static struct dst_entry *ip6_dst_check(

 	if (rt->rt6i_node && (rt->rt6i_node->fn_sernum == cookie)) {
 		if (rt->rt6i_peer_genid != rt6_peer_genid()) {
-			if (!rt->rt6i_peer)
+			if (!rt6_has_peer(rt))
 				rt6_bind_peer(rt, 0);
 			rt->rt6i_peer_genid = rt6_peer_genid();
 		}
@@@ -1049,10 -1042,7 +1049,10 @@@ static void ip6_rt_update_pmtu(struct d
 {
 	struct rt6_info *rt6 = (struct rt6_info*)dst;

+	dst_confirm(dst);
 	if (mtu < dst_mtu(dst) && rt6->rt6i_dst.plen == 128) {
+		struct net *net = dev_net(dst->dev);
+
 		rt6->rt6i_flags |= RTF_MODIFIED;
 		if (mtu < IPV6_MIN_MTU) {
 			u32 features = dst_metric(dst, RTAX_FEATURES);
@@@ -1061,39 -1051,9 +1061,39 @@@
 			dst_metric_set(dst, RTAX_FEATURES, features);
 		}
 		dst_metric_set(dst, RTAX_MTU, mtu);
+		rt6_update_expires(rt6, net->ipv6.sysctl.ip6_rt_mtu_expires);
 	}
 }

+void ip6_update_pmtu(struct sk_buff *skb, struct net *net, __be32 mtu,
+		     int oif, u32 mark)
+{
+	const struct ipv6hdr *iph = (struct ipv6hdr *) skb->data;
+	struct dst_entry *dst;
+	struct flowi6 fl6;
+
+	memset(&fl6, 0, sizeof(fl6));
+	fl6.flowi6_oif = oif;
+	fl6.flowi6_mark = mark;
+	fl6.flowi6_flags = FLOWI_FLAG_PRECOW_METRICS;
+	fl6.daddr = iph->daddr;
+	fl6.saddr = iph->saddr;
+	fl6.flowlabel = (*(__be32 *) iph) & IPV6_FLOWINFO_MASK;
+
+	dst = ip6_route_output(net, NULL, &fl6);
+	if (!dst->error)
+		ip6_rt_update_pmtu(dst, ntohl(mtu));
+	dst_release(dst);
+}
+EXPORT_SYMBOL_GPL(ip6_update_pmtu);
+
+void ip6_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, __be32 mtu)
+{
+	ip6_update_pmtu(skb, sock_net(sk), mtu,
+			sk->sk_bound_dev_if, sk->sk_mark);
+}
+EXPORT_SYMBOL_GPL(ip6_sk_update_pmtu);
+
 static unsigned int ip6_default_advmss(const struct dst_entry *dst)
 {
 	struct net_device *dev = dst->dev;
@@@ -1150,7 -1110,7 +1150,7 @@@ struct dst_entry *icmp6_dst_alloc(struc
 	if (unlikely(!idev))
 		return ERR_PTR(-ENODEV);

-	rt = ip6_dst_alloc(&net->ipv6.ip6_dst_ops, dev, 0);
+	rt = ip6_dst_alloc(net, dev, 0, NULL);
 	if (unlikely(!rt)) {
 		in6_dev_put(idev);
 		dst = ERR_PTR(-ENOMEM);
@@@ -1332,7 -1292,7 +1332,7 @@@ int ip6_route_add(struct fib6_config *c
 	if (!table)
 		goto out;

-	rt = ip6_dst_alloc(&net->ipv6.ip6_dst_ops, NULL, DST_NOCOUNT);
+	rt = ip6_dst_alloc(net, NULL, DST_NOCOUNT, table);

 	if (!rt) {
 		err = -ENOMEM;
@@@ -1737,6 -1697,116 +1737,6 @@@ out
 }
 /*
- * Handle ICMP "packet too big" messages
- * i.e. Path MTU discovery
- */
-
-static void rt6_do_pmtu_disc(const struct in6_addr *daddr, const struct in6_addr *saddr,
-			     struct net *net, u32 pmtu, int ifindex)
-{
-	struct rt6_info *rt, *nrt;
-	int allfrag = 0;
-again:
-	rt = rt6_lookup(net, daddr, saddr, ifindex, 0);
-	if (!rt)
-		return;
-
-	if (rt6_check_expired(rt)) {
-		ip6_del_rt(rt);
-		goto again;
-	}
-
-	if (pmtu >= dst_mtu(&rt->dst))
-		goto out;
-
-	if (pmtu < IPV6_MIN_MTU) {
-		/*
-		 * According to RFC2460, PMTU is set to the IPv6 Minimum Link
-		 * MTU (1280) and a fragment header should always be included
-		 * after a node receiving Too Big message reporting PMTU is
-		 * less than the IPv6 Minimum Link MTU.
-		 */
-		pmtu = IPV6_MIN_MTU;
-		allfrag = 1;
-	}
-
-	/* New mtu received -> path was valid.
-	   They are sent only in response to data packets,
-	   so that this nexthop apparently is reachable. --ANK
-	 */
-	dst_confirm(&rt->dst);
-
-	/* Host route. If it is static, it would be better
-	   not to override it, but add new one, so that
-	   when cache entry will expire old pmtu
-	   would return automatically.
-	 */
-	if (rt->rt6i_flags & RTF_CACHE) {
-		dst_metric_set(&rt->dst, RTAX_MTU, pmtu);
-		if (allfrag) {
-			u32 features = dst_metric(&rt->dst, RTAX_FEATURES);
-			features |= RTAX_FEATURE_ALLFRAG;
-			dst_metric_set(&rt->dst, RTAX_FEATURES, features);
-		}
-		rt6_update_expires(rt, net->ipv6.sysctl.ip6_rt_mtu_expires);
-		rt->rt6i_flags |= RTF_MODIFIED;
-		goto out;
-	}
-
-	/* Network route.
-	   Two cases are possible:
-	   1. It is connected route. Action: COW
-	   2. It is gatewayed route or NONEXTHOP route. Action: clone it.
-	 */
-	if (!dst_get_neighbour_noref_raw(&rt->dst) && !(rt->rt6i_flags & RTF_NONEXTHOP))
-		nrt = rt6_alloc_cow(rt, daddr, saddr);
-	else
-		nrt = rt6_alloc_clone(rt, daddr);
-
-	if (nrt) {
-		dst_metric_set(&nrt->dst, RTAX_MTU, pmtu);
-		if (allfrag) {
-			u32 features = dst_metric(&nrt->dst, RTAX_FEATURES);
-			features |= RTAX_FEATURE_ALLFRAG;
-			dst_metric_set(&nrt->dst, RTAX_FEATURES, features);
-		}
-
-		/* According to RFC 1981, detecting PMTU increase shouldn't be
-		 * happened within 5 mins, the recommended timer is 10 mins.
-		 * Here this route expiration time is set to ip6_rt_mtu_expires
-		 * which is 10 mins. After 10 mins the decreased pmtu is expired
-		 * and detecting PMTU increase will be automatically happened.
-		 */
-		rt6_update_expires(nrt, net->ipv6.sysctl.ip6_rt_mtu_expires);
-		nrt->rt6i_flags |= RTF_DYNAMIC;
-		ip6_ins_rt(nrt);
-	}
-out:
-	dst_release(&rt->dst);
-}
-
-void rt6_pmtu_discovery(const struct in6_addr *daddr, const struct in6_addr *saddr,
-			struct net_device *dev, u32 pmtu)
-{
-	struct net *net = dev_net(dev);
-
-	/*
-	 * RFC 1981 states that a node "MUST reduce the size of the packets it
-	 * is sending along the path" that caused the Packet Too Big message.
-	 * Since it's not possible in the general case to determine which
-	 * interface was used to send the original packet, we update the MTU
-	 * on the interface that will be used to send future packets. We also
-	 * update the MTU on the interface that received the Packet Too Big in
-	 * case the original packet was forced out that interface with
-	 * SO_BINDTODEVICE or similar. This is the next best thing to the
-	 * correct behaviour, which would be to update the MTU on all
-	 * interfaces.
-	 */
-	rt6_do_pmtu_disc(daddr, saddr, net, pmtu, 0);
-	rt6_do_pmtu_disc(daddr, saddr, net, pmtu, dev->ifindex);
-}
-
-/*
  * Misc support functions
  */
@@@ -1744,8 -1814,8 +1744,8 @@@ static struct rt6_info *ip6_rt_copy(str
 					    const struct in6_addr *dest)
 {
 	struct net *net = dev_net(ort->dst.dev);
-	struct rt6_info *rt = ip6_dst_alloc(&net->ipv6.ip6_dst_ops,
-					    ort->dst.dev, 0);
+	struct rt6_info *rt = ip6_dst_alloc(net, ort->dst.dev, 0,
+					    ort->rt6i_table);

 	if (rt) {
 		rt->dst.input = ort->dst.input;
@@@ -2029,7 -2099,8 +2029,7 @@@ struct rt6_info *addrconf_dst_alloc(str
 				    bool anycast)
 {
 	struct net *net = dev_net(idev->dev);
-	struct rt6_info *rt = ip6_dst_alloc(&net->ipv6.ip6_dst_ops,
-					    net->loopback_dev, 0);
+	struct rt6_info *rt = ip6_dst_alloc(net, net->loopback_dev, 0, NULL);
 	int err;

 	if (!rt) {
@@@ -2450,9 -2521,7 +2450,9 @@@ static int rt6_fill_node(struct net *ne
 	else
 		expires = INT_MAX;

-	peer = rt->rt6i_peer;
+	peer = NULL;
+	if (rt6_has_peer(rt))
+		peer = rt6_peer_ptr(rt);
 	ts = tsage = 0;
 	if (peer && peer->tcp_ts_stamp) {
 		ts = peer->tcp_ts;
@@@ -2888,10 -2957,6 +2888,6 @@@ static int __net_init ip6_route_net_ini
 	net->ipv6.sysctl.ip6_rt_mtu_expires = 10*60*HZ;
 	net->ipv6.sysctl.ip6_rt_min_advmss = IPV6_MIN_MTU - 20 - 40;

- #ifdef CONFIG_PROC_FS
- 	proc_net_fops_create(net, "ipv6_route", 0, &ipv6_route_proc_fops);
- 	proc_net_fops_create(net, "rt6_stats", S_IRUGO, &rt6_stats_seq_fops);
- #endif
 	net->ipv6.ip6_rt_gc_expire = 30*HZ;

 	ret = 0;
@@@ -2912,10 -2977,6 +2908,6 @@@ out_ip6_dst_ops

 static void __net_exit ip6_route_net_exit(struct net *net)
 {
- #ifdef CONFIG_PROC_FS
- 	proc_net_remove(net, "ipv6_route");
- 	proc_net_remove(net, "rt6_stats");
- #endif
 	kfree(net->ipv6.ip6_null_entry);
 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
 	kfree(net->ipv6.ip6_prohibit_entry);
@@@ -2924,36 -2985,33 +2916,58 @@@
 	dst_entries_destroy(&net->ipv6.ip6_dst_ops);
 }
+ static int __net_init ip6_route_net_init_late(struct net *net)
+ {
+ #ifdef CONFIG_PROC_FS
+ 	proc_net_fops_create(net, "ipv6_route", 0, &ipv6_route_proc_fops);
+ 	proc_net_fops_create(net, "rt6_stats", S_IRUGO, &rt6_stats_seq_fops);
+ #endif
+ 	return 0;
+ }
+ 
+ static void __net_exit ip6_route_net_exit_late(struct net *net)
+ {
+ #ifdef CONFIG_PROC_FS
+ 	proc_net_remove(net, "ipv6_route");
+ 	proc_net_remove(net, "rt6_stats");
+ #endif
+ }
+ 
 static struct pernet_operations ip6_route_net_ops = {
 	.init = ip6_route_net_init,
 	.exit = ip6_route_net_exit,
 };

+static int __net_init ipv6_inetpeer_init(struct net *net)
+{
+	struct inet_peer_base *bp = kmalloc(sizeof(*bp), GFP_KERNEL);
+
+	if (!bp)
+		return -ENOMEM;
+	inet_peer_base_init(bp);
+	net->ipv6.peers = bp;
+	return 0;
+}
+
+static void __net_exit ipv6_inetpeer_exit(struct net *net)
+{
+	struct inet_peer_base *bp = net->ipv6.peers;
+
+	net->ipv6.peers = NULL;
+	inetpeer_invalidate_tree(bp);
+	kfree(bp);
+}
+
+static struct pernet_operations ipv6_inetpeer_ops = {
+	.init = ipv6_inetpeer_init,
+	.exit = ipv6_inetpeer_exit,
+};
+
+ static struct pernet_operations ip6_route_net_late_ops = {
+ 	.init = ip6_route_net_init_late,
+ 	.exit = ip6_route_net_exit_late,
+ };
+ 
 static struct notifier_block ip6_route_dev_notifier = {
 	.notifier_call = ip6_route_dev_notify,
 	.priority = 0,
@@@ -2974,14 -3032,10 +2988,14 @@@ int __init ip6_route_init(void
 	if (ret)
 		goto out_kmem_cache;
-	ret = register_pernet_subsys(&ip6_route_net_ops);
+	ret = register_pernet_subsys(&ipv6_inetpeer_ops);
 	if (ret)
 		goto out_dst_entries;

+	ret = register_pernet_subsys(&ip6_route_net_ops);
+	if (ret)
+		goto out_register_inetpeer;
+
 	ip6_dst_blackhole_ops.kmem_cachep = ip6_dst_ops_template.kmem_cachep;

 	/* Registering of the loopback is done before this portion of code,
@@@ -3007,19 -3061,25 +3021,25 @@@
 	if (ret)
 		goto xfrm6_init;

+ 	ret = register_pernet_subsys(&ip6_route_net_late_ops);
+ 	if (ret)
+ 		goto fib6_rules_init;
+ 
 	ret = -ENOBUFS;
 	if (__rtnl_register(PF_INET6, RTM_NEWROUTE, inet6_rtm_newroute, NULL, NULL) ||
 	    __rtnl_register(PF_INET6, RTM_DELROUTE, inet6_rtm_delroute, NULL, NULL) ||
 	    __rtnl_register(PF_INET6, RTM_GETROUTE, inet6_rtm_getroute, NULL, NULL))
-		goto fib6_rules_init;
+		goto out_register_late_subsys;

 	ret = register_netdevice_notifier(&ip6_route_dev_notifier);
 	if (ret)
-		goto fib6_rules_init;
+		goto out_register_late_subsys;

 out:
 	return ret;

+ out_register_late_subsys:
+ 	unregister_pernet_subsys(&ip6_route_net_late_ops);
 fib6_rules_init:
 	fib6_rules_cleanup();
 xfrm6_init:
@@@ -3028,8 -3088,6 +3048,8 @@@ out_fib6_init
 	fib6_gc_cleanup();
 out_register_subsys:
 	unregister_pernet_subsys(&ip6_route_net_ops);
+out_register_inetpeer:
+	unregister_pernet_subsys(&ipv6_inetpeer_ops);
 out_dst_entries:
 	dst_entries_destroy(&ip6_dst_blackhole_ops);
 out_kmem_cache:
@@@ -3040,10 -3098,10 +3060,11 @@@
 void ip6_route_cleanup(void)
 {
 	unregister_netdevice_notifier(&ip6_route_dev_notifier);
+ 	unregister_pernet_subsys(&ip6_route_net_late_ops);
 	fib6_rules_cleanup();
 	xfrm6_fini();
 	fib6_gc_cleanup();
+	unregister_pernet_subsys(&ipv6_inetpeer_ops);
 	unregister_pernet_subsys(&ip6_route_net_ops);
 	dst_entries_destroy(&ip6_dst_blackhole_ops);
 	kmem_cache_destroy(ip6_dst_ops_template.kmem_cachep);