[linux-next] LinuxNextTracking branch, master, updated. next-20140514

batman at open-mesh.org
Thu May 15 00:19:48 CEST 2014


The following commit has been merged into the master branch:
commit d0fdd93079b7dfc079e134edb373d87bc1992de6
Merge: 37be7bc48597afe7a0253d71826d246605fd6306 86b5d251d5ac4dda51a022b34cb29b4ce65a8cd5
Author: Stephen Rothwell <sfr at canb.auug.org.au>
Date:   Wed May 14 13:20:04 2014 +1000

    Merge remote-tracking branch 'net-next/master'
    
    Conflicts:
    	net/ipv6/xfrm6_output.c

diff --combined MAINTAINERS
index 8d27b88,bde15ff..e1930ef
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@@ -355,7 -355,7 +355,7 @@@ F:	Documentation/hwmon/adm102
  F:	drivers/hwmon/adm1025.c
  
  ADM1029 HARDWARE MONITOR DRIVER
 -M:	Corentin Labbe <corentin.labbe at geomatys.fr>
 +M:	Corentin Labbe <clabbe.montjoie at gmail.com>
  L:	lm-sensors at lm-sensors.org
  S:	Maintained
  F:	drivers/hwmon/adm1029.c
@@@ -1893,15 -1893,14 +1893,15 @@@ L:	netdev at vger.kernel.or
  S:	Supported
  F:	drivers/net/ethernet/broadcom/bnx2x/
  
 -BROADCOM BCM281XX/BCM11XXX ARM ARCHITECTURE
 +BROADCOM BCM281XX/BCM11XXX/BCM216XX ARM ARCHITECTURE
  M:	Christian Daudt <bcm at fixthebug.org>
  M:	Matt Porter <mporter at linaro.org>
  L:	bcm-kernel-feedback-list at broadcom.com
 -T:	git git://git.github.com/broadcom/bcm11351
 +T:	git git://github.com/broadcom/mach-bcm
  S:	Maintained
  F:	arch/arm/mach-bcm/
  F:	arch/arm/boot/dts/bcm113*
 +F:	arch/arm/boot/dts/bcm216*
  F:	arch/arm/boot/dts/bcm281*
  F:	arch/arm/configs/bcm_defconfig
  F:	drivers/mmc/host/sdhci_bcm_kona.c
@@@ -1968,6 -1967,12 +1968,12 @@@ S:	Maintaine
  F:	drivers/bcma/
  F:	include/linux/bcma/
  
+ BROADCOM SYSTEMPORT ETHERNET DRIVER
+ M:	Florian Fainelli <f.fainelli at gmail.com>
+ L:	netdev at vger.kernel.org
+ S:	Supported
+ F:	drivers/net/ethernet/broadcom/bcmsysport.*
+ 
  BROCADE BFA FC SCSI DRIVER
  M:	Anil Gurumurthy <anil.gurumurthy at qlogic.com>
  M:	Sudarsana Kalluru <sudarsana.kalluru at qlogic.com>
@@@ -2416,6 -2421,7 +2422,6 @@@ F:	drivers/net/ethernet/ti/cpmac.
  CPU FREQUENCY DRIVERS
  M:	Rafael J. Wysocki <rjw at rjwysocki.net>
  M:	Viresh Kumar <viresh.kumar at linaro.org>
 -L:	cpufreq at vger.kernel.org
  L:	linux-pm at vger.kernel.org
  S:	Maintained
  T:	git git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm.git
@@@ -2426,6 -2432,7 +2432,6 @@@ F:	include/linux/cpufreq.
  CPU FREQUENCY DRIVERS - ARM BIG LITTLE
  M:	Viresh Kumar <viresh.kumar at linaro.org>
  M:	Sudeep Holla <sudeep.holla at arm.com>
 -L:	cpufreq at vger.kernel.org
  L:	linux-pm at vger.kernel.org
  W:	http://www.arm.com/products/processors/technologies/biglittleprocessing.php
  S:	Maintained
@@@ -5489,15 -5496,15 +5495,15 @@@ F:	Documentation/hwmon/ltc426
  F:	drivers/hwmon/ltc4261.c
  
  LTP (Linux Test Project)
 -M:	Shubham Goyal <shubham at linux.vnet.ibm.com>
  M:	Mike Frysinger <vapier at gentoo.org>
  M:	Cyril Hrubis <chrubis at suse.cz>
 -M:	Caspar Zhang <caspar at casparzhang.com>
  M:	Wanlong Gao <gaowanlong at cn.fujitsu.com>
 +M:	Jan Stancek <jstancek at redhat.com>
 +M:	Stanislav Kholmanskikh <stanislav.kholmanskikh at oracle.com>
 +M:	Alexey Kodanev <alexey.kodanev at oracle.com>
  L:	ltp-list at lists.sourceforge.net (subscribers-only)
 -W:	http://ltp.sourceforge.net/
 +W:	http://linux-test-project.github.io/
  T:	git git://github.com/linux-test-project/ltp.git
 -T:	git git://ltp.git.sourceforge.net/gitroot/ltp/ltp-dev
  S:	Maintained
  
  M32R ARCHITECTURE
@@@ -6703,7 -6710,6 +6709,7 @@@ F:	Documentation/PCI
  F:	drivers/pci/
  F:	include/linux/pci*
  F:	arch/x86/pci/
 +F:	arch/x86/kernel/quirks.c
  
  PCI DRIVER FOR IMX6
  M:	Richard Zhu <r65037 at freescale.com>
@@@ -9107,9 -9113,6 +9113,9 @@@ F:	arch/um/os-Linux/drivers
  
  TURBOCHANNEL SUBSYSTEM
  M:	"Maciej W. Rozycki" <macro at linux-mips.org>
 +M:	Ralf Baechle <ralf at linux-mips.org>
 +L:	linux-mips at linux-mips.org
 +Q:	http://patchwork.linux-mips.org/project/linux-mips/list/
  S:	Maintained
  F:	drivers/tc/
  F:	include/linux/tc.h
@@@ -9963,7 -9966,7 +9969,7 @@@ F:	drivers/net/hamradio/*scc.
  F:	drivers/net/hamradio/z8530.h
  
  ZBUD COMPRESSED PAGE ALLOCATOR
 -M:	Seth Jennings <sjenning at linux.vnet.ibm.com>
 +M:	Seth Jennings <sjennings at variantweb.net>
  L:	linux-mm at kvack.org
  S:	Maintained
  F:	mm/zbud.c
@@@ -10008,7 -10011,7 +10014,7 @@@ F:	mm/zsmalloc.
  F:	include/linux/zsmalloc.h
  
  ZSWAP COMPRESSED SWAP CACHING
 -M:	Seth Jennings <sjenning at linux.vnet.ibm.com>
 +M:	Seth Jennings <sjennings at variantweb.net>
  L:	linux-mm at kvack.org
  S:	Maintained
  F:	mm/zswap.c
diff --combined arch/arm/boot/dts/am33xx.dtsi
index 7ad75b4,baf56cc..f1eea4a
--- a/arch/arm/boot/dts/am33xx.dtsi
+++ b/arch/arm/boot/dts/am33xx.dtsi
@@@ -144,7 -144,7 +144,7 @@@
  			compatible = "ti,edma3";
  			ti,hwmods = "tpcc", "tptc0", "tptc1", "tptc2";
  			reg =	<0x49000000 0x10000>,
 -				<0x44e10f90 0x10>;
 +				<0x44e10f90 0x40>;
  			interrupts = <12 13 14>;
  			#dma-cells = <1>;
  			dma-channels = <64>;
@@@ -665,6 -665,8 +665,8 @@@
  		mac: ethernet at 4a100000 {
  			compatible = "ti,cpsw";
  			ti,hwmods = "cpgmac0";
+ 			clocks = <&cpsw_125mhz_gclk>, <&cpsw_cpts_rft_clk>;
+ 			clock-names = "fck", "cpts";
  			cpdma_channels = <8>;
  			ale_entries = <1024>;
  			bd_ram_size = <0x2000>;
diff --combined drivers/net/macvlan.c
index c5fb9cf,f0118d1..72633fd
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@@ -30,6 -30,7 +30,7 @@@
  #include <linux/if_link.h>
  #include <linux/if_macvlan.h>
  #include <linux/hash.h>
+ #include <linux/workqueue.h>
  #include <net/rtnetlink.h>
  #include <net/xfrm.h>
  
@@@ -40,10 -41,18 +41,18 @@@ struct macvlan_port 
  	struct hlist_head	vlan_hash[MACVLAN_HASH_SIZE];
  	struct list_head	vlans;
  	struct rcu_head		rcu;
+ 	struct sk_buff_head	bc_queue;
+ 	struct work_struct	bc_work;
  	bool 			passthru;
  	int			count;
  };
  
+ struct macvlan_skb_cb {
+ 	const struct macvlan_dev *src;
+ };
+ 
+ #define MACVLAN_SKB_CB(__skb) ((struct macvlan_skb_cb *)&((__skb)->cb[0]))
+ 
  static void macvlan_port_destroy(struct net_device *dev);
  
  static struct macvlan_port *macvlan_port_get_rcu(const struct net_device *dev)
@@@ -120,7 -129,7 +129,7 @@@ static int macvlan_broadcast_one(struc
  	struct net_device *dev = vlan->dev;
  
  	if (local)
- 		return dev_forward_skb(dev, skb);
+ 		return __dev_forward_skb(dev, skb);
  
  	skb->dev = dev;
  	if (ether_addr_equal_64bits(eth->h_dest, dev->broadcast))
@@@ -128,7 -137,7 +137,7 @@@
  	else
  		skb->pkt_type = PACKET_MULTICAST;
  
- 	return netif_rx(skb);
+ 	return 0;
  }
  
  static u32 macvlan_hash_mix(const struct macvlan_dev *vlan)
@@@ -175,32 -184,32 +184,32 @@@ static void macvlan_broadcast(struct sk
  			if (likely(nskb))
  				err = macvlan_broadcast_one(
  					nskb, vlan, eth,
- 					mode == MACVLAN_MODE_BRIDGE);
+ 					mode == MACVLAN_MODE_BRIDGE) ?:
+ 				      netif_rx_ni(nskb);
  			macvlan_count_rx(vlan, skb->len + ETH_HLEN,
  					 err == NET_RX_SUCCESS, 1);
  		}
  	}
  }
  
- /* called under rcu_read_lock() from netif_receive_skb */
- static rx_handler_result_t macvlan_handle_frame(struct sk_buff **pskb)
+ static void macvlan_process_broadcast(struct work_struct *w)
  {
- 	struct macvlan_port *port;
- 	struct sk_buff *skb = *pskb;
- 	const struct ethhdr *eth = eth_hdr(skb);
- 	const struct macvlan_dev *vlan;
- 	const struct macvlan_dev *src;
- 	struct net_device *dev;
- 	unsigned int len = 0;
- 	int ret = NET_RX_DROP;
+ 	struct macvlan_port *port = container_of(w, struct macvlan_port,
+ 						 bc_work);
+ 	struct sk_buff *skb;
+ 	struct sk_buff_head list;
+ 
+ 	skb_queue_head_init(&list);
+ 
+ 	spin_lock_bh(&port->bc_queue.lock);
+ 	skb_queue_splice_tail_init(&port->bc_queue, &list);
+ 	spin_unlock_bh(&port->bc_queue.lock);
+ 
+ 	while ((skb = __skb_dequeue(&list))) {
+ 		const struct macvlan_dev *src = MACVLAN_SKB_CB(skb)->src;
+ 
+ 		rcu_read_lock();
  
- 	port = macvlan_port_get_rcu(skb->dev);
- 	if (is_multicast_ether_addr(eth->h_dest)) {
- 		skb = ip_check_defrag(skb, IP_DEFRAG_MACVLAN);
- 		if (!skb)
- 			return RX_HANDLER_CONSUMED;
- 		eth = eth_hdr(skb);
- 		src = macvlan_hash_lookup(port, eth->h_source);
  		if (!src)
  			/* frame comes from an external address */
  			macvlan_broadcast(skb, port, NULL,
@@@ -213,20 -222,80 +222,80 @@@
  			macvlan_broadcast(skb, port, src->dev,
  					  MACVLAN_MODE_VEPA |
  					  MACVLAN_MODE_BRIDGE);
- 		else if (src->mode == MACVLAN_MODE_BRIDGE)
+ 		else
  			/*
  			 * flood only to VEPA ports, bridge ports
  			 * already saw the frame on the way out.
  			 */
  			macvlan_broadcast(skb, port, src->dev,
  					  MACVLAN_MODE_VEPA);
- 		else {
+ 
+ 		rcu_read_unlock();
+ 
+ 		kfree_skb(skb);
+ 	}
+ }
+ 
+ static void macvlan_broadcast_enqueue(struct macvlan_port *port,
+ 				      struct sk_buff *skb)
+ {
+ 	struct sk_buff *nskb;
+ 	int err = -ENOMEM;
+ 
+ 	nskb = skb_clone(skb, GFP_ATOMIC);
+ 	if (!nskb)
+ 		goto err;
+ 
+ 	spin_lock(&port->bc_queue.lock);
+ 	if (skb_queue_len(&port->bc_queue) < skb->dev->tx_queue_len) {
+ 		__skb_queue_tail(&port->bc_queue, nskb);
+ 		err = 0;
+ 	}
+ 	spin_unlock(&port->bc_queue.lock);
+ 
+ 	if (err)
+ 		goto free_nskb;
+ 
+ 	schedule_work(&port->bc_work);
+ 	return;
+ 
+ free_nskb:
+ 	kfree_skb(nskb);
+ err:
+ 	atomic_long_inc(&skb->dev->rx_dropped);
+ }
+ 
+ /* called under rcu_read_lock() from netif_receive_skb */
+ static rx_handler_result_t macvlan_handle_frame(struct sk_buff **pskb)
+ {
+ 	struct macvlan_port *port;
+ 	struct sk_buff *skb = *pskb;
+ 	const struct ethhdr *eth = eth_hdr(skb);
+ 	const struct macvlan_dev *vlan;
+ 	const struct macvlan_dev *src;
+ 	struct net_device *dev;
+ 	unsigned int len = 0;
+ 	int ret = NET_RX_DROP;
+ 
+ 	port = macvlan_port_get_rcu(skb->dev);
+ 	if (is_multicast_ether_addr(eth->h_dest)) {
+ 		skb = ip_check_defrag(skb, IP_DEFRAG_MACVLAN);
+ 		if (!skb)
+ 			return RX_HANDLER_CONSUMED;
+ 		eth = eth_hdr(skb);
+ 		src = macvlan_hash_lookup(port, eth->h_source);
+ 		if (src && src->mode != MACVLAN_MODE_VEPA &&
+ 		    src->mode != MACVLAN_MODE_BRIDGE) {
  			/* forward to original port. */
  			vlan = src;
- 			ret = macvlan_broadcast_one(skb, vlan, eth, 0);
+ 			ret = macvlan_broadcast_one(skb, vlan, eth, 0) ?:
+ 			      netif_rx(skb);
  			goto out;
  		}
  
+ 		MACVLAN_SKB_CB(skb)->src = src;
+ 		macvlan_broadcast_enqueue(port, skb);
+ 
  		return RX_HANDLER_PASS;
  	}
  
@@@ -458,10 -527,8 +527,10 @@@ static void macvlan_change_rx_flags(str
  	struct macvlan_dev *vlan = netdev_priv(dev);
  	struct net_device *lowerdev = vlan->lowerdev;
  
 -	if (change & IFF_ALLMULTI)
 -		dev_set_allmulti(lowerdev, dev->flags & IFF_ALLMULTI ? 1 : -1);
 +	if (dev->flags & IFF_UP) {
 +		if (change & IFF_ALLMULTI)
 +			dev_set_allmulti(lowerdev, dev->flags & IFF_ALLMULTI ? 1 : -1);
 +	}
  }
  
  static void macvlan_set_mac_lists(struct net_device *dev)
@@@ -763,6 -830,9 +832,9 @@@ static int macvlan_port_create(struct n
  	for (i = 0; i < MACVLAN_HASH_SIZE; i++)
  		INIT_HLIST_HEAD(&port->vlan_hash[i]);
  
+ 	skb_queue_head_init(&port->bc_queue);
+ 	INIT_WORK(&port->bc_work, macvlan_process_broadcast);
+ 
  	err = netdev_rx_handler_register(dev, macvlan_handle_frame, port);
  	if (err)
  		kfree(port);
@@@ -775,6 -845,7 +847,7 @@@ static void macvlan_port_destroy(struc
  {
  	struct macvlan_port *port = macvlan_port_get_rtnl(dev);
  
+ 	cancel_work_sync(&port->bc_work);
  	dev->priv_flags &= ~IFF_MACVLAN_PORT;
  	netdev_rx_handler_unregister(dev);
  	kfree_rcu(port, rcu);
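
A note on the macvlan.c changes above: the RX handler no longer fans multicast/broadcast frames out to every macvlan inline. It now clones the frame onto a bounded per-port queue (capped at the lower device's tx_queue_len, counting rx_dropped on overflow) and schedules a work item, which splices the queue out under the lock and does the macvlan_broadcast() fan-out outside the receive path. The "x ?: y" form in the new calls is the GNU Elvis extension: netif_rx()/netif_rx_ni() only runs when the preceding forward returned 0. Below is a minimal userspace sketch of that deferred fan-out pattern, with illustrative names and a fixed queue limit; it is an analogy, not the driver code.

/*
 * Userspace analogue of the bounded-queue + worker pattern introduced in
 * macvlan_broadcast_enqueue()/macvlan_process_broadcast() above.
 * QUEUE_LIMIT stands in for dev->tx_queue_len; all names are illustrative.
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

#define QUEUE_LIMIT 8

struct pkt {
    char data[64];
    struct pkt *next;
};

static struct pkt *head, *tail;
static unsigned int qlen, dropped;
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t kick = PTHREAD_COND_INITIALIZER;

/* fast path: copy the frame, enqueue if there is room, otherwise drop */
static void broadcast_enqueue(const char *data)
{
    struct pkt *p = malloc(sizeof(*p));

    if (!p)
        return;
    snprintf(p->data, sizeof(p->data), "%s", data);
    p->next = NULL;

    pthread_mutex_lock(&lock);
    if (qlen < QUEUE_LIMIT) {
        if (tail)
            tail->next = p;
        else
            head = p;
        tail = p;
        qlen++;
        p = NULL;
        pthread_cond_signal(&kick);    /* schedule_work() analogue */
    } else {
        dropped++;                     /* rx_dropped analogue */
    }
    pthread_mutex_unlock(&lock);
    free(p);                           /* only frees on overflow */
}

/* worker: splice the whole queue out under the lock, fan out unlocked */
static void *broadcast_worker(void *arg)
{
    (void)arg;
    for (;;) {
        struct pkt *list;

        pthread_mutex_lock(&lock);
        while (!head)
            pthread_cond_wait(&kick, &lock);
        list = head;
        head = tail = NULL;
        qlen = 0;
        pthread_mutex_unlock(&lock);

        while (list) {
            struct pkt *p = list;

            list = p->next;
            printf("fan out: %s\n", p->data);  /* macvlan_broadcast() analogue */
            free(p);
        }
    }
    return NULL;
}

int main(void)
{
    pthread_t tid;
    char buf[32];
    int i;

    pthread_create(&tid, NULL, broadcast_worker, NULL);
    for (i = 0; i < 20; i++) {
        snprintf(buf, sizeof(buf), "frame %d", i);
        broadcast_enqueue(buf);
    }
    sleep(1);
    printf("dropped: %u\n", dropped);
    return 0;
}
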
diff --combined drivers/net/wireless/iwlwifi/mvm/coex.c
index 0489314,8f4b03d..4284672
--- a/drivers/net/wireless/iwlwifi/mvm/coex.c
+++ b/drivers/net/wireless/iwlwifi/mvm/coex.c
@@@ -104,11 -104,8 +104,8 @@@ static const u8 iwl_bt_prio_tbl[BT_COEX
  #define BT_DISABLE_REDUCED_TXPOWER_THRESHOLD	(-65)
  #define BT_ANTENNA_COUPLING_THRESHOLD		(30)
  
- int iwl_send_bt_prio_tbl(struct iwl_mvm *mvm)
+ static int iwl_send_bt_prio_tbl(struct iwl_mvm *mvm)
  {
- 	if (!(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_NEWBT_COEX))
- 		return 0;
- 
  	return iwl_mvm_send_cmd_pdu(mvm, BT_COEX_PRIO_TABLE, CMD_SYNC,
  				    sizeof(struct iwl_bt_coex_prio_tbl_cmd),
  				    &iwl_bt_prio_tbl);
@@@ -573,8 -570,9 +570,9 @@@ int iwl_send_bt_init_conf(struct iwl_mv
  	int ret;
  	u32 flags;
  
- 	if (!(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_NEWBT_COEX))
- 		return 0;
+ 	ret = iwl_send_bt_prio_tbl(mvm);
+ 	if (ret)
+ 		return ret;
  
  	bt_cmd = kzalloc(sizeof(*bt_cmd), GFP_KERNEL);
  	if (!bt_cmd)
@@@ -582,10 -580,12 +580,12 @@@
  	cmd.data[0] = bt_cmd;
  
  	bt_cmd->max_kill = 5;
- 	bt_cmd->bt4_antenna_isolation_thr = BT_ANTENNA_COUPLING_THRESHOLD,
- 	bt_cmd->bt4_antenna_isolation = iwlwifi_mod_params.ant_coupling,
- 	bt_cmd->bt4_tx_tx_delta_freq_thr = 15,
- 	bt_cmd->bt4_tx_rx_max_freq0 = 15,
+ 	bt_cmd->bt4_antenna_isolation_thr = BT_ANTENNA_COUPLING_THRESHOLD;
+ 	bt_cmd->bt4_antenna_isolation = iwlwifi_mod_params.ant_coupling;
+ 	bt_cmd->bt4_tx_tx_delta_freq_thr = 15;
+ 	bt_cmd->bt4_tx_rx_max_freq0 = 15;
+ 	bt_cmd->override_primary_lut = BT_COEX_INVALID_LUT;
+ 	bt_cmd->override_secondary_lut = BT_COEX_INVALID_LUT;
  
  	flags = iwlwifi_mod_params.bt_coex_active ?
  			BT_COEX_NW : BT_COEX_DISABLE;
@@@ -611,14 -611,14 +611,14 @@@
  		bt_cmd->flags |= cpu_to_le32(BT_COEX_SYNC2SCO);
  
  	if (IWL_MVM_BT_COEX_CORUNNING) {
 -		bt_cmd->valid_bit_msk = cpu_to_le32(BT_VALID_CORUN_LUT_20 |
 -						    BT_VALID_CORUN_LUT_40);
 +		bt_cmd->valid_bit_msk |= cpu_to_le32(BT_VALID_CORUN_LUT_20 |
 +						     BT_VALID_CORUN_LUT_40);
  		bt_cmd->flags |= cpu_to_le32(BT_COEX_CORUNNING);
  	}
  
  	if (IWL_MVM_BT_COEX_MPLUT) {
  		bt_cmd->flags |= cpu_to_le32(BT_COEX_MPLUT);
 -		bt_cmd->valid_bit_msk = cpu_to_le32(BT_VALID_MULTI_PRIO_LUT);
 +		bt_cmd->valid_bit_msk |= cpu_to_le32(BT_VALID_MULTI_PRIO_LUT);
  	}
  
  	if (mvm->cfg->bt_shared_single_ant)
@@@ -1215,6 -1215,17 +1215,17 @@@ bool iwl_mvm_bt_coex_is_mimo_allowed(st
  	return iwl_get_coex_type(mvm, mvmsta->vif) == BT_COEX_TIGHT_LUT;
  }
  
+ bool iwl_mvm_bt_coex_is_tpc_allowed(struct iwl_mvm *mvm,
+ 				    enum ieee80211_band band)
+ {
+ 	u32 bt_activity = le32_to_cpu(mvm->last_bt_notif.bt_activity_grading);
+ 
+ 	if (band != IEEE80211_BAND_2GHZ)
+ 		return false;
+ 
+ 	return bt_activity >= BT_LOW_TRAFFIC;
+ }
+ 
  u8 iwl_mvm_bt_coex_tx_prio(struct iwl_mvm *mvm, struct ieee80211_hdr *hdr,
  			   struct ieee80211_tx_info *info, u8 ac)
  {
@@@ -1249,9 -1260,6 +1260,6 @@@
  
  void iwl_mvm_bt_coex_vif_change(struct iwl_mvm *mvm)
  {
- 	if (!(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_NEWBT_COEX))
- 		return;
- 
  	iwl_mvm_bt_coex_notif_handle(mvm);
  }
  
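
Two small things worth calling out in the coex.c hunk above. The bt_cmd->bt4_* lines previously ended in commas rather than semicolons; that version still compiled and ran every assignment, because the statements simply chained through the comma operator, so the switch to semicolons is a readability fix. The "=" to "|=" change on valid_bit_msk, by contrast, is functional: it stops the MPLUT write from clobbering the CORUN_LUT bits (and anything set earlier in the function). A tiny standalone illustration of the comma-operator behaviour, with illustrative struct and field names:

#include <stdio.h>

struct cfg {
    int a, b, c;
};

int main(void)
{
    struct cfg cmd = { 0, 0, 0 };

    /* one expression statement: the comma operator sequences all three */
    cmd.a = 1,
    cmd.b = 2,
    cmd.c = 3;

    printf("%d %d %d\n", cmd.a, cmd.b, cmd.c);  /* prints: 1 2 3 */
    return 0;
}
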
diff --combined drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h
index d73a89e,6174c02..6959fda
--- a/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h
+++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h
@@@ -169,8 -169,12 +169,12 @@@ enum iwl_scan_type 
  	SCAN_TYPE_DISCOVERY_FORCED	= 6,
  }; /* SCAN_ACTIVITY_TYPE_E_VER_1 */
  
- /* Maximal number of channels to scan */
- #define MAX_NUM_SCAN_CHANNELS 0x24
+ /**
+  * Maximal number of channels to scan
+  * it should be equal to:
+  * max(IWL_NUM_CHANNELS, IWL_NUM_CHANNELS_FAMILY_8000)
+  */
+ #define MAX_NUM_SCAN_CHANNELS 50
  
  /**
   * struct iwl_scan_cmd - scan request command
@@@ -183,9 -187,9 +187,9 @@@
   *	this number of packets were received (typically 1)
   * @passive2active: is auto switching from passive to active during scan allowed
   * @rxchain_sel_flags: RXON_RX_CHAIN_*
 - * @max_out_time: in usecs, max out of serving channel time
 + * @max_out_time: in TUs, max out of serving channel time
   * @suspend_time: how long to pause scan when returning to service channel:
 - *	bits 0-19: beacon interal in usecs (suspend before executing)
 + *	bits 0-19: beacon interal in TUs (suspend before executing)
   *	bits 20-23: reserved
   *	bits 24-31: number of beacons (suspend between channels)
   * @rxon_flags: RXON_FLG_*
@@@ -383,8 -387,8 +387,8 @@@ enum scan_framework_client 
   * @quiet_plcp_th:	quiet channel num of packets threshold
   * @good_CRC_th:	passive to active promotion threshold
   * @rx_chain:		RXON rx chain.
 - * @max_out_time:	max uSec to be out of assoceated channel
 - * @suspend_time:	pause scan this long when returning to service channel
 + * @max_out_time:	max TUs to be out of assoceated channel
 + * @suspend_time:	pause scan this TUs when returning to service channel
   * @flags:		RXON flags
   * @filter_flags:	RXONfilter
   * @tx_cmd:		tx command for active scan; for 2GHz and for 5GHz.
@@@ -534,13 -538,16 +538,16 @@@ struct iwl_scan_offload_schedule 
   *
   * IWL_SCAN_OFFLOAD_FLAG_PASS_ALL: pass all results - no filtering.
   * IWL_SCAN_OFFLOAD_FLAG_CACHED_CHANNEL: add cached channels to partial scan.
-  * IWL_SCAN_OFFLOAD_FLAG_ENERGY_SCAN: use energy based scan before partial scan
-  *	on A band.
+  * IWL_SCAN_OFFLOAD_FLAG_EBS_QUICK_MODE: EBS duration is 100mSec - typical
+  *	beacon period. Finding channel activity in this mode is not guaranteed.
+  * IWL_SCAN_OFFLOAD_FLAG_EBS_ACCURATE_MODE: EBS duration is 200mSec.
+  *	Assuming beacon period is 100ms finding channel activity is guaranteed.
   */
  enum iwl_scan_offload_flags {
  	IWL_SCAN_OFFLOAD_FLAG_PASS_ALL		= BIT(0),
  	IWL_SCAN_OFFLOAD_FLAG_CACHED_CHANNEL	= BIT(2),
- 	IWL_SCAN_OFFLOAD_FLAG_ENERGY_SCAN	= BIT(3),
+ 	IWL_SCAN_OFFLOAD_FLAG_EBS_QUICK_MODE	= BIT(5),
+ 	IWL_SCAN_OFFLOAD_FLAG_EBS_ACCURATE_MODE	= BIT(6),
  };
  
  /**
@@@ -563,17 -570,24 +570,24 @@@ enum iwl_scan_offload_compleate_status 
  	IWL_SCAN_OFFLOAD_ABORTED	= 2,
  };
  
+ enum iwl_scan_ebs_status {
+ 	IWL_SCAN_EBS_SUCCESS,
+ 	IWL_SCAN_EBS_FAILED,
+ 	IWL_SCAN_EBS_CHAN_NOT_FOUND,
+ };
+ 
  /**
   * iwl_scan_offload_complete - SCAN_OFFLOAD_COMPLETE_NTF_API_S_VER_1
   * @last_schedule_line:		last schedule line executed (fast or regular)
   * @last_schedule_iteration:	last scan iteration executed before scan abort
   * @status:			enum iwl_scan_offload_compleate_status
+  * @ebs_status: last EBS status, see IWL_SCAN_EBS_*
   */
  struct iwl_scan_offload_complete {
  	u8 last_schedule_line;
  	u8 last_schedule_iteration;
  	u8 status;
- 	u8 reserved;
+ 	u8 ebs_status;
  } __packed;
  
  /**
diff --combined drivers/net/wireless/iwlwifi/mvm/mac80211.c
index b41dc84,97c3dea..32682ed
--- a/drivers/net/wireless/iwlwifi/mvm/mac80211.c
+++ b/drivers/net/wireless/iwlwifi/mvm/mac80211.c
@@@ -276,6 -276,7 +276,7 @@@ int iwl_mvm_mac_setup_register(struct i
  		    IEEE80211_HW_AMPDU_AGGREGATION |
  		    IEEE80211_HW_TIMING_BEACON_ONLY |
  		    IEEE80211_HW_CONNECTION_MONITOR |
+ 		    IEEE80211_HW_SUPPORTS_UAPSD |
  		    IEEE80211_HW_SUPPORTS_DYNAMIC_SMPS |
  		    IEEE80211_HW_SUPPORTS_STATIC_SMPS;
  
@@@ -285,6 -286,8 +286,8 @@@
  				    IEEE80211_RADIOTAP_MCS_HAVE_STBC;
  	hw->radiotap_vht_details |= IEEE80211_RADIOTAP_VHT_KNOWN_STBC;
  	hw->rate_control_algorithm = "iwl-mvm-rs";
+ 	hw->uapsd_queues = IWL_UAPSD_AC_INFO;
+ 	hw->uapsd_max_sp_len = IWL_UAPSD_MAX_SP;
  
  	/*
  	 * Enable 11w if advertised by firmware and software crypto
@@@ -295,11 -298,9 +298,9 @@@
  	    !iwlwifi_mod_params.sw_crypto)
  		hw->flags |= IEEE80211_HW_MFP_CAPABLE;
  
- 	if (0 && mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_UAPSD_SUPPORT) {
- 		hw->flags |= IEEE80211_HW_SUPPORTS_UAPSD;
- 		hw->uapsd_queues = IWL_UAPSD_AC_INFO;
- 		hw->uapsd_max_sp_len = IWL_UAPSD_MAX_SP;
- 	}
+ 	/* Disable uAPSD due to firmware issues */
+ 	if (true)
+ 		hw->flags &= ~IEEE80211_HW_SUPPORTS_UAPSD;
  
  	hw->sta_data_size = sizeof(struct iwl_mvm_sta);
  	hw->vif_data_size = sizeof(struct iwl_mvm_vif);
@@@ -309,11 -310,8 +310,8 @@@
  		BIT(NL80211_IFTYPE_P2P_CLIENT) |
  		BIT(NL80211_IFTYPE_AP) |
  		BIT(NL80211_IFTYPE_P2P_GO) |
- 		BIT(NL80211_IFTYPE_P2P_DEVICE);
- 
- 	/* IBSS has bugs in older versions */
- 	if (IWL_UCODE_API(mvm->fw->ucode_ver) >= 8)
- 		hw->wiphy->interface_modes |= BIT(NL80211_IFTYPE_ADHOC);
+ 		BIT(NL80211_IFTYPE_P2P_DEVICE) |
+ 		BIT(NL80211_IFTYPE_ADHOC);
  
  	hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
  	hw->wiphy->regulatory_flags |= REGULATORY_CUSTOM_REG |
@@@ -365,14 -363,11 +363,11 @@@
  	else
  		hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
  
- 	if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_SCHED_SCAN) {
- 		hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_SCHED_SCAN;
- 		hw->wiphy->max_sched_scan_ssids = PROBE_OPTION_MAX;
- 		hw->wiphy->max_match_sets = IWL_SCAN_MAX_PROFILES;
- 		/* we create the 802.11 header and zero length SSID IE. */
- 		hw->wiphy->max_sched_scan_ie_len =
- 					SCAN_OFFLOAD_PROBE_REQ_SIZE - 24 - 2;
- 	}
+ 	hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_SCHED_SCAN;
+ 	hw->wiphy->max_sched_scan_ssids = PROBE_OPTION_MAX;
+ 	hw->wiphy->max_match_sets = IWL_SCAN_MAX_PROFILES;
+ 	/* we create the 802.11 header and zero length SSID IE. */
+ 	hw->wiphy->max_sched_scan_ie_len = SCAN_OFFLOAD_PROBE_REQ_SIZE - 24 - 2;
  
  	hw->wiphy->features |= NL80211_FEATURE_P2P_GO_CTWIN |
  			       NL80211_FEATURE_P2P_GO_OPPPS;
@@@ -386,7 -381,11 +381,11 @@@
  	}
  
  #ifdef CONFIG_PM_SLEEP
- 	if (mvm->fw->img[IWL_UCODE_WOWLAN].sec[0].len &&
+ 	if (iwl_mvm_is_d0i3_supported(mvm) &&
+ 	    device_can_wakeup(mvm->trans->dev)) {
+ 		mvm->wowlan.flags = WIPHY_WOWLAN_ANY;
+ 		hw->wiphy->wowlan = &mvm->wowlan;
+ 	} else if (mvm->fw->img[IWL_UCODE_WOWLAN].sec[0].len &&
  	    mvm->trans->ops->d3_suspend &&
  	    mvm->trans->ops->d3_resume &&
  	    device_can_wakeup(mvm->trans->dev)) {
@@@ -827,8 -826,7 +826,7 @@@ static int iwl_mvm_mac_add_interface(st
  		goto out_remove_mac;
  
  	if (!mvm->bf_allowed_vif &&
- 	    vif->type == NL80211_IFTYPE_STATION && !vif->p2p &&
- 	    mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_BF_UPDATED){
+ 	    vif->type == NL80211_IFTYPE_STATION && !vif->p2p) {
  		mvm->bf_allowed_vif = mvmvif;
  		vif->driver_flags |= IEEE80211_VIF_BEACON_FILTER |
  				     IEEE80211_VIF_SUPPORTS_CQM_RSSI;
@@@ -1007,7 -1005,7 +1005,7 @@@ static void iwl_mvm_mc_iface_iterator(v
  	memcpy(cmd->bssid, vif->bss_conf.bssid, ETH_ALEN);
  	len = roundup(sizeof(*cmd) + cmd->count * ETH_ALEN, 4);
  
 -	ret = iwl_mvm_send_cmd_pdu(mvm, MCAST_FILTER_CMD, CMD_SYNC, len, cmd);
 +	ret = iwl_mvm_send_cmd_pdu(mvm, MCAST_FILTER_CMD, CMD_ASYNC, len, cmd);
  	if (ret)
  		IWL_ERR(mvm, "mcast filter cmd error. ret=%d\n", ret);
  }
@@@ -1023,7 -1021,7 +1021,7 @@@ static void iwl_mvm_recalc_multicast(st
  	if (WARN_ON_ONCE(!mvm->mcast_filter_cmd))
  		return;
  
 -	ieee80211_iterate_active_interfaces(
 +	ieee80211_iterate_active_interfaces_atomic(
  		mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
  		iwl_mvm_mc_iface_iterator, &iter_data);
  }
@@@ -1223,6 -1221,10 +1221,10 @@@ static int iwl_mvm_configure_bcast_filt
  	if (!(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_BCAST_FILTERING))
  		return 0;
  
+ 	/* bcast filtering isn't supported for P2P client */
+ 	if (vif->p2p)
+ 		return 0;
+ 
  	if (!iwl_mvm_bcast_filter_build_cmd(mvm, &cmd))
  		return 0;
  
@@@ -1697,6 -1699,11 +1699,11 @@@ static int iwl_mvm_mac_sta_state(struc
  		ret = iwl_mvm_add_sta(mvm, vif, sta);
  	} else if (old_state == IEEE80211_STA_NONE &&
  		   new_state == IEEE80211_STA_AUTH) {
+ 		/*
+ 		 * EBS may be disabled due to previous failures reported by FW.
+ 		 * Reset EBS status here assuming environment has been changed.
+ 		 */
+ 		mvm->last_ebs_successful = true;
  		ret = 0;
  	} else if (old_state == IEEE80211_STA_AUTH &&
  		   new_state == IEEE80211_STA_ASSOC) {
@@@ -1807,11 -1814,6 +1814,11 @@@ static int iwl_mvm_mac_sched_scan_start
  
  	mutex_lock(&mvm->mutex);
  
 +	if (!iwl_mvm_is_idle(mvm)) {
 +		ret = -EBUSY;
 +		goto out;
 +	}
 +
  	switch (mvm->scan_status) {
  	case IWL_MVM_SCAN_OS:
  		IWL_DEBUG_SCAN(mvm, "Stopping previous scan for sched_scan\n");
diff --combined drivers/net/wireless/iwlwifi/mvm/mvm.h
index f1ec098,17c42da..107d864
--- a/drivers/net/wireless/iwlwifi/mvm/mvm.h
+++ b/drivers/net/wireless/iwlwifi/mvm/mvm.h
@@@ -164,7 -164,6 +164,6 @@@ enum iwl_dbgfs_pm_mask 
  	MVM_DEBUGFS_PM_SKIP_DTIM_PERIODS = BIT(2),
  	MVM_DEBUGFS_PM_RX_DATA_TIMEOUT = BIT(3),
  	MVM_DEBUGFS_PM_TX_DATA_TIMEOUT = BIT(4),
- 	MVM_DEBUGFS_PM_DISABLE_POWER_OFF = BIT(5),
  	MVM_DEBUGFS_PM_LPRX_ENA = BIT(6),
  	MVM_DEBUGFS_PM_LPRX_RSSI_THRESHOLD = BIT(7),
  	MVM_DEBUGFS_PM_SNOOZE_ENABLE = BIT(8),
@@@ -177,7 -176,6 +176,6 @@@ struct iwl_dbgfs_pm 
  	u32 tx_data_timeout;
  	bool skip_over_dtim;
  	u8 skip_dtim_periods;
- 	bool disable_power_off;
  	bool lprx_ena;
  	u32 lprx_rssi_threshold;
  	bool snooze_ena;
@@@ -232,6 -230,7 +230,7 @@@ enum iwl_mvm_ref_type 
  	IWL_MVM_REF_USER,
  	IWL_MVM_REF_TX,
  	IWL_MVM_REF_TX_AGG,
+ 	IWL_MVM_REF_EXIT_WORK,
  
  	IWL_MVM_REF_COUNT,
  };
@@@ -265,6 -264,7 +264,7 @@@ struct iwl_mvm_vif_bf_data 
   * @uploaded: indicates the MAC context has been added to the device
   * @ap_ibss_active: indicates that AP/IBSS is configured and that the interface
   *	should get quota etc.
+  * @pm_enabled - Indicate if MAC power management is allowed
   * @monitor_active: indicates that monitor context is configured, and that the
   *	interface should get quota etc.
   * @low_latency: indicates that this interface is in low-latency mode
@@@ -283,6 -283,7 +283,7 @@@ struct iwl_mvm_vif 
  
  	bool uploaded;
  	bool ap_ibss_active;
+ 	bool pm_enabled;
  	bool monitor_active;
  	bool low_latency;
  	struct iwl_mvm_vif_bf_data bf_data;
@@@ -451,6 -452,11 +452,11 @@@ struct iwl_mvm_frame_stats 
  	int last_frame_idx;
  };
  
+ enum {
+ 	D0I3_DEFER_WAKEUP,
+ 	D0I3_PENDING_WAKEUP,
+ };
+ 
  struct iwl_mvm {
  	/* for logger access */
  	struct device *dev;
@@@ -535,6 -541,8 +541,8 @@@
  	/* Internal station */
  	struct iwl_mvm_int_sta aux_sta;
  
+ 	bool last_ebs_successful;
+ 
  	u8 scan_last_antenna_idx; /* to toggle TX between antennas */
  	u8 mgmt_last_antenna_idx;
  
@@@ -578,6 -586,8 +586,8 @@@
  	void *fw_error_dump;
  	void *fw_error_sram;
  	u32 fw_error_sram_len;
+ 	u32 *fw_error_rxf;
+ 	u32 fw_error_rxf_len;
  
  	struct led_classdev led;
  
@@@ -601,6 -611,9 +611,9 @@@
  	bool d0i3_offloading;
  	struct work_struct d0i3_exit_work;
  	struct sk_buff_head d0i3_tx;
+ 	/* protect d0i3_suspend_flags */
+ 	struct mutex d0i3_suspend_mutex;
+ 	unsigned long d0i3_suspend_flags;
  	/* sync d0i3_tx queue and IWL_MVM_STATUS_IN_D0I3 status flag */
  	spinlock_t d0i3_tx_lock;
  	wait_queue_head_t d0i3_exit_waitq;
@@@ -629,8 -642,6 +642,6 @@@
  
  	/* Indicate if device power save is allowed */
  	bool ps_disabled;
- 	/* Indicate if device power management is allowed */
- 	bool pm_disabled;
  };
  
  /* Extract MVM priv from op_mode and _hw */
@@@ -705,6 -716,7 +716,7 @@@ void iwl_mvm_dump_nic_error_log(struct 
  #ifdef CONFIG_IWLWIFI_DEBUGFS
  void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm);
  void iwl_mvm_fw_error_sram_dump(struct iwl_mvm *mvm);
+ void iwl_mvm_fw_error_rxf_dump(struct iwl_mvm *mvm);
  #endif
  u8 first_antenna(u8 mask);
  u8 iwl_mvm_next_antenna(struct iwl_mvm *mvm, u8 valid, u8 last_idx);
@@@ -874,8 -886,6 +886,6 @@@ void iwl_mvm_update_frame_stats(struct 
  int rs_pretty_print_rate(char *buf, const u32 rate);
  
  /* power management */
- int iwl_power_legacy_set_cam_mode(struct iwl_mvm *mvm);
- 
  int iwl_mvm_power_update_device(struct iwl_mvm *mvm);
  int iwl_mvm_power_update_mac(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
  int iwl_mvm_power_mac_dbgfs_read(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
@@@ -922,9 -932,9 +932,9 @@@ int iwl_mvm_send_proto_offload(struct i
  void iwl_mvm_ref(struct iwl_mvm *mvm, enum iwl_mvm_ref_type ref_type);
  void iwl_mvm_unref(struct iwl_mvm *mvm, enum iwl_mvm_ref_type ref_type);
  void iwl_mvm_d0i3_enable_tx(struct iwl_mvm *mvm, __le16 *qos_seq);
+ int _iwl_mvm_exit_d0i3(struct iwl_mvm *mvm);
  
  /* BT Coex */
- int iwl_send_bt_prio_tbl(struct iwl_mvm *mvm);
  int iwl_send_bt_init_conf(struct iwl_mvm *mvm);
  int iwl_mvm_rx_bt_coex_notif(struct iwl_mvm *mvm,
  			     struct iwl_rx_cmd_buffer *rxb,
@@@ -936,6 -946,8 +946,8 @@@ u16 iwl_mvm_coex_agg_time_limit(struct 
  				struct ieee80211_sta *sta);
  bool iwl_mvm_bt_coex_is_mimo_allowed(struct iwl_mvm *mvm,
  				     struct ieee80211_sta *sta);
+ bool iwl_mvm_bt_coex_is_tpc_allowed(struct iwl_mvm *mvm,
+ 				    enum ieee80211_band band);
  u8 iwl_mvm_bt_coex_tx_prio(struct iwl_mvm *mvm, struct ieee80211_hdr *hdr,
  			   struct ieee80211_tx_info *info, u8 ac);
  int iwl_mvm_bt_coex_reduced_txp(struct iwl_mvm *mvm, u8 sta_id, bool enable);
@@@ -1003,9 -1015,6 +1015,9 @@@ static inline bool iwl_mvm_vif_low_late
  	return mvmvif->low_latency;
  }
  
 +/* Assoc status */
 +bool iwl_mvm_is_idle(struct iwl_mvm *mvm);
 +
  /* Thermal management and CT-kill */
  void iwl_mvm_tt_tx_backoff(struct iwl_mvm *mvm, u32 backoff);
  void iwl_mvm_tt_handler(struct iwl_mvm *mvm);
diff --combined drivers/net/wireless/iwlwifi/mvm/rs.c
index e1c8388,d44b2b3..857ddaf
--- a/drivers/net/wireless/iwlwifi/mvm/rs.c
+++ b/drivers/net/wireless/iwlwifi/mvm/rs.c
@@@ -527,6 -527,9 +527,9 @@@ static void rs_rate_scale_clear_tbl_win
  	IWL_DEBUG_RATE(mvm, "Clearing up window stats\n");
  	for (i = 0; i < IWL_RATE_COUNT; i++)
  		rs_rate_scale_clear_window(&tbl->win[i]);
+ 
+ 	for (i = 0; i < ARRAY_SIZE(tbl->tpc_win); i++)
+ 		rs_rate_scale_clear_window(&tbl->tpc_win[i]);
  }
  
  static inline u8 rs_is_valid_ant(u8 valid_antenna, u8 ant_type)
@@@ -656,17 -659,34 +659,34 @@@ static int _rs_collect_tx_data(struct i
  	return 0;
  }
  
- static int rs_collect_tx_data(struct iwl_scale_tbl_info *tbl,
- 			      int scale_index, int attempts, int successes)
+ static int rs_collect_tx_data(struct iwl_lq_sta *lq_sta,
+ 			      struct iwl_scale_tbl_info *tbl,
+ 			      int scale_index, int attempts, int successes,
+ 			      u8 reduced_txp)
  {
  	struct iwl_rate_scale_data *window = NULL;
+ 	int ret;
  
  	if (scale_index < 0 || scale_index >= IWL_RATE_COUNT)
  		return -EINVAL;
  
+ 	if (tbl->column != RS_COLUMN_INVALID) {
+ 		lq_sta->tx_stats[tbl->column][scale_index].total += attempts;
+ 		lq_sta->tx_stats[tbl->column][scale_index].success += successes;
+ 	}
+ 
  	/* Select window for current tx bit rate */
  	window = &(tbl->win[scale_index]);
  
+ 	ret = _rs_collect_tx_data(tbl, scale_index, attempts, successes,
+ 				  window);
+ 	if (ret)
+ 		return ret;
+ 
+ 	if (WARN_ON_ONCE(reduced_txp > TPC_MAX_REDUCTION))
+ 		return -EINVAL;
+ 
+ 	window = &tbl->tpc_win[reduced_txp];
  	return _rs_collect_tx_data(tbl, scale_index, attempts, successes,
  				   window);
  }
@@@ -1000,6 -1020,7 +1020,7 @@@ static void rs_tx_status(void *mvm_r, s
  	u32 ucode_rate;
  	struct rs_rate rate;
  	struct iwl_scale_tbl_info *curr_tbl, *other_tbl, *tmp_tbl;
+ 	u8 reduced_txp = (uintptr_t)info->status.status_driver_data[0];
  
  	/* Treat uninitialized rate scaling data same as non-existing. */
  	if (!lq_sta) {
@@@ -1010,7 -1031,7 +1031,7 @@@
  		return;
  	}
  
 -#ifdef CPTCFG_MAC80211_DEBUGFS
 +#ifdef CONFIG_MAC80211_DEBUGFS
  	/* Disable last tx check if we are debugging with fixed rate */
  	if (lq_sta->dbg_fixed_rate) {
  		IWL_DEBUG_RATE(mvm, "Fixed rate. avoid rate scaling\n");
@@@ -1141,9 -1162,10 +1162,10 @@@
  	if (info->flags & IEEE80211_TX_STAT_AMPDU) {
  		ucode_rate = le32_to_cpu(table->rs_table[0]);
  		rs_rate_from_ucode_rate(ucode_rate, info->band, &rate);
- 		rs_collect_tx_data(curr_tbl, rate.index,
+ 		rs_collect_tx_data(lq_sta, curr_tbl, rate.index,
  				   info->status.ampdu_len,
- 				   info->status.ampdu_ack_len);
+ 				   info->status.ampdu_ack_len,
+ 				   reduced_txp);
  
  		/* Update success/fail counts if not searching for new mode */
  		if (lq_sta->rs_state == RS_STATE_STAY_IN_COLUMN) {
@@@ -1176,8 -1198,9 +1198,9 @@@
  			else
  				continue;
  
- 			rs_collect_tx_data(tmp_tbl, rate.index, 1,
- 					   i < retries ? 0 : legacy_success);
+ 			rs_collect_tx_data(lq_sta, tmp_tbl, rate.index, 1,
+ 					   i < retries ? 0 : legacy_success,
+ 					   reduced_txp);
  		}
  
  		/* Update success/fail counts if not searching for new mode */
@@@ -1188,6 -1211,7 +1211,7 @@@
  	}
  	/* The last TX rate is cached in lq_sta; it's set in if/else above */
  	lq_sta->last_rate_n_flags = ucode_rate;
+ 	IWL_DEBUG_RATE(mvm, "reduced txpower: %d\n", reduced_txp);
  done:
  	/* See if there's a better rate or modulation mode to try. */
  	if (sta && sta->supp_rates[sband->band])
@@@ -1769,6 -1793,198 +1793,198 @@@ out
  	return action;
  }
  
+ static void rs_get_adjacent_txp(struct iwl_mvm *mvm, int index,
+ 				int *weaker, int *stronger)
+ {
+ 	*weaker = index + TPC_TX_POWER_STEP;
+ 	if (*weaker > TPC_MAX_REDUCTION)
+ 		*weaker = TPC_INVALID;
+ 
+ 	*stronger = index - TPC_TX_POWER_STEP;
+ 	if (*stronger < 0)
+ 		*stronger = TPC_INVALID;
+ }
+ 
+ static bool rs_tpc_allowed(struct iwl_mvm *mvm, struct rs_rate *rate,
+ 			   enum ieee80211_band band)
+ {
+ 	int index = rate->index;
+ 
+ 	/*
+ 	 * allow tpc only if power management is enabled, or bt coex
+ 	 * activity grade allows it and we are on 2.4Ghz.
+ 	 */
+ 	if (iwlmvm_mod_params.power_scheme == IWL_POWER_SCHEME_CAM &&
+ 	    !iwl_mvm_bt_coex_is_tpc_allowed(mvm, band))
+ 		return false;
+ 
+ 	IWL_DEBUG_RATE(mvm, "check rate, table type: %d\n", rate->type);
+ 	if (is_legacy(rate))
+ 		return index == IWL_RATE_54M_INDEX;
+ 	if (is_ht(rate))
+ 		return index == IWL_RATE_MCS_7_INDEX;
+ 	if (is_vht(rate))
+ 		return index == IWL_RATE_MCS_7_INDEX ||
+ 		       index == IWL_RATE_MCS_8_INDEX ||
+ 		       index == IWL_RATE_MCS_9_INDEX;
+ 
+ 	WARN_ON_ONCE(1);
+ 	return false;
+ }
+ 
+ enum tpc_action {
+ 	TPC_ACTION_STAY,
+ 	TPC_ACTION_DECREASE,
+ 	TPC_ACTION_INCREASE,
+ 	TPC_ACTION_NO_RESTIRCTION,
+ };
+ 
+ static enum tpc_action rs_get_tpc_action(struct iwl_mvm *mvm,
+ 					 s32 sr, int weak, int strong,
+ 					 int current_tpt,
+ 					 int weak_tpt, int strong_tpt)
+ {
+ 	/* stay until we have valid tpt */
+ 	if (current_tpt == IWL_INVALID_VALUE) {
+ 		IWL_DEBUG_RATE(mvm, "no current tpt. stay.\n");
+ 		return TPC_ACTION_STAY;
+ 	}
+ 
+ 	/* Too many failures, increase txp */
+ 	if (sr <= TPC_SR_FORCE_INCREASE || current_tpt == 0) {
+ 		IWL_DEBUG_RATE(mvm, "increase txp because of weak SR\n");
+ 		return TPC_ACTION_NO_RESTIRCTION;
+ 	}
+ 
+ 	/* try decreasing first if applicable */
+ 	if (weak != TPC_INVALID) {
+ 		if (weak_tpt == IWL_INVALID_VALUE &&
+ 		    (strong_tpt == IWL_INVALID_VALUE ||
+ 		     current_tpt >= strong_tpt)) {
+ 			IWL_DEBUG_RATE(mvm,
+ 				       "no weak txp measurement. decrease txp\n");
+ 			return TPC_ACTION_DECREASE;
+ 		}
+ 
+ 		if (weak_tpt > current_tpt) {
+ 			IWL_DEBUG_RATE(mvm,
+ 				       "lower txp has better tpt. decrease txp\n");
+ 			return TPC_ACTION_DECREASE;
+ 		}
+ 	}
+ 
+ 	/* next, increase if needed */
+ 	if (sr < TPC_SR_NO_INCREASE && strong != TPC_INVALID) {
+ 		if (weak_tpt == IWL_INVALID_VALUE &&
+ 		    strong_tpt != IWL_INVALID_VALUE &&
+ 		    current_tpt < strong_tpt) {
+ 			IWL_DEBUG_RATE(mvm,
+ 				       "higher txp has better tpt. increase txp\n");
+ 			return TPC_ACTION_INCREASE;
+ 		}
+ 
+ 		if (weak_tpt < current_tpt &&
+ 		    (strong_tpt == IWL_INVALID_VALUE ||
+ 		     strong_tpt > current_tpt)) {
+ 			IWL_DEBUG_RATE(mvm,
+ 				       "lower txp has worse tpt. increase txp\n");
+ 			return TPC_ACTION_INCREASE;
+ 		}
+ 	}
+ 
+ 	IWL_DEBUG_RATE(mvm, "no need to increase or decrease txp - stay\n");
+ 	return TPC_ACTION_STAY;
+ }
+ 
+ static bool rs_tpc_perform(struct iwl_mvm *mvm,
+ 			   struct ieee80211_sta *sta,
+ 			   struct iwl_lq_sta *lq_sta,
+ 			   struct iwl_scale_tbl_info *tbl)
+ {
+ 	struct iwl_mvm_sta *mvm_sta = (void *)sta->drv_priv;
+ 	struct ieee80211_vif *vif = mvm_sta->vif;
+ 	struct ieee80211_chanctx_conf *chanctx_conf;
+ 	enum ieee80211_band band;
+ 	struct iwl_rate_scale_data *window;
+ 	struct rs_rate *rate = &tbl->rate;
+ 	enum tpc_action action;
+ 	s32 sr;
+ 	u8 cur = lq_sta->lq.reduced_tpc;
+ 	int current_tpt;
+ 	int weak, strong;
+ 	int weak_tpt = IWL_INVALID_VALUE, strong_tpt = IWL_INVALID_VALUE;
+ 
+ #ifdef CONFIG_MAC80211_DEBUGFS
+ 	if (lq_sta->dbg_fixed_txp_reduction <= TPC_MAX_REDUCTION) {
+ 		IWL_DEBUG_RATE(mvm, "fixed tpc: %d",
+ 			       lq_sta->dbg_fixed_txp_reduction);
+ 		lq_sta->lq.reduced_tpc = lq_sta->dbg_fixed_txp_reduction;
+ 		return cur != lq_sta->dbg_fixed_txp_reduction;
+ 	}
+ #endif
+ 
+ 	rcu_read_lock();
+ 	chanctx_conf = rcu_dereference(vif->chanctx_conf);
+ 	if (WARN_ON(!chanctx_conf))
+ 		band = IEEE80211_NUM_BANDS;
+ 	else
+ 		band = chanctx_conf->def.chan->band;
+ 	rcu_read_unlock();
+ 
+ 	if (!rs_tpc_allowed(mvm, rate, band)) {
+ 		IWL_DEBUG_RATE(mvm,
+ 			       "tpc is not allowed. remove txp restrictions");
+ 		lq_sta->lq.reduced_tpc = TPC_NO_REDUCTION;
+ 		return cur != TPC_NO_REDUCTION;
+ 	}
+ 
+ 	rs_get_adjacent_txp(mvm, cur, &weak, &strong);
+ 
+ 	/* Collect measured throughputs for current and adjacent rates */
+ 	window = tbl->tpc_win;
+ 	sr = window[cur].success_ratio;
+ 	current_tpt = window[cur].average_tpt;
+ 	if (weak != TPC_INVALID)
+ 		weak_tpt = window[weak].average_tpt;
+ 	if (strong != TPC_INVALID)
+ 		strong_tpt = window[strong].average_tpt;
+ 
+ 	IWL_DEBUG_RATE(mvm,
+ 		       "(TPC: %d): cur_tpt %d SR %d weak %d strong %d weak_tpt %d strong_tpt %d\n",
+ 		       cur, current_tpt, sr, weak, strong,
+ 		       weak_tpt, strong_tpt);
+ 
+ 	action = rs_get_tpc_action(mvm, sr, weak, strong,
+ 				   current_tpt, weak_tpt, strong_tpt);
+ 
+ 	/* override actions if we are on the edge */
+ 	if (weak == TPC_INVALID && action == TPC_ACTION_DECREASE) {
+ 		IWL_DEBUG_RATE(mvm, "already in lowest txp, stay");
+ 		action = TPC_ACTION_STAY;
+ 	} else if (strong == TPC_INVALID &&
+ 		   (action == TPC_ACTION_INCREASE ||
+ 		    action == TPC_ACTION_NO_RESTIRCTION)) {
+ 		IWL_DEBUG_RATE(mvm, "already in highest txp, stay");
+ 		action = TPC_ACTION_STAY;
+ 	}
+ 
+ 	switch (action) {
+ 	case TPC_ACTION_DECREASE:
+ 		lq_sta->lq.reduced_tpc = weak;
+ 		return true;
+ 	case TPC_ACTION_INCREASE:
+ 		lq_sta->lq.reduced_tpc = strong;
+ 		return true;
+ 	case TPC_ACTION_NO_RESTIRCTION:
+ 		lq_sta->lq.reduced_tpc = TPC_NO_REDUCTION;
+ 		return true;
+ 	case TPC_ACTION_STAY:
+ 		/* do nothing */
+ 		break;
+ 	}
+ 	return false;
+ }
+ 
  /*
   * Do rate scaling and search for new modulation mode.
   */
@@@ -2019,6 -2235,8 +2235,8 @@@ static void rs_rate_scale_perform(struc
  		break;
  	case RS_ACTION_STAY:
  		/* No change */
+ 		update_lq = rs_tpc_perform(mvm, sta, lq_sta, tbl);
+ 		break;
  	default:
  		break;
  	}
@@@ -2478,6 -2696,7 +2696,7 @@@ void iwl_mvm_rs_rate_init(struct iwl_mv
  	lq_sta->is_agg = 0;
  #ifdef CONFIG_MAC80211_DEBUGFS
  	lq_sta->dbg_fixed_rate = 0;
+ 	lq_sta->dbg_fixed_txp_reduction = TPC_INVALID;
  #endif
  #ifdef CONFIG_IWLWIFI_DEBUGFS
  	iwl_mvm_reset_frame_stats(mvm, &mvm->drv_rx_stats);
@@@ -2653,6 -2872,7 +2872,7 @@@ static void rs_fill_lq_cmd(struct iwl_m
  		rs_build_rates_table_from_fixed(mvm, lq_cmd,
  						lq_sta->band,
  						lq_sta->dbg_fixed_rate);
+ 		lq_cmd->reduced_tpc = 0;
  		ant = (lq_sta->dbg_fixed_rate & RATE_MCS_ANT_ABC_MSK) >>
  			RATE_MCS_ANT_POS;
  	} else
@@@ -2783,7 -3003,6 +3003,6 @@@ static ssize_t rs_sta_dbgfs_scale_table
  	size_t buf_size;
  	u32 parsed_rate;
  
- 
  	mvm = lq_sta->drv;
  	memset(buf, 0, sizeof(buf));
  	buf_size = min(count, sizeof(buf) -  1);
@@@ -2856,6 -3075,7 +3075,7 @@@ static ssize_t rs_sta_dbgfs_scale_table
  			lq_sta->lq.agg_disable_start_th,
  			lq_sta->lq.agg_frame_cnt_limit);
  
+ 	desc += sprintf(buff+desc, "reduced tpc=%d\n", lq_sta->lq.reduced_tpc);
  	desc += sprintf(buff+desc,
  			"Start idx [0]=0x%x [1]=0x%x [2]=0x%x [3]=0x%x\n",
  			lq_sta->lq.initial_rate_index[0],
@@@ -2928,6 -3148,94 +3148,94 @@@ static const struct file_operations rs_
  	.llseek = default_llseek,
  };
  
+ static ssize_t rs_sta_dbgfs_drv_tx_stats_read(struct file *file,
+ 					      char __user *user_buf,
+ 					      size_t count, loff_t *ppos)
+ {
+ 	static const char * const column_name[] = {
+ 		[RS_COLUMN_LEGACY_ANT_A] = "LEGACY_ANT_A",
+ 		[RS_COLUMN_LEGACY_ANT_B] = "LEGACY_ANT_B",
+ 		[RS_COLUMN_SISO_ANT_A] = "SISO_ANT_A",
+ 		[RS_COLUMN_SISO_ANT_B] = "SISO_ANT_B",
+ 		[RS_COLUMN_SISO_ANT_A_SGI] = "SISO_ANT_A_SGI",
+ 		[RS_COLUMN_SISO_ANT_B_SGI] = "SISO_ANT_B_SGI",
+ 		[RS_COLUMN_MIMO2] = "MIMO2",
+ 		[RS_COLUMN_MIMO2_SGI] = "MIMO2_SGI",
+ 	};
+ 
+ 	static const char * const rate_name[] = {
+ 		[IWL_RATE_1M_INDEX] = "1M",
+ 		[IWL_RATE_2M_INDEX] = "2M",
+ 		[IWL_RATE_5M_INDEX] = "5.5M",
+ 		[IWL_RATE_11M_INDEX] = "11M",
+ 		[IWL_RATE_6M_INDEX] = "6M|MCS0",
+ 		[IWL_RATE_9M_INDEX] = "9M",
+ 		[IWL_RATE_12M_INDEX] = "12M|MCS1",
+ 		[IWL_RATE_18M_INDEX] = "18M|MCS2",
+ 		[IWL_RATE_24M_INDEX] = "24M|MCS3",
+ 		[IWL_RATE_36M_INDEX] = "36M|MCS4",
+ 		[IWL_RATE_48M_INDEX] = "48M|MCS5",
+ 		[IWL_RATE_54M_INDEX] = "54M|MCS6",
+ 		[IWL_RATE_MCS_7_INDEX] = "MCS7",
+ 		[IWL_RATE_MCS_8_INDEX] = "MCS8",
+ 		[IWL_RATE_MCS_9_INDEX] = "MCS9",
+ 	};
+ 
+ 	char *buff, *pos, *endpos;
+ 	int col, rate;
+ 	ssize_t ret;
+ 	struct iwl_lq_sta *lq_sta = file->private_data;
+ 	struct rs_rate_stats *stats;
+ 	static const size_t bufsz = 1024;
+ 
+ 	buff = kmalloc(bufsz, GFP_KERNEL);
+ 	if (!buff)
+ 		return -ENOMEM;
+ 
+ 	pos = buff;
+ 	endpos = pos + bufsz;
+ 
+ 	pos += scnprintf(pos, endpos - pos, "COLUMN,");
+ 	for (rate = 0; rate < IWL_RATE_COUNT; rate++)
+ 		pos += scnprintf(pos, endpos - pos, "%s,", rate_name[rate]);
+ 	pos += scnprintf(pos, endpos - pos, "\n");
+ 
+ 	for (col = 0; col < RS_COLUMN_COUNT; col++) {
+ 		pos += scnprintf(pos, endpos - pos,
+ 				 "%s,", column_name[col]);
+ 
+ 		for (rate = 0; rate < IWL_RATE_COUNT; rate++) {
+ 			stats = &(lq_sta->tx_stats[col][rate]);
+ 			pos += scnprintf(pos, endpos - pos,
+ 					 "%llu/%llu,",
+ 					 stats->success,
+ 					 stats->total);
+ 		}
+ 		pos += scnprintf(pos, endpos - pos, "\n");
+ 	}
+ 
+ 	ret = simple_read_from_buffer(user_buf, count, ppos, buff, pos - buff);
+ 	kfree(buff);
+ 	return ret;
+ }
+ 
+ static ssize_t rs_sta_dbgfs_drv_tx_stats_write(struct file *file,
+ 					       const char __user *user_buf,
+ 					       size_t count, loff_t *ppos)
+ {
+ 	struct iwl_lq_sta *lq_sta = file->private_data;
+ 	memset(lq_sta->tx_stats, 0, sizeof(lq_sta->tx_stats));
+ 
+ 	return count;
+ }
+ 
+ static const struct file_operations rs_sta_dbgfs_drv_tx_stats_ops = {
+ 	.read = rs_sta_dbgfs_drv_tx_stats_read,
+ 	.write = rs_sta_dbgfs_drv_tx_stats_write,
+ 	.open = simple_open,
+ 	.llseek = default_llseek,
+ };
+ 
  static void rs_add_debugfs(void *mvm, void *mvm_sta, struct dentry *dir)
  {
  	struct iwl_lq_sta *lq_sta = mvm_sta;
@@@ -2937,9 -3245,15 +3245,15 @@@
  	lq_sta->rs_sta_dbgfs_stats_table_file =
  		debugfs_create_file("rate_stats_table", S_IRUSR, dir,
  				    lq_sta, &rs_sta_dbgfs_stats_table_ops);
+ 	lq_sta->rs_sta_dbgfs_drv_tx_stats_file =
+ 		debugfs_create_file("drv_tx_stats", S_IRUSR | S_IWUSR, dir,
+ 				    lq_sta, &rs_sta_dbgfs_drv_tx_stats_ops);
  	lq_sta->rs_sta_dbgfs_tx_agg_tid_en_file =
  		debugfs_create_u8("tx_agg_tid_enable", S_IRUSR | S_IWUSR, dir,
  				  &lq_sta->tx_agg_tid_en);
+ 	lq_sta->rs_sta_dbgfs_reduced_txp_file =
+ 		debugfs_create_u8("reduced_tpc", S_IRUSR | S_IWUSR, dir,
+ 				  &lq_sta->dbg_fixed_txp_reduction);
  }
  
  static void rs_remove_debugfs(void *mvm, void *mvm_sta)
@@@ -2947,7 -3261,9 +3261,9 @@@
  	struct iwl_lq_sta *lq_sta = mvm_sta;
  	debugfs_remove(lq_sta->rs_sta_dbgfs_scale_table_file);
  	debugfs_remove(lq_sta->rs_sta_dbgfs_stats_table_file);
+ 	debugfs_remove(lq_sta->rs_sta_dbgfs_drv_tx_stats_file);
  	debugfs_remove(lq_sta->rs_sta_dbgfs_tx_agg_tid_en_file);
+ 	debugfs_remove(lq_sta->rs_sta_dbgfs_reduced_txp_file);
  }
  #endif
  
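
The new tx power control (TPC) code in rs.c climbs one reduction step at a time, comparing measured throughput at the current setting against the adjacent weaker and stronger steps, with a success-ratio escape hatch that lifts all restrictions when too many frames fail. As a reading aid, here is a standalone condensation of rs_get_tpc_action() with two worked inputs; the SR_* thresholds below are assumed stand-ins, not the driver's real TPC_SR_* values, and INVALID_VALUE doubles for both "no adjacent step" and "no measurement".

#include <stdio.h>

#define INVALID_VALUE     (-1)
#define SR_FORCE_INCREASE 25   /* assumed stand-in for TPC_SR_FORCE_INCREASE */
#define SR_NO_INCREASE    85   /* assumed stand-in for TPC_SR_NO_INCREASE */

enum action { STAY, DECREASE, INCREASE, NO_RESTRICTION };

static enum action tpc_action(int sr, int weak, int strong,
                              int cur_tpt, int weak_tpt, int strong_tpt)
{
    if (cur_tpt == INVALID_VALUE)
        return STAY;                 /* no data yet: don't move */
    if (sr <= SR_FORCE_INCREASE || cur_tpt == 0)
        return NO_RESTRICTION;       /* too many failures: back to full power */

    if (weak != INVALID_VALUE &&
        ((weak_tpt == INVALID_VALUE &&
          (strong_tpt == INVALID_VALUE || cur_tpt >= strong_tpt)) ||
         weak_tpt > cur_tpt))
        return DECREASE;             /* lower power untested or measurably better */

    if (sr < SR_NO_INCREASE && strong != INVALID_VALUE &&
        ((weak_tpt == INVALID_VALUE && strong_tpt != INVALID_VALUE &&
          cur_tpt < strong_tpt) ||
         (weak_tpt < cur_tpt &&
          (strong_tpt == INVALID_VALUE || strong_tpt > cur_tpt))))
        return INCREASE;             /* higher power looks better */

    return STAY;
}

int main(void)
{
    /* lower step untested, current at least as good as higher: try lowering */
    printf("%d\n", tpc_action(90, 1, INVALID_VALUE, 100,
                              INVALID_VALUE, INVALID_VALUE)); /* 1 = DECREASE */

    /* success ratio collapsed: lift all power restrictions */
    printf("%d\n", tpc_action(10, 1, 0, 100, 80, 120));  /* 3 = NO_RESTRICTION */
    return 0;
}
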
diff --combined drivers/net/wireless/iwlwifi/mvm/scan.c
index c28de54,63e7b16..36ae01a
--- a/drivers/net/wireless/iwlwifi/mvm/scan.c
+++ b/drivers/net/wireless/iwlwifi/mvm/scan.c
@@@ -277,22 -277,51 +277,22 @@@ static void iwl_mvm_scan_calc_params(st
  					    IEEE80211_IFACE_ITER_NORMAL,
  					    iwl_mvm_scan_condition_iterator,
  					    &global_bound);
 -	/*
 -	 * Under low latency traffic passive scan is fragmented meaning
 -	 * that dwell on a particular channel will be fragmented. Each fragment
 -	 * dwell time is 20ms and fragments period is 105ms. Skipping to next
 -	 * channel will be delayed by the same period - 105ms. So suspend_time
 -	 * parameter describing both fragments and channels skipping periods is
 -	 * set to 105ms. This value is chosen so that overall passive scan
 -	 * duration will not be too long. Max_out_time in this case is set to
 -	 * 70ms, so for active scanning operating channel will be left for 70ms
 -	 * while for passive still for 20ms (fragment dwell).
 -	 */
 -	if (global_bound) {
 -		if (!iwl_mvm_low_latency(mvm)) {
 -			params->suspend_time = ieee80211_tu_to_usec(100);
 -			params->max_out_time = ieee80211_tu_to_usec(600);
 -		} else {
 -			params->suspend_time = ieee80211_tu_to_usec(105);
 -			/* P2P doesn't support fragmented passive scan, so
 -			 * configure max_out_time to be at least longest dwell
 -			 * time for passive scan.
 -			 */
 -			if (vif->type == NL80211_IFTYPE_STATION && !vif->p2p) {
 -				params->max_out_time = ieee80211_tu_to_usec(70);
 -				params->passive_fragmented = true;
 -			} else {
 -				u32 passive_dwell;
  
 -				/*
 -				 * Use band G so that passive channel dwell time
 -				 * will be assigned with maximum value.
 -				 */
 -				band = IEEE80211_BAND_2GHZ;
 -				passive_dwell = iwl_mvm_get_passive_dwell(band);
 -				params->max_out_time =
 -					ieee80211_tu_to_usec(passive_dwell);
 -			}
 -		}
 +	if (!global_bound)
 +		goto not_bound;
 +
 +	params->suspend_time = 100;
 +	params->max_out_time = 600;
 +
 +	if (iwl_mvm_low_latency(mvm)) {
 +		params->suspend_time = 250;
 +		params->max_out_time = 250;
  	}
  
 +not_bound:
 +
  	for (band = IEEE80211_BAND_2GHZ; band < IEEE80211_NUM_BANDS; band++) {
 -		if (params->passive_fragmented)
 -			params->dwell[band].passive = 20;
 -		else
 -			params->dwell[band].passive =
 -				iwl_mvm_get_passive_dwell(band);
 +		params->dwell[band].passive = iwl_mvm_get_passive_dwell(band);
  		params->dwell[band].active = iwl_mvm_get_active_dwell(band,
  								      n_ssids);
  	}
@@@ -319,7 -348,10 +319,10 @@@ int iwl_mvm_scan_request(struct iwl_mv
  	struct iwl_mvm_scan_params params = {};
  
  	lockdep_assert_held(&mvm->mutex);
- 	BUG_ON(mvm->scan_cmd == NULL);
+ 
+ 	/* we should have failed registration if scan_cmd was NULL */
+ 	if (WARN_ON(mvm->scan_cmd == NULL))
+ 		return -ENOMEM;
  
  	IWL_DEBUG_SCAN(mvm, "Handling mac80211 scan request\n");
  	mvm->scan_status = IWL_MVM_SCAN_OS;
@@@ -538,9 -570,13 +541,13 @@@ int iwl_mvm_rx_scan_offload_complete_no
  	/* scan status must be locked for proper checking */
  	lockdep_assert_held(&mvm->mutex);
  
- 	IWL_DEBUG_SCAN(mvm, "Scheduled scan completed, status %s\n",
+ 	IWL_DEBUG_SCAN(mvm,
+ 		       "Scheduled scan completed, status %s EBS status %s:%d\n",
  		       scan_notif->status == IWL_SCAN_OFFLOAD_COMPLETED ?
- 		       "completed" : "aborted");
+ 		       "completed" : "aborted", scan_notif->ebs_status ==
+ 		       IWL_SCAN_EBS_SUCCESS ? "success" : "failed",
+ 		       scan_notif->ebs_status);
+ 
  
  	/* only call mac80211 completion if the stop was initiated by FW */
  	if (mvm->scan_status == IWL_MVM_SCAN_SCHED) {
@@@ -548,6 -584,8 +555,8 @@@
  		ieee80211_sched_scan_stopped(mvm->hw);
  	}
  
+ 	mvm->last_ebs_successful = !scan_notif->ebs_status;
+ 
  	return 0;
  }
  
@@@ -732,7 -770,7 +741,7 @@@ int iwl_mvm_config_sched_scan(struct iw
  	int band_2ghz = mvm->nvm_data->bands[IEEE80211_BAND_2GHZ].n_channels;
  	int band_5ghz = mvm->nvm_data->bands[IEEE80211_BAND_5GHZ].n_channels;
  	int head = 0;
 -	int tail = band_2ghz + band_5ghz;
 +	int tail = band_2ghz + band_5ghz - 1;
  	u32 ssid_bitmap;
  	int cmd_len;
  	int ret;
@@@ -884,6 -922,11 +893,11 @@@ int iwl_mvm_sched_scan_start(struct iwl
  		scan_req.flags |= cpu_to_le16(IWL_SCAN_OFFLOAD_FLAG_PASS_ALL);
  	}
  
+ 	if (mvm->last_ebs_successful &&
+ 	    mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_EBS_SUPPORT)
+ 		scan_req.flags |=
+ 			cpu_to_le16(IWL_SCAN_OFFLOAD_FLAG_EBS_ACCURATE_MODE);
+ 
  	return iwl_mvm_send_cmd_pdu(mvm, SCAN_OFFLOAD_REQUEST_CMD, CMD_SYNC,
  				    sizeof(scan_req), &scan_req);
  }
diff --combined drivers/net/wireless/iwlwifi/mvm/utils.c
index 2180902,c5f4532..eb2ca64
--- a/drivers/net/wireless/iwlwifi/mvm/utils.c
+++ b/drivers/net/wireless/iwlwifi/mvm/utils.c
@@@ -64,6 -64,7 +64,7 @@@
  
  #include "iwl-debug.h"
  #include "iwl-io.h"
+ #include "iwl-prph.h"
  
  #include "mvm.h"
  #include "fw-api-rs.h"
@@@ -469,6 -470,8 +470,8 @@@ void iwl_mvm_dump_nic_error_log(struct 
  			mvm->status, table.valid);
  	}
  
+ 	/* Do not change this output - scripts rely on it */
+ 
  	IWL_ERR(mvm, "Loaded firmware version: %s\n", mvm->fw->fw_version);
  
  	trace_iwlwifi_dev_ucode_error(trans->dev, table.error_id, table.tsf_low,
@@@ -522,7 -525,7 +525,7 @@@ void iwl_mvm_fw_error_sram_dump(struct 
  	u32 ofs, sram_len;
  	void *sram;
  
- 	if (!mvm->ucode_loaded || mvm->fw_error_sram)
+ 	if (!mvm->ucode_loaded || mvm->fw_error_sram || mvm->fw_error_dump)
  		return;
  
  	img = &mvm->fw->img[mvm->cur_ucode];
@@@ -538,6 -541,47 +541,47 @@@
  	mvm->fw_error_sram_len = sram_len;
  }
  
+ void iwl_mvm_fw_error_rxf_dump(struct iwl_mvm *mvm)
+ {
+ 	int i, reg_val;
+ 	unsigned long flags;
+ 
+ 	if (!mvm->ucode_loaded || mvm->fw_error_rxf || mvm->fw_error_dump)
+ 		return;
+ 
+ 	/* reading buffer size */
+ 	reg_val = iwl_trans_read_prph(mvm->trans, RXF_SIZE_ADDR);
+ 	mvm->fw_error_rxf_len =
+ 		(reg_val & RXF_SIZE_BYTE_CNT_MSK) >> RXF_SIZE_BYTE_CND_POS;
+ 
+ 	/* the register holds the value divided by 128 */
+ 	mvm->fw_error_rxf_len = mvm->fw_error_rxf_len << 7;
+ 
+ 	if (!mvm->fw_error_rxf_len)
+ 		return;
+ 
+ 	mvm->fw_error_rxf =  kzalloc(mvm->fw_error_rxf_len, GFP_ATOMIC);
+ 	if (!mvm->fw_error_rxf) {
+ 		mvm->fw_error_rxf_len = 0;
+ 		return;
+ 	}
+ 
+ 	if (!iwl_trans_grab_nic_access(mvm->trans, false, &flags)) {
+ 		kfree(mvm->fw_error_rxf);
+ 		mvm->fw_error_rxf = NULL;
+ 		mvm->fw_error_rxf_len = 0;
+ 		return;
+ 	}
+ 
+ 	for (i = 0; i < (mvm->fw_error_rxf_len / sizeof(u32)); i++) {
+ 		iwl_trans_write_prph(mvm->trans, RXF_LD_FENCE_OFFSET_ADDR,
+ 				     i * sizeof(u32));
+ 		mvm->fw_error_rxf[i] =
+ 			iwl_trans_read_prph(mvm->trans, RXF_FIFO_RD_FENCE_ADDR);
+ 	}
+ 	iwl_trans_release_nic_access(mvm->trans, &flags);
+ }
+ 
  /**
   * iwl_mvm_send_lq_cmd() - Send link quality command
   * @init: This command is sent as part of station initialization right
@@@ -644,22 -688,3 +688,22 @@@ bool iwl_mvm_low_latency(struct iwl_mv
  
  	return result;
  }
 +
 +static void iwl_mvm_idle_iter(void *_data, u8 *mac, struct ieee80211_vif *vif)
 +{
 +	bool *idle = _data;
 +
 +	if (!vif->bss_conf.idle)
 +		*idle = false;
 +}
 +
 +bool iwl_mvm_is_idle(struct iwl_mvm *mvm)
 +{
 +	bool idle = true;
 +
 +	ieee80211_iterate_active_interfaces_atomic(
 +			mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
 +			iwl_mvm_idle_iter, &idle);
 +
 +	return idle;
 +}
diff --combined drivers/net/wireless/iwlwifi/pcie/trans.c
index 2365553,f98ef1e..c76b148
--- a/drivers/net/wireless/iwlwifi/pcie/trans.c
+++ b/drivers/net/wireless/iwlwifi/pcie/trans.c
@@@ -103,7 -103,6 +103,6 @@@ static void iwl_pcie_set_pwr(struct iwl
  
  /* PCI registers */
  #define PCI_CFG_RETRY_TIMEOUT	0x041
- #define CPU1_CPU2_SEPARATOR_SECTION	0xFFFFCCCC
  
  static void iwl_pcie_apm_config(struct iwl_trans *trans)
  {
@@@ -1053,6 -1052,12 +1052,12 @@@ static void iwl_trans_pcie_write_prph(s
  	iwl_trans_pcie_write32(trans, HBUS_TARG_PRPH_WDAT, val);
  }
  
+ static int iwl_pcie_dummy_napi_poll(struct napi_struct *napi, int budget)
+ {
+ 	WARN_ON(1);
+ 	return 0;
+ }
+ 
  static void iwl_trans_pcie_configure(struct iwl_trans *trans,
  				     const struct iwl_trans_config *trans_cfg)
  {
@@@ -1079,6 -1084,18 +1084,18 @@@
  
  	trans_pcie->command_names = trans_cfg->command_names;
  	trans_pcie->bc_table_dword = trans_cfg->bc_table_dword;
+ 
+ 	/* Initialize NAPI here - it should be before registering to mac80211
+ 	 * in the opmode but after the HW struct is allocated.
+ 	 * As this function may be called again in some corner cases don't
+ 	 * do anything if NAPI was already initialized.
+ 	 */
+ 	if (!trans_pcie->napi.poll && trans->op_mode->ops->napi_add) {
+ 		init_dummy_netdev(&trans_pcie->napi_dev);
+ 		iwl_op_mode_napi_add(trans->op_mode, &trans_pcie->napi,
+ 				     &trans_pcie->napi_dev,
+ 				     iwl_pcie_dummy_napi_poll, 64);
+ 	}
  }
  
  void iwl_trans_pcie_free(struct iwl_trans *trans)
@@@ -1099,6 -1116,9 +1116,9 @@@
  	pci_disable_device(trans_pcie->pci_dev);
  	kmem_cache_destroy(trans->dev_cmd_pool);
  
+ 	if (trans_pcie->napi.poll)
+ 		netif_napi_del(&trans_pcie->napi);
+ 
  	kfree(trans);
  }
  
@@@ -1237,7 -1257,7 +1257,7 @@@ static int iwl_trans_pcie_write_mem(str
  
  #define IWL_FLUSH_WAIT_MS	2000
  
- static int iwl_trans_pcie_wait_txq_empty(struct iwl_trans *trans)
+ static int iwl_trans_pcie_wait_txq_empty(struct iwl_trans *trans, u32 txq_bm)
  {
  	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
  	struct iwl_txq *txq;
@@@ -1250,13 -1270,31 +1270,31 @@@
  
  	/* waiting for all the tx frames complete might take a while */
  	for (cnt = 0; cnt < trans->cfg->base_params->num_of_queues; cnt++) {
+ 		u8 wr_ptr;
+ 
  		if (cnt == trans_pcie->cmd_queue)
  			continue;
+ 		if (!test_bit(cnt, trans_pcie->queue_used))
+ 			continue;
+ 		if (!(BIT(cnt) & txq_bm))
+ 			continue;
+ 
+ 		IWL_DEBUG_TX_QUEUES(trans, "Emptying queue %d...\n", cnt);
  		txq = &trans_pcie->txq[cnt];
  		q = &txq->q;
- 		while (q->read_ptr != q->write_ptr && !time_after(jiffies,
- 		       now + msecs_to_jiffies(IWL_FLUSH_WAIT_MS)))
+ 		wr_ptr = ACCESS_ONCE(q->write_ptr);
+ 
+ 		while (q->read_ptr != ACCESS_ONCE(q->write_ptr) &&
+ 		       !time_after(jiffies,
+ 				   now + msecs_to_jiffies(IWL_FLUSH_WAIT_MS))) {
+ 			u8 write_ptr = ACCESS_ONCE(q->write_ptr);
+ 
+ 			if (WARN_ONCE(wr_ptr != write_ptr,
+ 				      "WR pointer moved while flushing %d -> %d\n",
+ 				      wr_ptr, write_ptr))
+ 				return -ETIMEDOUT;
  			msleep(1);
+ 		}
  
  		if (q->read_ptr != q->write_ptr) {
  			IWL_ERR(trans,
@@@ -1264,6 -1302,7 +1302,7 @@@
  			ret = -ETIMEDOUT;
  			break;
  		}
+ 		IWL_DEBUG_TX_QUEUES(trans, "Queue %d is now empty.\n", cnt);
  	}
  
  	if (!ret)
@@@ -1749,10 -1788,6 +1788,10 @@@ struct iwl_trans *iwl_trans_pcie_alloc(
  	 * PCI Tx retries from interfering with C3 CPU state */
  	pci_write_config_byte(pdev, PCI_CFG_RETRY_TIMEOUT, 0x00);
  
 +	trans->dev = &pdev->dev;
 +	trans_pcie->pci_dev = pdev;
 +	iwl_disable_interrupts(trans);
 +
  	err = pci_enable_msi(pdev);
  	if (err) {
  		dev_err(&pdev->dev, "pci_enable_msi failed(0X%x)\n", err);
@@@ -1764,6 -1799,8 +1803,6 @@@
  		}
  	}
  
 -	trans->dev = &pdev->dev;
 -	trans_pcie->pci_dev = pdev;
  	trans->hw_rev = iwl_read32(trans, CSR_HW_REV);
  	trans->hw_id = (pdev->device << 16) + pdev->subsystem_device;
  	snprintf(trans->hw_id_str, sizeof(trans->hw_id_str),
@@@ -1789,6 -1826,8 +1828,6 @@@
  		goto out_pci_disable_msi;
  	}
  
 -	trans_pcie->inta_mask = CSR_INI_SET_MASK;
 -
  	if (iwl_pcie_alloc_ict(trans))
  		goto out_free_cmd_pool;
  
@@@ -1800,8 -1839,6 +1839,8 @@@
  		goto out_free_ict;
  	}
  
 +	trans_pcie->inta_mask = CSR_INI_SET_MASK;
 +
  	return trans;
  
  out_free_ict:
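
In the pcie/trans.c hunk above, iwl_trans_pcie_wait_txq_empty() now takes a bitmask of queues to flush, skips unused queues, and bails out with a warning if a queue's write pointer moves while it is draining, i.e. if new frames were queued mid-flush. The ACCESS_ONCE() reads force a fresh load on every poll instead of letting the compiler cache the pointer. A toy userspace analogue of that flush loop, with illustrative names only:

#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

/* roughly what ACCESS_ONCE() expands to: a forced, uncached load */
#define VOLATILE_READ(x) (*(volatile __typeof__(x) *)&(x))

static int read_ptr, write_ptr;

static void *consumer(void *arg)
{
    (void)arg;
    while (VOLATILE_READ(read_ptr) < VOLATILE_READ(write_ptr)) {
        __sync_fetch_and_add(&read_ptr, 1);   /* "reclaim" one queued entry */
        usleep(1000);
    }
    return NULL;
}

static int wait_queue_empty(int timeout_ms)
{
    int wr_snapshot = VOLATILE_READ(write_ptr);

    while (VOLATILE_READ(read_ptr) != VOLATILE_READ(write_ptr)) {
        /* someone queued more work during the flush: give up loudly */
        if (VOLATILE_READ(write_ptr) != wr_snapshot) {
            fprintf(stderr, "write pointer moved while flushing\n");
            return -1;
        }
        if (--timeout_ms < 0)
            return -1;
        usleep(1000);
    }
    return 0;
}

int main(void)
{
    pthread_t tid;

    write_ptr = 10;    /* ten entries pending */
    pthread_create(&tid, NULL, consumer, NULL);
    printf("flush %s\n", wait_queue_empty(2000) ? "timed out" : "done");
    pthread_join(tid, NULL);
    return 0;
}
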
diff --combined include/uapi/linux/audit.h
index 1b1efdd,dfa4c86..b21ea45
--- a/include/uapi/linux/audit.h
+++ b/include/uapi/linux/audit.h
@@@ -331,17 -331,9 +331,17 @@@ enum 
  #define AUDIT_FAIL_PRINTK	1
  #define AUDIT_FAIL_PANIC	2
  
 +/*
 + * These bits disambiguate different calling conventions that share an
 + * ELF machine type, bitness, and endianness
 + */
 +#define __AUDIT_ARCH_CONVENTION_MASK 0x30000000
 +#define __AUDIT_ARCH_CONVENTION_MIPS64_N32 0x20000000
 +
  /* distinguish syscall tables */
  #define __AUDIT_ARCH_64BIT 0x80000000
  #define __AUDIT_ARCH_LE	   0x40000000
 +
  #define AUDIT_ARCH_ALPHA	(EM_ALPHA|__AUDIT_ARCH_64BIT|__AUDIT_ARCH_LE)
  #define AUDIT_ARCH_ARM		(EM_ARM|__AUDIT_ARCH_LE)
  #define AUDIT_ARCH_ARMEB	(EM_ARM)
@@@ -354,11 -346,7 +354,11 @@@
  #define AUDIT_ARCH_MIPS		(EM_MIPS)
  #define AUDIT_ARCH_MIPSEL	(EM_MIPS|__AUDIT_ARCH_LE)
  #define AUDIT_ARCH_MIPS64	(EM_MIPS|__AUDIT_ARCH_64BIT)
 +#define AUDIT_ARCH_MIPS64N32	(EM_MIPS|__AUDIT_ARCH_64BIT|\
 +				 __AUDIT_ARCH_CONVENTION_MIPS64_N32)
  #define AUDIT_ARCH_MIPSEL64	(EM_MIPS|__AUDIT_ARCH_64BIT|__AUDIT_ARCH_LE)
 +#define AUDIT_ARCH_MIPSEL64N32	(EM_MIPS|__AUDIT_ARCH_64BIT|__AUDIT_ARCH_LE\
 +				 __AUDIT_ARCH_CONVENTION_MIPS64_N32)
  #define AUDIT_ARCH_OPENRISC	(EM_OPENRISC)
  #define AUDIT_ARCH_PARISC	(EM_PARISC)
  #define AUDIT_ARCH_PARISC64	(EM_PARISC|__AUDIT_ARCH_64BIT)
@@@ -385,6 -373,14 +385,14 @@@
   */
  #define AUDIT_MESSAGE_TEXT_MAX	8560
  
+ /* Multicast Netlink socket groups (default up to 32) */
+ enum audit_nlgrps {
+ 	AUDIT_NLGRP_NONE,	/* Group 0 not used */
+ 	AUDIT_NLGRP_READLOG,	/* "best effort" read only socket */
+ 	__AUDIT_NLGRP_MAX
+ };
+ #define AUDIT_NLGRP_MAX                (__AUDIT_NLGRP_MAX - 1)
+ 
  struct audit_status {
  	__u32		mask;		/* Bit mask for valid entries */
  	__u32		enabled;	/* 1 = enabled, 0 = disabled */
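
The audit.h hunk above adds two things: calling-convention bits so the MIPS n32 syscall table can be told apart from n64, and a first netlink multicast group (AUDIT_NLGRP_READLOG) for best-effort, read-only log delivery. (As added here, AUDIT_ARCH_MIPSEL64N32 appears to be missing a '|' before the line continuation, so the two flag constants would sit side by side and fail to compile wherever the macro is expanded.) A rough userspace sketch of joining the new multicast group, assuming a kernel with this patch and sufficient privileges; error handling is trimmed:

#include <stdio.h>
#include <unistd.h>
#include <sys/socket.h>
#include <linux/netlink.h>
#include <linux/audit.h>

#ifndef SOL_NETLINK
#define SOL_NETLINK 270
#endif

int main(void)
{
    int grp = AUDIT_NLGRP_READLOG;   /* == 1 per the enum added above */
    int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_AUDIT);
    char buf[8192];

    if (fd < 0 ||
        setsockopt(fd, SOL_NETLINK, NETLINK_ADD_MEMBERSHIP,
                   &grp, sizeof(grp)) < 0) {
        perror("audit multicast join");
        return 1;
    }

    for (;;) {
        ssize_t len = recv(fd, buf, sizeof(buf), 0);

        if (len <= 0)
            break;
        /* each datagram is an nlmsghdr-framed audit record */
        printf("got %zd bytes of audit multicast traffic\n", len);
    }
    close(fd);
    return 0;
}
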
diff --combined lib/Kconfig.debug
index e548aa0,d1b7bdf..6da2c25
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@@ -180,7 -180,7 +180,7 @@@ config STRIP_ASM_SYM
  
  config READABLE_ASM
          bool "Generate readable assembler code"
 -        depends on DEBUG_KERNEL
 +        depends on DEBUG_KERNEL && !LTO
          help
            Disable some compiler optimizations that tend to generate human unreadable
            assembler output. This may make the kernel slightly slower, but it helps
@@@ -1620,6 -1620,19 +1620,19 @@@ config TEST_USER_COP
  
  	  If unsure, say N.
  
+ config TEST_BPF
+ 	tristate "Test BPF filter functionality"
+ 	default n
+ 	depends on m && NET
+ 	help
+ 	  This builds the "test_bpf" module that runs various test vectors
+ 	  against the BPF interpreter or BPF JIT compiler depending on the
+ 	  current setting. This is in particular useful for BPF JIT compiler
+ 	  development, but also to run regression tests against changes in
+ 	  the interpreter code.
+ 
+ 	  If unsure, say N.
+ 
  source "samples/Kconfig"
  
  source "lib/Kconfig.kgdb"
diff --combined net/ipv4/xfrm4_output.c
index 186a8ec,8e8c018..d5f6bd9
--- a/net/ipv4/xfrm4_output.c
+++ b/net/ipv4/xfrm4_output.c
@@@ -25,7 -25,7 +25,7 @@@ static int xfrm4_tunnel_check_size(stru
  	if (IPCB(skb)->flags & IPSKB_XFRM_TUNNEL_SIZE)
  		goto out;
  
- 	if (!(ip_hdr(skb)->frag_off & htons(IP_DF)) || skb->local_df)
+ 	if (!(ip_hdr(skb)->frag_off & htons(IP_DF)) || skb->ignore_df)
  		goto out;
  
  	mtu = dst_mtu(skb_dst(skb));
@@@ -62,7 -62,10 +62,7 @@@ int xfrm4_prepare_output(struct xfrm_st
  	if (err)
  		return err;
  
 -	memset(IPCB(skb), 0, sizeof(*IPCB(skb)));
 -	IPCB(skb)->flags |= IPSKB_XFRM_TUNNEL_SIZE | IPSKB_XFRM_TRANSFORMED;
 -
 -	skb->protocol = htons(ETH_P_IP);
 +	IPCB(skb)->flags |= IPSKB_XFRM_TUNNEL_SIZE;
  
  	return x->outer_mode->output2(x, skb);
  }
@@@ -70,34 -73,27 +70,34 @@@ EXPORT_SYMBOL(xfrm4_prepare_output)
  
  int xfrm4_output_finish(struct sk_buff *skb)
  {
 +	memset(IPCB(skb), 0, sizeof(*IPCB(skb)));
 +	skb->protocol = htons(ETH_P_IP);
 +
 +#ifdef CONFIG_NETFILTER
 +	IPCB(skb)->flags |= IPSKB_XFRM_TRANSFORMED;
 +#endif
 +
 +	return xfrm_output(skb);
 +}
 +
 +static int __xfrm4_output(struct sk_buff *skb)
 +{
 +	struct xfrm_state *x = skb_dst(skb)->xfrm;
 +
  #ifdef CONFIG_NETFILTER
 -	if (!skb_dst(skb)->xfrm) {
 +	if (!x) {
  		IPCB(skb)->flags |= IPSKB_REROUTED;
  		return dst_output(skb);
  	}
 -
 -	IPCB(skb)->flags |= IPSKB_XFRM_TRANSFORMED;
  #endif
  
 -	skb->protocol = htons(ETH_P_IP);
 -	return xfrm_output(skb);
 +	return x->outer_mode->afinfo->output_finish(skb);
  }
  
  int xfrm4_output(struct sock *sk, struct sk_buff *skb)
  {
 -	struct dst_entry *dst = skb_dst(skb);
 -	struct xfrm_state *x = dst->xfrm;
 -
  	return NF_HOOK_COND(NFPROTO_IPV4, NF_INET_POST_ROUTING, skb,
 -			    NULL, dst->dev,
 -			    x->outer_mode->afinfo->output_finish,
 +			    NULL, skb_dst(skb)->dev, __xfrm4_output,
  			    !(IPCB(skb)->flags & IPSKB_REROUTED));
  }
  
diff --combined net/ipv6/xfrm6_output.c
index b930d08,f47c8b1..433672d
--- a/net/ipv6/xfrm6_output.c
+++ b/net/ipv6/xfrm6_output.c
@@@ -78,7 -78,7 +78,7 @@@ static int xfrm6_tunnel_check_size(stru
  	if (mtu < IPV6_MIN_MTU)
  		mtu = IPV6_MIN_MTU;
  
- 	if (!skb->local_df && skb->len > mtu) {
+ 	if (!skb->ignore_df && skb->len > mtu) {
  		skb->dev = dst->dev;
  
  		if (xfrm6_local_dontfrag(skb))
@@@ -114,7 -114,13 +114,7 @@@ int xfrm6_prepare_output(struct xfrm_st
  	if (err)
  		return err;
  
- 	skb->local_df = 1;
 -	memset(IP6CB(skb), 0, sizeof(*IP6CB(skb)));
 -#ifdef CONFIG_NETFILTER
 -	IP6CB(skb)->flags |= IP6SKB_XFRM_TRANSFORMED;
 -#endif
 -
 -	skb->protocol = htons(ETH_P_IPV6);
+ 	skb->ignore_df = 1;
  
  	return x->outer_mode->output2(x, skb);
  }
@@@ -122,13 -128,11 +122,13 @@@ EXPORT_SYMBOL(xfrm6_prepare_output)
  
  int xfrm6_output_finish(struct sk_buff *skb)
  {
 +	memset(IP6CB(skb), 0, sizeof(*IP6CB(skb)));
 +	skb->protocol = htons(ETH_P_IPV6);
 +
  #ifdef CONFIG_NETFILTER
  	IP6CB(skb)->flags |= IP6SKB_XFRM_TRANSFORMED;
  #endif
  
 -	skb->protocol = htons(ETH_P_IPV6);
  	return xfrm_output(skb);
  }
  
@@@ -138,13 -142,6 +138,13 @@@ static int __xfrm6_output(struct sk_buf
  	struct xfrm_state *x = dst->xfrm;
  	int mtu;
  
 +#ifdef CONFIG_NETFILTER
 +	if (!x) {
 +		IP6CB(skb)->flags |= IP6SKB_REROUTED;
 +		return dst_output(skb);
 +	}
 +#endif
 +
  	if (skb->protocol == htons(ETH_P_IPV6))
  		mtu = ip6_skb_dst_mtu(skb);
  	else
@@@ -153,7 -150,7 +153,7 @@@
  	if (skb->len > mtu && xfrm6_local_dontfrag(skb)) {
  		xfrm6_local_rxpmtu(skb, mtu);
  		return -EMSGSIZE;
- 	} else if (!skb->local_df && skb->len > mtu && skb->sk) {
+ 	} else if (!skb->ignore_df && skb->len > mtu && skb->sk) {
  		xfrm_local_error(skb, mtu);
  		return -EMSGSIZE;
  	}
@@@ -168,7 -165,6 +168,7 @@@
  
  int xfrm6_output(struct sock *sk, struct sk_buff *skb)
  {
 -	return NF_HOOK(NFPROTO_IPV6, NF_INET_POST_ROUTING, skb, NULL,
 -		       skb_dst(skb)->dev, __xfrm6_output);
 +	return NF_HOOK_COND(NFPROTO_IPV6, NF_INET_POST_ROUTING, skb,
 +			    NULL, skb_dst(skb)->dev, __xfrm6_output,
 +			    !(IP6CB(skb)->flags & IP6SKB_REROUTED));
  }

-- 
LinuxNextTracking

