[linux-next] LinuxNextTracking branch, master, updated. next-20120712

batman at open-mesh.org
Fri Jul 13 00:17:07 CEST 2012


The following commit has been merged in the master branch:
commit 04c9f416e371cff076a8b3279fb213628915d059
Merge: c278fa53c123282f753b2264fc62c0e9502a32fa c1f5163de417dab01fa9daaf09a74bbb19303f3c
Author: David S. Miller <davem at davemloft.net>
Date:   Tue Jul 10 23:56:33 2012 -0700

    Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
    
    Conflicts:
    	net/batman-adv/bridge_loop_avoidance.c
    	net/batman-adv/bridge_loop_avoidance.h
    	net/batman-adv/soft-interface.c
    	net/mac80211/mlme.c
    
    With merge help from Antonio Quartulli (batman-adv) and
    Stephen Rothwell (drivers/net/usb/qmi_wwan.c).
    
    The net/mac80211/mlme.c conflict seemed easy enough, accounting for a
    conversion to some new tracing macros.
    
    Signed-off-by: David S. Miller <davem at davemloft.net>

diff --combined drivers/net/bonding/bond_main.c
index f5a40b9,2ee7699..4ddcc3e
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@@ -1445,8 -1445,8 +1445,8 @@@ static rx_handler_result_t bond_handle_
  	struct sk_buff *skb = *pskb;
  	struct slave *slave;
  	struct bonding *bond;
 -	int (*recv_probe)(struct sk_buff *, struct bonding *,
 -				struct slave *);
 +	int (*recv_probe)(const struct sk_buff *, struct bonding *,
 +			  struct slave *);
  	int ret = RX_HANDLER_ANOTHER;
  
  	skb = skb_share_check(skb, GFP_ATOMIC);
@@@ -1463,10 -1463,15 +1463,10 @@@
  
  	recv_probe = ACCESS_ONCE(bond->recv_probe);
  	if (recv_probe) {
 -		struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC);
 -
 -		if (likely(nskb)) {
 -			ret = recv_probe(nskb, bond, slave);
 -			dev_kfree_skb(nskb);
 -			if (ret == RX_HANDLER_CONSUMED) {
 -				consume_skb(skb);
 -				return ret;
 -			}
 +		ret = recv_probe(skb, bond, slave);
 +		if (ret == RX_HANDLER_CONSUMED) {
 +			consume_skb(skb);
 +			return ret;
  		}
  	}
  
@@@ -2733,31 -2738,25 +2733,31 @@@ static void bond_validate_arp(struct bo
  	}
  }
  
 -static int bond_arp_rcv(struct sk_buff *skb, struct bonding *bond,
 -			 struct slave *slave)
 +static int bond_arp_rcv(const struct sk_buff *skb, struct bonding *bond,
 +			struct slave *slave)
  {
 -	struct arphdr *arp;
 +	struct arphdr *arp = (struct arphdr *)skb->data;
  	unsigned char *arp_ptr;
  	__be32 sip, tip;
 +	int alen;
  
  	if (skb->protocol != __cpu_to_be16(ETH_P_ARP))
  		return RX_HANDLER_ANOTHER;
  
  	read_lock(&bond->lock);
 +	alen = arp_hdr_len(bond->dev);
  
  	pr_debug("bond_arp_rcv: bond %s skb->dev %s\n",
  		 bond->dev->name, skb->dev->name);
  
 -	if (!pskb_may_pull(skb, arp_hdr_len(bond->dev)))
 -		goto out_unlock;
 +	if (alen > skb_headlen(skb)) {
 +		arp = kmalloc(alen, GFP_ATOMIC);
 +		if (!arp)
 +			goto out_unlock;
 +		if (skb_copy_bits(skb, 0, arp, alen) < 0)
 +			goto out_unlock;
 +	}
  
 -	arp = arp_hdr(skb);
  	if (arp->ar_hln != bond->dev->addr_len ||
  	    skb->pkt_type == PACKET_OTHERHOST ||
  	    skb->pkt_type == PACKET_LOOPBACK ||
@@@ -2792,8 -2791,6 +2792,8 @@@
  
  out_unlock:
  	read_unlock(&bond->lock);
 +	if (arp != (struct arphdr *)skb->data)
 +		kfree(arp);
  	return RX_HANDLER_ANOTHER;
  }
  
@@@ -3230,6 -3227,12 +3230,12 @@@ static int bond_master_netdev_event(uns
  	switch (event) {
  	case NETDEV_CHANGENAME:
  		return bond_event_changename(event_bond);
+ 	case NETDEV_UNREGISTER:
+ 		bond_remove_proc_entry(event_bond);
+ 		break;
+ 	case NETDEV_REGISTER:
+ 		bond_create_proc_entry(event_bond);
+ 		break;
  	default:
  		break;
  	}
@@@ -3990,7 -3993,7 +3996,7 @@@ static int bond_xmit_roundrobin(struct 
  out:
  	if (res) {
  		/* no suitable interface, frame not sent */
 -		dev_kfree_skb(skb);
 +		kfree_skb(skb);
  	}
  
  	return NETDEV_TX_OK;
@@@ -4012,11 -4015,11 +4018,11 @@@ static int bond_xmit_activebackup(struc
  		res = bond_dev_queue_xmit(bond, skb,
  			bond->curr_active_slave->dev);
  
 +	read_unlock(&bond->curr_slave_lock);
 +
  	if (res)
  		/* no suitable interface, frame not sent */
 -		dev_kfree_skb(skb);
 -
 -	read_unlock(&bond->curr_slave_lock);
 +		kfree_skb(skb);
  
  	return NETDEV_TX_OK;
  }
@@@ -4055,7 -4058,7 +4061,7 @@@ static int bond_xmit_xor(struct sk_buf
  
  	if (res) {
  		/* no suitable interface, frame not sent */
 -		dev_kfree_skb(skb);
 +		kfree_skb(skb);
  	}
  
  	return NETDEV_TX_OK;
@@@ -4093,7 -4096,7 +4099,7 @@@ static int bond_xmit_broadcast(struct s
  
  				res = bond_dev_queue_xmit(bond, skb2, tx_dev);
  				if (res) {
 -					dev_kfree_skb(skb2);
 +					kfree_skb(skb2);
  					continue;
  				}
  			}
@@@ -4107,7 -4110,7 +4113,7 @@@
  out:
  	if (res)
  		/* no suitable interface, frame not sent */
 -		dev_kfree_skb(skb);
 +		kfree_skb(skb);
  
  	/* frame sent to all suitable interfaces */
  	return NETDEV_TX_OK;
@@@ -4213,7 -4216,7 +4219,7 @@@ static netdev_tx_t __bond_start_xmit(st
  		pr_err("%s: Error: Unknown bonding mode %d\n",
  		       dev->name, bond->params.mode);
  		WARN_ON_ONCE(1);
 -		dev_kfree_skb(skb);
 +		kfree_skb(skb);
  		return NETDEV_TX_OK;
  	}
  }
@@@ -4235,7 -4238,7 +4241,7 @@@ static netdev_tx_t bond_start_xmit(stru
  	if (bond->slave_cnt)
  		ret = __bond_start_xmit(skb, dev);
  	else
 -		dev_kfree_skb(skb);
 +		kfree_skb(skb);
  
  	read_unlock(&bond->lock);
  
@@@ -4414,8 -4417,6 +4420,6 @@@ static void bond_uninit(struct net_devi
  
  	bond_work_cancel_all(bond);
  
- 	bond_remove_proc_entry(bond);
- 
  	bond_debug_unregister(bond);
  
  	__hw_addr_flush(&bond->mc_list);
@@@ -4817,7 -4818,6 +4821,6 @@@ static int bond_init(struct net_device 
  
  	bond_set_lockdep_class(bond_dev);
  
- 	bond_create_proc_entry(bond);
  	list_add_tail(&bond->bond_list, &bn->dev_list);
  
  	bond_prepare_sysfs_group(bond);
diff --combined drivers/net/ethernet/atheros/atl1c/atl1c_main.c
index 42c13d8,1f78b63..36d3783
--- a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
+++ b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
@@@ -166,7 -166,7 +166,7 @@@ static void atl1c_reset_pcie(struct atl
  	msleep(5);
  }
  
 -/*
 +/**
   * atl1c_irq_enable - Enable default interrupt generation settings
   * @adapter: board private structure
   */
@@@ -179,7 -179,7 +179,7 @@@ static inline void atl1c_irq_enable(str
  	}
  }
  
 -/*
 +/**
   * atl1c_irq_disable - Mask off interrupt generation on the NIC
   * @adapter: board private structure
   */
@@@ -192,7 -192,7 +192,7 @@@ static inline void atl1c_irq_disable(st
  	synchronize_irq(adapter->pdev->irq);
  }
  
 -/*
 +/**
   * atl1c_irq_reset - reset interrupt confiure on the NIC
   * @adapter: board private structure
   */
@@@ -220,7 -220,7 +220,7 @@@ static u32 atl1c_wait_until_idle(struc
  	return data;
  }
  
 -/*
 +/**
   * atl1c_phy_config - Timer Call-back
   * @data: pointer to netdev cast into an unsigned long
   */
@@@ -261,7 -261,6 +261,6 @@@ static void atl1c_check_link_status(str
  	if ((phy_data & BMSR_LSTATUS) == 0) {
  		/* link down */
  		netif_carrier_off(netdev);
- 		netif_stop_queue(netdev);
  		hw->hibernate = true;
  		if (atl1c_reset_mac(hw) != 0)
  			if (netif_msg_hw(adapter))
@@@ -361,7 -360,7 +360,7 @@@ static void atl1c_del_timer(struct atl1
  }
  
  
 -/*
 +/**
   * atl1c_tx_timeout - Respond to a Tx Hang
   * @netdev: network interface device structure
   */
@@@ -374,7 -373,7 +373,7 @@@ static void atl1c_tx_timeout(struct net
  	schedule_work(&adapter->common_task);
  }
  
 -/*
 +/**
   * atl1c_set_multi - Multicast and Promiscuous mode set
   * @netdev: network interface device structure
   *
@@@ -453,7 -452,7 +452,7 @@@ static void atl1c_restore_vlan(struct a
  	atl1c_vlan_mode(adapter->netdev, adapter->netdev->features);
  }
  
 -/*
 +/**
   * atl1c_set_mac - Change the Ethernet Address of the NIC
   * @netdev: network interface device structure
   * @p: pointer to an address structure
@@@ -518,7 -517,7 +517,7 @@@ static int atl1c_set_features(struct ne
  	return 0;
  }
  
 -/*
 +/**
   * atl1c_change_mtu - Change the Maximum Transfer Unit
   * @netdev: network interface device structure
   * @new_mtu: new value for maximum frame size
@@@ -577,6 -576,12 +576,6 @@@ static void atl1c_mdio_write(struct net
  	atl1c_write_phy_reg(&adapter->hw, reg_num, val);
  }
  
 -/*
 - * atl1c_mii_ioctl -
 - * @netdev:
 - * @ifreq:
 - * @cmd:
 - */
  static int atl1c_mii_ioctl(struct net_device *netdev,
  			   struct ifreq *ifr, int cmd)
  {
@@@ -627,6 -632,12 +626,6 @@@ out
  	return retval;
  }
  
 -/*
 - * atl1c_ioctl -
 - * @netdev:
 - * @ifreq:
 - * @cmd:
 - */
  static int atl1c_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
  {
  	switch (cmd) {
@@@ -639,7 -650,7 +638,7 @@@
  	}
  }
  
 -/*
 +/**
   * atl1c_alloc_queues - Allocate memory for all rings
   * @adapter: board private structure to initialize
   *
@@@ -743,7 -754,7 +742,7 @@@ static void __devinit atl1c_patch_assig
  		i++;
  	}
  }
 -/*
 +/**
   * atl1c_sw_init - Initialize general software structures (struct atl1c_adapter)
   * @adapter: board private structure to initialize
   *
@@@ -841,7 -852,7 +840,7 @@@ static inline void atl1c_clean_buffer(s
  	buffer_info->skb = NULL;
  	ATL1C_SET_BUFFER_STATE(buffer_info, ATL1C_BUFFER_FREE);
  }
 -/*
 +/**
   * atl1c_clean_tx_ring - Free Tx-skb
   * @adapter: board private structure
   */
@@@ -866,7 -877,7 +865,7 @@@ static void atl1c_clean_tx_ring(struct 
  	tpd_ring->next_to_use = 0;
  }
  
 -/*
 +/**
   * atl1c_clean_rx_ring - Free rx-reservation skbs
   * @adapter: board private structure
   */
@@@ -919,7 -930,7 +918,7 @@@ static void atl1c_init_ring_ptrs(struc
  	}
  }
  
 -/*
 +/**
   * atl1c_free_ring_resources - Free Tx / RX descriptor Resources
   * @adapter: board private structure
   *
@@@ -942,7 -953,7 +941,7 @@@ static void atl1c_free_ring_resources(s
  	}
  }
  
 -/*
 +/**
   * atl1c_setup_mem_resources - allocate Tx / RX descriptor resources
   * @adapter: board private structure
   *
@@@ -977,12 -988,12 +976,12 @@@ static int atl1c_setup_ring_resources(s
  	}
  	for (i = 0; i < AT_MAX_TRANSMIT_QUEUE; i++) {
  		tpd_ring[i].buffer_info =
 -			(struct atl1c_buffer *) (tpd_ring->buffer_info + count);
 +			(tpd_ring->buffer_info + count);
  		count += tpd_ring[i].count;
  	}
  
  	rfd_ring->buffer_info =
 -		(struct atl1c_buffer *) (tpd_ring->buffer_info + count);
 +		(tpd_ring->buffer_info + count);
  	count += rfd_ring->count;
  	rx_desc_count += rfd_ring->count;
  
@@@ -1215,7 -1226,7 +1214,7 @@@ static void atl1c_start_mac(struct atl1
   */
  static int atl1c_reset_mac(struct atl1c_hw *hw)
  {
 -	struct atl1c_adapter *adapter = (struct atl1c_adapter *)hw->adapter;
 +	struct atl1c_adapter *adapter = hw->adapter;
  	struct pci_dev *pdev = adapter->pdev;
  	u32 ctrl_data = 0;
  
@@@ -1351,7 -1362,7 +1350,7 @@@ static void atl1c_set_aspm(struct atl1c
  	return;
  }
  
 -/*
 +/**
   * atl1c_configure - Configure Transmit&Receive Unit after Reset
   * @adapter: board private structure
   *
@@@ -1465,7 -1476,7 +1464,7 @@@ static void atl1c_update_hw_stats(struc
  	}
  }
  
 -/*
 +/**
   * atl1c_get_stats - Get System Network Statistics
   * @netdev: network interface device structure
   *
@@@ -1519,7 -1530,8 +1518,7 @@@ static inline void atl1c_clear_phy_int(
  static bool atl1c_clean_tx_irq(struct atl1c_adapter *adapter,
  				enum atl1c_trans_queue type)
  {
 -	struct atl1c_tpd_ring *tpd_ring = (struct atl1c_tpd_ring *)
 -				&adapter->tpd_ring[type];
 +	struct atl1c_tpd_ring *tpd_ring = &adapter->tpd_ring[type];
  	struct atl1c_buffer *buffer_info;
  	struct pci_dev *pdev = adapter->pdev;
  	u16 next_to_clean = atomic_read(&tpd_ring->next_to_clean);
@@@ -1546,10 -1558,11 +1545,10 @@@
  	return true;
  }
  
 -/*
 +/**
   * atl1c_intr - Interrupt Handler
   * @irq: interrupt number
   * @data: pointer to a network interface device structure
 - * @pt_regs: CPU registers structure
   */
  static irqreturn_t atl1c_intr(int irq, void *data)
  {
@@@ -1800,8 -1813,9 +1799,8 @@@ rrs_checked
  		atl1c_alloc_rx_buffer(adapter);
  }
  
 -/*
 +/**
   * atl1c_clean - NAPI Rx polling callback
 - * @adapter: board private structure
   */
  static int atl1c_clean(struct napi_struct *napi, int budget)
  {
@@@ -2256,7 -2270,7 +2255,7 @@@ static void atl1c_down(struct atl1c_ada
  	atl1c_reset_dma_ring(adapter);
  }
  
 -/*
 +/**
   * atl1c_open - Called when a network interface is made active
   * @netdev: network interface device structure
   *
@@@ -2295,7 -2309,7 +2294,7 @@@ err_up
  	return err;
  }
  
 -/*
 +/**
   * atl1c_close - Disables a network interface
   * @netdev: network interface device structure
   *
@@@ -2418,7 -2432,7 +2417,7 @@@ static int atl1c_init_netdev(struct net
  	return 0;
  }
  
 -/*
 +/**
   * atl1c_probe - Device Initialization Routine
   * @pdev: PCI device information struct
   * @ent: entry in atl1c_pci_tbl
@@@ -2565,7 -2579,7 +2564,7 @@@ err_dma
  	return err;
  }
  
 -/*
 +/**
   * atl1c_remove - Device Removal Routine
   * @pdev: PCI device information struct
   *
@@@ -2591,7 -2605,7 +2590,7 @@@ static void __devexit atl1c_remove(stru
  	free_netdev(netdev);
  }
  
 -/*
 +/**
   * atl1c_io_error_detected - called when PCI error is detected
   * @pdev: Pointer to PCI device
   * @state: The current pci connection state
@@@ -2619,7 -2633,7 +2618,7 @@@ static pci_ers_result_t atl1c_io_error_
  	return PCI_ERS_RESULT_NEED_RESET;
  }
  
 -/*
 +/**
   * atl1c_io_slot_reset - called after the pci bus has been reset.
   * @pdev: Pointer to PCI device
   *
@@@ -2647,7 -2661,7 +2646,7 @@@ static pci_ers_result_t atl1c_io_slot_r
  	return PCI_ERS_RESULT_RECOVERED;
  }
  
 -/*
 +/**
   * atl1c_io_resume - called when traffic can start flowing again.
   * @pdev: Pointer to PCI device
   *
@@@ -2690,7 -2704,7 +2689,7 @@@ static struct pci_driver atl1c_driver 
  	.driver.pm = &atl1c_pm_ops,
  };
  
 -/*
 +/**
   * atl1c_init_module - Driver Registration Routine
   *
   * atl1c_init_module is the first routine called when the driver is
@@@ -2701,7 -2715,7 +2700,7 @@@ static int __init atl1c_init_module(voi
  	return pci_register_driver(&atl1c_driver);
  }
  
 -/*
 +/**
   * atl1c_exit_module - Driver Exit Cleanup Routine
   *
   * atl1c_exit_module is called just before the driver is removed
diff --combined drivers/net/ethernet/broadcom/bnx2.c
index 1901da1,1fa4927..0ced154
--- a/drivers/net/ethernet/broadcom/bnx2.c
+++ b/drivers/net/ethernet/broadcom/bnx2.c
@@@ -14,7 -14,6 +14,7 @@@
  #include <linux/module.h>
  #include <linux/moduleparam.h>
  
 +#include <linux/stringify.h>
  #include <linux/kernel.h>
  #include <linux/timer.h>
  #include <linux/errno.h>
@@@ -58,8 -57,8 +58,8 @@@
  #include "bnx2_fw.h"
  
  #define DRV_MODULE_NAME		"bnx2"
 -#define DRV_MODULE_VERSION	"2.2.1"
 -#define DRV_MODULE_RELDATE	"Dec 18, 2011"
 +#define DRV_MODULE_VERSION	"2.2.3"
 +#define DRV_MODULE_RELDATE	"June 27, 2012"
  #define FW_MIPS_FILE_06		"bnx2/bnx2-mips-06-6.2.3.fw"
  #define FW_RV2P_FILE_06		"bnx2/bnx2-rv2p-06-6.0.15.fw"
  #define FW_MIPS_FILE_09		"bnx2/bnx2-mips-09-6.2.1b.fw"
@@@ -873,7 -872,8 +873,7 @@@ bnx2_alloc_mem(struct bnx2 *bp
  
  			bnapi = &bp->bnx2_napi[i];
  
 -			sblk = (void *) (status_blk +
 -					 BNX2_SBLK_MSIX_ALIGN_SIZE * i);
 +			sblk = (status_blk + BNX2_SBLK_MSIX_ALIGN_SIZE * i);
  			bnapi->status_blk.msix = sblk;
  			bnapi->hw_tx_cons_ptr =
  				&sblk->status_tx_quick_consumer_index;
@@@ -1972,26 -1972,22 +1972,26 @@@ bnx2_remote_phy_event(struct bnx2 *bp
  		switch (speed) {
  			case BNX2_LINK_STATUS_10HALF:
  				bp->duplex = DUPLEX_HALF;
 +				/* fall through */
  			case BNX2_LINK_STATUS_10FULL:
  				bp->line_speed = SPEED_10;
  				break;
  			case BNX2_LINK_STATUS_100HALF:
  				bp->duplex = DUPLEX_HALF;
 +				/* fall through */
  			case BNX2_LINK_STATUS_100BASE_T4:
  			case BNX2_LINK_STATUS_100FULL:
  				bp->line_speed = SPEED_100;
  				break;
  			case BNX2_LINK_STATUS_1000HALF:
  				bp->duplex = DUPLEX_HALF;
 +				/* fall through */
  			case BNX2_LINK_STATUS_1000FULL:
  				bp->line_speed = SPEED_1000;
  				break;
  			case BNX2_LINK_STATUS_2500HALF:
  				bp->duplex = DUPLEX_HALF;
 +				/* fall through */
  			case BNX2_LINK_STATUS_2500FULL:
  				bp->line_speed = SPEED_2500;
  				break;
@@@ -2477,7 -2473,6 +2477,7 @@@ bnx2_dump_mcp_state(struct bnx2 *bp
  		   bnx2_shmem_rd(bp, BNX2_BC_STATE_RESET_TYPE));
  	pr_cont(" condition[%08x]\n",
  		bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION));
 +	DP_SHMEM_LINE(bp, BNX2_BC_RESET_TYPE);
  	DP_SHMEM_LINE(bp, 0x3cc);
  	DP_SHMEM_LINE(bp, 0x3dc);
  	DP_SHMEM_LINE(bp, 0x3ec);
@@@ -5377,7 -5372,7 +5377,7 @@@ bnx2_free_tx_skbs(struct bnx2 *bp
  			int k, last;
  
  			if (skb == NULL) {
- 				j++;
+ 				j = NEXT_TX_BD(j);
  				continue;
  			}
  
@@@ -5389,8 -5384,8 +5389,8 @@@
  			tx_buf->skb = NULL;
  
  			last = tx_buf->nr_frags;
- 			j++;
- 			for (k = 0; k < last; k++, j++) {
+ 			j = NEXT_TX_BD(j);
+ 			for (k = 0; k < last; k++, j = NEXT_TX_BD(j)) {
  				tx_buf = &txr->tx_buf_ring[TX_RING_IDX(j)];
  				dma_unmap_page(&bp->pdev->dev,
  					dma_unmap_addr(tx_buf, mapping),
@@@ -6250,7 -6245,7 +6250,7 @@@ bnx2_enable_msix(struct bnx2 *bp, int m
  static int
  bnx2_setup_int_mode(struct bnx2 *bp, int dis_msi)
  {
 -	int cpus = num_online_cpus();
 +	int cpus = netif_get_num_default_rss_queues();
  	int msix_vecs;
  
  	if (!bp->num_req_rx_rings)
@@@ -6411,75 -6406,6 +6411,75 @@@ bnx2_reset_task(struct work_struct *wor
  	rtnl_unlock();
  }
  
 +#define BNX2_FTQ_ENTRY(ftq) { __stringify(ftq##FTQ_CTL), BNX2_##ftq##FTQ_CTL }
 +
 +static void
 +bnx2_dump_ftq(struct bnx2 *bp)
 +{
 +	int i;
 +	u32 reg, bdidx, cid, valid;
 +	struct net_device *dev = bp->dev;
 +	static const struct ftq_reg {
 +		char *name;
 +		u32 off;
 +	} ftq_arr[] = {
 +		BNX2_FTQ_ENTRY(RV2P_P),
 +		BNX2_FTQ_ENTRY(RV2P_T),
 +		BNX2_FTQ_ENTRY(RV2P_M),
 +		BNX2_FTQ_ENTRY(TBDR_),
 +		BNX2_FTQ_ENTRY(TDMA_),
 +		BNX2_FTQ_ENTRY(TXP_),
 +		BNX2_FTQ_ENTRY(TXP_),
 +		BNX2_FTQ_ENTRY(TPAT_),
 +		BNX2_FTQ_ENTRY(RXP_C),
 +		BNX2_FTQ_ENTRY(RXP_),
 +		BNX2_FTQ_ENTRY(COM_COMXQ_),
 +		BNX2_FTQ_ENTRY(COM_COMTQ_),
 +		BNX2_FTQ_ENTRY(COM_COMQ_),
 +		BNX2_FTQ_ENTRY(CP_CPQ_),
 +	};
 +
 +	netdev_err(dev, "<--- start FTQ dump --->\n");
 +	for (i = 0; i < ARRAY_SIZE(ftq_arr); i++)
 +		netdev_err(dev, "%s %08x\n", ftq_arr[i].name,
 +			   bnx2_reg_rd_ind(bp, ftq_arr[i].off));
 +
 +	netdev_err(dev, "CPU states:\n");
 +	for (reg = BNX2_TXP_CPU_MODE; reg <= BNX2_CP_CPU_MODE; reg += 0x40000)
 +		netdev_err(dev, "%06x mode %x state %x evt_mask %x pc %x pc %x instr %x\n",
 +			   reg, bnx2_reg_rd_ind(bp, reg),
 +			   bnx2_reg_rd_ind(bp, reg + 4),
 +			   bnx2_reg_rd_ind(bp, reg + 8),
 +			   bnx2_reg_rd_ind(bp, reg + 0x1c),
 +			   bnx2_reg_rd_ind(bp, reg + 0x1c),
 +			   bnx2_reg_rd_ind(bp, reg + 0x20));
 +
 +	netdev_err(dev, "<--- end FTQ dump --->\n");
 +	netdev_err(dev, "<--- start TBDC dump --->\n");
 +	netdev_err(dev, "TBDC free cnt: %ld\n",
 +		   REG_RD(bp, BNX2_TBDC_STATUS) & BNX2_TBDC_STATUS_FREE_CNT);
 +	netdev_err(dev, "LINE     CID  BIDX   CMD  VALIDS\n");
 +	for (i = 0; i < 0x20; i++) {
 +		int j = 0;
 +
 +		REG_WR(bp, BNX2_TBDC_BD_ADDR, i);
 +		REG_WR(bp, BNX2_TBDC_CAM_OPCODE,
 +		       BNX2_TBDC_CAM_OPCODE_OPCODE_CAM_READ);
 +		REG_WR(bp, BNX2_TBDC_COMMAND, BNX2_TBDC_COMMAND_CMD_REG_ARB);
 +		while ((REG_RD(bp, BNX2_TBDC_COMMAND) &
 +			BNX2_TBDC_COMMAND_CMD_REG_ARB) && j < 100)
 +			j++;
 +
 +		cid = REG_RD(bp, BNX2_TBDC_CID);
 +		bdidx = REG_RD(bp, BNX2_TBDC_BIDX);
 +		valid = REG_RD(bp, BNX2_TBDC_CAM_OPCODE);
 +		netdev_err(dev, "%02x    %06x  %04lx   %02x    [%x]\n",
 +			   i, cid, bdidx & BNX2_TBDC_BDIDX_BDIDX,
 +			   bdidx >> 24, (valid >> 8) & 0x0ff);
 +	}
 +	netdev_err(dev, "<--- end TBDC dump --->\n");
 +}
 +
  static void
  bnx2_dump_state(struct bnx2 *bp)
  {
@@@ -6509,7 -6435,6 +6509,7 @@@ bnx2_tx_timeout(struct net_device *dev
  {
  	struct bnx2 *bp = netdev_priv(dev);
  
 +	bnx2_dump_ftq(bp);
  	bnx2_dump_state(bp);
  	bnx2_dump_mcp_state(bp);
  
@@@ -6703,7 -6628,6 +6703,7 @@@ bnx2_close(struct net_device *dev
  
  	bnx2_disable_int_sync(bp);
  	bnx2_napi_disable(bp);
 +	netif_tx_disable(dev);
  	del_timer_sync(&bp->timer);
  	bnx2_shutdown_chip(bp);
  	bnx2_free_irq(bp);
@@@ -7908,7 -7832,7 +7908,7 @@@ bnx2_get_5709_media(struct bnx2 *bp
  	else
  		strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL_STRAP) >> 8;
  
 -	if (PCI_FUNC(bp->pdev->devfn) == 0) {
 +	if (bp->func == 0) {
  		switch (strap) {
  		case 0x4:
  		case 0x5:
@@@ -8207,12 -8131,9 +8207,12 @@@ bnx2_init_board(struct pci_dev *pdev, s
  
  	reg = bnx2_reg_rd_ind(bp, BNX2_SHM_HDR_SIGNATURE);
  
 +	if (bnx2_reg_rd_ind(bp, BNX2_MCP_TOE_ID) & BNX2_MCP_TOE_ID_FUNCTION_ID)
 +		bp->func = 1;
 +
  	if ((reg & BNX2_SHM_HDR_SIGNATURE_SIG_MASK) ==
  	    BNX2_SHM_HDR_SIGNATURE_SIG) {
 -		u32 off = PCI_FUNC(pdev->devfn) << 2;
 +		u32 off = bp->func << 2;
  
  		bp->shmem_base = bnx2_reg_rd_ind(bp, BNX2_SHM_HDR_ADDR_0 + off);
  	} else
diff --combined drivers/net/ethernet/broadcom/cnic.c
index 650c545,2c89d17..3b4fc61
--- a/drivers/net/ethernet/broadcom/cnic.c
+++ b/drivers/net/ethernet/broadcom/cnic.c
@@@ -256,16 -256,11 +256,16 @@@ static void cnic_ulp_ctl(struct cnic_de
  	struct cnic_local *cp = dev->cnic_priv;
  	struct cnic_eth_dev *ethdev = cp->ethdev;
  	struct drv_ctl_info info;
 +	struct fcoe_capabilities *fcoe_cap =
 +		&info.data.register_data.fcoe_features;
  
 -	if (reg)
 +	if (reg) {
  		info.cmd = DRV_CTL_ULP_REGISTER_CMD;
 -	else
 +		if (ulp_type == CNIC_ULP_FCOE && dev->fcoe_cap)
 +			memcpy(fcoe_cap, dev->fcoe_cap, sizeof(*fcoe_cap));
 +	} else {
  		info.cmd = DRV_CTL_ULP_UNREGISTER_CMD;
 +	}
  
  	info.data.ulp_type = ulp_type;
  	ethdev->drv_ctl(dev->netdev, &info);
@@@ -291,9 -286,6 +291,9 @@@ static int cnic_get_l5_cid(struct cnic_
  {
  	u32 i;
  
 +	if (!cp->ctx_tbl)
 +		return -EINVAL;
 +
  	for (i = 0; i < cp->max_cid_space; i++) {
  		if (cp->ctx_tbl[i].cid == cid) {
  			*l5_cid = i;
@@@ -542,7 -534,8 +542,8 @@@ int cnic_unregister_driver(int ulp_type
  	}
  
  	if (atomic_read(&ulp_ops->ref_count) != 0)
- 		netdev_warn(dev->netdev, "Failed waiting for ref count to go to zero\n");
+ 		pr_warn("%s: Failed waiting for ref count to go to zero\n",
+ 			__func__);
  	return 0;
  
  out_unlock:
@@@ -619,8 -612,6 +620,8 @@@ static int cnic_unregister_device(struc
  
  	if (ulp_type == CNIC_ULP_ISCSI)
  		cnic_send_nlmsg(cp, ISCSI_KEVENT_IF_DOWN, NULL);
 +	else if (ulp_type == CNIC_ULP_FCOE)
 +		dev->fcoe_cap = NULL;
  
  	synchronize_rcu();
  
@@@ -2598,7 -2589,7 +2599,7 @@@ static void cnic_bnx2x_kwqe_err(struct 
  		return;
  	}
  
 -	cqes[0] = (struct kcqe *) &kcqe;
 +	cqes[0] = &kcqe;
  	cnic_reply_bnx2x_kcqes(dev, ulp_type, cqes, 1);
  }
  
@@@ -3226,9 -3217,6 +3227,9 @@@ static int cnic_ctl(void *data, struct 
  		u32 l5_cid;
  		struct cnic_local *cp = dev->cnic_priv;
  
 +		if (!test_bit(CNIC_F_CNIC_UP, &dev->flags))
 +			break;
 +
  		if (cnic_get_l5_cid(cp, cid, &l5_cid) == 0) {
  			struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
  
@@@ -3959,15 -3947,6 +3960,15 @@@ static void cnic_cm_process_kcqe(struc
  		cnic_cm_upcall(cp, csk, opcode);
  		break;
  
 +	case L5CM_RAMROD_CMD_ID_CLOSE:
 +		if (l4kcqe->status != 0) {
 +			netdev_warn(dev->netdev, "RAMROD CLOSE compl with "
 +				    "status 0x%x\n", l4kcqe->status);
 +			opcode = L4_KCQE_OPCODE_VALUE_CLOSE_COMP;
 +			/* Fall through */
 +		} else {
 +			break;
 +		}
  	case L4_KCQE_OPCODE_VALUE_RESET_RECEIVED:
  	case L4_KCQE_OPCODE_VALUE_CLOSE_COMP:
  	case L4_KCQE_OPCODE_VALUE_RESET_COMP:
@@@ -4271,6 -4250,8 +4272,6 @@@ static int cnic_cm_shutdown(struct cnic
  	struct cnic_local *cp = dev->cnic_priv;
  	int i;
  
 -	cp->stop_cm(dev);
 -
  	if (!cp->csk_tbl)
  		return 0;
  
@@@ -4688,9 -4669,9 +4689,9 @@@ static int cnic_start_bnx2_hw(struct cn
  
  	cp->kcq1.sw_prod_idx = 0;
  	cp->kcq1.hw_prod_idx_ptr =
 -		(u16 *) &sblk->status_completion_producer_index;
 +		&sblk->status_completion_producer_index;
  
 -	cp->kcq1.status_idx_ptr = (u16 *) &sblk->status_idx;
 +	cp->kcq1.status_idx_ptr = &sblk->status_idx;
  
  	/* Initialize the kernel complete queue context. */
  	val = KRNLQ_TYPE_TYPE_KRNLQ | KRNLQ_SIZE_TYPE_SIZE |
@@@ -4716,9 -4697,9 +4717,9 @@@
  		u32 sb = BNX2_L2CTX_L5_STATUSB_NUM(sb_id);
  
  		cp->kcq1.hw_prod_idx_ptr =
 -			(u16 *) &msblk->status_completion_producer_index;
 -		cp->kcq1.status_idx_ptr = (u16 *) &msblk->status_idx;
 -		cp->kwq_con_idx_ptr = (u16 *) &msblk->status_cmd_consumer_index;
 +			&msblk->status_completion_producer_index;
 +		cp->kcq1.status_idx_ptr = &msblk->status_idx;
 +		cp->kwq_con_idx_ptr = &msblk->status_cmd_consumer_index;
  		cp->int_num = sb_id << BNX2_PCICFG_INT_ACK_CMD_INT_NUM_SHIFT;
  		cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_HOST_QIDX, sb);
  		cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_HOST_QIDX, sb);
@@@ -5000,14 -4981,8 +5001,14 @@@ static int cnic_start_bnx2x_hw(struct c
  	cp->port_mode = CHIP_PORT_MODE_NONE;
  
  	if (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id)) {
 -		u32 val = CNIC_RD(dev, MISC_REG_PORT4MODE_EN_OVWR);
 +		u32 val;
  
 +		pci_read_config_dword(dev->pcidev, PCICFG_ME_REGISTER, &val);
 +		cp->func = (u8) ((val & ME_REG_ABS_PF_NUM) >>
 +				 ME_REG_ABS_PF_NUM_SHIFT);
 +		func = CNIC_FUNC(cp);
 +
 +		val = CNIC_RD(dev, MISC_REG_PORT4MODE_EN_OVWR);
  		if (!(val & 1))
  			val = CNIC_RD(dev, MISC_REG_PORT4MODE_EN);
  		else
@@@ -5312,7 -5287,6 +5313,7 @@@ static void cnic_stop_hw(struct cnic_de
  			i++;
  		}
  		cnic_shutdown_rings(dev);
 +		cp->stop_cm(dev);
  		clear_bit(CNIC_F_CNIC_UP, &dev->flags);
  		RCU_INIT_POINTER(cp->ulp_ops[CNIC_ULP_L4], NULL);
  		synchronize_rcu();
@@@ -5542,7 -5516,9 +5543,7 @@@ static void cnic_rcv_netevent(struct cn
  	rcu_read_unlock();
  }
  
 -/**
 - * netdev event handler
 - */
 +/* netdev event handler */
  static int cnic_netdev_event(struct notifier_block *this, unsigned long event,
  							 void *ptr)
  {
diff --combined drivers/net/ethernet/freescale/gianfar.c
index af16f9f,ab1d80f..4605f72
--- a/drivers/net/ethernet/freescale/gianfar.c
+++ b/drivers/net/ethernet/freescale/gianfar.c
@@@ -1,4 -1,5 +1,4 @@@
 -/*
 - * drivers/net/ethernet/freescale/gianfar.c
 +/* drivers/net/ethernet/freescale/gianfar.c
   *
   * Gianfar Ethernet Driver
   * This driver is designed for the non-CPM ethernet controllers
@@@ -113,7 -114,7 +113,7 @@@ static void gfar_timeout(struct net_dev
  static int gfar_close(struct net_device *dev);
  struct sk_buff *gfar_new_skb(struct net_device *dev);
  static void gfar_new_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp,
 -		struct sk_buff *skb);
 +			   struct sk_buff *skb);
  static int gfar_set_mac_address(struct net_device *dev);
  static int gfar_change_mtu(struct net_device *dev, int new_mtu);
  static irqreturn_t gfar_error(int irq, void *dev_id);
@@@ -265,8 -266,8 +265,8 @@@ static int gfar_alloc_skb_resources(str
  		tx_queue->tx_bd_dma_base = addr;
  		tx_queue->dev = ndev;
  		/* enet DMA only understands physical addresses */
 -		addr    += sizeof(struct txbd8) *tx_queue->tx_ring_size;
 -		vaddr   += sizeof(struct txbd8) *tx_queue->tx_ring_size;
 +		addr  += sizeof(struct txbd8) * tx_queue->tx_ring_size;
 +		vaddr += sizeof(struct txbd8) * tx_queue->tx_ring_size;
  	}
  
  	/* Start the rx descriptor ring where the tx ring leaves off */
@@@ -275,16 -276,15 +275,16 @@@
  		rx_queue->rx_bd_base = vaddr;
  		rx_queue->rx_bd_dma_base = addr;
  		rx_queue->dev = ndev;
 -		addr    += sizeof (struct rxbd8) * rx_queue->rx_ring_size;
 -		vaddr   += sizeof (struct rxbd8) * rx_queue->rx_ring_size;
 +		addr  += sizeof(struct rxbd8) * rx_queue->rx_ring_size;
 +		vaddr += sizeof(struct rxbd8) * rx_queue->rx_ring_size;
  	}
  
  	/* Setup the skbuff rings */
  	for (i = 0; i < priv->num_tx_queues; i++) {
  		tx_queue = priv->tx_queue[i];
  		tx_queue->tx_skbuff = kmalloc(sizeof(*tx_queue->tx_skbuff) *
 -				  tx_queue->tx_ring_size, GFP_KERNEL);
 +					      tx_queue->tx_ring_size,
 +					      GFP_KERNEL);
  		if (!tx_queue->tx_skbuff) {
  			netif_err(priv, ifup, ndev,
  				  "Could not allocate tx_skbuff\n");
@@@ -298,8 -298,7 +298,8 @@@
  	for (i = 0; i < priv->num_rx_queues; i++) {
  		rx_queue = priv->rx_queue[i];
  		rx_queue->rx_skbuff = kmalloc(sizeof(*rx_queue->rx_skbuff) *
 -				  rx_queue->rx_ring_size, GFP_KERNEL);
 +					      rx_queue->rx_ring_size,
 +					      GFP_KERNEL);
  
  		if (!rx_queue->rx_skbuff) {
  			netif_err(priv, ifup, ndev,
@@@ -328,15 -327,15 +328,15 @@@ static void gfar_init_tx_rx_base(struc
  	int i;
  
  	baddr = &regs->tbase0;
 -	for(i = 0; i < priv->num_tx_queues; i++) {
 +	for (i = 0; i < priv->num_tx_queues; i++) {
  		gfar_write(baddr, priv->tx_queue[i]->tx_bd_dma_base);
 -		baddr	+= 2;
 +		baddr += 2;
  	}
  
  	baddr = &regs->rbase0;
 -	for(i = 0; i < priv->num_rx_queues; i++) {
 +	for (i = 0; i < priv->num_rx_queues; i++) {
  		gfar_write(baddr, priv->rx_queue[i]->rx_bd_dma_base);
 -		baddr   += 2;
 +		baddr += 2;
  	}
  }
  
@@@ -406,8 -405,7 +406,8 @@@ static void gfar_init_mac(struct net_de
  	gfar_write(&regs->attreli, attrs);
  
  	/* Start with defaults, and add stashing or locking
 -	 * depending on the approprate variables */
 +	 * depending on the approprate variables
 +	 */
  	attrs = ATTR_INIT_SETTINGS;
  
  	if (priv->bd_stash_en)
@@@ -428,16 -426,16 +428,16 @@@ static struct net_device_stats *gfar_ge
  	struct gfar_private *priv = netdev_priv(dev);
  	unsigned long rx_packets = 0, rx_bytes = 0, rx_dropped = 0;
  	unsigned long tx_packets = 0, tx_bytes = 0;
 -	int i = 0;
 +	int i;
  
  	for (i = 0; i < priv->num_rx_queues; i++) {
  		rx_packets += priv->rx_queue[i]->stats.rx_packets;
 -		rx_bytes += priv->rx_queue[i]->stats.rx_bytes;
 +		rx_bytes   += priv->rx_queue[i]->stats.rx_bytes;
  		rx_dropped += priv->rx_queue[i]->stats.rx_dropped;
  	}
  
  	dev->stats.rx_packets = rx_packets;
 -	dev->stats.rx_bytes = rx_bytes;
 +	dev->stats.rx_bytes   = rx_bytes;
  	dev->stats.rx_dropped = rx_dropped;
  
  	for (i = 0; i < priv->num_tx_queues; i++) {
@@@ -445,7 -443,7 +445,7 @@@
  		tx_packets += priv->tx_queue[i]->stats.tx_packets;
  	}
  
 -	dev->stats.tx_bytes = tx_bytes;
 +	dev->stats.tx_bytes   = tx_bytes;
  	dev->stats.tx_packets = tx_packets;
  
  	return &dev->stats;
@@@ -470,7 -468,7 +470,7 @@@ static const struct net_device_ops gfar
  
  void lock_rx_qs(struct gfar_private *priv)
  {
 -	int i = 0x0;
 +	int i;
  
  	for (i = 0; i < priv->num_rx_queues; i++)
  		spin_lock(&priv->rx_queue[i]->rxlock);
@@@ -478,7 -476,7 +478,7 @@@
  
  void lock_tx_qs(struct gfar_private *priv)
  {
 -	int i = 0x0;
 +	int i;
  
  	for (i = 0; i < priv->num_tx_queues; i++)
  		spin_lock(&priv->tx_queue[i]->txlock);
@@@ -486,7 -484,7 +486,7 @@@
  
  void unlock_rx_qs(struct gfar_private *priv)
  {
 -	int i = 0x0;
 +	int i;
  
  	for (i = 0; i < priv->num_rx_queues; i++)
  		spin_unlock(&priv->rx_queue[i]->rxlock);
@@@ -494,7 -492,7 +494,7 @@@
  
  void unlock_tx_qs(struct gfar_private *priv)
  {
 -	int i = 0x0;
 +	int i;
  
  	for (i = 0; i < priv->num_tx_queues; i++)
  		spin_unlock(&priv->tx_queue[i]->txlock);
@@@ -510,13 -508,13 +510,13 @@@ static bool gfar_is_vlan_on(struct gfar
  static inline int gfar_uses_fcb(struct gfar_private *priv)
  {
  	return gfar_is_vlan_on(priv) ||
 -		(priv->ndev->features & NETIF_F_RXCSUM) ||
 -		(priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER);
 +	       (priv->ndev->features & NETIF_F_RXCSUM) ||
 +	       (priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER);
  }
  
  static void free_tx_pointers(struct gfar_private *priv)
  {
 -	int i = 0;
 +	int i;
  
  	for (i = 0; i < priv->num_tx_queues; i++)
  		kfree(priv->tx_queue[i]);
@@@ -524,7 -522,7 +524,7 @@@
  
  static void free_rx_pointers(struct gfar_private *priv)
  {
 -	int i = 0;
 +	int i;
  
  	for (i = 0; i < priv->num_rx_queues; i++)
  		kfree(priv->rx_queue[i]);
@@@ -532,7 -530,7 +532,7 @@@
  
  static void unmap_group_regs(struct gfar_private *priv)
  {
 -	int i = 0;
 +	int i;
  
  	for (i = 0; i < MAXGROUPS; i++)
  		if (priv->gfargrp[i].regs)
@@@ -541,7 -539,7 +541,7 @@@
  
  static void disable_napi(struct gfar_private *priv)
  {
 -	int i = 0;
 +	int i;
  
  	for (i = 0; i < priv->num_grps; i++)
  		napi_disable(&priv->gfargrp[i].napi);
@@@ -549,14 -547,14 +549,14 @@@
  
  static void enable_napi(struct gfar_private *priv)
  {
 -	int i = 0;
 +	int i;
  
  	for (i = 0; i < priv->num_grps; i++)
  		napi_enable(&priv->gfargrp[i].napi);
  }
  
  static int gfar_parse_group(struct device_node *np,
 -		struct gfar_private *priv, const char *model)
 +			    struct gfar_private *priv, const char *model)
  {
  	u32 *queue_mask;
  
@@@ -582,13 -580,15 +582,13 @@@
  	priv->gfargrp[priv->num_grps].grp_id = priv->num_grps;
  	priv->gfargrp[priv->num_grps].priv = priv;
  	spin_lock_init(&priv->gfargrp[priv->num_grps].grplock);
 -	if(priv->mode == MQ_MG_MODE) {
 -		queue_mask = (u32 *)of_get_property(np,
 -					"fsl,rx-bit-map", NULL);
 -		priv->gfargrp[priv->num_grps].rx_bit_map =
 -			queue_mask ?  *queue_mask :(DEFAULT_MAPPING >> priv->num_grps);
 -		queue_mask = (u32 *)of_get_property(np,
 -					"fsl,tx-bit-map", NULL);
 -		priv->gfargrp[priv->num_grps].tx_bit_map =
 -			queue_mask ? *queue_mask : (DEFAULT_MAPPING >> priv->num_grps);
 +	if (priv->mode == MQ_MG_MODE) {
 +		queue_mask = (u32 *)of_get_property(np, "fsl,rx-bit-map", NULL);
 +		priv->gfargrp[priv->num_grps].rx_bit_map = queue_mask ?
 +			*queue_mask : (DEFAULT_MAPPING >> priv->num_grps);
 +		queue_mask = (u32 *)of_get_property(np, "fsl,tx-bit-map", NULL);
 +		priv->gfargrp[priv->num_grps].tx_bit_map = queue_mask ?
 +			*queue_mask : (DEFAULT_MAPPING >> priv->num_grps);
  	} else {
  		priv->gfargrp[priv->num_grps].rx_bit_map = 0xFF;
  		priv->gfargrp[priv->num_grps].tx_bit_map = 0xFF;
@@@ -652,7 -652,7 +652,7 @@@ static int gfar_of_init(struct platform
  	priv->num_rx_queues = num_rx_qs;
  	priv->num_grps = 0x0;
  
 -	/* Init Rx queue filer rule set linked list*/
 +	/* Init Rx queue filer rule set linked list */
  	INIT_LIST_HEAD(&priv->rx_list.list);
  	priv->rx_list.count = 0;
  	mutex_init(&priv->rx_queue_access);
@@@ -673,7 -673,7 +673,7 @@@
  	} else {
  		priv->mode = SQ_SG_MODE;
  		err = gfar_parse_group(np, priv, model);
 -		if(err)
 +		if (err)
  			goto err_grp_init;
  	}
  
@@@ -730,27 -730,27 +730,27 @@@
  		priv->device_flags |= FSL_GIANFAR_DEV_HAS_BUF_STASHING;
  
  	mac_addr = of_get_mac_address(np);
 +
  	if (mac_addr)
  		memcpy(dev->dev_addr, mac_addr, ETH_ALEN);
  
  	if (model && !strcasecmp(model, "TSEC"))
 -		priv->device_flags =
 -			FSL_GIANFAR_DEV_HAS_GIGABIT |
 -			FSL_GIANFAR_DEV_HAS_COALESCE |
 -			FSL_GIANFAR_DEV_HAS_RMON |
 -			FSL_GIANFAR_DEV_HAS_MULTI_INTR;
 +		priv->device_flags = FSL_GIANFAR_DEV_HAS_GIGABIT |
 +				     FSL_GIANFAR_DEV_HAS_COALESCE |
 +				     FSL_GIANFAR_DEV_HAS_RMON |
 +				     FSL_GIANFAR_DEV_HAS_MULTI_INTR;
 +
  	if (model && !strcasecmp(model, "eTSEC"))
 -		priv->device_flags =
 -			FSL_GIANFAR_DEV_HAS_GIGABIT |
 -			FSL_GIANFAR_DEV_HAS_COALESCE |
 -			FSL_GIANFAR_DEV_HAS_RMON |
 -			FSL_GIANFAR_DEV_HAS_MULTI_INTR |
 -			FSL_GIANFAR_DEV_HAS_PADDING |
 -			FSL_GIANFAR_DEV_HAS_CSUM |
 -			FSL_GIANFAR_DEV_HAS_VLAN |
 -			FSL_GIANFAR_DEV_HAS_MAGIC_PACKET |
 -			FSL_GIANFAR_DEV_HAS_EXTENDED_HASH |
 -			FSL_GIANFAR_DEV_HAS_TIMER;
 +		priv->device_flags = FSL_GIANFAR_DEV_HAS_GIGABIT |
 +				     FSL_GIANFAR_DEV_HAS_COALESCE |
 +				     FSL_GIANFAR_DEV_HAS_RMON |
 +				     FSL_GIANFAR_DEV_HAS_MULTI_INTR |
 +				     FSL_GIANFAR_DEV_HAS_PADDING |
 +				     FSL_GIANFAR_DEV_HAS_CSUM |
 +				     FSL_GIANFAR_DEV_HAS_VLAN |
 +				     FSL_GIANFAR_DEV_HAS_MAGIC_PACKET |
 +				     FSL_GIANFAR_DEV_HAS_EXTENDED_HASH |
 +				     FSL_GIANFAR_DEV_HAS_TIMER;
  
  	ctype = of_get_property(np, "phy-connection-type", NULL);
  
@@@ -781,7 -781,7 +781,7 @@@ err_grp_init
  }
  
  static int gfar_hwtstamp_ioctl(struct net_device *netdev,
 -			struct ifreq *ifr, int cmd)
 +			       struct ifreq *ifr, int cmd)
  {
  	struct hwtstamp_config config;
  	struct gfar_private *priv = netdev_priv(netdev);
@@@ -851,7 -851,6 +851,7 @@@ static unsigned int reverse_bitmap(unsi
  {
  	unsigned int new_bit_map = 0x0;
  	int mask = 0x1 << (max_qs - 1), i;
 +
  	for (i = 0; i < max_qs; i++) {
  		if (bit_map & mask)
  			new_bit_map = new_bit_map + (1 << i);
@@@ -937,22 -936,22 +937,22 @@@ static void gfar_detect_errata(struct g
  
  	/* MPC8313 Rev 2.0 and higher; All MPC837x */
  	if ((pvr == 0x80850010 && mod == 0x80b0 && rev >= 0x0020) ||
 -			(pvr == 0x80861010 && (mod & 0xfff9) == 0x80c0))
 +	    (pvr == 0x80861010 && (mod & 0xfff9) == 0x80c0))
  		priv->errata |= GFAR_ERRATA_74;
  
  	/* MPC8313 and MPC837x all rev */
  	if ((pvr == 0x80850010 && mod == 0x80b0) ||
 -			(pvr == 0x80861010 && (mod & 0xfff9) == 0x80c0))
 +	    (pvr == 0x80861010 && (mod & 0xfff9) == 0x80c0))
  		priv->errata |= GFAR_ERRATA_76;
  
  	/* MPC8313 and MPC837x all rev */
  	if ((pvr == 0x80850010 && mod == 0x80b0) ||
 -			(pvr == 0x80861010 && (mod & 0xfff9) == 0x80c0))
 +	    (pvr == 0x80861010 && (mod & 0xfff9) == 0x80c0))
  		priv->errata |= GFAR_ERRATA_A002;
  
  	/* MPC8313 Rev < 2.0, MPC8548 rev 2.0 */
  	if ((pvr == 0x80850010 && mod == 0x80b0 && rev < 0x0020) ||
 -			(pvr == 0x80210020 && mod == 0x8030 && rev == 0x0020))
 +	    (pvr == 0x80210020 && mod == 0x8030 && rev == 0x0020))
  		priv->errata |= GFAR_ERRATA_12;
  
  	if (priv->errata)
@@@ -961,8 -960,7 +961,8 @@@
  }
  
  /* Set up the ethernet device structure, private data,
 - * and anything else we need before we start */
 + * and anything else we need before we start
 + */
  static int gfar_probe(struct platform_device *ofdev)
  {
  	u32 tempval;
@@@ -993,9 -991,8 +993,9 @@@
  
  	gfar_detect_errata(priv);
  
 -	/* Stop the DMA engine now, in case it was running before */
 -	/* (The firmware could have used it, and left it running). */
 +	/* Stop the DMA engine now, in case it was running before
 +	 * (The firmware could have used it, and left it running).
 +	 */
  	gfar_halt(dev);
  
  	/* Reset MAC layer */
@@@ -1029,14 -1026,13 +1029,14 @@@
  
  	/* Register for napi ...We are registering NAPI for each grp */
  	for (i = 0; i < priv->num_grps; i++)
 -		netif_napi_add(dev, &priv->gfargrp[i].napi, gfar_poll, GFAR_DEV_WEIGHT);
 +		netif_napi_add(dev, &priv->gfargrp[i].napi, gfar_poll,
 +			       GFAR_DEV_WEIGHT);
  
  	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_CSUM) {
  		dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG |
 -			NETIF_F_RXCSUM;
 +				   NETIF_F_RXCSUM;
  		dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG |
 -			NETIF_F_RXCSUM | NETIF_F_HIGHDMA;
 +				 NETIF_F_RXCSUM | NETIF_F_HIGHDMA;
  	}
  
  	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_VLAN) {
@@@ -1085,7 -1081,7 +1085,7 @@@
  		priv->padding = 0;
  
  	if (dev->features & NETIF_F_IP_CSUM ||
 -			priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER)
 +	    priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER)
  		dev->needed_headroom = GMAC_FCB_LEN;
  
  	/* Program the isrg regs only if number of grps > 1 */
@@@ -1102,32 -1098,28 +1102,32 @@@
  
  	/* Need to reverse the bit maps as  bit_map's MSB is q0
  	 * but, for_each_set_bit parses from right to left, which
 -	 * basically reverses the queue numbers */
 +	 * basically reverses the queue numbers
 +	 */
  	for (i = 0; i< priv->num_grps; i++) {
 -		priv->gfargrp[i].tx_bit_map = reverse_bitmap(
 -				priv->gfargrp[i].tx_bit_map, MAX_TX_QS);
 -		priv->gfargrp[i].rx_bit_map = reverse_bitmap(
 -				priv->gfargrp[i].rx_bit_map, MAX_RX_QS);
 +		priv->gfargrp[i].tx_bit_map =
 +			reverse_bitmap(priv->gfargrp[i].tx_bit_map, MAX_TX_QS);
 +		priv->gfargrp[i].rx_bit_map =
 +			reverse_bitmap(priv->gfargrp[i].rx_bit_map, MAX_RX_QS);
  	}
  
  	/* Calculate RSTAT, TSTAT, RQUEUE and TQUEUE values,
 -	 * also assign queues to groups */
 +	 * also assign queues to groups
 +	 */
  	for (grp_idx = 0; grp_idx < priv->num_grps; grp_idx++) {
  		priv->gfargrp[grp_idx].num_rx_queues = 0x0;
 +
  		for_each_set_bit(i, &priv->gfargrp[grp_idx].rx_bit_map,
 -				priv->num_rx_queues) {
 +				 priv->num_rx_queues) {
  			priv->gfargrp[grp_idx].num_rx_queues++;
  			priv->rx_queue[i]->grp = &priv->gfargrp[grp_idx];
  			rstat = rstat | (RSTAT_CLEAR_RHALT >> i);
  			rqueue = rqueue | ((RQUEUE_EN0 | RQUEUE_EX0) >> i);
  		}
  		priv->gfargrp[grp_idx].num_tx_queues = 0x0;
 +
  		for_each_set_bit(i, &priv->gfargrp[grp_idx].tx_bit_map,
 -				priv->num_tx_queues) {
 +				 priv->num_tx_queues) {
  			priv->gfargrp[grp_idx].num_tx_queues++;
  			priv->tx_queue[i]->grp = &priv->gfargrp[grp_idx];
  			tstat = tstat | (TSTAT_CLEAR_THALT >> i);
@@@ -1157,7 -1149,7 +1157,7 @@@
  		priv->rx_queue[i]->rxic = DEFAULT_RXIC;
  	}
  
 -	/* always enable rx filer*/
 +	/* always enable rx filer */
  	priv->rx_filer_enable = 1;
  	/* Enable most messages by default */
  	priv->msg_enable = (NETIF_MSG_IFUP << 1 ) - 1;
@@@ -1173,8 -1165,7 +1173,8 @@@
  	}
  
  	device_init_wakeup(&dev->dev,
 -		priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);
 +			   priv->device_flags &
 +			   FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);
  
  	/* fill out IRQ number and name fields */
  	for (i = 0; i < priv->num_grps; i++) {
@@@ -1198,14 -1189,13 +1198,14 @@@
  	/* Print out the device info */
  	netdev_info(dev, "mac: %pM\n", dev->dev_addr);
  
 -	/* Even more device info helps when determining which kernel */
 -	/* provided which set of benchmarks. */
 +	/* Even more device info helps when determining which kernel
 +	 * provided which set of benchmarks.
 +	 */
  	netdev_info(dev, "Running with NAPI enabled\n");
  	for (i = 0; i < priv->num_rx_queues; i++)
  		netdev_info(dev, "RX BD ring size for Q[%d]: %d\n",
  			    i, priv->rx_queue[i]->rx_ring_size);
 -	for(i = 0; i < priv->num_tx_queues; i++)
 +	for (i = 0; i < priv->num_tx_queues; i++)
  		netdev_info(dev, "TX BD ring size for Q[%d]: %d\n",
  			    i, priv->tx_queue[i]->tx_ring_size);
  
@@@ -1252,8 -1242,7 +1252,8 @@@ static int gfar_suspend(struct device *
  	u32 tempval;
  
  	int magic_packet = priv->wol_en &&
 -		(priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);
 +			   (priv->device_flags &
 +			    FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);
  
  	netif_device_detach(ndev);
  
@@@ -1305,8 -1294,7 +1305,8 @@@ static int gfar_resume(struct device *d
  	unsigned long flags;
  	u32 tempval;
  	int magic_packet = priv->wol_en &&
 -		(priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);
 +			   (priv->device_flags &
 +			    FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);
  
  	if (!netif_running(ndev)) {
  		netif_device_attach(ndev);
@@@ -1405,13 -1393,13 +1405,13 @@@ static phy_interface_t gfar_get_interfa
  	}
  
  	if (ecntrl & ECNTRL_REDUCED_MODE) {
 -		if (ecntrl & ECNTRL_REDUCED_MII_MODE)
 +		if (ecntrl & ECNTRL_REDUCED_MII_MODE) {
  			return PHY_INTERFACE_MODE_RMII;
 +		}
  		else {
  			phy_interface_t interface = priv->interface;
  
 -			/*
 -			 * This isn't autodetected right now, so it must
 +			/* This isn't autodetected right now, so it must
  			 * be set by the device tree or platform code.
  			 */
  			if (interface == PHY_INTERFACE_MODE_RGMII_ID)
@@@ -1465,7 -1453,8 +1465,7 @@@ static int init_phy(struct net_device *
  	return 0;
  }
  
 -/*
 - * Initialize TBI PHY interface for communicating with the
 +/* Initialize TBI PHY interface for communicating with the
   * SERDES lynx PHY on the chip.  We communicate with this PHY
   * through the MDIO bus on each controller, treating it as a
   * "normal" PHY at the address found in the TBIPA register.  We assume
@@@ -1490,7 -1479,8 +1490,7 @@@ static void gfar_configure_serdes(struc
  		return;
  	}
  
 -	/*
 -	 * If the link is already up, we must already be ok, and don't need to
 +	/* If the link is already up, we must already be ok, and don't need to
  	 * configure and reset the TBI<->SerDes link.  Maybe U-Boot configured
  	 * everything for us?  Resetting it takes the link down and requires
  	 * several seconds for it to come back.
@@@ -1502,19 -1492,18 +1502,19 @@@
  	phy_write(tbiphy, MII_TBICON, TBICON_CLK_SELECT);
  
  	phy_write(tbiphy, MII_ADVERTISE,
 -			ADVERTISE_1000XFULL | ADVERTISE_1000XPAUSE |
 -			ADVERTISE_1000XPSE_ASYM);
 +		  ADVERTISE_1000XFULL | ADVERTISE_1000XPAUSE |
 +		  ADVERTISE_1000XPSE_ASYM);
  
 -	phy_write(tbiphy, MII_BMCR, BMCR_ANENABLE |
 -			BMCR_ANRESTART | BMCR_FULLDPLX | BMCR_SPEED1000);
 +	phy_write(tbiphy, MII_BMCR,
 +		  BMCR_ANENABLE | BMCR_ANRESTART | BMCR_FULLDPLX |
 +		  BMCR_SPEED1000);
  }
  
  static void init_registers(struct net_device *dev)
  {
  	struct gfar_private *priv = netdev_priv(dev);
  	struct gfar __iomem *regs = NULL;
 -	int i = 0;
 +	int i;
  
  	for (i = 0; i < priv->num_grps; i++) {
  		regs = priv->gfargrp[i].regs;
@@@ -1565,13 -1554,15 +1565,13 @@@ static int __gfar_is_rx_idle(struct gfa
  {
  	u32 res;
  
 -	/*
 -	 * Normaly TSEC should not hang on GRS commands, so we should
 +	/* Normaly TSEC should not hang on GRS commands, so we should
  	 * actually wait for IEVENT_GRSC flag.
  	 */
  	if (likely(!gfar_has_errata(priv, GFAR_ERRATA_A002)))
  		return 0;
  
 -	/*
 -	 * Read the eTSEC register at offset 0xD1C. If bits 7-14 are
 +	/* Read the eTSEC register at offset 0xD1C. If bits 7-14 are
  	 * the same as bits 23-30, the eTSEC Rx is assumed to be idle
  	 * and the Rx can be safely reset.
  	 */
@@@ -1589,7 -1580,7 +1589,7 @@@ static void gfar_halt_nodisable(struct 
  	struct gfar_private *priv = netdev_priv(dev);
  	struct gfar __iomem *regs = NULL;
  	u32 tempval;
 -	int i = 0;
 +	int i;
  
  	for (i = 0; i < priv->num_grps; i++) {
  		regs = priv->gfargrp[i].regs;
@@@ -1603,8 -1594,8 +1603,8 @@@
  	regs = priv->gfargrp[0].regs;
  	/* Stop the DMA, and wait for it to stop */
  	tempval = gfar_read(&regs->dmactrl);
 -	if ((tempval & (DMACTRL_GRS | DMACTRL_GTS))
 -	    != (DMACTRL_GRS | DMACTRL_GTS)) {
 +	if ((tempval & (DMACTRL_GRS | DMACTRL_GTS)) !=
 +	    (DMACTRL_GRS | DMACTRL_GTS)) {
  		int ret;
  
  		tempval |= (DMACTRL_GRS | DMACTRL_GTS);
@@@ -1669,7 -1660,7 +1669,7 @@@ void stop_gfar(struct net_device *dev
  	} else {
  		for (i = 0; i < priv->num_grps; i++)
  			free_irq(priv->gfargrp[i].interruptTransmit,
 -					&priv->gfargrp[i]);
 +				 &priv->gfargrp[i]);
  	}
  
  	free_skb_resources(priv);
@@@ -1688,13 -1679,13 +1688,13 @@@ static void free_skb_tx_queue(struct gf
  			continue;
  
  		dma_unmap_single(&priv->ofdev->dev, txbdp->bufPtr,
 -				txbdp->length, DMA_TO_DEVICE);
 +				 txbdp->length, DMA_TO_DEVICE);
  		txbdp->lstatus = 0;
  		for (j = 0; j < skb_shinfo(tx_queue->tx_skbuff[i])->nr_frags;
 -				j++) {
 +		     j++) {
  			txbdp++;
  			dma_unmap_page(&priv->ofdev->dev, txbdp->bufPtr,
 -					txbdp->length, DMA_TO_DEVICE);
 +				       txbdp->length, DMA_TO_DEVICE);
  		}
  		txbdp++;
  		dev_kfree_skb_any(tx_queue->tx_skbuff[i]);
@@@ -1714,8 -1705,8 +1714,8 @@@ static void free_skb_rx_queue(struct gf
  	for (i = 0; i < rx_queue->rx_ring_size; i++) {
  		if (rx_queue->rx_skbuff[i]) {
  			dma_unmap_single(&priv->ofdev->dev,
 -					rxbdp->bufPtr, priv->rx_buffer_size,
 -					DMA_FROM_DEVICE);
 +					 rxbdp->bufPtr, priv->rx_buffer_size,
 +					 DMA_FROM_DEVICE);
  			dev_kfree_skb_any(rx_queue->rx_skbuff[i]);
  			rx_queue->rx_skbuff[i] = NULL;
  		}
@@@ -1727,8 -1718,7 +1727,8 @@@
  }
  
  /* If there are any tx skbs or rx skbs still around, free them.
 - * Then free tx_skbuff and rx_skbuff */
 + * Then free tx_skbuff and rx_skbuff
 + */
  static void free_skb_resources(struct gfar_private *priv)
  {
  	struct gfar_priv_tx_q *tx_queue = NULL;
@@@ -1738,25 -1728,24 +1738,25 @@@
  	/* Go through all the buffer descriptors and free their data buffers */
  	for (i = 0; i < priv->num_tx_queues; i++) {
  		struct netdev_queue *txq;
 +
  		tx_queue = priv->tx_queue[i];
  		txq = netdev_get_tx_queue(tx_queue->dev, tx_queue->qindex);
 -		if(tx_queue->tx_skbuff)
 +		if (tx_queue->tx_skbuff)
  			free_skb_tx_queue(tx_queue);
  		netdev_tx_reset_queue(txq);
  	}
  
  	for (i = 0; i < priv->num_rx_queues; i++) {
  		rx_queue = priv->rx_queue[i];
 -		if(rx_queue->rx_skbuff)
 +		if (rx_queue->rx_skbuff)
  			free_skb_rx_queue(rx_queue);
  	}
  
  	dma_free_coherent(&priv->ofdev->dev,
 -			sizeof(struct txbd8) * priv->total_tx_ring_size +
 -			sizeof(struct rxbd8) * priv->total_rx_ring_size,
 -			priv->tx_queue[0]->tx_bd_base,
 -			priv->tx_queue[0]->tx_bd_dma_base);
 +			  sizeof(struct txbd8) * priv->total_tx_ring_size +
 +			  sizeof(struct rxbd8) * priv->total_rx_ring_size,
 +			  priv->tx_queue[0]->tx_bd_base,
 +			  priv->tx_queue[0]->tx_bd_dma_base);
  	skb_queue_purge(&priv->rx_recycle);
  }
  
@@@ -1795,7 -1784,7 +1795,7 @@@ void gfar_start(struct net_device *dev
  }
  
  void gfar_configure_coalescing(struct gfar_private *priv,
 -	unsigned long tx_mask, unsigned long rx_mask)
 +			       unsigned long tx_mask, unsigned long rx_mask)
  {
  	struct gfar __iomem *regs = priv->gfargrp[0].regs;
  	u32 __iomem *baddr;
@@@ -1805,11 -1794,11 +1805,11 @@@
  	 * multiple queues, there's only single reg to program
  	 */
  	gfar_write(&regs->txic, 0);
 -	if(likely(priv->tx_queue[0]->txcoalescing))
 +	if (likely(priv->tx_queue[0]->txcoalescing))
  		gfar_write(&regs->txic, priv->tx_queue[0]->txic);
  
  	gfar_write(&regs->rxic, 0);
 -	if(unlikely(priv->rx_queue[0]->rxcoalescing))
 +	if (unlikely(priv->rx_queue[0]->rxcoalescing))
  		gfar_write(&regs->rxic, priv->rx_queue[0]->rxic);
  
  	if (priv->mode == MQ_MG_MODE) {
@@@ -1836,14 -1825,12 +1836,14 @@@ static int register_grp_irqs(struct gfa
  	int err;
  
  	/* If the device has multiple interrupts, register for
 -	 * them.  Otherwise, only register for the one */
 +	 * them.  Otherwise, only register for the one
 +	 */
  	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
  		/* Install our interrupt handlers for Error,
 -		 * Transmit, and Receive */
 -		if ((err = request_irq(grp->interruptError, gfar_error, 0,
 -				grp->int_name_er,grp)) < 0) {
 +		 * Transmit, and Receive
 +		 */
 +		if ((err = request_irq(grp->interruptError, gfar_error,
 +				       0, grp->int_name_er, grp)) < 0) {
  			netif_err(priv, intr, dev, "Can't get IRQ %d\n",
  				  grp->interruptError);
  
@@@ -1851,21 -1838,21 +1851,21 @@@
  		}
  
  		if ((err = request_irq(grp->interruptTransmit, gfar_transmit,
 -				0, grp->int_name_tx, grp)) < 0) {
 +				       0, grp->int_name_tx, grp)) < 0) {
  			netif_err(priv, intr, dev, "Can't get IRQ %d\n",
  				  grp->interruptTransmit);
  			goto tx_irq_fail;
  		}
  
 -		if ((err = request_irq(grp->interruptReceive, gfar_receive, 0,
 -				grp->int_name_rx, grp)) < 0) {
 +		if ((err = request_irq(grp->interruptReceive, gfar_receive,
 +				       0, grp->int_name_rx, grp)) < 0) {
  			netif_err(priv, intr, dev, "Can't get IRQ %d\n",
  				  grp->interruptReceive);
  			goto rx_irq_fail;
  		}
  	} else {
 -		if ((err = request_irq(grp->interruptTransmit, gfar_interrupt, 0,
 -				grp->int_name_tx, grp)) < 0) {
 +		if ((err = request_irq(grp->interruptTransmit, gfar_interrupt,
 +				       0, grp->int_name_tx, grp)) < 0) {
  			netif_err(priv, intr, dev, "Can't get IRQ %d\n",
  				  grp->interruptTransmit);
  			goto err_irq_fail;
@@@ -1925,9 -1912,8 +1925,9 @@@ irq_fail
  	return err;
  }
  
 -/* Called when something needs to use the ethernet device */
 -/* Returns 0 for success. */
 +/* Called when something needs to use the ethernet device
 + * Returns 0 for success.
 + */
  static int gfar_enet_open(struct net_device *dev)
  {
  	struct gfar_private *priv = netdev_priv(dev);
@@@ -1972,17 -1958,18 +1972,17 @@@ static inline struct txfcb *gfar_add_fc
  }
  
  static inline void gfar_tx_checksum(struct sk_buff *skb, struct txfcb *fcb,
 -		int fcb_length)
 +				    int fcb_length)
  {
 -	u8 flags = 0;
 -
  	/* If we're here, it's a IP packet with a TCP or UDP
  	 * payload.  We set it to checksum, using a pseudo-header
  	 * we provide
  	 */
 -	flags = TXFCB_DEFAULT;
 +	u8 flags = TXFCB_DEFAULT;
  
 -	/* Tell the controller what the protocol is */
 -	/* And provide the already calculated phcs */
 +	/* Tell the controller what the protocol is
 +	 * And provide the already calculated phcs
 +	 */
  	if (ip_hdr(skb)->protocol == IPPROTO_UDP) {
  		flags |= TXFCB_UDP;
  		fcb->phcs = udp_hdr(skb)->check;
@@@ -1992,8 -1979,7 +1992,8 @@@
  	/* l3os is the distance between the start of the
  	 * frame (skb->data) and the start of the IP hdr.
  	 * l4os is the distance between the start of the
 -	 * l3 hdr and the l4 hdr */
 +	 * l3 hdr and the l4 hdr
 +	 */
  	fcb->l3os = (u16)(skb_network_offset(skb) - fcb_length);
  	fcb->l4os = skb_network_header_len(skb);
  
@@@ -2007,7 -1993,7 +2007,7 @@@ void inline gfar_tx_vlan(struct sk_buf
  }
  
  static inline struct txbd8 *skip_txbd(struct txbd8 *bdp, int stride,
 -			       struct txbd8 *base, int ring_size)
 +				      struct txbd8 *base, int ring_size)
  {
  	struct txbd8 *new_bd = bdp + stride;
  
@@@ -2015,14 -2001,13 +2015,14 @@@
  }
  
  static inline struct txbd8 *next_txbd(struct txbd8 *bdp, struct txbd8 *base,
 -		int ring_size)
 +				      int ring_size)
  {
  	return skip_txbd(bdp, 1, base, ring_size);
  }
  
 -/* This is called by the kernel when a frame is ready for transmission. */
 -/* It is pointed to by the dev->hard_start_xmit function pointer */
 +/* This is called by the kernel when a frame is ready for transmission.
 + * It is pointed to by the dev->hard_start_xmit function pointer
 + */
  static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
  {
  	struct gfar_private *priv = netdev_priv(dev);
@@@ -2037,12 -2022,13 +2037,12 @@@
  	unsigned long flags;
  	unsigned int nr_frags, nr_txbds, length, fcb_length = GMAC_FCB_LEN;
  
 -	/*
 -	 * TOE=1 frames larger than 2500 bytes may see excess delays
 +	/* TOE=1 frames larger than 2500 bytes may see excess delays
  	 * before start of transmission.
  	 */
  	if (unlikely(gfar_has_errata(priv, GFAR_ERRATA_76) &&
 -			skb->ip_summed == CHECKSUM_PARTIAL &&
 -			skb->len > 2500)) {
 +		     skb->ip_summed == CHECKSUM_PARTIAL &&
 +		     skb->len > 2500)) {
  		int ret;
  
  		ret = skb_checksum_help(skb);
@@@ -2058,16 -2044,16 +2058,16 @@@
  
  	/* check if time stamp should be generated */
  	if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP &&
 -			priv->hwts_tx_en)) {
 +		     priv->hwts_tx_en)) {
  		do_tstamp = 1;
  		fcb_length = GMAC_FCB_LEN + GMAC_TXPAL_LEN;
  	}
  
  	/* make space for additional header when fcb is needed */
  	if (((skb->ip_summed == CHECKSUM_PARTIAL) ||
 -			vlan_tx_tag_present(skb) ||
 -			unlikely(do_tstamp)) &&
 -			(skb_headroom(skb) < fcb_length)) {
 +	     vlan_tx_tag_present(skb) ||
 +	     unlikely(do_tstamp)) &&
 +	    (skb_headroom(skb) < fcb_length)) {
  		struct sk_buff *skb_new;
  
  		skb_new = skb_realloc_headroom(skb, fcb_length);
@@@ -2077,10 -2063,9 +2077,9 @@@
  			return NETDEV_TX_OK;
  		}
  
- 		/* Steal sock reference for processing TX time stamps */
- 		swap(skb_new->sk, skb->sk);
- 		swap(skb_new->destructor, skb->destructor);
- 		kfree_skb(skb);
+ 		if (skb->sk)
+ 			skb_set_owner_w(skb_new, skb->sk);
+ 		consume_skb(skb);
  		skb = skb_new;
  	}
  
@@@ -2111,12 -2096,12 +2110,12 @@@
  	/* Time stamp insertion requires one additional TxBD */
  	if (unlikely(do_tstamp))
  		txbdp_tstamp = txbdp = next_txbd(txbdp, base,
 -				tx_queue->tx_ring_size);
 +						 tx_queue->tx_ring_size);
  
  	if (nr_frags == 0) {
  		if (unlikely(do_tstamp))
  			txbdp_tstamp->lstatus |= BD_LFLAG(TXBD_LAST |
 -					TXBD_INTERRUPT);
 +							  TXBD_INTERRUPT);
  		else
  			lstatus |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT);
  	} else {
@@@ -2128,7 -2113,7 +2127,7 @@@
  			length = skb_shinfo(skb)->frags[i].size;
  
  			lstatus = txbdp->lstatus | length |
 -				BD_LFLAG(TXBD_READY);
 +				  BD_LFLAG(TXBD_READY);
  
  			/* Handle the last BD specially */
  			if (i == nr_frags - 1)
@@@ -2158,8 -2143,8 +2157,8 @@@
  	if (CHECKSUM_PARTIAL == skb->ip_summed) {
  		fcb = gfar_add_fcb(skb);
  		/* as specified by errata */
 -		if (unlikely(gfar_has_errata(priv, GFAR_ERRATA_12)
 -			     && ((unsigned long)fcb % 0x20) > 0x18)) {
 +		if (unlikely(gfar_has_errata(priv, GFAR_ERRATA_12) &&
 +			     ((unsigned long)fcb % 0x20) > 0x18)) {
  			__skb_pull(skb, GMAC_FCB_LEN);
  			skb_checksum_help(skb);
  		} else {
@@@ -2187,9 -2172,10 +2186,9 @@@
  	}
  
  	txbdp_start->bufPtr = dma_map_single(&priv->ofdev->dev, skb->data,
 -			skb_headlen(skb), DMA_TO_DEVICE);
 +					     skb_headlen(skb), DMA_TO_DEVICE);
  
 -	/*
 -	 * If time stamping is requested one additional TxBD must be set up. The
 +	/* If time stamping is requested one additional TxBD must be set up. The
  	 * first TxBD points to the FCB and must have a data length of
  	 * GMAC_FCB_LEN. The second TxBD points to the actual frame data with
  	 * the full frame length.
@@@ -2197,7 -2183,7 +2196,7 @@@
  	if (unlikely(do_tstamp)) {
  		txbdp_tstamp->bufPtr = txbdp_start->bufPtr + fcb_length;
  		txbdp_tstamp->lstatus |= BD_LFLAG(TXBD_READY) |
 -				(skb_headlen(skb) - fcb_length);
 +					 (skb_headlen(skb) - fcb_length);
  		lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | GMAC_FCB_LEN;
  	} else {
  		lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | skb_headlen(skb);
@@@ -2205,7 -2191,8 +2204,7 @@@
  
  	netdev_tx_sent_queue(txq, skb->len);
  
 -	/*
 -	 * We can work in parallel with gfar_clean_tx_ring(), except
 +	/* We can work in parallel with gfar_clean_tx_ring(), except
  	 * when modifying num_txbdfree. Note that we didn't grab the lock
  	 * when we were reading the num_txbdfree and checking for available
  	 * space, that's because outside of this function it can only grow,
@@@ -2218,7 -2205,8 +2217,7 @@@
  	 */
  	spin_lock_irqsave(&tx_queue->txlock, flags);
  
 -	/*
 -	 * The powerpc-specific eieio() is used, as wmb() has too strong
 +	/* The powerpc-specific eieio() is used, as wmb() has too strong
  	 * semantics (it requires synchronization between cacheable and
  	 * uncacheable mappings, which eieio doesn't provide and which we
  	 * don't need), thus requiring a more expensive sync instruction.  At
@@@ -2234,10 -2222,9 +2233,10 @@@
  	tx_queue->tx_skbuff[tx_queue->skb_curtx] = skb;
  
  	/* Update the current skb pointer to the next entry we will use
 -	 * (wrapping if necessary) */
 +	 * (wrapping if necessary)
 +	 */
  	tx_queue->skb_curtx = (tx_queue->skb_curtx + 1) &
 -		TX_RING_MOD_MASK(tx_queue->tx_ring_size);
 +			      TX_RING_MOD_MASK(tx_queue->tx_ring_size);
  
  	tx_queue->cur_tx = next_txbd(txbdp, base, tx_queue->tx_ring_size);
  
@@@ -2245,8 -2232,7 +2244,8 @@@
  	tx_queue->num_txbdfree -= (nr_txbds);
  
  	/* If the next BD still needs to be cleaned up, then the bds
 -	   are full.  We need to tell the kernel to stop sending us stuff. */
 +	 * are full.  We need to tell the kernel to stop sending us stuff.
 +	 */
  	if (!tx_queue->num_txbdfree) {
  		netif_tx_stop_queue(txq);
  
@@@ -2371,12 -2357,12 +2370,12 @@@ static int gfar_change_mtu(struct net_d
  
  	frame_size += priv->padding;
  
 -	tempsize =
 -	    (frame_size & ~(INCREMENTAL_BUFFER_SIZE - 1)) +
 -	    INCREMENTAL_BUFFER_SIZE;
 +	tempsize = (frame_size & ~(INCREMENTAL_BUFFER_SIZE - 1)) +
 +		   INCREMENTAL_BUFFER_SIZE;
  
  	/* Only stop and start the controller if it isn't already
 -	 * stopped, and we changed something */
 +	 * stopped, and we changed something
 +	 */
  	if ((oldsize != tempsize) && (dev->flags & IFF_UP))
  		stop_gfar(dev);
  
@@@ -2389,12 -2375,11 +2388,12 @@@
  
  	/* If the mtu is larger than the max size for standard
  	 * ethernet frames (ie, a jumbo frame), then set maccfg2
 -	 * to allow huge frames, and to check the length */
 +	 * to allow huge frames, and to check the length
 +	 */
  	tempval = gfar_read(&regs->maccfg2);
  
  	if (priv->rx_buffer_size > DEFAULT_RX_BUFFER_SIZE ||
 -			gfar_has_errata(priv, GFAR_ERRATA_74))
 +	    gfar_has_errata(priv, GFAR_ERRATA_74))
  		tempval |= (MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK);
  	else
  		tempval &= ~(MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK);
@@@ -2415,7 -2400,7 +2414,7 @@@
  static void gfar_reset_task(struct work_struct *work)
  {
  	struct gfar_private *priv = container_of(work, struct gfar_private,
 -			reset_task);
 +						 reset_task);
  	struct net_device *dev = priv->ndev;
  
  	if (dev->flags & IFF_UP) {
@@@ -2442,7 -2427,7 +2441,7 @@@ static void gfar_align_skb(struct sk_bu
  	 * as many bytes as needed to align the data properly
  	 */
  	skb_reserve(skb, RXBUF_ALIGNMENT -
 -		(((unsigned long) skb->data) & (RXBUF_ALIGNMENT - 1)));
 +		    (((unsigned long) skb->data) & (RXBUF_ALIGNMENT - 1)));
  }
  
  /* Interrupt Handler for Transmit complete */
@@@ -2476,7 -2461,8 +2475,7 @@@ static int gfar_clean_tx_ring(struct gf
  
  		frags = skb_shinfo(skb)->nr_frags;
  
 -		/*
 -		 * When time stamping, one additional TxBD must be freed.
 +		/* When time stamping, one additional TxBD must be freed.
  		 * Also, we need to dma_unmap_single() the TxPAL.
  		 */
  		if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS))
@@@ -2490,7 -2476,7 +2489,7 @@@
  
  		/* Only clean completed frames */
  		if ((lstatus & BD_LFLAG(TXBD_READY)) &&
 -				(lstatus & BD_LENGTH_MASK))
 +		    (lstatus & BD_LENGTH_MASK))
  			break;
  
  		if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) {
@@@ -2500,12 -2486,11 +2499,12 @@@
  			buflen = bdp->length;
  
  		dma_unmap_single(&priv->ofdev->dev, bdp->bufPtr,
 -				buflen, DMA_TO_DEVICE);
 +				 buflen, DMA_TO_DEVICE);
  
  		if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) {
  			struct skb_shared_hwtstamps shhwtstamps;
  			u64 *ns = (u64*) (((u32)skb->data + 0x10) & ~0x7);
 +
  			memset(&shhwtstamps, 0, sizeof(shhwtstamps));
  			shhwtstamps.hwtstamp = ns_to_ktime(*ns);
  			skb_pull(skb, GMAC_FCB_LEN + GMAC_TXPAL_LEN);
@@@ -2518,20 -2503,23 +2517,20 @@@
  		bdp = next_txbd(bdp, base, tx_ring_size);
  
  		for (i = 0; i < frags; i++) {
 -			dma_unmap_page(&priv->ofdev->dev,
 -					bdp->bufPtr,
 -					bdp->length,
 -					DMA_TO_DEVICE);
 +			dma_unmap_page(&priv->ofdev->dev, bdp->bufPtr,
 +				       bdp->length, DMA_TO_DEVICE);
  			bdp->lstatus &= BD_LFLAG(TXBD_WRAP);
  			bdp = next_txbd(bdp, base, tx_ring_size);
  		}
  
  		bytes_sent += skb->len;
  
 -		/*
 -		 * If there's room in the queue (limit it to rx_buffer_size)
 +		/* If there's room in the queue (limit it to rx_buffer_size)
  		 * we add this skb back into the pool, if it's the right size
  		 */
  		if (skb_queue_len(&priv->rx_recycle) < rx_queue->rx_ring_size &&
 -				skb_recycle_check(skb, priv->rx_buffer_size +
 -					RXBUF_ALIGNMENT)) {
 +		    skb_recycle_check(skb, priv->rx_buffer_size +
 +				      RXBUF_ALIGNMENT)) {
  			gfar_align_skb(skb);
  			skb_queue_head(&priv->rx_recycle, skb);
  		} else
@@@ -2540,7 -2528,7 +2539,7 @@@
  		tx_queue->tx_skbuff[skb_dirtytx] = NULL;
  
  		skb_dirtytx = (skb_dirtytx + 1) &
 -			TX_RING_MOD_MASK(tx_ring_size);
 +			      TX_RING_MOD_MASK(tx_ring_size);
  
  		howmany++;
  		spin_lock_irqsave(&tx_queue->txlock, flags);
@@@ -2570,7 -2558,8 +2569,7 @@@ static void gfar_schedule_cleanup(struc
  		gfar_write(&gfargrp->regs->imask, IMASK_RTX_DISABLED);
  		__napi_schedule(&gfargrp->napi);
  	} else {
 -		/*
 -		 * Clear IEVENT, so interrupts aren't called again
 +		/* Clear IEVENT, so interrupts aren't called again
  		 * because of the packets that have already arrived.
  		 */
  		gfar_write(&gfargrp->regs->ievent, IEVENT_RTX_MASK);
@@@ -2587,7 -2576,7 +2586,7 @@@ static irqreturn_t gfar_transmit(int ir
  }
  
  static void gfar_new_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp,
 -		struct sk_buff *skb)
 +			   struct sk_buff *skb)
  {
  	struct net_device *dev = rx_queue->dev;
  	struct gfar_private *priv = netdev_priv(dev);
@@@ -2598,7 -2587,7 +2597,7 @@@
  	gfar_init_rxbdp(rx_queue, bdp, buf);
  }
  
 -static struct sk_buff * gfar_alloc_skb(struct net_device *dev)
 +static struct sk_buff *gfar_alloc_skb(struct net_device *dev)
  {
  	struct gfar_private *priv = netdev_priv(dev);
  	struct sk_buff *skb = NULL;
@@@ -2612,7 -2601,7 +2611,7 @@@
  	return skb;
  }
  
 -struct sk_buff * gfar_new_skb(struct net_device *dev)
 +struct sk_buff *gfar_new_skb(struct net_device *dev)
  {
  	struct gfar_private *priv = netdev_priv(dev);
  	struct sk_buff *skb = NULL;
@@@ -2630,7 -2619,8 +2629,7 @@@ static inline void count_errors(unsigne
  	struct net_device_stats *stats = &dev->stats;
  	struct gfar_extra_stats *estats = &priv->extra_stats;
  
 -	/* If the packet was truncated, none of the other errors
 -	 * matter */
 +	/* If the packet was truncated, none of the other errors matter */
  	if (status & RXBD_TRUNCATED) {
  		stats->rx_length_errors++;
  
@@@ -2671,8 -2661,7 +2670,8 @@@ static inline void gfar_rx_checksum(str
  {
  	/* If valid headers were found, and valid sums
  	 * were verified, then we tell the kernel that no
 -	 * checksumming is necessary.  Otherwise, it is */
 +	 * checksumming is necessary.  Otherwise, it is [FIXME]
 +	 */
  	if ((fcb->flags & RXFCB_CSUM_MASK) == (RXFCB_CIP | RXFCB_CTU))
  		skb->ip_summed = CHECKSUM_UNNECESSARY;
  	else
@@@ -2680,7 -2669,8 +2679,7 @@@
  }
  
  
 -/* gfar_process_frame() -- handle one incoming packet if skb
 - * isn't NULL.  */
 +/* gfar_process_frame() -- handle one incoming packet if skb isn't NULL. */
  static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
  			      int amount_pull, struct napi_struct *napi)
  {
@@@ -2692,9 -2682,8 +2691,9 @@@
  	/* fcb is at the beginning if exists */
  	fcb = (struct rxfcb *)skb->data;
  
 -	/* Remove the FCB from the skb */
 -	/* Remove the padded bytes, if there are any */
 +	/* Remove the FCB from the skb
 +	 * Remove the padded bytes, if there are any
 +	 */
  	if (amount_pull) {
  		skb_record_rx_queue(skb, fcb->rq);
  		skb_pull(skb, amount_pull);
@@@ -2704,7 -2693,6 +2703,7 @@@
  	if (priv->hwts_rx_en) {
  		struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb);
  		u64 *ns = (u64 *) skb->data;
 +
  		memset(shhwtstamps, 0, sizeof(*shhwtstamps));
  		shhwtstamps->hwtstamp = ns_to_ktime(*ns);
  	}
@@@ -2718,7 -2706,8 +2717,7 @@@
  	/* Tell the skb what kind of packet this is */
  	skb->protocol = eth_type_trans(skb, dev);
  
 -	/*
 -	 * There's need to check for NETIF_F_HW_VLAN_RX here.
 +	/* There's need to check for NETIF_F_HW_VLAN_RX here.
  	 * Even if vlan rx accel is disabled, on some chips
  	 * RXFCB_VLN is pseudo randomly set.
  	 */
@@@ -2736,8 -2725,8 +2735,8 @@@
  }
  
  /* gfar_clean_rx_ring() -- Processes each frame in the rx ring
 - *   until the budget/quota has been reached. Returns the number
 - *   of frames handled
 + * until the budget/quota has been reached. Returns the number
 + * of frames handled
   */
  int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit)
  {
@@@ -2757,7 -2746,6 +2756,7 @@@
  
  	while (!((bdp->status & RXBD_EMPTY) || (--rx_work_limit < 0))) {
  		struct sk_buff *newskb;
 +
  		rmb();
  
  		/* Add another skb for the future */
@@@ -2766,15 -2754,15 +2765,15 @@@
  		skb = rx_queue->rx_skbuff[rx_queue->skb_currx];
  
  		dma_unmap_single(&priv->ofdev->dev, bdp->bufPtr,
 -				priv->rx_buffer_size, DMA_FROM_DEVICE);
 +				 priv->rx_buffer_size, DMA_FROM_DEVICE);
  
  		if (unlikely(!(bdp->status & RXBD_ERR) &&
 -				bdp->length > priv->rx_buffer_size))
 +			     bdp->length > priv->rx_buffer_size))
  			bdp->status = RXBD_LARGE;
  
  		/* We drop the frame if we failed to allocate a new buffer */
  		if (unlikely(!newskb || !(bdp->status & RXBD_LAST) ||
 -				 bdp->status & RXBD_ERR)) {
 +			     bdp->status & RXBD_ERR)) {
  			count_errors(bdp->status, dev);
  
  			if (unlikely(!newskb))
@@@ -2793,7 -2781,7 +2792,7 @@@
  				rx_queue->stats.rx_bytes += pkt_len;
  				skb_record_rx_queue(skb, rx_queue->qindex);
  				gfar_process_frame(dev, skb, amount_pull,
 -						&rx_queue->grp->napi);
 +						   &rx_queue->grp->napi);
  
  			} else {
  				netif_warn(priv, rx_err, dev, "Missing skb!\n");
@@@ -2812,8 -2800,9 +2811,8 @@@
  		bdp = next_bd(bdp, base, rx_queue->rx_ring_size);
  
  		/* update to point at the next skb */
 -		rx_queue->skb_currx =
 -		    (rx_queue->skb_currx + 1) &
 -		    RX_RING_MOD_MASK(rx_queue->rx_ring_size);
 +		rx_queue->skb_currx = (rx_queue->skb_currx + 1) &
 +				      RX_RING_MOD_MASK(rx_queue->rx_ring_size);
  	}
  
  	/* Update the current rxbd pointer to be the next one */
@@@ -2824,8 -2813,8 +2823,8 @@@
  
  static int gfar_poll(struct napi_struct *napi, int budget)
  {
 -	struct gfar_priv_grp *gfargrp = container_of(napi,
 -			struct gfar_priv_grp, napi);
 +	struct gfar_priv_grp *gfargrp =
 +		container_of(napi, struct gfar_priv_grp, napi);
  	struct gfar_private *priv = gfargrp->priv;
  	struct gfar __iomem *regs = gfargrp->regs;
  	struct gfar_priv_tx_q *tx_queue = NULL;
@@@ -2839,11 -2828,11 +2838,11 @@@
  	budget_per_queue = budget/num_queues;
  
  	/* Clear IEVENT, so interrupts aren't called again
 -	 * because of the packets that have already arrived */
 +	 * because of the packets that have already arrived
 +	 */
  	gfar_write(&regs->ievent, IEVENT_RTX_MASK);
  
  	while (num_queues && left_over_budget) {
 -
  		budget_per_queue = left_over_budget/num_queues;
  		left_over_budget = 0;
  
@@@ -2854,13 -2843,12 +2853,13 @@@
  			tx_queue = priv->tx_queue[rx_queue->qindex];
  
  			tx_cleaned += gfar_clean_tx_ring(tx_queue);
 -			rx_cleaned_per_queue = gfar_clean_rx_ring(rx_queue,
 -							budget_per_queue);
 +			rx_cleaned_per_queue =
 +				gfar_clean_rx_ring(rx_queue, budget_per_queue);
  			rx_cleaned += rx_cleaned_per_queue;
 -			if(rx_cleaned_per_queue < budget_per_queue) {
 +			if (rx_cleaned_per_queue < budget_per_queue) {
  				left_over_budget = left_over_budget +
 -					(budget_per_queue - rx_cleaned_per_queue);
 +					(budget_per_queue -
 +					 rx_cleaned_per_queue);
  				set_bit(i, &serviced_queues);
  				num_queues--;
  			}
@@@ -2878,25 -2866,25 +2877,25 @@@
  
  		gfar_write(&regs->imask, IMASK_DEFAULT);
  
 -		/* If we are coalescing interrupts, update the timer */
 -		/* Otherwise, clear it */
 -		gfar_configure_coalescing(priv,
 -				gfargrp->rx_bit_map, gfargrp->tx_bit_map);
 +		/* If we are coalescing interrupts, update the timer
 +		 * Otherwise, clear it
 +		 */
 +		gfar_configure_coalescing(priv, gfargrp->rx_bit_map,
 +					  gfargrp->tx_bit_map);
  	}
  
  	return rx_cleaned;
  }
  
  #ifdef CONFIG_NET_POLL_CONTROLLER
 -/*
 - * Polling 'interrupt' - used by things like netconsole to send skbs
 +/* Polling 'interrupt' - used by things like netconsole to send skbs
   * without having to re-enable interrupts. It's not called while
   * the interrupt routine is executing.
   */
  static void gfar_netpoll(struct net_device *dev)
  {
  	struct gfar_private *priv = netdev_priv(dev);
 -	int i = 0;
 +	int i;
  
  	/* If the device has multiple interrupts, run tx/rx */
  	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
@@@ -2905,7 -2893,7 +2904,7 @@@
  			disable_irq(priv->gfargrp[i].interruptReceive);
  			disable_irq(priv->gfargrp[i].interruptError);
  			gfar_interrupt(priv->gfargrp[i].interruptTransmit,
 -						&priv->gfargrp[i]);
 +				       &priv->gfargrp[i]);
  			enable_irq(priv->gfargrp[i].interruptError);
  			enable_irq(priv->gfargrp[i].interruptReceive);
  			enable_irq(priv->gfargrp[i].interruptTransmit);
@@@ -2914,7 -2902,7 +2913,7 @@@
  		for (i = 0; i < priv->num_grps; i++) {
  			disable_irq(priv->gfargrp[i].interruptTransmit);
  			gfar_interrupt(priv->gfargrp[i].interruptTransmit,
 -						&priv->gfargrp[i]);
 +				       &priv->gfargrp[i]);
  			enable_irq(priv->gfargrp[i].interruptTransmit);
  		}
  	}
@@@ -2966,8 -2954,7 +2965,8 @@@ static void adjust_link(struct net_devi
  		u32 ecntrl = gfar_read(&regs->ecntrl);
  
  		/* Now we make sure that we can be in full duplex mode.
 -		 * If not, we operate in half-duplex mode. */
 +		 * If not, we operate in half-duplex mode.
 +		 */
  		if (phydev->duplex != priv->oldduplex) {
  			new_state = 1;
  			if (!(phydev->duplex))
@@@ -2993,8 -2980,7 +2992,8 @@@
  				    ((tempval & ~(MACCFG2_IF)) | MACCFG2_MII);
  
  				/* Reduced mode distinguishes
 -				 * between 10 and 100 */
 +				 * between 10 and 100
 +				 */
  				if (phydev->speed == SPEED_100)
  					ecntrl |= ECNTRL_R100;
  				else
@@@ -3033,8 -3019,7 +3032,8 @@@
  /* Update the hash table based on the current list of multicast
   * addresses we subscribe to.  Also, change the promiscuity of
   * the device based on the flags (this function is called
 - * whenever dev->flags is changed */
 + * whenever dev->flags is changed
 + */
  static void gfar_set_multi(struct net_device *dev)
  {
  	struct netdev_hw_addr *ha;
@@@ -3096,8 -3081,7 +3095,8 @@@
  
  		/* If we have extended hash tables, we need to
  		 * clear the exact match registers to prepare for
 -		 * setting them */
 +		 * setting them
 +		 */
  		if (priv->extended_hash) {
  			em_num = GFAR_EM_NUM + 1;
  			gfar_clear_exact_match(dev);
@@@ -3123,14 -3107,13 +3122,14 @@@
  
  
  /* Clears each of the exact match registers to zero, so they
 - * don't interfere with normal reception */
 + * don't interfere with normal reception
 + */
  static void gfar_clear_exact_match(struct net_device *dev)
  {
  	int idx;
  	static const u8 zero_arr[ETH_ALEN] = {0, 0, 0, 0, 0, 0};
  
 -	for(idx = 1;idx < GFAR_EM_NUM + 1;idx++)
 +	for (idx = 1; idx < GFAR_EM_NUM + 1; idx++)
  		gfar_set_mac_for_addr(dev, idx, zero_arr);
  }
  
@@@ -3146,8 -3129,7 +3145,8 @@@
   * hash index which gaddr register to use, and the 5 other bits
   * indicate which bit (assuming an IBM numbering scheme, which
   * for PowerPC (tm) is usually the case) in the register holds
 - * the entry. */
 + * the entry.
 + */
  static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr)
  {
  	u32 tempval;
@@@ -3179,9 -3161,8 +3178,9 @@@ static void gfar_set_mac_for_addr(struc
  
  	macptr += num*2;
  
 -	/* Now copy it into the mac registers backwards, cuz */
 -	/* little endian is silly */
 +	/* Now copy it into the mac registers backwards, cuz
 +	 * little endian is silly
 +	 */
  	for (idx = 0; idx < ETH_ALEN; idx++)
  		tmpbuf[ETH_ALEN - 1 - idx] = addr[idx];
  
@@@ -3213,8 -3194,7 +3212,8 @@@ static irqreturn_t gfar_error(int irq, 
  
  	/* Hmm... */
  	if (netif_msg_rx_err(priv) || netif_msg_tx_err(priv))
 -		netdev_dbg(dev, "error interrupt (ievent=0x%08x imask=0x%08x)\n",
 +		netdev_dbg(dev,
 +			   "error interrupt (ievent=0x%08x imask=0x%08x)\n",
  			   events, gfar_read(&regs->imask));
  
  	/* Update the error counters */
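
The ring-index updates in the gianfar hunks above (skb_curtx, skb_dirtytx and TX_RING_MOD_MASK) depend on the ring size being a power of two, so a wrap is a single AND. A minimal standalone sketch of that pattern, using made-up names (RING_SIZE, ring_next) rather than the driver's own structures:

#include <stdio.h>

#define RING_SIZE      256                      /* must be a power of two */
#define RING_MOD_MASK  (RING_SIZE - 1)          /* same idea as TX_RING_MOD_MASK() */

/* Advance a producer/consumer index with wrap-around. */
static unsigned int ring_next(unsigned int idx)
{
        return (idx + 1) & RING_MOD_MASK;
}

int main(void)
{
        unsigned int idx = RING_SIZE - 1;

        /* Wraps back to slot 0 instead of running off the end. */
        printf("next after %u is %u\n", idx, ring_next(idx));
        return 0;
}
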
diff --combined drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 9313f5c,e242104..59a3f14
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@@ -516,7 -516,7 +516,7 @@@ static void ixgbe_get_hw_control(struc
  			ctrl_ext | IXGBE_CTRL_EXT_DRV_LOAD);
  }
  
 -/*
 +/**
   * ixgbe_set_ivar - set the IVAR registers, mapping interrupt causes to vectors
   * @adapter: pointer to adapter struct
   * @direction: 0 for Rx, 1 for Tx, -1 for other causes
@@@ -790,10 -790,12 +790,10 @@@ static bool ixgbe_clean_tx_irq(struct i
  		total_packets += tx_buffer->gso_segs;
  
  #ifdef CONFIG_IXGBE_PTP
 -		if (unlikely(tx_buffer->tx_flags &
 -			     IXGBE_TX_FLAGS_TSTAMP))
 -			ixgbe_ptp_tx_hwtstamp(q_vector,
 -					      tx_buffer->skb);
 -
 +		if (unlikely(tx_buffer->tx_flags & IXGBE_TX_FLAGS_TSTAMP))
 +			ixgbe_ptp_tx_hwtstamp(q_vector, tx_buffer->skb);
  #endif
 +
  		/* free the skb */
  		dev_kfree_skb_any(tx_buffer->skb);
  
@@@ -1397,7 -1399,8 +1397,7 @@@ static void ixgbe_process_skb_fields(st
  	ixgbe_rx_checksum(rx_ring, rx_desc, skb);
  
  #ifdef CONFIG_IXGBE_PTP
 -	if (ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_STAT_TS))
 -		ixgbe_ptp_rx_hwtstamp(rx_ring->q_vector, skb);
 +	ixgbe_ptp_rx_hwtstamp(rx_ring->q_vector, rx_desc, skb);
  #endif
  
  	if ((dev->features & NETIF_F_HW_VLAN_RX) &&
@@@ -3591,7 -3594,7 +3591,7 @@@ static void ixgbe_napi_disable_all(stru
  }
  
  #ifdef CONFIG_IXGBE_DCB
 -/*
 +/**
   * ixgbe_configure_dcb - Configure DCB hardware
   * @adapter: ixgbe adapter struct
   *
@@@ -3658,11 -3661,11 +3658,11 @@@ static void ixgbe_configure_dcb(struct 
  /* Additional bittime to account for IXGBE framing */
  #define IXGBE_ETH_FRAMING 20
  
 -/*
 +/**
   * ixgbe_hpbthresh - calculate high water mark for flow control
   *
   * @adapter: board private structure to calculate for
 - * @pb - packet buffer to calculate
 + * @pb: packet buffer to calculate
   */
  static int ixgbe_hpbthresh(struct ixgbe_adapter *adapter, int pb)
  {
@@@ -3722,11 -3725,11 +3722,11 @@@
  	return marker;
  }
  
 -/*
 +/**
   * ixgbe_lpbthresh - calculate low water mark for for flow control
   *
   * @adapter: board private structure to calculate for
 - * @pb - packet buffer to calculate
 + * @pb: packet buffer to calculate
   */
  static int ixgbe_lpbthresh(struct ixgbe_adapter *adapter)
  {
@@@ -5243,7 -5246,7 +5243,7 @@@ void ixgbe_update_stats(struct ixgbe_ad
  
  /**
   * ixgbe_fdir_reinit_subtask - worker thread to reinit FDIR filter table
 - * @adapter - pointer to the device adapter structure
 + * @adapter: pointer to the device adapter structure
   **/
  static void ixgbe_fdir_reinit_subtask(struct ixgbe_adapter *adapter)
  {
@@@ -5279,7 -5282,7 +5279,7 @@@
  
  /**
   * ixgbe_check_hang_subtask - check for hung queues and dropped interrupts
 - * @adapter - pointer to the device adapter structure
 + * @adapter: pointer to the device adapter structure
   *
   * This function serves two purposes.  First it strobes the interrupt lines
   * in order to make certain interrupts are occurring.  Secondly it sets the
@@@ -5327,8 -5330,8 +5327,8 @@@ static void ixgbe_check_hang_subtask(st
  
  /**
   * ixgbe_watchdog_update_link - update the link status
 - * @adapter - pointer to the device adapter structure
 - * @link_speed - pointer to a u32 to store the link_speed
 + * @adapter: pointer to the device adapter structure
 + * @link_speed: pointer to a u32 to store the link_speed
   **/
  static void ixgbe_watchdog_update_link(struct ixgbe_adapter *adapter)
  {
@@@ -5371,7 -5374,7 +5371,7 @@@
  /**
   * ixgbe_watchdog_link_is_up - update netif_carrier status and
   *                             print link up message
 - * @adapter - pointer to the device adapter structure
 + * @adapter: pointer to the device adapter structure
   **/
  static void ixgbe_watchdog_link_is_up(struct ixgbe_adapter *adapter)
  {
@@@ -5431,7 -5434,7 +5431,7 @@@
  /**
   * ixgbe_watchdog_link_is_down - update netif_carrier status and
   *                               print link down message
 - * @adapter - pointer to the adapter structure
 + * @adapter: pointer to the adapter structure
   **/
  static void ixgbe_watchdog_link_is_down(struct ixgbe_adapter *adapter)
  {
@@@ -5459,7 -5462,7 +5459,7 @@@
  
  /**
   * ixgbe_watchdog_flush_tx - flush queues on link down
 - * @adapter - pointer to the device adapter structure
 + * @adapter: pointer to the device adapter structure
   **/
  static void ixgbe_watchdog_flush_tx(struct ixgbe_adapter *adapter)
  {
@@@ -5508,7 -5511,7 +5508,7 @@@ static void ixgbe_spoof_check(struct ix
  
  /**
   * ixgbe_watchdog_subtask - check and bring link up
 - * @adapter - pointer to the device adapter structure
 + * @adapter: pointer to the device adapter structure
   **/
  static void ixgbe_watchdog_subtask(struct ixgbe_adapter *adapter)
  {
@@@ -5532,7 -5535,7 +5532,7 @@@
  
  /**
   * ixgbe_sfp_detection_subtask - poll for SFP+ cable
 - * @adapter - the ixgbe adapter structure
 + * @adapter: the ixgbe adapter structure
   **/
  static void ixgbe_sfp_detection_subtask(struct ixgbe_adapter *adapter)
  {
@@@ -5599,7 -5602,7 +5599,7 @@@ sfp_out
  
  /**
   * ixgbe_sfp_link_config_subtask - set up link SFP after module install
 - * @adapter - the ixgbe adapter structure
 + * @adapter: the ixgbe adapter structure
   **/
  static void ixgbe_sfp_link_config_subtask(struct ixgbe_adapter *adapter)
  {
@@@ -6386,12 -6389,17 +6386,12 @@@ static netdev_tx_t ixgbe_xmit_frame(str
  	struct ixgbe_adapter *adapter = netdev_priv(netdev);
  	struct ixgbe_ring *tx_ring;
  
 -	if (skb->len <= 0) {
 -		dev_kfree_skb_any(skb);
 -		return NETDEV_TX_OK;
 -	}
 -
  	/*
  	 * The minimum packet size for olinfo paylen is 17 so pad the skb
  	 * in order to meet this minimum size requirement.
  	 */
 -	if (skb->len < 17) {
 -		if (skb_padto(skb, 17))
 +	if (unlikely(skb->len < 17)) {
 +		if (skb_pad(skb, 17 - skb->len))
  			return NETDEV_TX_OK;
  		skb->len = 17;
  	}
@@@ -6586,9 -6594,8 +6586,9 @@@ static struct rtnl_link_stats64 *ixgbe_
  }
  
  #ifdef CONFIG_IXGBE_DCB
 -/* ixgbe_validate_rtr - verify 802.1Qp to Rx packet buffer mapping is valid.
 - * #adapter: pointer to ixgbe_adapter
 +/**
 + * ixgbe_validate_rtr - verify 802.1Qp to Rx packet buffer mapping is valid.
 + * @adapter: pointer to ixgbe_adapter
   * @tc: number of traffic classes currently enabled
   *
   * Configure a valid 802.1Qp to Rx packet buffer mapping ie confirm
@@@ -6623,8 -6630,8 +6623,8 @@@ static void ixgbe_validate_rtr(struct i
  	return;
  }
  
 -/* ixgbe_setup_tc - routine to configure net_device for multiple traffic
 - * classes.
 +/**
 + * ixgbe_setup_tc - configure net_device for multiple traffic classes
   *
   * @netdev: net device to configure
   * @tc: number of traffic classes to enable
@@@ -6640,6 -6647,11 +6640,11 @@@ int ixgbe_setup_tc(struct net_device *d
  		return -EINVAL;
  	}
  
+ 	if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
+ 		e_err(drv, "Enable failed, SR-IOV enabled\n");
+ 		return -EINVAL;
+ 	}
+ 
  	/* Hardware supports up to 8 traffic classes */
  	if (tc > adapter->dcb_cfg.num_tcs.pg_tcs ||
  	    (hw->mac.type == ixgbe_mac_82598EB &&
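
Most of the ixgbe hunks above are kernel-doc fixes: the opening marker must be /** and each parameter must be written as "@name: description" for scripts/kernel-doc to pick it up; the older "@name - description" form is ignored. A minimal example of the expected shape, with a hypothetical function and struct (example_set_threshold, example_priv) rather than anything from the driver:

/* A struct standing in for the driver's private data (hypothetical). */
struct example_priv {
        int threshold;
};

/**
 * example_set_threshold - program a hypothetical watermark value
 * @priv: driver private structure
 * @pb: packet buffer index to program
 *
 * Returns 0 on success.
 */
static int example_set_threshold(struct example_priv *priv, int pb)
{
        priv->threshold = pb;
        return 0;
}

int main(void)
{
        struct example_priv priv;

        return example_set_threshold(&priv, 3);
}
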
diff --combined drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index ea3bc09,ea3003e..f6b04c1
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@@ -133,12 -133,6 +133,12 @@@ static const u32 default_msg_level = (N
  				      NETIF_MSG_LINK | NETIF_MSG_IFUP |
  				      NETIF_MSG_IFDOWN | NETIF_MSG_TIMER);
  
 +#define STMMAC_DEFAULT_LPI_TIMER	1000
 +static int eee_timer = STMMAC_DEFAULT_LPI_TIMER;
 +module_param(eee_timer, int, S_IRUGO | S_IWUSR);
 +MODULE_PARM_DESC(eee_timer, "LPI tx expiration time in msec");
 +#define STMMAC_LPI_TIMER(x) (jiffies + msecs_to_jiffies(x))
 +
  static irqreturn_t stmmac_interrupt(int irq, void *dev_id);
  
  #ifdef CONFIG_STMMAC_DEBUG_FS
@@@ -167,8 -161,6 +167,8 @@@ static void stmmac_verify_args(void
  		flow_ctrl = FLOW_OFF;
  	if (unlikely((pause < 0) || (pause > 0xffff)))
  		pause = PAUSE_TIME;
 +	if (eee_timer < 0)
 +		eee_timer = STMMAC_DEFAULT_LPI_TIMER;
  }
  
  static void stmmac_clk_csr_set(struct stmmac_priv *priv)
@@@ -237,85 -229,6 +237,85 @@@ static inline void stmmac_hw_fix_mac_sp
  					  phydev->speed);
  }
  
 +static void stmmac_enable_eee_mode(struct stmmac_priv *priv)
 +{
 +	/* Check and enter in LPI mode */
 +	if ((priv->dirty_tx == priv->cur_tx) &&
 +	    (priv->tx_path_in_lpi_mode == false))
 +		priv->hw->mac->set_eee_mode(priv->ioaddr);
 +}
 +
 +void stmmac_disable_eee_mode(struct stmmac_priv *priv)
 +{
 +	/* Exit and disable EEE in case of we are are in LPI state. */
 +	priv->hw->mac->reset_eee_mode(priv->ioaddr);
 +	del_timer_sync(&priv->eee_ctrl_timer);
 +	priv->tx_path_in_lpi_mode = false;
 +}
 +
 +/**
 + * stmmac_eee_ctrl_timer
 + * @arg : data hook
 + * Description:
 + *  If there is no data transfer and if we are not in LPI state,
 + *  then MAC Transmitter can be moved to LPI state.
 + */
 +static void stmmac_eee_ctrl_timer(unsigned long arg)
 +{
 +	struct stmmac_priv *priv = (struct stmmac_priv *)arg;
 +
 +	stmmac_enable_eee_mode(priv);
 +	mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_TIMER(eee_timer));
 +}
 +
 +/**
 + * stmmac_eee_init
 + * @priv: private device pointer
 + * Description:
 + *  If the EEE support has been enabled while configuring the driver,
 + *  if the GMAC actually supports the EEE (from the HW cap reg) and the
 + *  phy can also manage EEE, so enable the LPI state and start the timer
 + *  to verify if the tx path can enter in LPI state.
 + */
 +bool stmmac_eee_init(struct stmmac_priv *priv)
 +{
 +	bool ret = false;
 +
 +	/* MAC core supports the EEE feature. */
 +	if (priv->dma_cap.eee) {
 +		/* Check if the PHY supports EEE */
 +		if (phy_init_eee(priv->phydev, 1))
 +			goto out;
 +
 +		priv->eee_active = 1;
 +		init_timer(&priv->eee_ctrl_timer);
 +		priv->eee_ctrl_timer.function = stmmac_eee_ctrl_timer;
 +		priv->eee_ctrl_timer.data = (unsigned long)priv;
 +		priv->eee_ctrl_timer.expires = STMMAC_LPI_TIMER(eee_timer);
 +		add_timer(&priv->eee_ctrl_timer);
 +
 +		priv->hw->mac->set_eee_timer(priv->ioaddr,
 +					     STMMAC_DEFAULT_LIT_LS_TIMER,
 +					     priv->tx_lpi_timer);
 +
 +		pr_info("stmmac: Energy-Efficient Ethernet initialized\n");
 +
 +		ret = true;
 +	}
 +out:
 +	return ret;
 +}
 +
 +static void stmmac_eee_adjust(struct stmmac_priv *priv)
 +{
 +	/* When the EEE has been already initialised we have to
 +	 * modify the PLS bit in the LPI ctrl & status reg according
 +	 * to the PHY link status. For this reason.
 +	 */
 +	if (priv->eee_enabled)
 +		priv->hw->mac->set_eee_pls(priv->ioaddr, priv->phydev->link);
 +}
 +
  /**
   * stmmac_adjust_link
   * @dev: net device structure
@@@ -336,7 -249,6 +336,7 @@@ static void stmmac_adjust_link(struct n
  	    phydev->addr, phydev->link);
  
  	spin_lock_irqsave(&priv->lock, flags);
 +
  	if (phydev->link) {
  		u32 ctrl = readl(priv->ioaddr + MAC_CTRL_REG);
  
@@@ -403,8 -315,6 +403,8 @@@
  	if (new_state && netif_msg_link(priv))
  		phy_print_status(phydev);
  
 +	stmmac_eee_adjust(priv);
 +
  	spin_unlock_irqrestore(&priv->lock, flags);
  
  	DBG(probe, DEBUG, "stmmac_adjust_link: exiting\n");
@@@ -422,7 -332,7 +422,7 @@@ static int stmmac_init_phy(struct net_d
  {
  	struct stmmac_priv *priv = netdev_priv(dev);
  	struct phy_device *phydev;
 -	char phy_id[MII_BUS_ID_SIZE + 3];
 +	char phy_id_fmt[MII_BUS_ID_SIZE + 3];
  	char bus_id[MII_BUS_ID_SIZE];
  	int interface = priv->plat->interface;
  	priv->oldlink = 0;
@@@ -436,12 -346,11 +436,12 @@@
  		snprintf(bus_id, MII_BUS_ID_SIZE, "stmmac-%x",
  				priv->plat->bus_id);
  
 -	snprintf(phy_id, MII_BUS_ID_SIZE + 3, PHY_ID_FMT, bus_id,
 +	snprintf(phy_id_fmt, MII_BUS_ID_SIZE + 3, PHY_ID_FMT, bus_id,
  		 priv->plat->phy_addr);
 -	pr_debug("stmmac_init_phy:  trying to attach to %s\n", phy_id);
 +	pr_debug("stmmac_init_phy:  trying to attach to %s\n", phy_id_fmt);
  
 -	phydev = phy_connect(dev, phy_id, &stmmac_adjust_link, 0, interface);
 +	phydev = phy_connect(dev, phy_id_fmt, &stmmac_adjust_link, 0,
 +			     interface);
  
  	if (IS_ERR(phydev)) {
  		pr_err("%s: Could not attach to PHY\n", dev->name);
@@@ -768,7 -677,7 +768,7 @@@ static void stmmac_tx(struct stmmac_pri
  
  		priv->hw->desc->release_tx_desc(p);
  
 -		entry = (++priv->dirty_tx) % txsize;
 +		priv->dirty_tx++;
  	}
  	if (unlikely(netif_queue_stopped(priv->dev) &&
  		     stmmac_tx_avail(priv) > STMMAC_TX_THRESH(priv))) {
@@@ -780,11 -689,6 +780,11 @@@
  		}
  		netif_tx_unlock(priv->dev);
  	}
 +
 +	if ((priv->eee_enabled) && (!priv->tx_path_in_lpi_mode)) {
 +		stmmac_enable_eee_mode(priv);
 +		mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_TIMER(eee_timer));
 +	}
  	spin_unlock(&priv->tx_lock);
  }
  
@@@ -1123,17 -1027,6 +1123,17 @@@ static int stmmac_open(struct net_devic
  		}
  	}
  
 +	/* Request the IRQ lines */
 +	if (priv->lpi_irq != -ENXIO) {
 +		ret = request_irq(priv->lpi_irq, stmmac_interrupt, IRQF_SHARED,
 +				  dev->name, dev);
 +		if (unlikely(ret < 0)) {
 +			pr_err("%s: ERROR: allocating the LPI IRQ %d (%d)\n",
 +			       __func__, priv->lpi_irq, ret);
 +			goto open_error_lpiirq;
 +		}
 +	}
 +
  	/* Enable the MAC Rx/Tx */
  	stmmac_set_mac(priv->ioaddr, true);
  
@@@ -1169,19 -1062,12 +1169,19 @@@
  	if (priv->phydev)
  		phy_start(priv->phydev);
  
 +	priv->tx_lpi_timer = STMMAC_DEFAULT_TWT_LS_TIMER;
 +	priv->eee_enabled = stmmac_eee_init(priv);
 +
  	napi_enable(&priv->napi);
  	skb_queue_head_init(&priv->rx_recycle);
  	netif_start_queue(dev);
  
  	return 0;
  
 +open_error_lpiirq:
 +	if (priv->wol_irq != dev->irq)
 +		free_irq(priv->wol_irq, dev);
 +
  open_error_wolirq:
  	free_irq(dev->irq, dev);
  
@@@ -1207,9 -1093,6 +1207,9 @@@ static int stmmac_release(struct net_de
  {
  	struct stmmac_priv *priv = netdev_priv(dev);
  
 +	if (priv->eee_enabled)
 +		del_timer_sync(&priv->eee_ctrl_timer);
 +
  	/* Stop and disconnect the PHY */
  	if (priv->phydev) {
  		phy_stop(priv->phydev);
@@@ -1232,8 -1115,6 +1232,8 @@@
  	free_irq(dev->irq, dev);
  	if (priv->wol_irq != dev->irq)
  		free_irq(priv->wol_irq, dev);
 +	if (priv->lpi_irq != -ENXIO)
 +		free_irq(priv->lpi_irq, dev);
  
  	/* Stop TX/RX DMA and clear the descriptors */
  	priv->hw->dma->stop_tx(priv->ioaddr);
@@@ -1283,9 -1164,6 +1283,9 @@@ static netdev_tx_t stmmac_xmit(struct s
  
  	spin_lock(&priv->tx_lock);
  
 +	if (priv->tx_path_in_lpi_mode)
 +		stmmac_disable_eee_mode(priv);
 +
  	entry = priv->cur_tx % txsize;
  
  #ifdef STMMAC_XMIT_DEBUG
@@@ -1334,6 -1212,7 +1334,7 @@@
  		priv->hw->desc->prepare_tx_desc(desc, 0, len, csum_insertion);
  		wmb();
  		priv->hw->desc->set_tx_owner(desc);
+ 		wmb();
  	}
  
  	/* Interrupt on completition only for the latest segment */
@@@ -1349,6 -1228,7 +1350,7 @@@
  
  	/* To avoid raise condition */
  	priv->hw->desc->set_tx_owner(first);
+ 	wmb();
  
  	priv->cur_tx++;
  
@@@ -1412,6 -1292,7 +1414,7 @@@ static inline void stmmac_rx_refill(str
  		}
  		wmb();
  		priv->hw->desc->set_rx_owner(p + entry);
+ 		wmb();
  	}
  }
  
@@@ -1430,6 -1311,7 +1433,6 @@@ static int stmmac_rx(struct stmmac_pri
  		display_ring(priv->dma_rx, rxsize);
  	}
  #endif
 -	count = 0;
  	while (!priv->hw->desc->get_rx_owner(p)) {
  		int status;
  
@@@ -1662,37 -1544,10 +1665,37 @@@ static irqreturn_t stmmac_interrupt(in
  		return IRQ_NONE;
  	}
  
 -	if (priv->plat->has_gmac)
 -		/* To handle GMAC own interrupts */
 -		priv->hw->mac->host_irq_status((void __iomem *) dev->base_addr);
 +	/* To handle GMAC own interrupts */
 +	if (priv->plat->has_gmac) {
 +		int status = priv->hw->mac->host_irq_status((void __iomem *)
 +							    dev->base_addr);
 +		if (unlikely(status)) {
 +			if (status & core_mmc_tx_irq)
 +				priv->xstats.mmc_tx_irq_n++;
 +			if (status & core_mmc_rx_irq)
 +				priv->xstats.mmc_rx_irq_n++;
 +			if (status & core_mmc_rx_csum_offload_irq)
 +				priv->xstats.mmc_rx_csum_offload_irq_n++;
 +			if (status & core_irq_receive_pmt_irq)
 +				priv->xstats.irq_receive_pmt_irq_n++;
 +
 +			/* For LPI we need to save the tx status */
 +			if (status & core_irq_tx_path_in_lpi_mode) {
 +				priv->xstats.irq_tx_path_in_lpi_mode_n++;
 +				priv->tx_path_in_lpi_mode = true;
 +			}
 +			if (status & core_irq_tx_path_exit_lpi_mode) {
 +				priv->xstats.irq_tx_path_exit_lpi_mode_n++;
 +				priv->tx_path_in_lpi_mode = false;
 +			}
 +			if (status & core_irq_rx_path_in_lpi_mode)
 +				priv->xstats.irq_rx_path_in_lpi_mode_n++;
 +			if (status & core_irq_rx_path_exit_lpi_mode)
 +				priv->xstats.irq_rx_path_exit_lpi_mode_n++;
 +		}
 +	}
  
 +	/* To handle DMA interrupts */
  	stmmac_dma_interrupt(priv);
  
  	return IRQ_HANDLED;
@@@ -2278,38 -2133,42 +2281,38 @@@ static int __init stmmac_cmdline_opt(ch
  		return -EINVAL;
  	while ((opt = strsep(&str, ",")) != NULL) {
  		if (!strncmp(opt, "debug:", 6)) {
 -			if (strict_strtoul(opt + 6, 0, (unsigned long *)&debug))
 +			if (kstrtoint(opt + 6, 0, &debug))
  				goto err;
  		} else if (!strncmp(opt, "phyaddr:", 8)) {
 -			if (strict_strtoul(opt + 8, 0,
 -					   (unsigned long *)&phyaddr))
 +			if (kstrtoint(opt + 8, 0, &phyaddr))
  				goto err;
  		} else if (!strncmp(opt, "dma_txsize:", 11)) {
 -			if (strict_strtoul(opt + 11, 0,
 -					   (unsigned long *)&dma_txsize))
 +			if (kstrtoint(opt + 11, 0, &dma_txsize))
  				goto err;
  		} else if (!strncmp(opt, "dma_rxsize:", 11)) {
 -			if (strict_strtoul(opt + 11, 0,
 -					   (unsigned long *)&dma_rxsize))
 +			if (kstrtoint(opt + 11, 0, &dma_rxsize))
  				goto err;
  		} else if (!strncmp(opt, "buf_sz:", 7)) {
 -			if (strict_strtoul(opt + 7, 0,
 -					   (unsigned long *)&buf_sz))
 +			if (kstrtoint(opt + 7, 0, &buf_sz))
  				goto err;
  		} else if (!strncmp(opt, "tc:", 3)) {
 -			if (strict_strtoul(opt + 3, 0, (unsigned long *)&tc))
 +			if (kstrtoint(opt + 3, 0, &tc))
  				goto err;
  		} else if (!strncmp(opt, "watchdog:", 9)) {
 -			if (strict_strtoul(opt + 9, 0,
 -					   (unsigned long *)&watchdog))
 +			if (kstrtoint(opt + 9, 0, &watchdog))
  				goto err;
  		} else if (!strncmp(opt, "flow_ctrl:", 10)) {
 -			if (strict_strtoul(opt + 10, 0,
 -					   (unsigned long *)&flow_ctrl))
 +			if (kstrtoint(opt + 10, 0, &flow_ctrl))
  				goto err;
  		} else if (!strncmp(opt, "pause:", 6)) {
 -			if (strict_strtoul(opt + 6, 0, (unsigned long *)&pause))
 +			if (kstrtoint(opt + 6, 0, &pause))
 +				goto err;
 +		} else if (!strncmp(opt, "eee_timer:", 6)) {
 +			if (kstrtoint(opt + 10, 0, &eee_timer))
  				goto err;
  #ifdef CONFIG_STMMAC_TIMER
  		} else if (!strncmp(opt, "tmrate:", 7)) {
 -			if (strict_strtoul(opt + 7, 0,
 -					   (unsigned long *)&tmrate))
 +			if (kstrtoint(opt + 7, 0, &tmrate))
  				goto err;
  #endif
  		}
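
The stmmac_cmdline_opt() rework above swaps strict_strtoul() casts for kstrtoint(), which parses straight into an int and rejects trailing junk. A rough userspace sketch of the same parse-and-validate pattern, using strtol() and an invented helper name (parse_int_opt) instead of the in-kernel API:

#include <errno.h>
#include <limits.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Parse "name:value" the way the command-line loop above does, with
 * strtol() standing in for the kernel's kstrtoint(). */
static int parse_int_opt(const char *opt, const char *name, int *res)
{
        size_t n = strlen(name);
        char *end;
        long val;

        if (strncmp(opt, name, n))
                return -EINVAL;         /* not this option */

        errno = 0;
        val = strtol(opt + n, &end, 0);
        if (errno || *end != '\0' || val < INT_MIN || val > INT_MAX)
                return -EINVAL;         /* reject overflow and trailing junk */

        *res = (int)val;
        return 0;
}

int main(void)
{
        int eee_timer = 0;

        if (!parse_int_opt("eee_timer:2000", "eee_timer:", &eee_timer))
                printf("eee_timer = %d\n", eee_timer);
        return 0;
}
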
diff --combined drivers/net/usb/qmi_wwan.c
index b9cc5f7,a051ced..85c983d
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@@ -1,10 -1,6 +1,10 @@@
  /*
   * Copyright (c) 2012  Bjørn Mork <bjorn at mork.no>
   *
 + * The probing code is heavily inspired by cdc_ether, which is:
 + * Copyright (C) 2003-2005 by David Brownell
 + * Copyright (C) 2006 by Ole Andre Vadla Ravnas (ActiveSync)
 + *
   * This program is free software; you can redistribute it and/or
   * modify it under the terms of the GNU General Public License
   * version 2 as published by the Free Software Foundation.
@@@ -19,7 -15,11 +19,7 @@@
  #include <linux/usb/usbnet.h>
  #include <linux/usb/cdc-wdm.h>
  
 -/* The name of the CDC Device Management driver */
 -#define DM_DRIVER "cdc_wdm"
 -
 -/*
 - * This driver supports wwan (3G/LTE/?) devices using a vendor
 +/* This driver supports wwan (3G/LTE/?) devices using a vendor
   * specific management protocol called Qualcomm MSM Interface (QMI) -
   * in addition to the more common AT commands over serial interface
   * management
@@@ -31,117 -31,59 +31,117 @@@
   * management protocol is used in place of the standard CDC
   * notifications NOTIFY_NETWORK_CONNECTION and NOTIFY_SPEED_CHANGE
   *
 + * Alternatively, control and data functions can be combined in a
 + * single USB interface.
 + *
   * Handling a protocol like QMI is out of the scope for any driver.
 - * It can be exported as a character device using the cdc-wdm driver,
 - * which will enable userspace applications ("modem managers") to
 - * handle it.  This may be required to use the network interface
 - * provided by the driver.
 + * It is exported as a character device using the cdc-wdm driver as
 + * a subdriver, enabling userspace applications ("modem managers") to
 + * handle it.
   *
   * These devices may alternatively/additionally be configured using AT
 - * commands on any of the serial interfaces driven by the option driver
 - *
 - * This driver binds only to the data ("slave") interface to enable
 - * the cdc-wdm driver to bind to the control interface.  It still
 - * parses the CDC functional descriptors on the control interface to
 - *  a) verify that this is indeed a handled interface (CDC Union
 - *     header lists it as slave)
 - *  b) get MAC address and other ethernet config from the CDC Ethernet
 - *     header
 - *  c) enable user bind requests against the control interface, which
 - *     is the common way to bind to CDC Ethernet Control Model type
 - *     interfaces
 - *  d) provide a hint to the user about which interface is the
 - *     corresponding management interface
 + * commands on a serial interface
   */
  
 +/* driver specific data */
 +struct qmi_wwan_state {
 +	struct usb_driver *subdriver;
 +	atomic_t pmcount;
 +	unsigned long unused;
 +	struct usb_interface *control;
 +	struct usb_interface *data;
 +};
 +
 +/* using a counter to merge subdriver requests with our own into a combined state */
 +static int qmi_wwan_manage_power(struct usbnet *dev, int on)
 +{
 +	struct qmi_wwan_state *info = (void *)&dev->data;
 +	int rv = 0;
 +
 +	dev_dbg(&dev->intf->dev, "%s() pmcount=%d, on=%d\n", __func__, atomic_read(&info->pmcount), on);
 +
 +	if ((on && atomic_add_return(1, &info->pmcount) == 1) || (!on && atomic_dec_and_test(&info->pmcount))) {
 +		/* need autopm_get/put here to ensure the usbcore sees the new value */
 +		rv = usb_autopm_get_interface(dev->intf);
 +		if (rv < 0)
 +			goto err;
 +		dev->intf->needs_remote_wakeup = on;
 +		usb_autopm_put_interface(dev->intf);
 +	}
 +err:
 +	return rv;
 +}
 +
 +static int qmi_wwan_cdc_wdm_manage_power(struct usb_interface *intf, int on)
 +{
 +	struct usbnet *dev = usb_get_intfdata(intf);
 +
 +	/* can be called while disconnecting */
 +	if (!dev)
 +		return 0;
 +	return qmi_wwan_manage_power(dev, on);
 +}
 +
 +/* collect all three endpoints and register subdriver */
 +static int qmi_wwan_register_subdriver(struct usbnet *dev)
 +{
 +	int rv;
 +	struct usb_driver *subdriver = NULL;
 +	struct qmi_wwan_state *info = (void *)&dev->data;
 +
 +	/* collect bulk endpoints */
 +	rv = usbnet_get_endpoints(dev, info->data);
 +	if (rv < 0)
 +		goto err;
 +
 +	/* update status endpoint if separate control interface */
 +	if (info->control != info->data)
 +		dev->status = &info->control->cur_altsetting->endpoint[0];
 +
 +	/* require interrupt endpoint for subdriver */
 +	if (!dev->status) {
 +		rv = -EINVAL;
 +		goto err;
 +	}
 +
 +	/* for subdriver power management */
 +	atomic_set(&info->pmcount, 0);
 +
 +	/* register subdriver */
 +	subdriver = usb_cdc_wdm_register(info->control, &dev->status->desc, 512, &qmi_wwan_cdc_wdm_manage_power);
 +	if (IS_ERR(subdriver)) {
 +		dev_err(&info->control->dev, "subdriver registration failed\n");
 +		rv = PTR_ERR(subdriver);
 +		goto err;
 +	}
 +
 +	/* prevent usbnet from using status endpoint */
 +	dev->status = NULL;
 +
 +	/* save subdriver struct for suspend/resume wrappers */
 +	info->subdriver = subdriver;
 +
 +err:
 +	return rv;
 +}
 +
  static int qmi_wwan_bind(struct usbnet *dev, struct usb_interface *intf)
  {
  	int status = -1;
 -	struct usb_interface *control = NULL;
  	u8 *buf = intf->cur_altsetting->extra;
  	int len = intf->cur_altsetting->extralen;
  	struct usb_interface_descriptor *desc = &intf->cur_altsetting->desc;
  	struct usb_cdc_union_desc *cdc_union = NULL;
  	struct usb_cdc_ether_desc *cdc_ether = NULL;
 -	u32 required = 1 << USB_CDC_HEADER_TYPE | 1 << USB_CDC_UNION_TYPE;
  	u32 found = 0;
 -	atomic_t *pmcount = (void *)&dev->data[1];
 -
 -	atomic_set(pmcount, 0);
 +	struct usb_driver *driver = driver_of(intf);
 +	struct qmi_wwan_state *info = (void *)&dev->data;
  
 -	/*
 -	 * assume a data interface has no additional descriptors and
 -	 * that the control and data interface are numbered
 -	 * consecutively - this holds for the Huawei device at least
 -	 */
 -	if (len == 0 && desc->bInterfaceNumber > 0) {
 -		control = usb_ifnum_to_if(dev->udev, desc->bInterfaceNumber - 1);
 -		if (!control)
 -			goto err;
 +	BUILD_BUG_ON((sizeof(((struct usbnet *)0)->data) < sizeof(struct qmi_wwan_state)));
  
 -		buf = control->cur_altsetting->extra;
 -		len = control->cur_altsetting->extralen;
 -		dev_dbg(&intf->dev, "guessing \"control\" => %s, \"data\" => this\n",
 -			dev_name(&control->dev));
 -	}
 +	/* require a single interrupt status endpoint for subdriver */
 +	if (intf->cur_altsetting->desc.bNumEndpoints != 1)
 +		goto err;
  
  	while (len > 3) {
  		struct usb_descriptor_header *h = (void *)buf;
@@@ -200,23 -142,15 +200,23 @@@ next_desc
  	}
  
  	/* did we find all the required ones? */
 -	if ((found & required) != required) {
 +	if (!(found & (1 << USB_CDC_HEADER_TYPE)) ||
 +	    !(found & (1 << USB_CDC_UNION_TYPE))) {
  		dev_err(&intf->dev, "CDC functional descriptors missing\n");
  		goto err;
  	}
  
 -	/* give the user a helpful hint if trying to bind to the wrong interface */
 -	if (cdc_union && desc->bInterfaceNumber == cdc_union->bMasterInterface0) {
 -		dev_err(&intf->dev, "leaving \"control\" interface for " DM_DRIVER " - try binding to %s instead!\n",
 -			dev_name(&usb_ifnum_to_if(dev->udev, cdc_union->bSlaveInterface0)->dev));
 +	/* verify CDC Union */
 +	if (desc->bInterfaceNumber != cdc_union->bMasterInterface0) {
 +		dev_err(&intf->dev, "bogus CDC Union: master=%u\n", cdc_union->bMasterInterface0);
 +		goto err;
 +	}
 +
 +	/* need to save these for unbind */
 +	info->control = intf;
 +	info->data = usb_ifnum_to_if(dev->udev,	cdc_union->bSlaveInterface0);
 +	if (!info->data) {
 +		dev_err(&intf->dev, "bogus CDC Union: slave=%u\n", cdc_union->bSlaveInterface0);
  		goto err;
  	}
  
@@@ -226,29 -160,63 +226,29 @@@
  		usbnet_get_ethernet_addr(dev, cdc_ether->iMACAddress);
  	}
  
 -	/* success! point the user to the management interface */
 -	if (control)
 -		dev_info(&intf->dev, "Use \"" DM_DRIVER "\" for QMI interface %s\n",
 -			dev_name(&control->dev));
 -
 -	/* XXX: add a sysfs symlink somewhere to help management applications find it? */
 +	/* claim data interface and set it up */
 +	status = usb_driver_claim_interface(driver, info->data, dev);
 +	if (status < 0)
 +		goto err;
  
 -	/* collect bulk endpoints now that we know intf == "data" interface */
 -	status = usbnet_get_endpoints(dev, intf);
 +	status = qmi_wwan_register_subdriver(dev);
 +	if (status < 0) {
 +		usb_set_intfdata(info->data, NULL);
 +		usb_driver_release_interface(driver, info->data);
 +	}
  
  err:
  	return status;
  }
  
  /* Some devices combine the "control" and "data" functions into a
   * single interface with all three endpoints: interrupt + bulk in and
   * out
 - *
 - * Setting up cdc-wdm as a subdriver owning the interrupt endpoint
 - * will let it provide userspace access to the encapsulated QMI
 - * protocol without interfering with the usbnet operations.
 -  */
 + */
  static int qmi_wwan_bind_shared(struct usbnet *dev, struct usb_interface *intf)
  {
  	int rv;
 -	struct usb_driver *subdriver = NULL;
 -	atomic_t *pmcount = (void *)&dev->data[1];
 +	struct qmi_wwan_state *info = (void *)&dev->data;
  
  	/* ZTE makes devices where the interface descriptors and endpoint
  	 * configurations of two or more interfaces are identical, even
@@@ -264,39 -232,43 +264,39 @@@
  		goto err;
  	}
  
 -	atomic_set(pmcount, 0);
 -
 -	/* collect all three endpoints */
 -	rv = usbnet_get_endpoints(dev, intf);
 -	if (rv < 0)
 -		goto err;
 -
 -	/* require interrupt endpoint for subdriver */
 -	if (!dev->status) {
 -		rv = -EINVAL;
 -		goto err;
 -	}
 -
 -	subdriver = usb_cdc_wdm_register(intf, &dev->status->desc, 512, &qmi_wwan_cdc_wdm_manage_power);
 -	if (IS_ERR(subdriver)) {
 -		rv = PTR_ERR(subdriver);
 -		goto err;
 -	}
 -
 -	/* can't let usbnet use the interrupt endpoint */
 -	dev->status = NULL;
 -
 -	/* save subdriver struct for suspend/resume wrappers */
 -	dev->data[0] = (unsigned long)subdriver;
 +	/*  control and data is shared */
 +	info->control = intf;
 +	info->data = intf;
 +	rv = qmi_wwan_register_subdriver(dev);
  
  err:
  	return rv;
  }
  
 -static void qmi_wwan_unbind_shared(struct usbnet *dev, struct usb_interface *intf)
 +static void qmi_wwan_unbind(struct usbnet *dev, struct usb_interface *intf)
  {
 -	struct usb_driver *subdriver = (void *)dev->data[0];
 -
 -	if (subdriver && subdriver->disconnect)
 -		subdriver->disconnect(intf);
 +	struct qmi_wwan_state *info = (void *)&dev->data;
 +	struct usb_driver *driver = driver_of(intf);
 +	struct usb_interface *other;
 +
 +	if (info->subdriver && info->subdriver->disconnect)
 +		info->subdriver->disconnect(info->control);
 +
 +	/* allow user to unbind using either control or data */
 +	if (intf == info->control)
 +		other = info->data;
 +	else
 +		other = info->control;
 +
 +	/* only if not shared */
 +	if (other && intf != other) {
 +		usb_set_intfdata(other, NULL);
 +		usb_driver_release_interface(driver, other);
 +	}
  
 -	dev->data[0] = (unsigned long)NULL;
 +	info->subdriver = NULL;
 +	info->data = NULL;
 +	info->control = NULL;
  }
  
  /* suspend/resume wrappers calling both usbnet and the cdc-wdm
@@@ -308,15 -280,15 +308,15 @@@
  static int qmi_wwan_suspend(struct usb_interface *intf, pm_message_t message)
  {
  	struct usbnet *dev = usb_get_intfdata(intf);
 -	struct usb_driver *subdriver = (void *)dev->data[0];
 +	struct qmi_wwan_state *info = (void *)&dev->data;
  	int ret;
  
  	ret = usbnet_suspend(intf, message);
  	if (ret < 0)
  		goto err;
  
 -	if (subdriver && subdriver->suspend)
 -		ret = subdriver->suspend(intf, message);
 +	if (info->subdriver && info->subdriver->suspend)
 +		ret = info->subdriver->suspend(intf, message);
  	if (ret < 0)
  		usbnet_resume(intf);
  err:
@@@ -326,33 -298,33 +326,33 @@@
  static int qmi_wwan_resume(struct usb_interface *intf)
  {
  	struct usbnet *dev = usb_get_intfdata(intf);
 -	struct usb_driver *subdriver = (void *)dev->data[0];
 +	struct qmi_wwan_state *info = (void *)&dev->data;
  	int ret = 0;
  
 -	if (subdriver && subdriver->resume)
 -		ret = subdriver->resume(intf);
 +	if (info->subdriver && info->subdriver->resume)
 +		ret = info->subdriver->resume(intf);
  	if (ret < 0)
  		goto err;
  	ret = usbnet_resume(intf);
 -	if (ret < 0 && subdriver && subdriver->resume && subdriver->suspend)
 -		subdriver->suspend(intf, PMSG_SUSPEND);
 +	if (ret < 0 && info->subdriver && info->subdriver->resume && info->subdriver->suspend)
 +		info->subdriver->suspend(intf, PMSG_SUSPEND);
  err:
  	return ret;
  }
  
 -
  static const struct driver_info	qmi_wwan_info = {
 -	.description	= "QMI speaking wwan device",
 +	.description	= "WWAN/QMI device",
  	.flags		= FLAG_WWAN,
  	.bind		= qmi_wwan_bind,
 +	.unbind		= qmi_wwan_unbind,
  	.manage_power	= qmi_wwan_manage_power,
  };
  
  static const struct driver_info	qmi_wwan_shared = {
 -	.description	= "QMI speaking wwan device with combined interface",
 +	.description	= "WWAN/QMI device",
  	.flags		= FLAG_WWAN,
  	.bind		= qmi_wwan_bind_shared,
 -	.unbind		= qmi_wwan_unbind_shared,
 +	.unbind		= qmi_wwan_unbind,
  	.manage_power	= qmi_wwan_manage_power,
  };
  
@@@ -360,7 -332,7 +360,7 @@@ static const struct driver_info	qmi_wwa
  	.description	= "Qualcomm WWAN/QMI device",
  	.flags		= FLAG_WWAN,
  	.bind		= qmi_wwan_bind_shared,
 -	.unbind		= qmi_wwan_unbind_shared,
 +	.unbind		= qmi_wwan_unbind,
  	.manage_power	= qmi_wwan_manage_power,
  	.data		= BIT(0), /* interface whitelist bitmap */
  };
@@@ -369,16 -341,25 +369,25 @@@ static const struct driver_info	qmi_wwa
  	.description	= "Qualcomm WWAN/QMI device",
  	.flags		= FLAG_WWAN,
  	.bind		= qmi_wwan_bind_shared,
 -	.unbind		= qmi_wwan_unbind_shared,
 +	.unbind		= qmi_wwan_unbind,
  	.manage_power	= qmi_wwan_manage_power,
  	.data		= BIT(1), /* interface whitelist bitmap */
  };
  
+ static const struct driver_info qmi_wwan_force_int2 = {
+ 	.description	= "Qualcomm WWAN/QMI device",
+ 	.flags		= FLAG_WWAN,
+ 	.bind		= qmi_wwan_bind_shared,
 -	.unbind		= qmi_wwan_unbind_shared,
++	.unbind		= qmi_wwan_unbind,
+ 	.manage_power	= qmi_wwan_manage_power,
+ 	.data		= BIT(2), /* interface whitelist bitmap */
+ };
+ 
  static const struct driver_info	qmi_wwan_force_int3 = {
  	.description	= "Qualcomm WWAN/QMI device",
  	.flags		= FLAG_WWAN,
  	.bind		= qmi_wwan_bind_shared,
 -	.unbind		= qmi_wwan_unbind_shared,
 +	.unbind		= qmi_wwan_unbind,
  	.manage_power	= qmi_wwan_manage_power,
  	.data		= BIT(3), /* interface whitelist bitmap */
  };
@@@ -387,7 -368,7 +396,7 @@@ static const struct driver_info	qmi_wwa
  	.description	= "Qualcomm WWAN/QMI device",
  	.flags		= FLAG_WWAN,
  	.bind		= qmi_wwan_bind_shared,
 -	.unbind		= qmi_wwan_unbind_shared,
 +	.unbind		= qmi_wwan_unbind,
  	.manage_power	= qmi_wwan_manage_power,
  	.data		= BIT(4), /* interface whitelist bitmap */
  };
@@@ -409,7 -390,7 +418,7 @@@ static const struct driver_info	qmi_wwa
  	.description	= "Sierra Wireless wwan/QMI device",
  	.flags		= FLAG_WWAN,
  	.bind		= qmi_wwan_bind_shared,
 -	.unbind		= qmi_wwan_unbind_shared,
 +	.unbind		= qmi_wwan_unbind,
  	.manage_power	= qmi_wwan_manage_power,
  	.data		= BIT(8) | BIT(19), /* interface whitelist bitmap */
  };
@@@ -432,7 -413,7 +441,7 @@@ static const struct usb_device_id produ
  		.idVendor           = HUAWEI_VENDOR_ID,
  		.bInterfaceClass    = USB_CLASS_VENDOR_SPEC,
  		.bInterfaceSubClass = 1,
 -		.bInterfaceProtocol = 8, /* NOTE: This is the *slave* interface of the CDC Union! */
 +		.bInterfaceProtocol = 9, /* CDC Ethernet *control* interface */
  		.driver_info        = (unsigned long)&qmi_wwan_info,
  	},
  	{	/* Vodafone/Huawei K5005 (12d1:14c8) and similar modems */
@@@ -440,7 -421,7 +449,7 @@@
  		.idVendor           = HUAWEI_VENDOR_ID,
  		.bInterfaceClass    = USB_CLASS_VENDOR_SPEC,
  		.bInterfaceSubClass = 1,
 -		.bInterfaceProtocol = 56, /* NOTE: This is the *slave* interface of the CDC Union! */
 +		.bInterfaceProtocol = 57, /* CDC Ethernet *control* interface */
  		.driver_info        = (unsigned long)&qmi_wwan_info,
  	},
  	{	/* Huawei E392, E398 and possibly others in "Windows mode"
@@@ -526,6 -507,15 +535,15 @@@
  		.bInterfaceProtocol = 0xff,
  		.driver_info        = (unsigned long)&qmi_wwan_force_int4,
  	},
+ 	{	/* ZTE MF60 */
+ 		.match_flags	    = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO,
+ 		.idVendor           = 0x19d2,
+ 		.idProduct          = 0x1402,
+ 		.bInterfaceClass    = 0xff,
+ 		.bInterfaceSubClass = 0xff,
+ 		.bInterfaceProtocol = 0xff,
+ 		.driver_info        = (unsigned long)&qmi_wwan_force_int2,
+ 	},
  	{	/* Sierra Wireless MC77xx in QMI mode */
  		.match_flags	    = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO,
  		.idVendor           = 0x1199,
@@@ -594,7 -584,17 +612,7 @@@ static struct usb_driver qmi_wwan_drive
  	.disable_hub_initiated_lpm = 1,
  };
  
 -static int __init qmi_wwan_init(void)
 -{
 -	return usb_register(&qmi_wwan_driver);
 -}
 -module_init(qmi_wwan_init);
 -
 -static void __exit qmi_wwan_exit(void)
 -{
 -	usb_deregister(&qmi_wwan_driver);
 -}
 -module_exit(qmi_wwan_exit);
 +module_usb_driver(qmi_wwan_driver);
  
  MODULE_AUTHOR("Bjørn Mork <bjorn at mork.no>");
  MODULE_DESCRIPTION("Qualcomm MSM Interface (QMI) WWAN driver");
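
The qmi_wwan rework above overlays struct qmi_wwan_state on the usbnet scratch area (dev->data) and guards the overlay with BUILD_BUG_ON(). A standalone sketch of that compile-time size check using C11 _Static_assert and invented types (generic_dev, driver_state) in place of the usbnet ones:

#include <stdio.h>
#include <string.h>

/* Generic scratch space, standing in for usbnet's dev->data[]. */
struct generic_dev {
        unsigned long data[5];
};

/* Driver-private view of that scratch space. */
struct driver_state {
        void *subdriver;
        int   pmcount;
        void *control;
        void *data_intf;
};

/* Fails to compile if the private state outgrows the scratch area,
 * which is what BUILD_BUG_ON() enforces in the driver. */
_Static_assert(sizeof(struct driver_state) <=
               sizeof(((struct generic_dev *)0)->data),
               "driver_state does not fit in generic_dev.data");

int main(void)
{
        struct generic_dev dev;
        struct driver_state *st = (void *)&dev.data;

        memset(st, 0, sizeof(*st));
        st->pmcount = 1;
        printf("pmcount=%d\n", st->pmcount);
        return 0;
}
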
diff --combined drivers/net/wireless/b43legacy/dma.c
index ff50cb4,c8baf02..2d3c664
--- a/drivers/net/wireless/b43legacy/dma.c
+++ b/drivers/net/wireless/b43legacy/dma.c
@@@ -52,7 -52,7 +52,7 @@@ struct b43legacy_dmadesc32 *op32_idx2de
  	desc = ring->descbase;
  	desc = &(desc[slot]);
  
 -	return (struct b43legacy_dmadesc32 *)desc;
 +	return desc;
  }
  
  static void op32_fill_descriptor(struct b43legacy_dmaring *ring,
@@@ -1072,7 -1072,7 +1072,7 @@@ static int dma_tx_fragment(struct b43le
  	meta->dmaaddr = map_descbuffer(ring, skb->data, skb->len, 1);
  	/* create a bounce buffer in zone_dma on mapping failure. */
  	if (b43legacy_dma_mapping_error(ring, meta->dmaaddr, skb->len, 1)) {
- 		bounce_skb = __dev_alloc_skb(skb->len, GFP_ATOMIC | GFP_DMA);
+ 		bounce_skb = alloc_skb(skb->len, GFP_ATOMIC | GFP_DMA);
  		if (!bounce_skb) {
  			ring->current_slot = old_top_slot;
  			ring->used_slots = old_used_slots;
diff --combined drivers/net/wireless/iwlegacy/4965-mac.c
index d24eaf8,ff5d689..34f61a0
--- a/drivers/net/wireless/iwlegacy/4965-mac.c
+++ b/drivers/net/wireless/iwlegacy/4965-mac.c
@@@ -3405,7 -3405,7 +3405,7 @@@ il4965_remove_dynamic_key(struct il_pri
  		return 0;
  	}
  
- 	if (il->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET) {
+ 	if (il->stations[sta_id].sta.key.key_flags & STA_KEY_FLG_INVALID) {
  		IL_WARN("Removing wrong key %d 0x%x\n", keyconf->keyidx,
  			key_flags);
  		spin_unlock_irqrestore(&il->sta_lock, flags);
@@@ -3420,7 -3420,7 +3420,7 @@@
  	memset(&il->stations[sta_id].sta.key, 0, sizeof(struct il4965_keyinfo));
  	il->stations[sta_id].sta.key.key_flags =
  	    STA_KEY_FLG_NO_ENC | STA_KEY_FLG_INVALID;
- 	il->stations[sta_id].sta.key.key_offset = WEP_INVALID_OFFSET;
+ 	il->stations[sta_id].sta.key.key_offset = keyconf->hw_key_idx;
  	il->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
  	il->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
  
@@@ -5724,8 -5724,7 +5724,8 @@@ il4965_mac_setup_register(struct il_pri
  	    BIT(NL80211_IFTYPE_STATION) | BIT(NL80211_IFTYPE_ADHOC);
  
  	hw->wiphy->flags |=
 -	    WIPHY_FLAG_CUSTOM_REGULATORY | WIPHY_FLAG_DISABLE_BEACON_HINTS;
 +	    WIPHY_FLAG_CUSTOM_REGULATORY | WIPHY_FLAG_DISABLE_BEACON_HINTS |
 +	    WIPHY_FLAG_IBSS_RSN;
  
  	/*
  	 * For now, disable PS by default because it affects
@@@ -5874,16 -5873,6 +5874,16 @@@ il4965_mac_set_key(struct ieee80211_hw 
  		return -EOPNOTSUPP;
  	}
  
 +	/*
 +	 * To support IBSS RSN, don't program group keys in IBSS, the
 +	 * hardware will then not attempt to decrypt the frames.
 +	 */
 +	if (vif->type == NL80211_IFTYPE_ADHOC &&
 +	    !(key->flags & IEEE80211_KEY_FLAG_PAIRWISE)) {
 +		D_MAC80211("leave - ad-hoc group key\n");
 +		return -EOPNOTSUPP;
 +	}
 +
  	sta_id = il_sta_id_or_broadcast(il, sta);
  	if (sta_id == IL_INVALID_STATION)
  		return -EINVAL;
diff --combined drivers/net/wireless/iwlegacy/common.c
index 763c752,5d4807c..0f8a770
--- a/drivers/net/wireless/iwlegacy/common.c
+++ b/drivers/net/wireless/iwlegacy/common.c
@@@ -4717,11 -4717,10 +4717,11 @@@ il_check_stuck_queue(struct il_priv *il
  	struct il_tx_queue *txq = &il->txq[cnt];
  	struct il_queue *q = &txq->q;
  	unsigned long timeout;
 +	unsigned long now = jiffies;
  	int ret;
  
  	if (q->read_ptr == q->write_ptr) {
 -		txq->time_stamp = jiffies;
 +		txq->time_stamp = now;
  		return 0;
  	}
  
@@@ -4729,9 -4728,9 +4729,9 @@@
  	    txq->time_stamp +
  	    msecs_to_jiffies(il->cfg->wd_timeout);
  
 -	if (time_after(jiffies, timeout)) {
 +	if (time_after(now, timeout)) {
  		IL_ERR("Queue %d stuck for %u ms.\n", q->id,
 -		       il->cfg->wd_timeout);
 +		       jiffies_to_msecs(now - txq->time_stamp));
  		ret = il_force_reset(il, false);
  		return (ret == -EAGAIN) ? 0 : 1;
  	}
@@@ -4768,14 -4767,12 +4768,12 @@@ il_bg_watchdog(unsigned long data
  		return;
  
  	/* monitor and check for other stuck queues */
- 	if (il_is_any_associated(il)) {
- 		for (cnt = 0; cnt < il->hw_params.max_txq_num; cnt++) {
- 			/* skip as we already checked the command queue */
- 			if (cnt == il->cmd_queue)
- 				continue;
- 			if (il_check_stuck_queue(il, cnt))
- 				return;
- 		}
+ 	for (cnt = 0; cnt < il->hw_params.max_txq_num; cnt++) {
+ 		/* skip as we already checked the command queue */
+ 		if (cnt == il->cmd_queue)
+ 			continue;
+ 		if (il_check_stuck_queue(il, cnt))
+ 			return;
  	}
  
  	mod_timer(&il->watchdog,
diff --combined drivers/net/wireless/mwifiex/cfg80211.c
index 4b2733a,5c7fd18..3af88b8
--- a/drivers/net/wireless/mwifiex/cfg80211.c
+++ b/drivers/net/wireless/mwifiex/cfg80211.c
@@@ -170,9 -170,7 +170,9 @@@ mwifiex_cfg80211_set_default_key(struc
  	if (!priv->sec_info.wep_enabled)
  		return 0;
  
 -	if (mwifiex_set_encode(priv, NULL, 0, key_index, NULL, 0)) {
 +	if (priv->bss_type == MWIFIEX_BSS_TYPE_UAP) {
 +		priv->wep_key_curr_index = key_index;
 +	} else if (mwifiex_set_encode(priv, NULL, 0, key_index, NULL, 0)) {
  		wiphy_err(wiphy, "set default Tx key index\n");
  		return -EFAULT;
  	}
@@@ -189,25 -187,9 +189,25 @@@ mwifiex_cfg80211_add_key(struct wiphy *
  			 struct key_params *params)
  {
  	struct mwifiex_private *priv = mwifiex_netdev_get_priv(netdev);
 +	struct mwifiex_wep_key *wep_key;
  	const u8 bc_mac[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
  	const u8 *peer_mac = pairwise ? mac_addr : bc_mac;
  
 +	if (GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_UAP &&
 +	    (params->cipher == WLAN_CIPHER_SUITE_WEP40 ||
 +	     params->cipher == WLAN_CIPHER_SUITE_WEP104)) {
 +		if (params->key && params->key_len) {
 +			wep_key = &priv->wep_key[key_index];
 +			memset(wep_key, 0, sizeof(struct mwifiex_wep_key));
 +			memcpy(wep_key->key_material, params->key,
 +			       params->key_len);
 +			wep_key->key_index = key_index;
 +			wep_key->key_length = params->key_len;
 +			priv->sec_info.wep_enabled = 1;
 +		}
 +		return 0;
 +	}
 +
  	if (mwifiex_set_encode(priv, params->key, params->key_len,
  			       key_index, peer_mac, 0)) {
  		wiphy_err(wiphy, "crypto keys added\n");
@@@ -260,13 -242,13 +260,13 @@@ static int mwifiex_send_domain_info_cmd
  			flag = 1;
  			first_chan = (u32) ch->hw_value;
  			next_chan = first_chan;
 -			max_pwr = ch->max_power;
 +			max_pwr = ch->max_reg_power;
  			no_of_parsed_chan = 1;
  			continue;
  		}
  
  		if (ch->hw_value == next_chan + 1 &&
 -		    ch->max_power == max_pwr) {
 +		    ch->max_reg_power == max_pwr) {
  			next_chan++;
  			no_of_parsed_chan++;
  		} else {
@@@ -277,7 -259,7 +277,7 @@@
  			no_of_triplet++;
  			first_chan = (u32) ch->hw_value;
  			next_chan = first_chan;
 -			max_pwr = ch->max_power;
 +			max_pwr = ch->max_reg_power;
  			no_of_parsed_chan = 1;
  		}
  	}
@@@ -402,13 -384,13 +402,13 @@@ mwifiex_set_rf_channel(struct mwifiex_p
  	cfp.freq = chan->center_freq;
  	cfp.channel = ieee80211_frequency_to_channel(chan->center_freq);
  
 -	if (mwifiex_bss_set_channel(priv, &cfp))
 -		return -EFAULT;
 -
 -	if (priv->bss_type == MWIFIEX_BSS_TYPE_STA)
 +	if (priv->bss_type == MWIFIEX_BSS_TYPE_STA) {
 +		if (mwifiex_bss_set_channel(priv, &cfp))
 +			return -EFAULT;
  		return mwifiex_drv_change_adhoc_chan(priv, cfp.channel);
 -	else
 -		return mwifiex_uap_set_channel(priv, cfp.channel);
 +	}
 +
 +	return 0;
  }
  
  /*
@@@ -976,28 -958,16 +976,29 @@@ static int mwifiex_cfg80211_start_ap(st
  	case NL80211_HIDDEN_SSID_ZERO_CONTENTS:
  		/* firmware doesn't support this type of hidden SSID */
  	default:
+ 		kfree(bss_cfg);
  		return -EINVAL;
  	}
  
 +	bss_cfg->channel =
 +	    (u8)ieee80211_frequency_to_channel(params->channel->center_freq);
 +	bss_cfg->band_cfg = BAND_CONFIG_MANUAL;
 +
 +	if (mwifiex_set_rf_channel(priv, params->channel,
 +				   params->channel_type)) {
 +		kfree(bss_cfg);
 +		wiphy_err(wiphy, "Failed to set band config information!\n");
 +		return -1;
 +	}
 +
  	if (mwifiex_set_secure_params(priv, bss_cfg, params)) {
  		kfree(bss_cfg);
  		wiphy_err(wiphy, "Failed to parse secuirty parameters!\n");
  		return -1;
  	}
  
 +	mwifiex_set_ht_params(priv, bss_cfg, params);
 +
  	if (mwifiex_send_cmd_sync(priv, HostCmd_CMD_UAP_BSS_STOP,
  				  HostCmd_ACT_GEN_SET, 0, NULL)) {
  		wiphy_err(wiphy, "Failed to stop the BSS\n");
@@@ -1021,16 -991,6 +1022,16 @@@
  		return -1;
  	}
  
 +	if (priv->sec_info.wep_enabled)
 +		priv->curr_pkt_filter |= HostCmd_ACT_MAC_WEP_ENABLE;
 +	else
 +		priv->curr_pkt_filter &= ~HostCmd_ACT_MAC_WEP_ENABLE;
 +
 +	if (mwifiex_send_cmd_sync(priv, HostCmd_CMD_MAC_CONTROL,
 +				  HostCmd_ACT_GEN_SET, 0,
 +				  &priv->curr_pkt_filter))
 +		return -1;
 +
  	return 0;
  }
  
@@@ -1422,7 -1382,7 +1423,7 @@@ mwifiex_cfg80211_scan(struct wiphy *wip
  
  		priv->user_scan_cfg->chan_list[i].scan_time = 0;
  	}
 -	if (mwifiex_set_user_scan_ioctl(priv, priv->user_scan_cfg))
 +	if (mwifiex_scan_networks(priv, priv->user_scan_cfg))
  		return -EFAULT;
  
  	if (request->ie && request->ie_len) {
@@@ -1743,7 -1703,7 +1744,7 @@@ int mwifiex_register_cfg80211(struct mw
  
  	memcpy(wiphy->perm_addr, priv->curr_addr, ETH_ALEN);
  	wiphy->signal_type = CFG80211_SIGNAL_TYPE_MBM;
 -	wiphy->flags |= WIPHY_FLAG_HAVE_AP_SME | WIPHY_FLAG_CUSTOM_REGULATORY;
 +	wiphy->flags |= WIPHY_FLAG_HAVE_AP_SME;
  
  	/* Reserve space for mwifiex specific private data for BSS */
  	wiphy->bss_priv_size = sizeof(struct mwifiex_bss_priv);
@@@ -1754,7 -1714,7 +1755,7 @@@
  	wdev_priv = wiphy_priv(wiphy);
  	*(unsigned long *)wdev_priv = (unsigned long)adapter;
  
 -	set_wiphy_dev(wiphy, (struct device *)priv->adapter->dev);
 +	set_wiphy_dev(wiphy, priv->adapter->dev);
  
  	ret = wiphy_register(wiphy);
  	if (ret < 0) {
diff --combined net/batman-adv/bridge_loop_avoidance.c
index 3483e40,c5863f4..6705d35
--- a/net/batman-adv/bridge_loop_avoidance.c
+++ b/net/batman-adv/bridge_loop_avoidance.c
@@@ -1,4 -1,5 +1,4 @@@
 -/*
 - * Copyright (C) 2011-2012 B.A.T.M.A.N. contributors:
 +/* Copyright (C) 2011-2012 B.A.T.M.A.N. contributors:
   *
   * Simon Wunderlich
   *
@@@ -15,6 -16,7 +15,6 @@@
   * along with this program; if not, write to the Free Software
   * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
   * 02110-1301, USA
 - *
   */
  
  #include "main.h"
@@@ -31,14 -33,14 +31,14 @@@
  #include <net/arp.h>
  #include <linux/if_vlan.h>
  
 -static const uint8_t announce_mac[4] = {0x43, 0x05, 0x43, 0x05};
 +static const uint8_t batadv_announce_mac[4] = {0x43, 0x05, 0x43, 0x05};
  
 -static void bla_periodic_work(struct work_struct *work);
 -static void bla_send_announce(struct bat_priv *bat_priv,
 -			      struct backbone_gw *backbone_gw);
 +static void batadv_bla_periodic_work(struct work_struct *work);
 +static void batadv_bla_send_announce(struct batadv_priv *bat_priv,
 +				     struct batadv_backbone_gw *backbone_gw);
  
  /* return the index of the claim */
 -static inline uint32_t choose_claim(const void *data, uint32_t size)
 +static inline uint32_t batadv_choose_claim(const void *data, uint32_t size)
  {
  	const unsigned char *key = data;
  	uint32_t hash = 0;
@@@ -58,8 -60,7 +58,8 @@@
  }
  
  /* return the index of the backbone gateway */
 -static inline uint32_t choose_backbone_gw(const void *data, uint32_t size)
 +static inline uint32_t batadv_choose_backbone_gw(const void *data,
 +						 uint32_t size)
  {
  	const unsigned char *key = data;
  	uint32_t hash = 0;
@@@ -80,75 -81,74 +80,75 @@@
  
  
  /* compares address and vid of two backbone gws */
 -static int compare_backbone_gw(const struct hlist_node *node, const void *data2)
 +static int batadv_compare_backbone_gw(const struct hlist_node *node,
 +				      const void *data2)
  {
 -	const void *data1 = container_of(node, struct backbone_gw,
 +	const void *data1 = container_of(node, struct batadv_backbone_gw,
  					 hash_entry);
  
  	return (memcmp(data1, data2, ETH_ALEN + sizeof(short)) == 0 ? 1 : 0);
  }
  
  /* compares address and vid of two claims */
 -static int compare_claim(const struct hlist_node *node, const void *data2)
 +static int batadv_compare_claim(const struct hlist_node *node,
 +				const void *data2)
  {
 -	const void *data1 = container_of(node, struct claim,
 +	const void *data1 = container_of(node, struct batadv_claim,
  					 hash_entry);
  
  	return (memcmp(data1, data2, ETH_ALEN + sizeof(short)) == 0 ? 1 : 0);
  }
  
  /* free a backbone gw */
 -static void backbone_gw_free_ref(struct backbone_gw *backbone_gw)
 +static void batadv_backbone_gw_free_ref(struct batadv_backbone_gw *backbone_gw)
  {
  	if (atomic_dec_and_test(&backbone_gw->refcount))
  		kfree_rcu(backbone_gw, rcu);
  }
  
  /* finally deinitialize the claim */
 -static void claim_free_rcu(struct rcu_head *rcu)
 +static void batadv_claim_free_rcu(struct rcu_head *rcu)
  {
 -	struct claim *claim;
 +	struct batadv_claim *claim;
  
 -	claim = container_of(rcu, struct claim, rcu);
 +	claim = container_of(rcu, struct batadv_claim, rcu);
  
 -	backbone_gw_free_ref(claim->backbone_gw);
 +	batadv_backbone_gw_free_ref(claim->backbone_gw);
  	kfree(claim);
  }
  
  /* free a claim, call claim_free_rcu if its the last reference */
 -static void claim_free_ref(struct claim *claim)
 +static void batadv_claim_free_ref(struct batadv_claim *claim)
  {
  	if (atomic_dec_and_test(&claim->refcount))
 -		call_rcu(&claim->rcu, claim_free_rcu);
 +		call_rcu(&claim->rcu, batadv_claim_free_rcu);
  }
  
 -/**
 - * @bat_priv: the bat priv with all the soft interface information
 +/* @bat_priv: the bat priv with all the soft interface information
   * @data: search data (may be local/static data)
   *
   * looks for a claim in the hash, and returns it if found
   * or NULL otherwise.
   */
 -static struct claim *claim_hash_find(struct bat_priv *bat_priv,
 -				     struct claim *data)
 +static struct batadv_claim *batadv_claim_hash_find(struct batadv_priv *bat_priv,
 +						   struct batadv_claim *data)
  {
 -	struct hashtable_t *hash = bat_priv->claim_hash;
 +	struct batadv_hashtable *hash = bat_priv->claim_hash;
  	struct hlist_head *head;
  	struct hlist_node *node;
 -	struct claim *claim;
 -	struct claim *claim_tmp = NULL;
 +	struct batadv_claim *claim;
 +	struct batadv_claim *claim_tmp = NULL;
  	int index;
  
  	if (!hash)
  		return NULL;
  
 -	index = choose_claim(data, hash->size);
 +	index = batadv_choose_claim(data, hash->size);
  	head = &hash->table[index];
  
  	rcu_read_lock();
  	hlist_for_each_entry_rcu(claim, node, head, hash_entry) {
 -		if (!compare_claim(&claim->hash_entry, data))
 +		if (!batadv_compare_claim(&claim->hash_entry, data))
  			continue;
  
  		if (!atomic_inc_not_zero(&claim->refcount))
@@@ -163,22 -163,21 +163,22 @@@
  }
  
  /**
 + * batadv_backbone_hash_find - looks for a claim in the hash
   * @bat_priv: the bat priv with all the soft interface information
   * @addr: the address of the originator
   * @vid: the VLAN ID
   *
 - * looks for a claim in the hash, and returns it if found
 - * or NULL otherwise.
 + * Returns claim if found or NULL otherwise.
   */
 -static struct backbone_gw *backbone_hash_find(struct bat_priv *bat_priv,
 -					      uint8_t *addr, short vid)
 +static struct batadv_backbone_gw *
 +batadv_backbone_hash_find(struct batadv_priv *bat_priv,
 +			  uint8_t *addr, short vid)
  {
 -	struct hashtable_t *hash = bat_priv->backbone_hash;
 +	struct batadv_hashtable *hash = bat_priv->backbone_hash;
  	struct hlist_head *head;
  	struct hlist_node *node;
 -	struct backbone_gw search_entry, *backbone_gw;
 -	struct backbone_gw *backbone_gw_tmp = NULL;
 +	struct batadv_backbone_gw search_entry, *backbone_gw;
 +	struct batadv_backbone_gw *backbone_gw_tmp = NULL;
  	int index;
  
  	if (!hash)
@@@ -187,13 -186,13 +187,13 @@@
  	memcpy(search_entry.orig, addr, ETH_ALEN);
  	search_entry.vid = vid;
  
 -	index = choose_backbone_gw(&search_entry, hash->size);
 +	index = batadv_choose_backbone_gw(&search_entry, hash->size);
  	head = &hash->table[index];
  
  	rcu_read_lock();
  	hlist_for_each_entry_rcu(backbone_gw, node, head, hash_entry) {
 -		if (!compare_backbone_gw(&backbone_gw->hash_entry,
 -					 &search_entry))
 +		if (!batadv_compare_backbone_gw(&backbone_gw->hash_entry,
 +						&search_entry))
  			continue;
  
  		if (!atomic_inc_not_zero(&backbone_gw->refcount))
@@@ -208,13 -207,12 +208,13 @@@
  }
  
  /* delete all claims for a backbone */
 -static void bla_del_backbone_claims(struct backbone_gw *backbone_gw)
 +static void
 +batadv_bla_del_backbone_claims(struct batadv_backbone_gw *backbone_gw)
  {
 -	struct hashtable_t *hash;
 +	struct batadv_hashtable *hash;
  	struct hlist_node *node, *node_tmp;
  	struct hlist_head *head;
 -	struct claim *claim;
 +	struct batadv_claim *claim;
  	int i;
  	spinlock_t *list_lock;	/* protects write access to the hash lists */
  
@@@ -233,35 -231,36 +233,35 @@@
  			if (claim->backbone_gw != backbone_gw)
  				continue;
  
 -			claim_free_ref(claim);
 +			batadv_claim_free_ref(claim);
  			hlist_del_rcu(node);
  		}
  		spin_unlock_bh(list_lock);
  	}
  
  	/* all claims gone, initialize CRC */
 -	backbone_gw->crc = BLA_CRC_INIT;
 +	backbone_gw->crc = BATADV_BLA_CRC_INIT;
  }
  
  /**
 + * batadv_bla_send_claim - sends a claim frame according to the provided info
   * @bat_priv: the bat priv with all the soft interface information
   * @orig: the mac address to be announced within the claim
   * @vid: the VLAN ID
   * @claimtype: the type of the claim (CLAIM, UNCLAIM, ANNOUNCE, ...)
 - *
 - * sends a claim frame according to the provided info.
   */
 -static void bla_send_claim(struct bat_priv *bat_priv, uint8_t *mac,
 -			   short vid, int claimtype)
 +static void batadv_bla_send_claim(struct batadv_priv *bat_priv, uint8_t *mac,
 +				  short vid, int claimtype)
  {
  	struct sk_buff *skb;
  	struct ethhdr *ethhdr;
 -	struct hard_iface *primary_if;
 +	struct batadv_hard_iface *primary_if;
  	struct net_device *soft_iface;
  	uint8_t *hw_src;
 -	struct bla_claim_dst local_claim_dest;
 -	uint32_t zeroip = 0;
 +	struct batadv_bla_claim_dst local_claim_dest;
 +	__be32 zeroip = 0;
  
 -	primary_if = primary_if_get_selected(bat_priv);
 +	primary_if = batadv_primary_if_get_selected(bat_priv);
  	if (!primary_if)
  		return;
  
@@@ -295,41 -294,40 +295,41 @@@
  
  	/* now we pretend that the client would have sent this ... */
  	switch (claimtype) {
 -	case CLAIM_TYPE_ADD:
 +	case BATADV_CLAIM_TYPE_ADD:
  		/* normal claim frame
  		 * set Ethernet SRC to the clients mac
  		 */
  		memcpy(ethhdr->h_source, mac, ETH_ALEN);
 -		bat_dbg(DBG_BLA, bat_priv,
 -			"bla_send_claim(): CLAIM %pM on vid %d\n", mac, vid);
 +		batadv_dbg(BATADV_DBG_BLA, bat_priv,
 +			   "bla_send_claim(): CLAIM %pM on vid %d\n", mac, vid);
  		break;
 -	case CLAIM_TYPE_DEL:
 +	case BATADV_CLAIM_TYPE_DEL:
  		/* unclaim frame
  		 * set HW SRC to the clients mac
  		 */
  		memcpy(hw_src, mac, ETH_ALEN);
 -		bat_dbg(DBG_BLA, bat_priv,
 -			"bla_send_claim(): UNCLAIM %pM on vid %d\n", mac, vid);
 +		batadv_dbg(BATADV_DBG_BLA, bat_priv,
 +			   "bla_send_claim(): UNCLAIM %pM on vid %d\n", mac,
 +			   vid);
  		break;
 -	case CLAIM_TYPE_ANNOUNCE:
 +	case BATADV_CLAIM_TYPE_ANNOUNCE:
  		/* announcement frame
  		 * set HW SRC to the special mac containing the crc
  		 */
  		memcpy(hw_src, mac, ETH_ALEN);
 -		bat_dbg(DBG_BLA, bat_priv,
 -			"bla_send_claim(): ANNOUNCE of %pM on vid %d\n",
 -			ethhdr->h_source, vid);
 +		batadv_dbg(BATADV_DBG_BLA, bat_priv,
 +			   "bla_send_claim(): ANNOUNCE of %pM on vid %d\n",
 +			   ethhdr->h_source, vid);
  		break;
 -	case CLAIM_TYPE_REQUEST:
 +	case BATADV_CLAIM_TYPE_REQUEST:
  		/* request frame
  		 * set HW SRC to the special mac containing the crc
  		 */
  		memcpy(hw_src, mac, ETH_ALEN);
  		memcpy(ethhdr->h_dest, mac, ETH_ALEN);
 -		bat_dbg(DBG_BLA, bat_priv,
 -			"bla_send_claim(): REQUEST of %pM to %pMon vid %d\n",
 -			ethhdr->h_source, ethhdr->h_dest, vid);
 +		batadv_dbg(BATADV_DBG_BLA, bat_priv,
 +			   "bla_send_claim(): REQUEST of %pM to %pMon vid %d\n",
 +			   ethhdr->h_source, ethhdr->h_dest, vid);
  		break;
  
  	}
@@@ -346,11 -344,10 +346,11 @@@
  	netif_rx(skb);
  out:
  	if (primary_if)
 -		hardif_free_ref(primary_if);
 +		batadv_hardif_free_ref(primary_if);
  }
  
  /**
 + * batadv_bla_get_backbone_gw
   * @bat_priv: the bat priv with all the soft interface information
   * @orig: the mac address of the originator
   * @vid: the VLAN ID
@@@ -358,22 -355,21 +358,22 @@@
   * searches for the backbone gw or creates a new one if it could not
   * be found.
   */
 -static struct backbone_gw *bla_get_backbone_gw(struct bat_priv *bat_priv,
 -					       uint8_t *orig, short vid)
 +static struct batadv_backbone_gw *
 +batadv_bla_get_backbone_gw(struct batadv_priv *bat_priv, uint8_t *orig,
 +			   short vid)
  {
 -	struct backbone_gw *entry;
 -	struct orig_node *orig_node;
 +	struct batadv_backbone_gw *entry;
 +	struct batadv_orig_node *orig_node;
  	int hash_added;
  
 -	entry = backbone_hash_find(bat_priv, orig, vid);
 +	entry = batadv_backbone_hash_find(bat_priv, orig, vid);
  
  	if (entry)
  		return entry;
  
 -	bat_dbg(DBG_BLA, bat_priv,
 -		"bla_get_backbone_gw(): not found (%pM, %d), creating new entry\n",
 -		orig, vid);
 +	batadv_dbg(BATADV_DBG_BLA, bat_priv,
 +		   "bla_get_backbone_gw(): not found (%pM, %d), creating new entry\n",
 +		   orig, vid);
  
  	entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
  	if (!entry)
@@@ -381,7 -377,7 +381,7 @@@
  
  	entry->vid = vid;
  	entry->lasttime = jiffies;
 -	entry->crc = BLA_CRC_INIT;
 +	entry->crc = BATADV_BLA_CRC_INIT;
  	entry->bat_priv = bat_priv;
  	atomic_set(&entry->request_sent, 0);
  	memcpy(entry->orig, orig, ETH_ALEN);
@@@ -389,10 -385,8 +389,10 @@@
  	/* one for the hash, one for returning */
  	atomic_set(&entry->refcount, 2);
  
 -	hash_added = hash_add(bat_priv->backbone_hash, compare_backbone_gw,
 -			      choose_backbone_gw, entry, &entry->hash_entry);
 +	hash_added = batadv_hash_add(bat_priv->backbone_hash,
 +				     batadv_compare_backbone_gw,
 +				     batadv_choose_backbone_gw, entry,
 +				     &entry->hash_entry);
  
  	if (unlikely(hash_added != 0)) {
  		/* hash failed, free the structure */
@@@ -401,11 -395,11 +401,11 @@@
  	}
  
  	/* this is a gateway now, remove any tt entries */
 -	orig_node = orig_hash_find(bat_priv, orig);
 +	orig_node = batadv_orig_hash_find(bat_priv, orig);
  	if (orig_node) {
 -		tt_global_del_orig(bat_priv, orig_node,
 -				   "became a backbone gateway");
 -		orig_node_free_ref(orig_node);
 +		batadv_tt_global_del_orig(bat_priv, orig_node,
 +					  "became a backbone gateway");
 +		batadv_orig_node_free_ref(orig_node);
  	}
  	return entry;
  }
@@@ -413,46 -407,43 +413,46 @@@
  /* update or add the own backbone gw to make sure we announce
   * where we receive other backbone gws
   */
 -static void bla_update_own_backbone_gw(struct bat_priv *bat_priv,
 -				       struct hard_iface *primary_if,
 -				       short vid)
 +static void
 +batadv_bla_update_own_backbone_gw(struct batadv_priv *bat_priv,
 +				  struct batadv_hard_iface *primary_if,
 +				  short vid)
  {
 -	struct backbone_gw *backbone_gw;
 +	struct batadv_backbone_gw *backbone_gw;
  
 -	backbone_gw = bla_get_backbone_gw(bat_priv,
 -					  primary_if->net_dev->dev_addr, vid);
 +	backbone_gw = batadv_bla_get_backbone_gw(bat_priv,
 +						 primary_if->net_dev->dev_addr,
 +						 vid);
  	if (unlikely(!backbone_gw))
  		return;
  
  	backbone_gw->lasttime = jiffies;
 -	backbone_gw_free_ref(backbone_gw);
 +	batadv_backbone_gw_free_ref(backbone_gw);
  }
  
 -/**
 - * @bat_priv: the bat priv with all the soft interface information
 +/* @bat_priv: the bat priv with all the soft interface information
   * @vid: the vid where the request came on
   *
   * Repeat all of our own claims, and finally send an ANNOUNCE frame
   * to allow the requester another check if the CRC is correct now.
   */
 -static void bla_answer_request(struct bat_priv *bat_priv,
 -			       struct hard_iface *primary_if, short vid)
 +static void batadv_bla_answer_request(struct batadv_priv *bat_priv,
 +				      struct batadv_hard_iface *primary_if,
 +				      short vid)
  {
  	struct hlist_node *node;
  	struct hlist_head *head;
 -	struct hashtable_t *hash;
 -	struct claim *claim;
 -	struct backbone_gw *backbone_gw;
 +	struct batadv_hashtable *hash;
 +	struct batadv_claim *claim;
 +	struct batadv_backbone_gw *backbone_gw;
  	int i;
  
 -	bat_dbg(DBG_BLA, bat_priv,
 -		"bla_answer_request(): received a claim request, send all of our own claims again\n");
 +	batadv_dbg(BATADV_DBG_BLA, bat_priv,
 +		   "bla_answer_request(): received a claim request, send all of our own claims again\n");
  
 -	backbone_gw = backbone_hash_find(bat_priv,
 -					 primary_if->net_dev->dev_addr, vid);
 +	backbone_gw = batadv_backbone_hash_find(bat_priv,
 +						primary_if->net_dev->dev_addr,
 +						vid);
  	if (!backbone_gw)
  		return;
  
@@@ -466,34 -457,36 +466,34 @@@
  			if (claim->backbone_gw != backbone_gw)
  				continue;
  
 -			bla_send_claim(bat_priv, claim->addr, claim->vid,
 -				       CLAIM_TYPE_ADD);
 +			batadv_bla_send_claim(bat_priv, claim->addr, claim->vid,
 +					      BATADV_CLAIM_TYPE_ADD);
  		}
  		rcu_read_unlock();
  	}
  
  	/* finally, send an announcement frame */
 -	bla_send_announce(bat_priv, backbone_gw);
 -	backbone_gw_free_ref(backbone_gw);
 +	batadv_bla_send_announce(bat_priv, backbone_gw);
 +	batadv_backbone_gw_free_ref(backbone_gw);
  }
  
 -/**
 - * @backbone_gw: the backbone gateway from whom we are out of sync
 +/* @backbone_gw: the backbone gateway from whom we are out of sync
   *
   * When the crc is wrong, ask the backbone gateway for a full table update.
   * After the request, it will repeat all of his own claims and finally
   * send an announcement claim with which we can check again.
   */
 -static void bla_send_request(struct backbone_gw *backbone_gw)
 +static void batadv_bla_send_request(struct batadv_backbone_gw *backbone_gw)
  {
  	/* first, remove all old entries */
 -	bla_del_backbone_claims(backbone_gw);
 +	batadv_bla_del_backbone_claims(backbone_gw);
  
 -	bat_dbg(DBG_BLA, backbone_gw->bat_priv,
 -		"Sending REQUEST to %pM\n",
 -		backbone_gw->orig);
 +	batadv_dbg(BATADV_DBG_BLA, backbone_gw->bat_priv,
 +		   "Sending REQUEST to %pM\n", backbone_gw->orig);
  
  	/* send request */
 -	bla_send_claim(backbone_gw->bat_priv, backbone_gw->orig,
 -		       backbone_gw->vid, CLAIM_TYPE_REQUEST);
 +	batadv_bla_send_claim(backbone_gw->bat_priv, backbone_gw->orig,
 +			      backbone_gw->vid, BATADV_CLAIM_TYPE_REQUEST);
  
  	/* no local broadcasts should be sent or received, for now. */
  	if (!atomic_read(&backbone_gw->request_sent)) {
@@@ -502,45 -495,45 +502,45 @@@
  	}
  }
  
 -/**
 - * @bat_priv: the bat priv with all the soft interface information
 +/* @bat_priv: the bat priv with all the soft interface information
   * @backbone_gw: our backbone gateway which should be announced
   *
   * This function sends an announcement. It is called from multiple
   * places.
   */
 -static void bla_send_announce(struct bat_priv *bat_priv,
 -			      struct backbone_gw *backbone_gw)
 +static void batadv_bla_send_announce(struct batadv_priv *bat_priv,
 +				     struct batadv_backbone_gw *backbone_gw)
  {
  	uint8_t mac[ETH_ALEN];
 -	uint16_t crc;
 +	__be16 crc;
  
 -	memcpy(mac, announce_mac, 4);
 +	memcpy(mac, batadv_announce_mac, 4);
  	crc = htons(backbone_gw->crc);
 -	memcpy(&mac[4], (uint8_t *)&crc, 2);
 +	memcpy(&mac[4], &crc, 2);
  
 -	bla_send_claim(bat_priv, mac, backbone_gw->vid, CLAIM_TYPE_ANNOUNCE);
 +	batadv_bla_send_claim(bat_priv, mac, backbone_gw->vid,
 +			      BATADV_CLAIM_TYPE_ANNOUNCE);
  
  }
  
  /**
 + * batadv_bla_add_claim - Adds a claim in the claim hash
   * @bat_priv: the bat priv with all the soft interface information
   * @mac: the mac address of the claim
   * @vid: the VLAN ID of the frame
   * @backbone_gw: the backbone gateway which claims it
 - *
 - * Adds a claim in the claim hash.
   */
 -static void bla_add_claim(struct bat_priv *bat_priv, const uint8_t *mac,
 -			  const short vid, struct backbone_gw *backbone_gw)
 +static void batadv_bla_add_claim(struct batadv_priv *bat_priv,
 +				 const uint8_t *mac, const short vid,
 +				 struct batadv_backbone_gw *backbone_gw)
  {
 -	struct claim *claim;
 -	struct claim search_claim;
 +	struct batadv_claim *claim;
 +	struct batadv_claim search_claim;
  	int hash_added;
  
  	memcpy(search_claim.addr, mac, ETH_ALEN);
  	search_claim.vid = vid;
 -	claim = claim_hash_find(bat_priv, &search_claim);
 +	claim = batadv_claim_hash_find(bat_priv, &search_claim);
  
  	/* create a new claim entry if it does not exist yet. */
  	if (!claim) {
@@@ -554,13 -547,11 +554,13 @@@
  		claim->backbone_gw = backbone_gw;
  
  		atomic_set(&claim->refcount, 2);
 -		bat_dbg(DBG_BLA, bat_priv,
 -			"bla_add_claim(): adding new entry %pM, vid %d to hash ...\n",
 -			mac, vid);
 -		hash_added = hash_add(bat_priv->claim_hash, compare_claim,
 -				      choose_claim, claim, &claim->hash_entry);
 +		batadv_dbg(BATADV_DBG_BLA, bat_priv,
 +			   "bla_add_claim(): adding new entry %pM, vid %d to hash ...\n",
 +			   mac, vid);
 +		hash_added = batadv_hash_add(bat_priv->claim_hash,
 +					     batadv_compare_claim,
 +					     batadv_choose_claim, claim,
 +					     &claim->hash_entry);
  
  		if (unlikely(hash_added != 0)) {
  			/* only local changes happened. */
@@@ -573,13 -564,13 +573,13 @@@
  			/* no need to register a new backbone */
  			goto claim_free_ref;
  
 -		bat_dbg(DBG_BLA, bat_priv,
 -			"bla_add_claim(): changing ownership for %pM, vid %d\n",
 -			mac, vid);
 +		batadv_dbg(BATADV_DBG_BLA, bat_priv,
 +			   "bla_add_claim(): changing ownership for %pM, vid %d\n",
 +			   mac, vid);
  
  		claim->backbone_gw->crc ^=
  			crc16(0, claim->addr, ETH_ALEN);
 -		backbone_gw_free_ref(claim->backbone_gw);
 +		batadv_backbone_gw_free_ref(claim->backbone_gw);
  
  	}
  	/* set (new) backbone gw */
@@@ -590,48 -581,45 +590,48 @@@
  	backbone_gw->lasttime = jiffies;
  
  claim_free_ref:
 -	claim_free_ref(claim);
 +	batadv_claim_free_ref(claim);
  }
  
  /* Delete a claim from the claim hash which has the
   * given mac address and vid.
   */
 -static void bla_del_claim(struct bat_priv *bat_priv, const uint8_t *mac,
 -			  const short vid)
 +static void batadv_bla_del_claim(struct batadv_priv *bat_priv,
 +				 const uint8_t *mac, const short vid)
  {
 -	struct claim search_claim, *claim;
 +	struct batadv_claim search_claim, *claim;
  
  	memcpy(search_claim.addr, mac, ETH_ALEN);
  	search_claim.vid = vid;
 -	claim = claim_hash_find(bat_priv, &search_claim);
 +	claim = batadv_claim_hash_find(bat_priv, &search_claim);
  	if (!claim)
  		return;
  
 -	bat_dbg(DBG_BLA, bat_priv, "bla_del_claim(): %pM, vid %d\n", mac, vid);
 +	batadv_dbg(BATADV_DBG_BLA, bat_priv, "bla_del_claim(): %pM, vid %d\n",
 +		   mac, vid);
  
 -	hash_remove(bat_priv->claim_hash, compare_claim, choose_claim, claim);
 -	claim_free_ref(claim); /* reference from the hash is gone */
 +	batadv_hash_remove(bat_priv->claim_hash, batadv_compare_claim,
 +			   batadv_choose_claim, claim);
 +	batadv_claim_free_ref(claim); /* reference from the hash is gone */
  
  	claim->backbone_gw->crc ^= crc16(0, claim->addr, ETH_ALEN);
  
  	/* don't need the reference from hash_find() anymore */
 -	claim_free_ref(claim);
 +	batadv_claim_free_ref(claim);
  }
  
  /* check for ANNOUNCE frame, return 1 if handled */
 -static int handle_announce(struct bat_priv *bat_priv,
 -			   uint8_t *an_addr, uint8_t *backbone_addr, short vid)
 +static int batadv_handle_announce(struct batadv_priv *bat_priv,
 +				  uint8_t *an_addr, uint8_t *backbone_addr,
 +				  short vid)
  {
 -	struct backbone_gw *backbone_gw;
 +	struct batadv_backbone_gw *backbone_gw;
  	uint16_t crc;
  
 -	if (memcmp(an_addr, announce_mac, 4) != 0)
 +	if (memcmp(an_addr, batadv_announce_mac, 4) != 0)
  		return 0;
  
 -	backbone_gw = bla_get_backbone_gw(bat_priv, backbone_addr, vid);
 +	backbone_gw = batadv_bla_get_backbone_gw(bat_priv, backbone_addr, vid);
  
  	if (unlikely(!backbone_gw))
  		return 1;
@@@ -639,19 -627,19 +639,19 @@@
  
  	/* handle as ANNOUNCE frame */
  	backbone_gw->lasttime = jiffies;
 -	crc = ntohs(*((uint16_t *)(&an_addr[4])));
 +	crc = ntohs(*((__be16 *)(&an_addr[4])));
  
 -	bat_dbg(DBG_BLA, bat_priv,
 -		"handle_announce(): ANNOUNCE vid %d (sent by %pM)... CRC = %04x\n",
 -		vid, backbone_gw->orig, crc);
 +	batadv_dbg(BATADV_DBG_BLA, bat_priv,
 +		   "handle_announce(): ANNOUNCE vid %d (sent by %pM)... CRC = %04x\n",
 +		   vid, backbone_gw->orig, crc);
  
  	if (backbone_gw->crc != crc) {
 -		bat_dbg(DBG_BLA, backbone_gw->bat_priv,
 -			"handle_announce(): CRC FAILED for %pM/%d (my = %04x, sent = %04x)\n",
 -			backbone_gw->orig, backbone_gw->vid, backbone_gw->crc,
 -			crc);
 +		batadv_dbg(BATADV_DBG_BLA, backbone_gw->bat_priv,
 +			   "handle_announce(): CRC FAILED for %pM/%d (my = %04x, sent = %04x)\n",
 +			   backbone_gw->orig, backbone_gw->vid,
 +			   backbone_gw->crc, crc);
  
 -		bla_send_request(backbone_gw);
 +		batadv_bla_send_request(backbone_gw);
  	} else {
  		/* if we have sent a request and the crc was OK,
  		 * we can allow traffic again.
@@@ -662,92 -650,88 +662,92 @@@
  		}
  	}
  
 -	backbone_gw_free_ref(backbone_gw);
 +	batadv_backbone_gw_free_ref(backbone_gw);
  	return 1;
  }
  
  /* check for REQUEST frame, return 1 if handled */
 -static int handle_request(struct bat_priv *bat_priv,
 -			  struct hard_iface *primary_if,
 -			  uint8_t *backbone_addr,
 -			  struct ethhdr *ethhdr, short vid)
 +static int batadv_handle_request(struct batadv_priv *bat_priv,
 +				 struct batadv_hard_iface *primary_if,
 +				 uint8_t *backbone_addr,
 +				 struct ethhdr *ethhdr, short vid)
  {
  	/* check for REQUEST frame */
 -	if (!compare_eth(backbone_addr, ethhdr->h_dest))
 +	if (!batadv_compare_eth(backbone_addr, ethhdr->h_dest))
  		return 0;
  
  	/* sanity check, this should not happen on a normal switch,
  	 * we ignore it in this case.
  	 */
 -	if (!compare_eth(ethhdr->h_dest, primary_if->net_dev->dev_addr))
 +	if (!batadv_compare_eth(ethhdr->h_dest, primary_if->net_dev->dev_addr))
  		return 1;
  
 -	bat_dbg(DBG_BLA, bat_priv,
 -		"handle_request(): REQUEST vid %d (sent by %pM)...\n",
 -		vid, ethhdr->h_source);
 +	batadv_dbg(BATADV_DBG_BLA, bat_priv,
 +		   "handle_request(): REQUEST vid %d (sent by %pM)...\n",
 +		   vid, ethhdr->h_source);
  
 -	bla_answer_request(bat_priv, primary_if, vid);
 +	batadv_bla_answer_request(bat_priv, primary_if, vid);
  	return 1;
  }
  
  /* check for UNCLAIM frame, return 1 if handled */
 -static int handle_unclaim(struct bat_priv *bat_priv,
 -			  struct hard_iface *primary_if,
 -			  uint8_t *backbone_addr,
 -			  uint8_t *claim_addr, short vid)
 +static int batadv_handle_unclaim(struct batadv_priv *bat_priv,
 +				 struct batadv_hard_iface *primary_if,
 +				 uint8_t *backbone_addr,
 +				 uint8_t *claim_addr, short vid)
  {
 -	struct backbone_gw *backbone_gw;
 +	struct batadv_backbone_gw *backbone_gw;
  
  	/* unclaim in any case if it is our own */
 -	if (primary_if && compare_eth(backbone_addr,
 -				      primary_if->net_dev->dev_addr))
 -		bla_send_claim(bat_priv, claim_addr, vid, CLAIM_TYPE_DEL);
 +	if (primary_if && batadv_compare_eth(backbone_addr,
 +					     primary_if->net_dev->dev_addr))
 +		batadv_bla_send_claim(bat_priv, claim_addr, vid,
 +				      BATADV_CLAIM_TYPE_DEL);
  
 -	backbone_gw = backbone_hash_find(bat_priv, backbone_addr, vid);
 +	backbone_gw = batadv_backbone_hash_find(bat_priv, backbone_addr, vid);
  
  	if (!backbone_gw)
  		return 1;
  
  	/* this must be an UNCLAIM frame */
 -	bat_dbg(DBG_BLA, bat_priv,
 -		"handle_unclaim(): UNCLAIM %pM on vid %d (sent by %pM)...\n",
 -		claim_addr, vid, backbone_gw->orig);
 +	batadv_dbg(BATADV_DBG_BLA, bat_priv,
 +		   "handle_unclaim(): UNCLAIM %pM on vid %d (sent by %pM)...\n",
 +		   claim_addr, vid, backbone_gw->orig);
  
 -	bla_del_claim(bat_priv, claim_addr, vid);
 -	backbone_gw_free_ref(backbone_gw);
 +	batadv_bla_del_claim(bat_priv, claim_addr, vid);
 +	batadv_backbone_gw_free_ref(backbone_gw);
  	return 1;
  }
  
  /* check for CLAIM frame, return 1 if handled */
 -static int handle_claim(struct bat_priv *bat_priv,
 -			struct hard_iface *primary_if, uint8_t *backbone_addr,
 -			uint8_t *claim_addr, short vid)
 +static int batadv_handle_claim(struct batadv_priv *bat_priv,
 +			       struct batadv_hard_iface *primary_if,
 +			       uint8_t *backbone_addr, uint8_t *claim_addr,
 +			       short vid)
  {
 -	struct backbone_gw *backbone_gw;
 +	struct batadv_backbone_gw *backbone_gw;
  
  	/* register the gateway if not yet available, and add the claim. */
  
 -	backbone_gw = bla_get_backbone_gw(bat_priv, backbone_addr, vid);
 +	backbone_gw = batadv_bla_get_backbone_gw(bat_priv, backbone_addr, vid);
  
  	if (unlikely(!backbone_gw))
  		return 1;
  
  	/* this must be a CLAIM frame */
 -	bla_add_claim(bat_priv, claim_addr, vid, backbone_gw);
 -	if (compare_eth(backbone_addr, primary_if->net_dev->dev_addr))
 -		bla_send_claim(bat_priv, claim_addr, vid, CLAIM_TYPE_ADD);
 +	batadv_bla_add_claim(bat_priv, claim_addr, vid, backbone_gw);
 +	if (batadv_compare_eth(backbone_addr, primary_if->net_dev->dev_addr))
 +		batadv_bla_send_claim(bat_priv, claim_addr, vid,
 +				      BATADV_CLAIM_TYPE_ADD);
  
  	/* TODO: we could call something like tt_local_del() here. */
  
 -	backbone_gw_free_ref(backbone_gw);
 +	batadv_backbone_gw_free_ref(backbone_gw);
  	return 1;
  }
  
  /**
 + * batadv_check_claim_group
   * @bat_priv: the bat priv with all the soft interface information
   * @hw_src: the Hardware source in the ARP Header
   * @hw_dst: the Hardware destination in the ARP Header
@@@ -762,16 -746,16 +762,16 @@@
   *	1  - if is a claim packet from another group
   *	0  - if it is not a claim packet
   */
 -static int check_claim_group(struct bat_priv *bat_priv,
 -			     struct hard_iface *primary_if,
 -			     uint8_t *hw_src, uint8_t *hw_dst,
 -			     struct ethhdr *ethhdr)
 +static int batadv_check_claim_group(struct batadv_priv *bat_priv,
 +				    struct batadv_hard_iface *primary_if,
 +				    uint8_t *hw_src, uint8_t *hw_dst,
 +				    struct ethhdr *ethhdr)
  {
  	uint8_t *backbone_addr;
 -	struct orig_node *orig_node;
 -	struct bla_claim_dst *bla_dst, *bla_dst_own;
 +	struct batadv_orig_node *orig_node;
 +	struct batadv_bla_claim_dst *bla_dst, *bla_dst_own;
  
 -	bla_dst = (struct bla_claim_dst *)hw_dst;
 +	bla_dst = (struct batadv_bla_claim_dst *)hw_dst;
  	bla_dst_own = &bat_priv->claim_dest;
  
  	/* check if it is a claim packet in general */
@@@ -783,12 -767,12 +783,12 @@@
  	 * otherwise assume it is in the hw_src
  	 */
  	switch (bla_dst->type) {
 -	case CLAIM_TYPE_ADD:
 +	case BATADV_CLAIM_TYPE_ADD:
  		backbone_addr = hw_src;
  		break;
 -	case CLAIM_TYPE_REQUEST:
 -	case CLAIM_TYPE_ANNOUNCE:
 -	case CLAIM_TYPE_DEL:
 +	case BATADV_CLAIM_TYPE_REQUEST:
 +	case BATADV_CLAIM_TYPE_ANNOUNCE:
 +	case BATADV_CLAIM_TYPE_DEL:
  		backbone_addr = ethhdr->h_source;
  		break;
  	default:
@@@ -796,7 -780,7 +796,7 @@@
  	}
  
  	/* don't accept claim frames from ourselves */
 -	if (compare_eth(backbone_addr, primary_if->net_dev->dev_addr))
 +	if (batadv_compare_eth(backbone_addr, primary_if->net_dev->dev_addr))
  		return 0;
  
  	/* if its already the same group, it is fine. */
@@@ -804,7 -788,7 +804,7 @@@
  		return 2;
  
  	/* lets see if this originator is in our mesh */
 -	orig_node = orig_hash_find(bat_priv, backbone_addr);
 +	orig_node = batadv_orig_hash_find(bat_priv, backbone_addr);
  
  	/* don't accept claims from gateways which are not in
  	 * the same mesh or group.
@@@ -814,19 -798,20 +814,19 @@@
  
  	/* if our mesh friends mac is bigger, use it for ourselves. */
  	if (ntohs(bla_dst->group) > ntohs(bla_dst_own->group)) {
 -		bat_dbg(DBG_BLA, bat_priv,
 -			"taking other backbones claim group: %04x\n",
 -			ntohs(bla_dst->group));
 +		batadv_dbg(BATADV_DBG_BLA, bat_priv,
 +			   "taking other backbones claim group: %04x\n",
 +			   ntohs(bla_dst->group));
  		bla_dst_own->group = bla_dst->group;
  	}
  
 -	orig_node_free_ref(orig_node);
 +	batadv_orig_node_free_ref(orig_node);
  
  	return 2;
  }
  
  
 -/**
 - * @bat_priv: the bat priv with all the soft interface information
 +/* @bat_priv: the bat priv with all the soft interface information
   * @skb: the frame to be checked
   *
   * Check if this is a claim frame, and process it accordingly.
@@@ -834,15 -819,15 +834,15 @@@
   * returns 1 if it was a claim frame, otherwise return 0 to
   * tell the callee that it can use the frame on its own.
   */
 -static int bla_process_claim(struct bat_priv *bat_priv,
 -			     struct hard_iface *primary_if,
 -			     struct sk_buff *skb)
 +static int batadv_bla_process_claim(struct batadv_priv *bat_priv,
 +				    struct batadv_hard_iface *primary_if,
 +				    struct sk_buff *skb)
  {
  	struct ethhdr *ethhdr;
  	struct vlan_ethhdr *vhdr;
  	struct arphdr *arphdr;
  	uint8_t *hw_src, *hw_dst;
 -	struct bla_claim_dst *bla_dst;
 +	struct batadv_bla_claim_dst *bla_dst;
  	uint16_t proto;
  	int headlen;
  	short vid = -1;
@@@ -875,6 -860,7 +875,6 @@@
  	/* Check whether the ARP frame carries a valid
  	 * IP information
  	 */
 -
  	if (arphdr->ar_hrd != htons(ARPHRD_ETHER))
  		return 0;
  	if (arphdr->ar_pro != htons(ETH_P_IP))
@@@ -886,62 -872,59 +886,62 @@@
  
  	hw_src = (uint8_t *)arphdr + sizeof(struct arphdr);
  	hw_dst = hw_src + ETH_ALEN + 4;
 -	bla_dst = (struct bla_claim_dst *)hw_dst;
 +	bla_dst = (struct batadv_bla_claim_dst *)hw_dst;
  
  	/* check if it is a claim frame. */
 -	ret = check_claim_group(bat_priv, primary_if, hw_src, hw_dst, ethhdr);
 +	ret = batadv_check_claim_group(bat_priv, primary_if, hw_src, hw_dst,
 +				       ethhdr);
  	if (ret == 1)
 -		bat_dbg(DBG_BLA, bat_priv,
 -			"bla_process_claim(): received a claim frame from another group. From: %pM on vid %d ...(hw_src %pM, hw_dst %pM)\n",
 -			ethhdr->h_source, vid, hw_src, hw_dst);
 +		batadv_dbg(BATADV_DBG_BLA, bat_priv,
 +			   "bla_process_claim(): received a claim frame from another group. From: %pM on vid %d ...(hw_src %pM, hw_dst %pM)\n",
 +			   ethhdr->h_source, vid, hw_src, hw_dst);
  
  	if (ret < 2)
  		return ret;
  
  	/* become a backbone gw ourselves on this vlan if not happened yet */
 -	bla_update_own_backbone_gw(bat_priv, primary_if, vid);
 +	batadv_bla_update_own_backbone_gw(bat_priv, primary_if, vid);
  
  	/* check for the different types of claim frames ... */
  	switch (bla_dst->type) {
 -	case CLAIM_TYPE_ADD:
 -		if (handle_claim(bat_priv, primary_if, hw_src,
 -				 ethhdr->h_source, vid))
 +	case BATADV_CLAIM_TYPE_ADD:
 +		if (batadv_handle_claim(bat_priv, primary_if, hw_src,
 +					ethhdr->h_source, vid))
  			return 1;
  		break;
 -	case CLAIM_TYPE_DEL:
 -		if (handle_unclaim(bat_priv, primary_if,
 -				   ethhdr->h_source, hw_src, vid))
 +	case BATADV_CLAIM_TYPE_DEL:
 +		if (batadv_handle_unclaim(bat_priv, primary_if,
 +					  ethhdr->h_source, hw_src, vid))
  			return 1;
  		break;
  
 -	case CLAIM_TYPE_ANNOUNCE:
 -		if (handle_announce(bat_priv, hw_src, ethhdr->h_source, vid))
 +	case BATADV_CLAIM_TYPE_ANNOUNCE:
 +		if (batadv_handle_announce(bat_priv, hw_src, ethhdr->h_source,
 +					   vid))
  			return 1;
  		break;
 -	case CLAIM_TYPE_REQUEST:
 -		if (handle_request(bat_priv, primary_if, hw_src, ethhdr, vid))
 +	case BATADV_CLAIM_TYPE_REQUEST:
 +		if (batadv_handle_request(bat_priv, primary_if, hw_src, ethhdr,
 +					  vid))
  			return 1;
  		break;
  	}
  
 -	bat_dbg(DBG_BLA, bat_priv,
 -		"bla_process_claim(): ERROR - this looks like a claim frame, but is useless. eth src %pM on vid %d ...(hw_src %pM, hw_dst %pM)\n",
 -		ethhdr->h_source, vid, hw_src, hw_dst);
 +	batadv_dbg(BATADV_DBG_BLA, bat_priv,
 +		   "bla_process_claim(): ERROR - this looks like a claim frame, but is useless. eth src %pM on vid %d ...(hw_src %pM, hw_dst %pM)\n",
 +		   ethhdr->h_source, vid, hw_src, hw_dst);
  	return 1;
  }
  
  /* Check when we last heard from other nodes, and remove them in case of
   * a time out, or clean all backbone gws if now is set.
   */
 -static void bla_purge_backbone_gw(struct bat_priv *bat_priv, int now)
 +static void batadv_bla_purge_backbone_gw(struct batadv_priv *bat_priv, int now)
  {
 -	struct backbone_gw *backbone_gw;
 +	struct batadv_backbone_gw *backbone_gw;
  	struct hlist_node *node, *node_tmp;
  	struct hlist_head *head;
 -	struct hashtable_t *hash;
 +	struct batadv_hashtable *hash;
  	spinlock_t *list_lock;	/* protects write access to the hash lists */
  	int i;
  
@@@ -958,30 -941,29 +958,30 @@@
  					  head, hash_entry) {
  			if (now)
  				goto purge_now;
 -			if (!has_timed_out(backbone_gw->lasttime,
 -					   BLA_BACKBONE_TIMEOUT))
 +			if (!batadv_has_timed_out(backbone_gw->lasttime,
 +						  BATADV_BLA_BACKBONE_TIMEOUT))
  				continue;
  
 -			bat_dbg(DBG_BLA, backbone_gw->bat_priv,
 -				"bla_purge_backbone_gw(): backbone gw %pM timed out\n",
 -				backbone_gw->orig);
 +			batadv_dbg(BATADV_DBG_BLA, backbone_gw->bat_priv,
 +				   "bla_purge_backbone_gw(): backbone gw %pM timed out\n",
 +				   backbone_gw->orig);
  
  purge_now:
  			/* don't wait for the pending request anymore */
  			if (atomic_read(&backbone_gw->request_sent))
  				atomic_dec(&bat_priv->bla_num_requests);
  
 -			bla_del_backbone_claims(backbone_gw);
 +			batadv_bla_del_backbone_claims(backbone_gw);
  
  			hlist_del_rcu(node);
 -			backbone_gw_free_ref(backbone_gw);
 +			batadv_backbone_gw_free_ref(backbone_gw);
  		}
  		spin_unlock_bh(list_lock);
  	}
  }
  
  /**
 + * batadv_bla_purge_claims
   * @bat_priv: the bat priv with all the soft interface information
   * @primary_if: the selected primary interface, may be NULL if now is set
   * @now: whether the whole hash shall be wiped now
@@@ -989,14 -971,13 +989,14 @@@
   * Check when we heard last time from our own claims, and remove them in case of
   * a time out, or clean all claims if now is set
   */
 -static void bla_purge_claims(struct bat_priv *bat_priv,
 -			     struct hard_iface *primary_if, int now)
 +static void batadv_bla_purge_claims(struct batadv_priv *bat_priv,
 +				    struct batadv_hard_iface *primary_if,
 +				    int now)
  {
 -	struct claim *claim;
 +	struct batadv_claim *claim;
  	struct hlist_node *node;
  	struct hlist_head *head;
 -	struct hashtable_t *hash;
 +	struct batadv_hashtable *hash;
  	int i;
  
  	hash = bat_priv->claim_hash;
@@@ -1010,42 -991,42 +1010,42 @@@
  		hlist_for_each_entry_rcu(claim, node, head, hash_entry) {
  			if (now)
  				goto purge_now;
 -			if (!compare_eth(claim->backbone_gw->orig,
 -					 primary_if->net_dev->dev_addr))
 +			if (!batadv_compare_eth(claim->backbone_gw->orig,
 +						primary_if->net_dev->dev_addr))
  				continue;
 -			if (!has_timed_out(claim->lasttime,
 -					   BLA_CLAIM_TIMEOUT))
 +			if (!batadv_has_timed_out(claim->lasttime,
 +						  BATADV_BLA_CLAIM_TIMEOUT))
  				continue;
  
 -			bat_dbg(DBG_BLA, bat_priv,
 -				"bla_purge_claims(): %pM, vid %d, time out\n",
 -				claim->addr, claim->vid);
 +			batadv_dbg(BATADV_DBG_BLA, bat_priv,
 +				   "bla_purge_claims(): %pM, vid %d, time out\n",
 +				   claim->addr, claim->vid);
  
  purge_now:
 -			handle_unclaim(bat_priv, primary_if,
 -				       claim->backbone_gw->orig,
 -				       claim->addr, claim->vid);
 +			batadv_handle_unclaim(bat_priv, primary_if,
 +					      claim->backbone_gw->orig,
 +					      claim->addr, claim->vid);
  		}
  		rcu_read_unlock();
  	}
  }
  
  /**
 + * batadv_bla_update_orig_address
   * @bat_priv: the bat priv with all the soft interface information
   * @primary_if: the new selected primary_if
   * @oldif: the old primary interface, may be NULL
   *
   * Update the backbone gateways when the own orig address changes.
 - *
   */
 -void bla_update_orig_address(struct bat_priv *bat_priv,
 -			     struct hard_iface *primary_if,
 -			     struct hard_iface *oldif)
 +void batadv_bla_update_orig_address(struct batadv_priv *bat_priv,
 +				    struct batadv_hard_iface *primary_if,
 +				    struct batadv_hard_iface *oldif)
  {
 -	struct backbone_gw *backbone_gw;
 +	struct batadv_backbone_gw *backbone_gw;
  	struct hlist_node *node;
  	struct hlist_head *head;
 -	struct hashtable_t *hash;
 +	struct batadv_hashtable *hash;
  	int i;
  
  	/* reset bridge loop avoidance group id */
@@@ -1053,8 -1034,8 +1053,8 @@@
  		htons(crc16(0, primary_if->net_dev->dev_addr, ETH_ALEN));
  
  	if (!oldif) {
 -		bla_purge_claims(bat_priv, NULL, 1);
 -		bla_purge_backbone_gw(bat_priv, 1);
 +		batadv_bla_purge_claims(bat_priv, NULL, 1);
 +		batadv_bla_purge_backbone_gw(bat_priv, 1);
  		return;
  	}
  
@@@ -1068,8 -1049,8 +1068,8 @@@
  		rcu_read_lock();
  		hlist_for_each_entry_rcu(backbone_gw, node, head, hash_entry) {
  			/* own orig still holds the old value. */
 -			if (!compare_eth(backbone_gw->orig,
 -					 oldif->net_dev->dev_addr))
 +			if (!batadv_compare_eth(backbone_gw->orig,
 +						oldif->net_dev->dev_addr))
  				continue;
  
  			memcpy(backbone_gw->orig,
@@@ -1077,7 -1058,7 +1077,7 @@@
  			/* send an announce frame so others will ask for our
  			 * claims and update their tables.
  			 */
 -			bla_send_announce(bat_priv, backbone_gw);
 +			batadv_bla_send_announce(bat_priv, backbone_gw);
  		}
  		rcu_read_unlock();
  	}
@@@ -1086,36 -1067,36 +1086,36 @@@
  
  
  /* (re)start the timer */
 -static void bla_start_timer(struct bat_priv *bat_priv)
 +static void batadv_bla_start_timer(struct batadv_priv *bat_priv)
  {
 -	INIT_DELAYED_WORK(&bat_priv->bla_work, bla_periodic_work);
 -	queue_delayed_work(bat_event_workqueue, &bat_priv->bla_work,
 -			   msecs_to_jiffies(BLA_PERIOD_LENGTH));
 +	INIT_DELAYED_WORK(&bat_priv->bla_work, batadv_bla_periodic_work);
 +	queue_delayed_work(batadv_event_workqueue, &bat_priv->bla_work,
 +			   msecs_to_jiffies(BATADV_BLA_PERIOD_LENGTH));
  }
  
  /* periodic work to do:
   *  * purge structures when they are too old
   *  * send announcements
   */
 -static void bla_periodic_work(struct work_struct *work)
 +static void batadv_bla_periodic_work(struct work_struct *work)
  {
  	struct delayed_work *delayed_work =
  		container_of(work, struct delayed_work, work);
 -	struct bat_priv *bat_priv =
 -		container_of(delayed_work, struct bat_priv, bla_work);
 +	struct batadv_priv *bat_priv;
  	struct hlist_node *node;
  	struct hlist_head *head;
 -	struct backbone_gw *backbone_gw;
 -	struct hashtable_t *hash;
 -	struct hard_iface *primary_if;
 +	struct batadv_backbone_gw *backbone_gw;
 +	struct batadv_hashtable *hash;
 +	struct batadv_hard_iface *primary_if;
  	int i;
  
 -	primary_if = primary_if_get_selected(bat_priv);
 +	bat_priv = container_of(delayed_work, struct batadv_priv, bla_work);
 +	primary_if = batadv_primary_if_get_selected(bat_priv);
  	if (!primary_if)
  		goto out;
  
 -	bla_purge_claims(bat_priv, primary_if, 0);
 -	bla_purge_backbone_gw(bat_priv, 0);
 +	batadv_bla_purge_claims(bat_priv, primary_if, 0);
 +	batadv_bla_purge_backbone_gw(bat_priv, 0);
  
  	if (!atomic_read(&bat_priv->bridge_loop_avoidance))
  		goto out;
@@@ -1129,81 -1110,67 +1129,81 @@@
  
  		rcu_read_lock();
  		hlist_for_each_entry_rcu(backbone_gw, node, head, hash_entry) {
 -			if (!compare_eth(backbone_gw->orig,
 -					 primary_if->net_dev->dev_addr))
 +			if (!batadv_compare_eth(backbone_gw->orig,
 +						primary_if->net_dev->dev_addr))
  				continue;
  
  			backbone_gw->lasttime = jiffies;
  
 -			bla_send_announce(bat_priv, backbone_gw);
 +			batadv_bla_send_announce(bat_priv, backbone_gw);
  		}
  		rcu_read_unlock();
  	}
  out:
  	if (primary_if)
 -		hardif_free_ref(primary_if);
 +		batadv_hardif_free_ref(primary_if);
  
 -	bla_start_timer(bat_priv);
 +	batadv_bla_start_timer(bat_priv);
  }
  
 +/* The hash for claim and backbone hash receive the same key because they
 + * are getting initialized by hash_new with the same key. Reinitializing
 + * them with two different keys to allow nested locking without generating
 + * lockdep warnings
 + */
 +static struct lock_class_key batadv_claim_hash_lock_class_key;
 +static struct lock_class_key batadv_backbone_hash_lock_class_key;
 +
  /* initialize all bla structures */
 -int bla_init(struct bat_priv *bat_priv)
 +int batadv_bla_init(struct batadv_priv *bat_priv)
  {
  	int i;
  	uint8_t claim_dest[ETH_ALEN] = {0xff, 0x43, 0x05, 0x00, 0x00, 0x00};
 -	struct hard_iface *primary_if;
 +	struct batadv_hard_iface *primary_if;
  
 -	bat_dbg(DBG_BLA, bat_priv, "bla hash registering\n");
 +	batadv_dbg(BATADV_DBG_BLA, bat_priv, "bla hash registering\n");
  
  	/* setting claim destination address */
  	memcpy(&bat_priv->claim_dest.magic, claim_dest, 3);
  	bat_priv->claim_dest.type = 0;
 -	primary_if = primary_if_get_selected(bat_priv);
 +	primary_if = batadv_primary_if_get_selected(bat_priv);
  	if (primary_if) {
  		bat_priv->claim_dest.group =
  			htons(crc16(0, primary_if->net_dev->dev_addr,
  				    ETH_ALEN));
 -		hardif_free_ref(primary_if);
 +		batadv_hardif_free_ref(primary_if);
  	} else {
  		bat_priv->claim_dest.group = 0; /* will be set later */
  	}
  
  	/* initialize the duplicate list */
 -	for (i = 0; i < DUPLIST_SIZE; i++)
 +	for (i = 0; i < BATADV_DUPLIST_SIZE; i++)
  		bat_priv->bcast_duplist[i].entrytime =
 -			jiffies - msecs_to_jiffies(DUPLIST_TIMEOUT);
 +			jiffies - msecs_to_jiffies(BATADV_DUPLIST_TIMEOUT);
  	bat_priv->bcast_duplist_curr = 0;
  
  	if (bat_priv->claim_hash)
 -		return 1;
 +		return 0;
  
 -	bat_priv->claim_hash = hash_new(128);
 -	bat_priv->backbone_hash = hash_new(32);
 +	bat_priv->claim_hash = batadv_hash_new(128);
 +	bat_priv->backbone_hash = batadv_hash_new(32);
  
  	if (!bat_priv->claim_hash || !bat_priv->backbone_hash)
 -		return -1;
 +		return -ENOMEM;
  
 -	bat_dbg(DBG_BLA, bat_priv, "bla hashes initialized\n");
 +	batadv_hash_set_lock_class(bat_priv->claim_hash,
 +				   &batadv_claim_hash_lock_class_key);
 +	batadv_hash_set_lock_class(bat_priv->backbone_hash,
 +				   &batadv_backbone_hash_lock_class_key);
  
 -	bla_start_timer(bat_priv);
 -	return 1;
 +	batadv_dbg(BATADV_DBG_BLA, bat_priv, "bla hashes initialized\n");
 +
 +	batadv_bla_start_timer(bat_priv);
 +	return 0;
  }
  
  /**
 + * batadv_bla_check_bcast_duplist
   * @bat_priv: the bat priv with all the soft interface information
   * @bcast_packet: originator mac address
   * @hdr_size: maximum length of the frame
@@@ -1216,15 -1183,17 +1216,15 @@@
   * with a good chance that it is the same packet. If it is furthermore
   * sent by another host, drop it. We allow equal packets from
   * the same host however as this might be intended.
 - *
 - **/
 -
 -int bla_check_bcast_duplist(struct bat_priv *bat_priv,
 -			    struct bcast_packet *bcast_packet,
 -			    int hdr_size)
 + */
 +int batadv_bla_check_bcast_duplist(struct batadv_priv *bat_priv,
 +				   struct batadv_bcast_packet *bcast_packet,
 +				   int hdr_size)
  {
  	int i, length, curr;
  	uint8_t *content;
  	uint16_t crc;
 -	struct bcast_duplist_entry *entry;
 +	struct batadv_bcast_duplist_entry *entry;
  
  	length = hdr_size - sizeof(*bcast_packet);
  	content = (uint8_t *)bcast_packet;
@@@ -1233,21 -1202,20 +1233,21 @@@
  	/* calculate the crc ... */
  	crc = crc16(0, content, length);
  
 -	for (i = 0 ; i < DUPLIST_SIZE; i++) {
 -		curr = (bat_priv->bcast_duplist_curr + i) % DUPLIST_SIZE;
 +	for (i = 0; i < BATADV_DUPLIST_SIZE; i++) {
 +		curr = (bat_priv->bcast_duplist_curr + i) % BATADV_DUPLIST_SIZE;
  		entry = &bat_priv->bcast_duplist[curr];
  
  		/* we can stop searching if the entry is too old ;
  		 * later entries will be even older
  		 */
 -		if (has_timed_out(entry->entrytime, DUPLIST_TIMEOUT))
 +		if (batadv_has_timed_out(entry->entrytime,
 +					 BATADV_DUPLIST_TIMEOUT))
  			break;
  
  		if (entry->crc != crc)
  			continue;
  
 -		if (compare_eth(entry->orig, bcast_packet->orig))
 +		if (batadv_compare_eth(entry->orig, bcast_packet->orig))
  			continue;
  
  		/* this entry seems to match: same crc, not too old,
@@@ -1256,8 -1224,7 +1256,8 @@@
  		return 1;
  	}
  	/* not found, add a new entry (overwrite the oldest entry) */
 -	curr = (bat_priv->bcast_duplist_curr + DUPLIST_SIZE - 1) % DUPLIST_SIZE;
 +	curr = (bat_priv->bcast_duplist_curr + BATADV_DUPLIST_SIZE - 1);
 +	curr %= BATADV_DUPLIST_SIZE;
  	entry = &bat_priv->bcast_duplist[curr];
  	entry->crc = crc;
  	entry->entrytime = jiffies;
@@@ -1270,19 -1237,22 +1270,19 @@@
  
  
  
 -/**
 - * @bat_priv: the bat priv with all the soft interface information
 +/* @bat_priv: the bat priv with all the soft interface information
   * @orig: originator mac address
   *
   * check if the originator is a gateway for any VLAN ID.
   *
   * returns 1 if it is found, 0 otherwise
 - *
   */
 -
 -int bla_is_backbone_gw_orig(struct bat_priv *bat_priv, uint8_t *orig)
 +int batadv_bla_is_backbone_gw_orig(struct batadv_priv *bat_priv, uint8_t *orig)
  {
 -	struct hashtable_t *hash = bat_priv->backbone_hash;
 +	struct batadv_hashtable *hash = bat_priv->backbone_hash;
  	struct hlist_head *head;
  	struct hlist_node *node;
 -	struct backbone_gw *backbone_gw;
 +	struct batadv_backbone_gw *backbone_gw;
  	int i;
  
  	if (!atomic_read(&bat_priv->bridge_loop_avoidance))
@@@ -1296,7 -1266,7 +1296,7 @@@
  
  		rcu_read_lock();
  		hlist_for_each_entry_rcu(backbone_gw, node, head, hash_entry) {
 -			if (compare_eth(backbone_gw->orig, orig)) {
 +			if (batadv_compare_eth(backbone_gw->orig, orig)) {
  				rcu_read_unlock();
  				return 1;
  			}
@@@ -1309,7 -1279,6 +1309,7 @@@
  
  
  /**
 + * batadv_bla_is_backbone_gw
   * @skb: the frame to be checked
   * @orig_node: the orig_node of the frame
   * @hdr_size: maximum length of the frame
@@@ -1317,13 -1286,14 +1317,13 @@@
   * bla_is_backbone_gw inspects the skb for the VLAN ID and returns 1
   * if the orig_node is also a gateway on the soft interface, otherwise it
   * returns 0.
 - *
   */
 -int bla_is_backbone_gw(struct sk_buff *skb,
 -		       struct orig_node *orig_node, int hdr_size)
 +int batadv_bla_is_backbone_gw(struct sk_buff *skb,
 +			      struct batadv_orig_node *orig_node, int hdr_size)
  {
  	struct ethhdr *ethhdr;
  	struct vlan_ethhdr *vhdr;
 -	struct backbone_gw *backbone_gw;
 +	struct batadv_backbone_gw *backbone_gw;
  	short vid = -1;
  
  	if (!atomic_read(&orig_node->bat_priv->bridge_loop_avoidance))
@@@ -1345,42 -1315,43 +1345,43 @@@
  	}
  
  	/* see if this originator is a backbone gw for this VLAN */
 -
 -	backbone_gw = backbone_hash_find(orig_node->bat_priv,
 -					 orig_node->orig, vid);
 +	backbone_gw = batadv_backbone_hash_find(orig_node->bat_priv,
 +						orig_node->orig, vid);
  	if (!backbone_gw)
  		return 0;
  
 -	backbone_gw_free_ref(backbone_gw);
 +	batadv_backbone_gw_free_ref(backbone_gw);
  	return 1;
  }
  
  /* free all bla structures (for softinterface free or module unload) */
 -void bla_free(struct bat_priv *bat_priv)
 +void batadv_bla_free(struct batadv_priv *bat_priv)
  {
 -	struct hard_iface *primary_if;
 +	struct batadv_hard_iface *primary_if;
  
  	cancel_delayed_work_sync(&bat_priv->bla_work);
 -	primary_if = primary_if_get_selected(bat_priv);
 +	primary_if = batadv_primary_if_get_selected(bat_priv);
  
  	if (bat_priv->claim_hash) {
 -		bla_purge_claims(bat_priv, primary_if, 1);
 -		hash_destroy(bat_priv->claim_hash);
 +		batadv_bla_purge_claims(bat_priv, primary_if, 1);
 +		batadv_hash_destroy(bat_priv->claim_hash);
  		bat_priv->claim_hash = NULL;
  	}
  	if (bat_priv->backbone_hash) {
 -		bla_purge_backbone_gw(bat_priv, 1);
 -		hash_destroy(bat_priv->backbone_hash);
 +		batadv_bla_purge_backbone_gw(bat_priv, 1);
 +		batadv_hash_destroy(bat_priv->backbone_hash);
  		bat_priv->backbone_hash = NULL;
  	}
  	if (primary_if)
 -		hardif_free_ref(primary_if);
 +		batadv_hardif_free_ref(primary_if);
  }
  
  /**
 + * batadv_bla_rx
   * @bat_priv: the bat priv with all the soft interface information
   * @skb: the frame to be checked
   * @vid: the VLAN ID of the frame
+  * @is_bcast: the packet came in a broadcast packet type.
   *
   * bla_rx avoidance checks if:
   *  * we have to race for a claim
@@@ -1389,17 -1360,19 +1390,18 @@@
   * in these cases, the skb is further handled by this function and
   * returns 1, otherwise it returns 0 and the caller shall further
   * process the skb.
 - *
   */
- int batadv_bla_rx(struct batadv_priv *bat_priv, struct sk_buff *skb, short vid)
 -int bla_rx(struct bat_priv *bat_priv, struct sk_buff *skb, short vid,
 -	   bool is_bcast)
++int batadv_bla_rx(struct batadv_priv *bat_priv, struct sk_buff *skb, short vid,
++		  bool is_bcast)
  {
  	struct ethhdr *ethhdr;
 -	struct claim search_claim, *claim = NULL;
 -	struct hard_iface *primary_if;
 +	struct batadv_claim search_claim, *claim = NULL;
 +	struct batadv_hard_iface *primary_if;
  	int ret;
  
  	ethhdr = (struct ethhdr *)skb_mac_header(skb);
  
 -	primary_if = primary_if_get_selected(bat_priv);
 +	primary_if = batadv_primary_if_get_selected(bat_priv);
  	if (!primary_if)
  		goto handled;
  
@@@ -1409,47 -1382,52 +1411,52 @@@
  
  	if (unlikely(atomic_read(&bat_priv->bla_num_requests)))
  		/* don't allow broadcasts while requests are in flight */
- 		if (is_multicast_ether_addr(ethhdr->h_dest))
+ 		if (is_multicast_ether_addr(ethhdr->h_dest) && is_bcast)
  			goto handled;
  
  	memcpy(search_claim.addr, ethhdr->h_source, ETH_ALEN);
  	search_claim.vid = vid;
 -	claim = claim_hash_find(bat_priv, &search_claim);
 +	claim = batadv_claim_hash_find(bat_priv, &search_claim);
  
  	if (!claim) {
  		/* possible optimization: race for a claim */
  		/* No claim exists yet, claim it for us!
  		 */
 -		handle_claim(bat_priv, primary_if,
 -			     primary_if->net_dev->dev_addr,
 -			     ethhdr->h_source, vid);
 +		batadv_handle_claim(bat_priv, primary_if,
 +				    primary_if->net_dev->dev_addr,
 +				    ethhdr->h_source, vid);
  		goto allow;
  	}
  
  	/* if it is our own claim ... */
 -	if (compare_eth(claim->backbone_gw->orig,
 -			primary_if->net_dev->dev_addr)) {
 +	if (batadv_compare_eth(claim->backbone_gw->orig,
 +			       primary_if->net_dev->dev_addr)) {
  		/* ... allow it in any case */
  		claim->lasttime = jiffies;
  		goto allow;
  	}
  
  	/* if it is a broadcast ... */
- 	if (is_multicast_ether_addr(ethhdr->h_dest)) {
- 		/* ... drop it. the responsible gateway is in charge. */
+ 	if (is_multicast_ether_addr(ethhdr->h_dest) && is_bcast) {
+ 		/* ... drop it. the responsible gateway is in charge.
+ 		 *
+ 		 * We need to check is_bcast because with the gateway
+ 		 * feature, broadcasts (like DHCP requests) may be sent
+ 		 * using a unicast packet type.
+ 		 */
  		goto handled;
  	} else {
  		/* seems the client considers us as its best gateway.
  		 * send a claim and update the claim table
  		 * immediately.
  		 */
 -		handle_claim(bat_priv, primary_if,
 -			     primary_if->net_dev->dev_addr,
 -			     ethhdr->h_source, vid);
 +		batadv_handle_claim(bat_priv, primary_if,
 +				    primary_if->net_dev->dev_addr,
 +				    ethhdr->h_source, vid);
  		goto allow;
  	}
  allow:
 -	bla_update_own_backbone_gw(bat_priv, primary_if, vid);
 +	batadv_bla_update_own_backbone_gw(bat_priv, primary_if, vid);
  	ret = 0;
  	goto out;
  
@@@ -1459,14 -1437,13 +1466,14 @@@ handled
  
  out:
  	if (primary_if)
 -		hardif_free_ref(primary_if);
 +		batadv_hardif_free_ref(primary_if);
  	if (claim)
 -		claim_free_ref(claim);
 +		batadv_claim_free_ref(claim);
  	return ret;
  }
  
  /**
 + * batadv_bla_tx
   * @bat_priv: the bat priv with all the soft interface information
   * @skb: the frame to be checked
   * @vid: the VLAN ID of the frame
@@@ -1478,15 -1455,16 +1485,15 @@@
   * in these cases, the skb is further handled by this function and
   * returns 1, otherwise it returns 0 and the caller shall further
   * process the skb.
 - *
   */
 -int bla_tx(struct bat_priv *bat_priv, struct sk_buff *skb, short vid)
 +int batadv_bla_tx(struct batadv_priv *bat_priv, struct sk_buff *skb, short vid)
  {
  	struct ethhdr *ethhdr;
 -	struct claim search_claim, *claim = NULL;
 -	struct hard_iface *primary_if;
 +	struct batadv_claim search_claim, *claim = NULL;
 +	struct batadv_hard_iface *primary_if;
  	int ret = 0;
  
 -	primary_if = primary_if_get_selected(bat_priv);
 +	primary_if = batadv_primary_if_get_selected(bat_priv);
  	if (!primary_if)
  		goto out;
  
@@@ -1496,7 -1474,7 +1503,7 @@@
  	/* in VLAN case, the mac header might not be set. */
  	skb_reset_mac_header(skb);
  
 -	if (bla_process_claim(bat_priv, primary_if, skb))
 +	if (batadv_bla_process_claim(bat_priv, primary_if, skb))
  		goto handled;
  
  	ethhdr = (struct ethhdr *)skb_mac_header(skb);
@@@ -1509,21 -1487,21 +1516,21 @@@
  	memcpy(search_claim.addr, ethhdr->h_source, ETH_ALEN);
  	search_claim.vid = vid;
  
 -	claim = claim_hash_find(bat_priv, &search_claim);
 +	claim = batadv_claim_hash_find(bat_priv, &search_claim);
  
  	/* if no claim exists, allow it. */
  	if (!claim)
  		goto allow;
  
  	/* check if we are responsible. */
 -	if (compare_eth(claim->backbone_gw->orig,
 -			primary_if->net_dev->dev_addr)) {
 +	if (batadv_compare_eth(claim->backbone_gw->orig,
 +			       primary_if->net_dev->dev_addr)) {
  		/* if yes, the client has roamed and we have
  		 * to unclaim it.
  		 */
 -		handle_unclaim(bat_priv, primary_if,
 -			       primary_if->net_dev->dev_addr,
 -			       ethhdr->h_source, vid);
 +		batadv_handle_unclaim(bat_priv, primary_if,
 +				      primary_if->net_dev->dev_addr,
 +				      ethhdr->h_source, vid);
  		goto allow;
  	}
  
@@@ -1540,34 -1518,33 +1547,34 @@@
  		goto allow;
  	}
  allow:
 -	bla_update_own_backbone_gw(bat_priv, primary_if, vid);
 +	batadv_bla_update_own_backbone_gw(bat_priv, primary_if, vid);
  	ret = 0;
  	goto out;
  handled:
  	ret = 1;
  out:
  	if (primary_if)
 -		hardif_free_ref(primary_if);
 +		batadv_hardif_free_ref(primary_if);
  	if (claim)
 -		claim_free_ref(claim);
 +		batadv_claim_free_ref(claim);
  	return ret;
  }
  
 -int bla_claim_table_seq_print_text(struct seq_file *seq, void *offset)
 +int batadv_bla_claim_table_seq_print_text(struct seq_file *seq, void *offset)
  {
  	struct net_device *net_dev = (struct net_device *)seq->private;
 -	struct bat_priv *bat_priv = netdev_priv(net_dev);
 -	struct hashtable_t *hash = bat_priv->claim_hash;
 -	struct claim *claim;
 -	struct hard_iface *primary_if;
 +	struct batadv_priv *bat_priv = netdev_priv(net_dev);
 +	struct batadv_hashtable *hash = bat_priv->claim_hash;
 +	struct batadv_claim *claim;
 +	struct batadv_hard_iface *primary_if;
  	struct hlist_node *node;
  	struct hlist_head *head;
  	uint32_t i;
  	bool is_own;
  	int ret = 0;
 +	uint8_t *primary_addr;
  
 -	primary_if = primary_if_get_selected(bat_priv);
 +	primary_if = batadv_primary_if_get_selected(bat_priv);
  	if (!primary_if) {
  		ret = seq_printf(seq,
  				 "BATMAN mesh %s disabled - please specify interfaces to enable it\n",
@@@ -1575,17 -1552,16 +1582,17 @@@
  		goto out;
  	}
  
 -	if (primary_if->if_status != IF_ACTIVE) {
 +	if (primary_if->if_status != BATADV_IF_ACTIVE) {
  		ret = seq_printf(seq,
  				 "BATMAN mesh %s disabled - primary interface not active\n",
  				 net_dev->name);
  		goto out;
  	}
  
 +	primary_addr = primary_if->net_dev->dev_addr;
  	seq_printf(seq,
  		   "Claims announced for the mesh %s (orig %pM, group id %04x)\n",
 -		   net_dev->name, primary_if->net_dev->dev_addr,
 +		   net_dev->name, primary_addr,
  		   ntohs(bat_priv->claim_dest.group));
  	seq_printf(seq, "   %-17s    %-5s    %-17s [o] (%-4s)\n",
  		   "Client", "VID", "Originator", "CRC");
@@@ -1594,8 -1570,8 +1601,8 @@@
  
  		rcu_read_lock();
  		hlist_for_each_entry_rcu(claim, node, head, hash_entry) {
 -			is_own = compare_eth(claim->backbone_gw->orig,
 -					     primary_if->net_dev->dev_addr);
 +			is_own = batadv_compare_eth(claim->backbone_gw->orig,
 +						    primary_addr);
  			seq_printf(seq,	" * %pM on % 5d by %pM [%c] (%04x)\n",
  				   claim->addr, claim->vid,
  				   claim->backbone_gw->orig,
@@@ -1606,6 -1582,6 +1613,6 @@@
  	}
  out:
  	if (primary_if)
 -		hardif_free_ref(primary_if);
 +		batadv_hardif_free_ref(primary_if);
  	return ret;
  }
diff --combined net/batman-adv/bridge_loop_avoidance.h
index 08d13cb,dc5227b..563cfbf
--- a/net/batman-adv/bridge_loop_avoidance.h
+++ b/net/batman-adv/bridge_loop_avoidance.h
@@@ -1,4 -1,5 +1,4 @@@
 -/*
 - * Copyright (C) 2011-2012 B.A.T.M.A.N. contributors:
 +/* Copyright (C) 2011-2012 B.A.T.M.A.N. contributors:
   *
   * Simon Wunderlich
   *
@@@ -15,82 -16,81 +15,84 @@@
   * along with this program; if not, write to the Free Software
   * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
   * 02110-1301, USA
 - *
   */
  
  #ifndef _NET_BATMAN_ADV_BLA_H_
  #define _NET_BATMAN_ADV_BLA_H_
  
  #ifdef CONFIG_BATMAN_ADV_BLA
- int batadv_bla_rx(struct batadv_priv *bat_priv, struct sk_buff *skb, short vid);
 -int bla_rx(struct bat_priv *bat_priv, struct sk_buff *skb, short vid,
 -	   bool is_bcast);
 -int bla_tx(struct bat_priv *bat_priv, struct sk_buff *skb, short vid);
 -int bla_is_backbone_gw(struct sk_buff *skb,
 -		       struct orig_node *orig_node, int hdr_size);
 -int bla_claim_table_seq_print_text(struct seq_file *seq, void *offset);
 -int bla_is_backbone_gw_orig(struct bat_priv *bat_priv, uint8_t *orig);
 -int bla_check_bcast_duplist(struct bat_priv *bat_priv,
 -			    struct bcast_packet *bcast_packet, int hdr_size);
 -void bla_update_orig_address(struct bat_priv *bat_priv,
 -			     struct hard_iface *primary_if,
 -			     struct hard_iface *oldif);
 -int bla_init(struct bat_priv *bat_priv);
 -void bla_free(struct bat_priv *bat_priv);
++int batadv_bla_rx(struct batadv_priv *bat_priv, struct sk_buff *skb, short vid,
++		  bool is_bcast);
 +int batadv_bla_tx(struct batadv_priv *bat_priv, struct sk_buff *skb, short vid);
 +int batadv_bla_is_backbone_gw(struct sk_buff *skb,
 +			      struct batadv_orig_node *orig_node, int hdr_size);
 +int batadv_bla_claim_table_seq_print_text(struct seq_file *seq, void *offset);
 +int batadv_bla_is_backbone_gw_orig(struct batadv_priv *bat_priv, uint8_t *orig);
 +int batadv_bla_check_bcast_duplist(struct batadv_priv *bat_priv,
 +				   struct batadv_bcast_packet *bcast_packet,
 +				   int hdr_size);
 +void batadv_bla_update_orig_address(struct batadv_priv *bat_priv,
 +				    struct batadv_hard_iface *primary_if,
 +				    struct batadv_hard_iface *oldif);
 +int batadv_bla_init(struct batadv_priv *bat_priv);
 +void batadv_bla_free(struct batadv_priv *bat_priv);
  
 -#define BLA_CRC_INIT	0
 +#define BATADV_BLA_CRC_INIT	0
  #else /* ifdef CONFIG_BATMAN_ADV_BLA */
  
 -static inline int bla_rx(struct bat_priv *bat_priv, struct sk_buff *skb,
 -			 short vid, bool is_bcast)
 +static inline int batadv_bla_rx(struct batadv_priv *bat_priv,
- 				struct sk_buff *skb, short vid)
++				struct sk_buff *skb, short vid,
++				bool is_bcast)
  {
  	return 0;
  }
  
 -static inline int bla_tx(struct bat_priv *bat_priv, struct sk_buff *skb,
 -			 short vid)
 +static inline int batadv_bla_tx(struct batadv_priv *bat_priv,
 +				struct sk_buff *skb, short vid)
  {
  	return 0;
  }
  
 -static inline int bla_is_backbone_gw(struct sk_buff *skb,
 -				     struct orig_node *orig_node,
 -				     int hdr_size)
 +static inline int batadv_bla_is_backbone_gw(struct sk_buff *skb,
 +					    struct batadv_orig_node *orig_node,
 +					    int hdr_size)
  {
  	return 0;
  }
  
 -static inline int bla_claim_table_seq_print_text(struct seq_file *seq,
 -						 void *offset)
 +static inline int batadv_bla_claim_table_seq_print_text(struct seq_file *seq,
 +							void *offset)
  {
  	return 0;
  }
  
 -static inline int bla_is_backbone_gw_orig(struct bat_priv *bat_priv,
 -					  uint8_t *orig)
 +static inline int batadv_bla_is_backbone_gw_orig(struct batadv_priv *bat_priv,
 +						 uint8_t *orig)
  {
  	return 0;
  }
  
 -static inline int bla_check_bcast_duplist(struct bat_priv *bat_priv,
 -					  struct bcast_packet *bcast_packet,
 -					  int hdr_size)
 +static inline int
 +batadv_bla_check_bcast_duplist(struct batadv_priv *bat_priv,
 +			       struct batadv_bcast_packet *bcast_packet,
 +			       int hdr_size)
  {
  	return 0;
  }
  
 -static inline void bla_update_orig_address(struct bat_priv *bat_priv,
 -					   struct hard_iface *primary_if,
 -					   struct hard_iface *oldif)
 +static inline void
 +batadv_bla_update_orig_address(struct batadv_priv *bat_priv,
 +			       struct batadv_hard_iface *primary_if,
 +			       struct batadv_hard_iface *oldif)
  {
  }
  
 -static inline int bla_init(struct bat_priv *bat_priv)
 +static inline int batadv_bla_init(struct batadv_priv *bat_priv)
  {
  	return 1;
  }
  
 -static inline void bla_free(struct bat_priv *bat_priv)
 +static inline void batadv_bla_free(struct batadv_priv *bat_priv)
  {
  }
  
diff --combined net/batman-adv/soft-interface.c
index 9e4bb61,a0ec0e4..109ea2a
--- a/net/batman-adv/soft-interface.c
+++ b/net/batman-adv/soft-interface.c
@@@ -1,4 -1,5 +1,4 @@@
 -/*
 - * Copyright (C) 2007-2012 B.A.T.M.A.N. contributors:
 +/* Copyright (C) 2007-2012 B.A.T.M.A.N. contributors:
   *
   * Marek Lindner, Simon Wunderlich
   *
@@@ -15,6 -16,7 +15,6 @@@
   * along with this program; if not, write to the Free Software
   * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
   * 02110-1301, USA
 - *
   */
  
  #include "main.h"
@@@ -22,12 -24,12 +22,12 @@@
  #include "hard-interface.h"
  #include "routing.h"
  #include "send.h"
 -#include "bat_debugfs.h"
 +#include "debugfs.h"
  #include "translation-table.h"
  #include "hash.h"
  #include "gateway_common.h"
  #include "gateway_client.h"
 -#include "bat_sysfs.h"
 +#include "sysfs.h"
  #include "originator.h"
  #include <linux/slab.h>
  #include <linux/ethtool.h>
@@@ -37,33 -39,27 +37,33 @@@
  #include "bridge_loop_avoidance.h"
  
  
 -static int bat_get_settings(struct net_device *dev, struct ethtool_cmd *cmd);
 -static void bat_get_drvinfo(struct net_device *dev,
 -			    struct ethtool_drvinfo *info);
 -static u32 bat_get_msglevel(struct net_device *dev);
 -static void bat_set_msglevel(struct net_device *dev, u32 value);
 -static u32 bat_get_link(struct net_device *dev);
 -
 -static const struct ethtool_ops bat_ethtool_ops = {
 -	.get_settings = bat_get_settings,
 -	.get_drvinfo = bat_get_drvinfo,
 -	.get_msglevel = bat_get_msglevel,
 -	.set_msglevel = bat_set_msglevel,
 -	.get_link = bat_get_link,
 +static int batadv_get_settings(struct net_device *dev, struct ethtool_cmd *cmd);
 +static void batadv_get_drvinfo(struct net_device *dev,
 +			       struct ethtool_drvinfo *info);
 +static u32 batadv_get_msglevel(struct net_device *dev);
 +static void batadv_set_msglevel(struct net_device *dev, u32 value);
 +static u32 batadv_get_link(struct net_device *dev);
 +static void batadv_get_strings(struct net_device *dev, u32 stringset, u8 *data);
 +static void batadv_get_ethtool_stats(struct net_device *dev,
 +				     struct ethtool_stats *stats, u64 *data);
 +static int batadv_get_sset_count(struct net_device *dev, int stringset);
 +
 +static const struct ethtool_ops batadv_ethtool_ops = {
 +	.get_settings = batadv_get_settings,
 +	.get_drvinfo = batadv_get_drvinfo,
 +	.get_msglevel = batadv_get_msglevel,
 +	.set_msglevel = batadv_set_msglevel,
 +	.get_link = batadv_get_link,
 +	.get_strings = batadv_get_strings,
 +	.get_ethtool_stats = batadv_get_ethtool_stats,
 +	.get_sset_count = batadv_get_sset_count,
  };
  
 -int my_skb_head_push(struct sk_buff *skb, unsigned int len)
 +int batadv_skb_head_push(struct sk_buff *skb, unsigned int len)
  {
  	int result;
  
 -	/**
 -	 * TODO: We must check if we can release all references to non-payload
 +	/* TODO: We must check if we can release all references to non-payload
  	 * data using skb_header_release in our skbs to allow skb_cow_header to
  	 * work optimally. This means that those skbs are not allowed to read
  	 * or write any data which is before the current position of skb->data
@@@ -78,37 -74,37 +78,37 @@@
  	return 0;
  }
  
 -static int interface_open(struct net_device *dev)
 +static int batadv_interface_open(struct net_device *dev)
  {
  	netif_start_queue(dev);
  	return 0;
  }
  
 -static int interface_release(struct net_device *dev)
 +static int batadv_interface_release(struct net_device *dev)
  {
  	netif_stop_queue(dev);
  	return 0;
  }
  
 -static struct net_device_stats *interface_stats(struct net_device *dev)
 +static struct net_device_stats *batadv_interface_stats(struct net_device *dev)
  {
 -	struct bat_priv *bat_priv = netdev_priv(dev);
 +	struct batadv_priv *bat_priv = netdev_priv(dev);
  	return &bat_priv->stats;
  }
  
 -static int interface_set_mac_addr(struct net_device *dev, void *p)
 +static int batadv_interface_set_mac_addr(struct net_device *dev, void *p)
  {
 -	struct bat_priv *bat_priv = netdev_priv(dev);
 +	struct batadv_priv *bat_priv = netdev_priv(dev);
  	struct sockaddr *addr = p;
  
  	if (!is_valid_ether_addr(addr->sa_data))
  		return -EADDRNOTAVAIL;
  
  	/* only modify transtable if it has been initialized before */
 -	if (atomic_read(&bat_priv->mesh_state) == MESH_ACTIVE) {
 -		tt_local_remove(bat_priv, dev->dev_addr,
 -				"mac address changed", false);
 -		tt_local_add(dev, addr->sa_data, NULL_IFINDEX);
 +	if (atomic_read(&bat_priv->mesh_state) == BATADV_MESH_ACTIVE) {
 +		batadv_tt_local_remove(bat_priv, dev->dev_addr,
 +				       "mac address changed", false);
 +		batadv_tt_local_add(dev, addr->sa_data, BATADV_NULL_IFINDEX);
  	}
  
  	memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);
@@@ -116,10 -112,10 +116,10 @@@
  	return 0;
  }
  
 -static int interface_change_mtu(struct net_device *dev, int new_mtu)
 +static int batadv_interface_change_mtu(struct net_device *dev, int new_mtu)
  {
  	/* check ranges */
 -	if ((new_mtu < 68) || (new_mtu > hardif_min_mtu(dev)))
 +	if ((new_mtu < 68) || (new_mtu > batadv_hardif_min_mtu(dev)))
  		return -EINVAL;
  
  	dev->mtu = new_mtu;
@@@ -127,15 -123,13 +127,15 @@@
  	return 0;
  }
  
 -static int interface_tx(struct sk_buff *skb, struct net_device *soft_iface)
 +static int batadv_interface_tx(struct sk_buff *skb,
 +			       struct net_device *soft_iface)
  {
  	struct ethhdr *ethhdr = (struct ethhdr *)skb->data;
 -	struct bat_priv *bat_priv = netdev_priv(soft_iface);
 -	struct hard_iface *primary_if = NULL;
 -	struct bcast_packet *bcast_packet;
 +	struct batadv_priv *bat_priv = netdev_priv(soft_iface);
 +	struct batadv_hard_iface *primary_if = NULL;
 +	struct batadv_bcast_packet *bcast_packet;
  	struct vlan_ethhdr *vhdr;
 +	__be16 ethertype = __constant_htons(BATADV_ETH_P_BATMAN);
  	static const uint8_t stp_addr[ETH_ALEN] = {0x01, 0x80, 0xC2, 0x00, 0x00,
  						   0x00};
  	unsigned int header_len = 0;
@@@ -143,7 -137,7 +143,7 @@@
  	short vid __maybe_unused = -1;
  	bool do_bcast = false;
  
 -	if (atomic_read(&bat_priv->mesh_state) != MESH_ACTIVE)
 +	if (atomic_read(&bat_priv->mesh_state) != BATADV_MESH_ACTIVE)
  		goto dropped;
  
  	soft_iface->trans_start = jiffies;
@@@ -153,47 -147,45 +153,47 @@@
  		vhdr = (struct vlan_ethhdr *)skb->data;
  		vid = ntohs(vhdr->h_vlan_TCI) & VLAN_VID_MASK;
  
 -		if (ntohs(vhdr->h_vlan_encapsulated_proto) != ETH_P_BATMAN)
 +		if (vhdr->h_vlan_encapsulated_proto != ethertype)
  			break;
  
  		/* fall through */
 -	case ETH_P_BATMAN:
 +	case BATADV_ETH_P_BATMAN:
  		goto dropped;
  	}
  
 -	if (bla_tx(bat_priv, skb, vid))
 +	if (batadv_bla_tx(bat_priv, skb, vid))
  		goto dropped;
  
  	/* Register the client MAC in the transtable */
 -	tt_local_add(soft_iface, ethhdr->h_source, skb->skb_iif);
 +	batadv_tt_local_add(soft_iface, ethhdr->h_source, skb->skb_iif);
  
  	/* don't accept stp packets. STP does not help in meshes.
  	 * better use the bridge loop avoidance ...
  	 */
 -	if (compare_eth(ethhdr->h_dest, stp_addr))
 +	if (batadv_compare_eth(ethhdr->h_dest, stp_addr))
  		goto dropped;
  
  	if (is_multicast_ether_addr(ethhdr->h_dest)) {
  		do_bcast = true;
  
  		switch (atomic_read(&bat_priv->gw_mode)) {
 -		case GW_MODE_SERVER:
 +		case BATADV_GW_MODE_SERVER:
  			/* gateway servers should not send dhcp
 -			 * requests into the mesh */
 -			ret = gw_is_dhcp_target(skb, &header_len);
 +			 * requests into the mesh
 +			 */
 +			ret = batadv_gw_is_dhcp_target(skb, &header_len);
  			if (ret)
  				goto dropped;
  			break;
 -		case GW_MODE_CLIENT:
 +		case BATADV_GW_MODE_CLIENT:
  			/* gateway clients should send dhcp requests
 -			 * via unicast to their gateway */
 -			ret = gw_is_dhcp_target(skb, &header_len);
 +			 * via unicast to their gateway
 +			 */
 +			ret = batadv_gw_is_dhcp_target(skb, &header_len);
  			if (ret)
  				do_bcast = false;
  			break;
 -		case GW_MODE_OFF:
 +		case BATADV_GW_MODE_OFF:
  		default:
  			break;
  		}
@@@ -201,24 -193,22 +201,24 @@@
  
  	/* ethernet packet should be broadcasted */
  	if (do_bcast) {
 -		primary_if = primary_if_get_selected(bat_priv);
 +		primary_if = batadv_primary_if_get_selected(bat_priv);
  		if (!primary_if)
  			goto dropped;
  
 -		if (my_skb_head_push(skb, sizeof(*bcast_packet)) < 0)
 +		if (batadv_skb_head_push(skb, sizeof(*bcast_packet)) < 0)
  			goto dropped;
  
 -		bcast_packet = (struct bcast_packet *)skb->data;
 -		bcast_packet->header.version = COMPAT_VERSION;
 -		bcast_packet->header.ttl = TTL;
 +		bcast_packet = (struct batadv_bcast_packet *)skb->data;
 +		bcast_packet->header.version = BATADV_COMPAT_VERSION;
 +		bcast_packet->header.ttl = BATADV_TTL;
  
  		/* batman packet type: broadcast */
 -		bcast_packet->header.packet_type = BAT_BCAST;
 +		bcast_packet->header.packet_type = BATADV_BCAST;
 +		bcast_packet->reserved = 0;
  
  		/* hw address of first interface is the orig mac because only
 -		 * this mac is known throughout the mesh */
 +		 * this mac is known throughout the mesh
 +		 */
  		memcpy(bcast_packet->orig,
  		       primary_if->net_dev->dev_addr, ETH_ALEN);
  
@@@ -226,22 -216,21 +226,22 @@@
  		bcast_packet->seqno =
  			htonl(atomic_inc_return(&bat_priv->bcast_seqno));
  
 -		add_bcast_packet_to_list(bat_priv, skb, 1);
 +		batadv_add_bcast_packet_to_list(bat_priv, skb, 1);
  
  		/* a copy is stored in the bcast list, therefore removing
 -		 * the original skb. */
 +		 * the original skb.
 +		 */
  		kfree_skb(skb);
  
  	/* unicast packet */
  	} else {
 -		if (atomic_read(&bat_priv->gw_mode) != GW_MODE_OFF) {
 -			ret = gw_out_of_range(bat_priv, skb, ethhdr);
 +		if (atomic_read(&bat_priv->gw_mode) != BATADV_GW_MODE_OFF) {
 +			ret = batadv_gw_out_of_range(bat_priv, skb, ethhdr);
  			if (ret)
  				goto dropped;
  		}
  
 -		ret = unicast_send_skb(skb, bat_priv);
 +		ret = batadv_unicast_send_skb(skb, bat_priv);
  		if (ret != 0)
  			goto dropped_freed;
  	}
@@@ -256,19 -245,22 +256,23 @@@ dropped_freed
  	bat_priv->stats.tx_dropped++;
  end:
  	if (primary_if)
 -		hardif_free_ref(primary_if);
 +		batadv_hardif_free_ref(primary_if);
  	return NETDEV_TX_OK;
  }
  
 -void interface_rx(struct net_device *soft_iface,
 -		  struct sk_buff *skb, struct hard_iface *recv_if,
 -		  int hdr_size)
 +void batadv_interface_rx(struct net_device *soft_iface,
 +			 struct sk_buff *skb, struct batadv_hard_iface *recv_if,
 +			 int hdr_size)
  {
 -	struct bat_priv *bat_priv = netdev_priv(soft_iface);
 +	struct batadv_priv *bat_priv = netdev_priv(soft_iface);
  	struct ethhdr *ethhdr;
  	struct vlan_ethhdr *vhdr;
 -	struct batman_header *batadv_header = (struct batman_header *)skb->data;
++	struct batadv_header *batadv_header = (struct batadv_header *)skb->data;
  	short vid __maybe_unused = -1;
 +	__be16 ethertype = __constant_htons(BATADV_ETH_P_BATMAN);
+ 	bool is_bcast;
+ 
 -	is_bcast = (batadv_header->packet_type == BAT_BCAST);
++	is_bcast = (batadv_header->packet_type == BATADV_BCAST);
  
  	/* check if enough space is available for pulling, and pull */
  	if (!pskb_may_pull(skb, hdr_size))
@@@ -284,11 -276,11 +288,11 @@@
  		vhdr = (struct vlan_ethhdr *)skb->data;
  		vid = ntohs(vhdr->h_vlan_TCI) & VLAN_VID_MASK;
  
 -		if (ntohs(vhdr->h_vlan_encapsulated_proto) != ETH_P_BATMAN)
 +		if (vhdr->h_vlan_encapsulated_proto != ethertype)
  			break;
  
  		/* fall through */
 -	case ETH_P_BATMAN:
 +	case BATADV_ETH_P_BATMAN:
  		goto dropped;
  	}
  
@@@ -299,23 -291,22 +303,23 @@@
  
  	/* should not be necessary anymore as we use skb_pull_rcsum()
  	 * TODO: please verify this and remove this TODO
 -	 * -- Dec 21st 2009, Simon Wunderlich */
 +	 * -- Dec 21st 2009, Simon Wunderlich
 +	 */
  
 -/*	skb->ip_summed = CHECKSUM_UNNECESSARY;*/
 +	/* skb->ip_summed = CHECKSUM_UNNECESSARY; */
  
  	bat_priv->stats.rx_packets++;
  	bat_priv->stats.rx_bytes += skb->len + ETH_HLEN;
  
  	soft_iface->last_rx = jiffies;
  
 -	if (is_ap_isolated(bat_priv, ethhdr->h_source, ethhdr->h_dest))
 +	if (batadv_is_ap_isolated(bat_priv, ethhdr->h_source, ethhdr->h_dest))
  		goto dropped;
  
  	/* Let the bridge loop avoidance check the packet. If will
  	 * not handle it, we can safely push it up.
  	 */
- 	if (batadv_bla_rx(bat_priv, skb, vid))
 -	if (bla_rx(bat_priv, skb, vid, is_bcast))
++	if (batadv_bla_rx(bat_priv, skb, vid, is_bcast))
  		goto out;
  
  	netif_rx(skb);
@@@ -327,50 -318,49 +331,50 @@@ out
  	return;
  }
  
 -static const struct net_device_ops bat_netdev_ops = {
 -	.ndo_open = interface_open,
 -	.ndo_stop = interface_release,
 -	.ndo_get_stats = interface_stats,
 -	.ndo_set_mac_address = interface_set_mac_addr,
 -	.ndo_change_mtu = interface_change_mtu,
 -	.ndo_start_xmit = interface_tx,
 +static const struct net_device_ops batadv_netdev_ops = {
 +	.ndo_open = batadv_interface_open,
 +	.ndo_stop = batadv_interface_release,
 +	.ndo_get_stats = batadv_interface_stats,
 +	.ndo_set_mac_address = batadv_interface_set_mac_addr,
 +	.ndo_change_mtu = batadv_interface_change_mtu,
 +	.ndo_start_xmit = batadv_interface_tx,
  	.ndo_validate_addr = eth_validate_addr
  };
  
 -static void interface_setup(struct net_device *dev)
 +static void batadv_interface_setup(struct net_device *dev)
  {
 -	struct bat_priv *priv = netdev_priv(dev);
 +	struct batadv_priv *priv = netdev_priv(dev);
  
  	ether_setup(dev);
  
 -	dev->netdev_ops = &bat_netdev_ops;
 +	dev->netdev_ops = &batadv_netdev_ops;
  	dev->destructor = free_netdev;
  	dev->tx_queue_len = 0;
  
 -	/**
 -	 * can't call min_mtu, because the needed variables
 +	/* can't call min_mtu, because the needed variables
  	 * have not been initialized yet
  	 */
  	dev->mtu = ETH_DATA_LEN;
  	/* reserve more space in the skbuff for our header */
 -	dev->hard_header_len = BAT_HEADER_LEN;
 +	dev->hard_header_len = BATADV_HEADER_LEN;
  
  	/* generate random address */
  	eth_hw_addr_random(dev);
  
 -	SET_ETHTOOL_OPS(dev, &bat_ethtool_ops);
 +	SET_ETHTOOL_OPS(dev, &batadv_ethtool_ops);
  
  	memset(priv, 0, sizeof(*priv));
  }
  
 -struct net_device *softif_create(const char *name)
 +struct net_device *batadv_softif_create(const char *name)
  {
  	struct net_device *soft_iface;
 -	struct bat_priv *bat_priv;
 +	struct batadv_priv *bat_priv;
  	int ret;
 +	size_t cnt_len = sizeof(uint64_t) * BATADV_CNT_NUM;
  
 -	soft_iface = alloc_netdev(sizeof(*bat_priv), name, interface_setup);
 +	soft_iface = alloc_netdev(sizeof(*bat_priv), name,
 +				  batadv_interface_setup);
  
  	if (!soft_iface)
  		goto out;
@@@ -388,18 -378,18 +392,18 @@@
  	atomic_set(&bat_priv->bonding, 0);
  	atomic_set(&bat_priv->bridge_loop_avoidance, 0);
  	atomic_set(&bat_priv->ap_isolation, 0);
 -	atomic_set(&bat_priv->vis_mode, VIS_TYPE_CLIENT_UPDATE);
 -	atomic_set(&bat_priv->gw_mode, GW_MODE_OFF);
 +	atomic_set(&bat_priv->vis_mode, BATADV_VIS_TYPE_CLIENT_UPDATE);
 +	atomic_set(&bat_priv->gw_mode, BATADV_GW_MODE_OFF);
  	atomic_set(&bat_priv->gw_sel_class, 20);
  	atomic_set(&bat_priv->gw_bandwidth, 41);
  	atomic_set(&bat_priv->orig_interval, 1000);
  	atomic_set(&bat_priv->hop_penalty, 30);
  	atomic_set(&bat_priv->log_level, 0);
  	atomic_set(&bat_priv->fragmentation, 1);
 -	atomic_set(&bat_priv->bcast_queue_left, BCAST_QUEUE_LEN);
 -	atomic_set(&bat_priv->batman_queue_left, BATMAN_QUEUE_LEN);
 +	atomic_set(&bat_priv->bcast_queue_left, BATADV_BCAST_QUEUE_LEN);
 +	atomic_set(&bat_priv->batman_queue_left, BATADV_BATMAN_QUEUE_LEN);
  
 -	atomic_set(&bat_priv->mesh_state, MESH_INACTIVE);
 +	atomic_set(&bat_priv->mesh_state, BATADV_MESH_INACTIVE);
  	atomic_set(&bat_priv->bcast_seqno, 1);
  	atomic_set(&bat_priv->ttvn, 0);
  	atomic_set(&bat_priv->tt_local_changes, 0);
@@@ -413,34 -403,28 +417,34 @@@
  	bat_priv->primary_if = NULL;
  	bat_priv->num_ifaces = 0;
  
 -	ret = bat_algo_select(bat_priv, bat_routing_algo);
 -	if (ret < 0)
 +	bat_priv->bat_counters = __alloc_percpu(cnt_len, __alignof__(uint64_t));
 +	if (!bat_priv->bat_counters)
  		goto unreg_soft_iface;
  
 -	ret = sysfs_add_meshif(soft_iface);
 +	ret = batadv_algo_select(bat_priv, batadv_routing_algo);
  	if (ret < 0)
 -		goto unreg_soft_iface;
 +		goto free_bat_counters;
  
 -	ret = debugfs_add_meshif(soft_iface);
 +	ret = batadv_sysfs_add_meshif(soft_iface);
 +	if (ret < 0)
 +		goto free_bat_counters;
 +
 +	ret = batadv_debugfs_add_meshif(soft_iface);
  	if (ret < 0)
  		goto unreg_sysfs;
  
 -	ret = mesh_init(soft_iface);
 +	ret = batadv_mesh_init(soft_iface);
  	if (ret < 0)
  		goto unreg_debugfs;
  
  	return soft_iface;
  
  unreg_debugfs:
 -	debugfs_del_meshif(soft_iface);
 +	batadv_debugfs_del_meshif(soft_iface);
  unreg_sysfs:
 -	sysfs_del_meshif(soft_iface);
 +	batadv_sysfs_del_meshif(soft_iface);
 +free_bat_counters:
 +	free_percpu(bat_priv->bat_counters);
  unreg_soft_iface:
  	unregister_netdevice(soft_iface);
  	return NULL;
@@@ -451,24 -435,24 +455,24 @@@ out
  	return NULL;
  }
  
 -void softif_destroy(struct net_device *soft_iface)
 +void batadv_softif_destroy(struct net_device *soft_iface)
  {
 -	debugfs_del_meshif(soft_iface);
 -	sysfs_del_meshif(soft_iface);
 -	mesh_free(soft_iface);
 +	batadv_debugfs_del_meshif(soft_iface);
 +	batadv_sysfs_del_meshif(soft_iface);
 +	batadv_mesh_free(soft_iface);
  	unregister_netdevice(soft_iface);
  }
  
 -int softif_is_valid(const struct net_device *net_dev)
 +int batadv_softif_is_valid(const struct net_device *net_dev)
  {
 -	if (net_dev->netdev_ops->ndo_start_xmit == interface_tx)
 +	if (net_dev->netdev_ops->ndo_start_xmit == batadv_interface_tx)
  		return 1;
  
  	return 0;
  }
  
  /* ethtool */
 -static int bat_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
 +static int batadv_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
  {
  	cmd->supported = 0;
  	cmd->advertising = 0;
@@@ -484,73 -468,25 +488,73 @@@
  	return 0;
  }
  
 -static void bat_get_drvinfo(struct net_device *dev,
 -			    struct ethtool_drvinfo *info)
 +static void batadv_get_drvinfo(struct net_device *dev,
 +			       struct ethtool_drvinfo *info)
  {
  	strcpy(info->driver, "B.A.T.M.A.N. advanced");
 -	strcpy(info->version, SOURCE_VERSION);
 +	strcpy(info->version, BATADV_SOURCE_VERSION);
  	strcpy(info->fw_version, "N/A");
  	strcpy(info->bus_info, "batman");
  }
  
 -static u32 bat_get_msglevel(struct net_device *dev)
 +static u32 batadv_get_msglevel(struct net_device *dev)
  {
  	return -EOPNOTSUPP;
  }
  
 -static void bat_set_msglevel(struct net_device *dev, u32 value)
 +static void batadv_set_msglevel(struct net_device *dev, u32 value)
  {
  }
  
 -static u32 bat_get_link(struct net_device *dev)
 +static u32 batadv_get_link(struct net_device *dev)
  {
  	return 1;
  }
 +
 +/* Inspired by drivers/net/ethernet/dlink/sundance.c:1702
 + * Declare each description string in struct.name[] to get fixed sized buffer
 + * and compile time checking for strings longer than ETH_GSTRING_LEN.
 + */
 +static const struct {
 +	const char name[ETH_GSTRING_LEN];
 +} batadv_counters_strings[] = {
 +	{ "forward" },
 +	{ "forward_bytes" },
 +	{ "mgmt_tx" },
 +	{ "mgmt_tx_bytes" },
 +	{ "mgmt_rx" },
 +	{ "mgmt_rx_bytes" },
 +	{ "tt_request_tx" },
 +	{ "tt_request_rx" },
 +	{ "tt_response_tx" },
 +	{ "tt_response_rx" },
 +	{ "tt_roam_adv_tx" },
 +	{ "tt_roam_adv_rx" },
 +};
 +
 +static void batadv_get_strings(struct net_device *dev, uint32_t stringset,
 +			       uint8_t *data)
 +{
 +	if (stringset == ETH_SS_STATS)
 +		memcpy(data, batadv_counters_strings,
 +		       sizeof(batadv_counters_strings));
 +}
 +
 +static void batadv_get_ethtool_stats(struct net_device *dev,
 +				     struct ethtool_stats *stats,
 +				     uint64_t *data)
 +{
 +	struct batadv_priv *bat_priv = netdev_priv(dev);
 +	int i;
 +
 +	for (i = 0; i < BATADV_CNT_NUM; i++)
 +		data[i] = batadv_sum_counter(bat_priv, i);
 +}
 +
 +static int batadv_get_sset_count(struct net_device *dev, int stringset)
 +{
 +	if (stringset == ETH_SS_STATS)
 +		return BATADV_CNT_NUM;
 +
 +	return -EOPNOTSUPP;
 +}
diff --combined net/core/dev.c
index 5ab6f4b,0f28a9e..73e87c7
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@@ -1691,8 -1691,7 +1691,8 @@@ static void dev_queue_xmit_nit(struct s
  	rcu_read_unlock();
  }
  
 -/* netif_setup_tc - Handle tc mappings on real_num_tx_queues change
 +/**
 + * netif_setup_tc - Handle tc mappings on real_num_tx_queues change
   * @dev: Network device
   * @txq: number of queues available
   *
@@@ -1794,18 -1793,6 +1794,18 @@@ int netif_set_real_num_rx_queues(struc
  EXPORT_SYMBOL(netif_set_real_num_rx_queues);
  #endif
  
 +/**
 + * netif_get_num_default_rss_queues - default number of RSS queues
 + *
 + * This routine should set an upper limit on the number of RSS queues
 + * used by default by multiqueue devices.
 + */
 +int netif_get_num_default_rss_queues(void)
 +{
 +	return min_t(int, DEFAULT_MAX_NUM_RSS_QUEUES, num_online_cpus());
 +}
 +EXPORT_SYMBOL(netif_get_num_default_rss_queues);
 +
  static inline void __netif_reschedule(struct Qdisc *q)
  {
  	struct softnet_data *sd;
@@@ -2457,8 -2444,12 +2457,12 @@@ static void skb_update_prio(struct sk_b
  {
  	struct netprio_map *map = rcu_dereference_bh(skb->dev->priomap);
  
- 	if ((!skb->priority) && (skb->sk) && map)
- 		skb->priority = map->priomap[skb->sk->sk_cgrp_prioidx];
+ 	if (!skb->priority && skb->sk && map) {
+ 		unsigned int prioidx = skb->sk->sk_cgrp_prioidx;
+ 
+ 		if (prioidx < map->priomap_len)
+ 			skb->priority = map->priomap[prioidx];
+ 	}
  }
  #else
  #define skb_update_prio(skb)
@@@ -2468,23 -2459,6 +2472,23 @@@ static DEFINE_PER_CPU(int, xmit_recursi
  #define RECURSION_LIMIT 10
  
  /**
 + *	dev_loopback_xmit - loop back @skb
 + *	@skb: buffer to transmit
 + */
 +int dev_loopback_xmit(struct sk_buff *skb)
 +{
 +	skb_reset_mac_header(skb);
 +	__skb_pull(skb, skb_network_offset(skb));
 +	skb->pkt_type = PACKET_LOOPBACK;
 +	skb->ip_summed = CHECKSUM_UNNECESSARY;
 +	WARN_ON(!skb_dst(skb));
 +	skb_dst_force(skb);
 +	netif_rx_ni(skb);
 +	return 0;
 +}
 +EXPORT_SYMBOL(dev_loopback_xmit);
 +
 +/**
   *	dev_queue_xmit - transmit a buffer
   *	@skb: buffer to transmit
   *
@@@ -5672,7 -5646,7 +5676,7 @@@ int netdev_refcnt_read(const struct net
  }
  EXPORT_SYMBOL(netdev_refcnt_read);
  
 -/*
 +/**
   * netdev_wait_allrefs - wait until all references are gone.
   *
   * This is called when unregistering network devices.
diff --combined net/mac80211/mlme.c
index e6fe84a,0db5d34..aa69a33
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@@ -258,11 -258,12 +258,11 @@@ static int ieee80211_compatible_rates(c
  }
  
  static void ieee80211_add_ht_ie(struct ieee80211_sub_if_data *sdata,
 -				struct sk_buff *skb, const u8 *ht_oper_ie,
 +				struct sk_buff *skb, u8 ap_ht_param,
  				struct ieee80211_supported_band *sband,
  				struct ieee80211_channel *channel,
  				enum ieee80211_smps_mode smps)
  {
 -	struct ieee80211_ht_operation *ht_oper;
  	u8 *pos;
  	u32 flags = channel->flags;
  	u16 cap;
@@@ -270,13 -271,21 +270,13 @@@
  
  	BUILD_BUG_ON(sizeof(ht_cap) != sizeof(sband->ht_cap));
  
  	memcpy(&ht_cap, &sband->ht_cap, sizeof(ht_cap));
  	ieee80211_apply_htcap_overrides(sdata, &ht_cap);
  
  	/* determine capability flags */
  	cap = ht_cap.cap;
  
 -	switch (ht_oper->ht_param & IEEE80211_HT_PARAM_CHA_SEC_OFFSET) {
 +	switch (ap_ht_param & IEEE80211_HT_PARAM_CHA_SEC_OFFSET) {
  	case IEEE80211_HT_PARAM_CHA_SEC_ABOVE:
  		if (flags & IEEE80211_CHAN_NO_HT40PLUS) {
  			cap &= ~IEEE80211_HT_CAP_SUP_WIDTH_20_40;
@@@ -500,7 -509,7 +500,7 @@@ static void ieee80211_send_assoc(struc
  	}
  
  	if (!(ifmgd->flags & IEEE80211_STA_DISABLE_11N))
 -		ieee80211_add_ht_ie(sdata, skb, assoc_data->ht_operation_ie,
 +		ieee80211_add_ht_ie(sdata, skb, assoc_data->ap_ht_param,
  				    sband, local->oper_channel, ifmgd->ap_smps);
  
  	/* if present, add any custom non-vendor IEs that go after HT */
@@@ -930,6 -939,11 +930,6 @@@ void ieee80211_recalc_ps(struct ieee802
  		return;
  	}
  
 -	if (!list_empty(&local->work_list)) {
 -		local->ps_sdata = NULL;
 -		goto change;
 -	}
 -
  	list_for_each_entry(sdata, &local->interfaces, list) {
  		if (!ieee80211_sdata_running(sdata))
  			continue;
@@@ -1002,6 -1016,7 +1002,6 @@@
  		local->ps_sdata = NULL;
  	}
  
 - change:
  	ieee80211_change_ps(local);
  }
  
@@@ -1141,7 -1156,7 +1141,7 @@@ static void ieee80211_sta_wmm_params(st
  
  	memset(&params, 0, sizeof(params));
  
 -	local->wmm_acm = 0;
 +	sdata->wmm_acm = 0;
  	for (; left >= 4; left -= 4, pos += 4) {
  		int aci = (pos[0] >> 5) & 0x03;
  		int acm = (pos[0] >> 4) & 0x01;
@@@ -1152,21 -1167,21 +1152,21 @@@
  		case 1: /* AC_BK */
  			queue = 3;
  			if (acm)
 -				local->wmm_acm |= BIT(1) | BIT(2); /* BK/- */
 +				sdata->wmm_acm |= BIT(1) | BIT(2); /* BK/- */
  			if (uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BK)
  				uapsd = true;
  			break;
  		case 2: /* AC_VI */
  			queue = 1;
  			if (acm)
 -				local->wmm_acm |= BIT(4) | BIT(5); /* CL/VI */
 +				sdata->wmm_acm |= BIT(4) | BIT(5); /* CL/VI */
  			if (uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VI)
  				uapsd = true;
  			break;
  		case 3: /* AC_VO */
  			queue = 0;
  			if (acm)
 -				local->wmm_acm |= BIT(6) | BIT(7); /* VO/NC */
 +				sdata->wmm_acm |= BIT(6) | BIT(7); /* VO/NC */
  			if (uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VO)
  				uapsd = true;
  			break;
@@@ -1174,7 -1189,7 +1174,7 @@@
  		default:
  			queue = 2;
  			if (acm)
 -				local->wmm_acm |= BIT(0) | BIT(3); /* BE/EE */
 +				sdata->wmm_acm |= BIT(0) | BIT(3); /* BE/EE */
  			if (uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BE)
  				uapsd = true;
  			break;
@@@ -1186,16 -1201,19 +1186,16 @@@
  		params.txop = get_unaligned_le16(pos + 2);
  		params.uapsd = uapsd;
  
 -#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
 -		wiphy_debug(local->hw.wiphy,
 -			    "WMM queue=%d aci=%d acm=%d aifs=%d "
 -			    "cWmin=%d cWmax=%d txop=%d uapsd=%d\n",
 -			    queue, aci, acm,
 -			    params.aifs, params.cw_min, params.cw_max,
 -			    params.txop, params.uapsd);
 -#endif
 +		mlme_dbg(sdata,
 +			 "WMM queue=%d aci=%d acm=%d aifs=%d cWmin=%d cWmax=%d txop=%d uapsd=%d\n",
 +			 queue, aci, acm,
 +			 params.aifs, params.cw_min, params.cw_max,
 +			 params.txop, params.uapsd);
  		sdata->tx_conf[queue] = params;
  		if (drv_conf_tx(local, sdata, queue, &params))
 -			wiphy_debug(local->hw.wiphy,
 -				    "failed to set TX queue parameters for queue %d\n",
 -				    queue);
 +			sdata_err(sdata,
 +				  "failed to set TX queue parameters for queue %d\n",
 +				  queue);
  	}
  
  	/* enable WMM or activate new settings */
@@@ -1272,7 -1290,7 +1272,7 @@@ static void ieee80211_set_associated(st
  
  	bss_info_changed |= BSS_CHANGED_BEACON_INT;
  	bss_info_changed |= ieee80211_handle_bss_capability(sdata,
 -		cbss->capability, bss->has_erp_value, bss->erp_value);
 +		bss_conf->assoc_capability, bss->has_erp_value, bss->erp_value);
  
  	sdata->u.mgd.beacon_timeout = usecs_to_jiffies(ieee80211_tu_to_usec(
  		IEEE80211_BEACON_LOSS_COUNT * bss_conf->beacon_int));
@@@ -1563,12 -1581,11 +1563,12 @@@ static void ieee80211_mgd_probe_ap(stru
  		goto out;
  	}
  
 -#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
  	if (beacon)
 -		net_dbg_ratelimited("%s: detected beacon loss from AP - sending probe request\n",
 -				    sdata->name);
 -#endif
 +		mlme_dbg_ratelimited(sdata,
 +				     "detected beacon loss from AP - sending probe request\n");
 +
 +	ieee80211_cqm_rssi_notify(&sdata->vif,
 +		NL80211_CQM_RSSI_BEACON_LOSS_EVENT, GFP_KERNEL);
  
  	/*
  	 * The driver/our work has already reported this event or the
@@@ -1651,7 -1668,8 +1651,7 @@@ static void __ieee80211_connection_loss
  
  	memcpy(bssid, ifmgd->associated->bssid, ETH_ALEN);
  
 -	printk(KERN_DEBUG "%s: Connection to AP %pM lost.\n",
 -	       sdata->name, bssid);
 +	sdata_info(sdata, "Connection to AP %pM lost\n", bssid);
  
  	ieee80211_set_disassoc(sdata, IEEE80211_STYPE_DEAUTH,
  			       WLAN_REASON_DISASSOC_DUE_TO_INACTIVITY,
@@@ -1785,10 -1803,9 +1785,10 @@@ ieee80211_rx_mgmt_auth(struct ieee80211
  		return RX_MGMT_NONE;
  
  	if (status_code != WLAN_STATUS_SUCCESS) {
 -		printk(KERN_DEBUG "%s: %pM denied authentication (status %d)\n",
 -		       sdata->name, mgmt->sa, status_code);
 -		goto out;
 +		sdata_info(sdata, "%pM denied authentication (status %d)\n",
 +			   mgmt->sa, status_code);
 +		ieee80211_destroy_auth_data(sdata, false);
 +		return RX_MGMT_CFG80211_RX_AUTH;
  	}
  
  	switch (ifmgd->auth_data->algorithm) {
@@@ -1809,7 -1826,8 +1809,7 @@@
  		return RX_MGMT_NONE;
  	}
  
 -	printk(KERN_DEBUG "%s: authenticated\n", sdata->name);
 - out:
 +	sdata_info(sdata, "authenticated\n");
  	ifmgd->auth_data->done = true;
  	ifmgd->auth_data->timeout = jiffies + IEEE80211_AUTH_WAIT_ASSOC;
  	run_again(ifmgd, ifmgd->auth_data->timeout);
@@@ -1822,7 -1840,8 +1822,7 @@@
  		goto out_err;
  	}
  	if (sta_info_move_state(sta, IEEE80211_STA_AUTH)) {
 -		printk(KERN_DEBUG "%s: failed moving %pM to auth\n",
 -		       sdata->name, bssid);
 +		sdata_info(sdata, "failed moving %pM to auth\n", bssid);
  		goto out_err;
  	}
  	mutex_unlock(&sdata->local->sta_mtx);
@@@ -1856,8 -1875,8 +1856,8 @@@ ieee80211_rx_mgmt_deauth(struct ieee802
  
  	reason_code = le16_to_cpu(mgmt->u.deauth.reason_code);
  
 -	printk(KERN_DEBUG "%s: deauthenticated from %pM (Reason: %u)\n",
 -			sdata->name, bssid, reason_code);
 +	sdata_info(sdata, "deauthenticated from %pM (Reason: %u)\n",
 +		   bssid, reason_code);
  
  	ieee80211_set_disassoc(sdata, 0, 0, false, NULL);
  
@@@ -1887,8 -1906,8 +1887,8 @@@ ieee80211_rx_mgmt_disassoc(struct ieee8
  
  	reason_code = le16_to_cpu(mgmt->u.disassoc.reason_code);
  
 -	printk(KERN_DEBUG "%s: disassociated from %pM (Reason: %u)\n",
 -			sdata->name, mgmt->sa, reason_code);
 +	sdata_info(sdata, "disassociated from %pM (Reason: %u)\n",
 +		   mgmt->sa, reason_code);
  
  	ieee80211_set_disassoc(sdata, 0, 0, false, NULL);
  
@@@ -1980,15 -1999,17 +1980,15 @@@ static bool ieee80211_assoc_success(str
  	capab_info = le16_to_cpu(mgmt->u.assoc_resp.capab_info);
  
  	if ((aid & (BIT(15) | BIT(14))) != (BIT(15) | BIT(14)))
 -		printk(KERN_DEBUG
 -		       "%s: invalid AID value 0x%x; bits 15:14 not set\n",
 -		       sdata->name, aid);
 +		sdata_info(sdata, "invalid AID value 0x%x; bits 15:14 not set\n",
 +			   aid);
  	aid &= ~(BIT(15) | BIT(14));
  
  	ifmgd->broken_ap = false;
  
  	if (aid == 0 || aid > IEEE80211_MAX_AID) {
 -		printk(KERN_DEBUG
 -		       "%s: invalid AID value %d (out of range), turn off PS\n",
 -		       sdata->name, aid);
 +		sdata_info(sdata, "invalid AID value %d (out of range), turn off PS\n",
 +			   aid);
  		aid = 0;
  		ifmgd->broken_ap = true;
  	}
@@@ -1997,7 -2018,8 +1997,7 @@@
  	ieee802_11_parse_elems(pos, len - (pos - (u8 *) mgmt), &elems);
  
  	if (!elems.supp_rates) {
 -		printk(KERN_DEBUG "%s: no SuppRates element in AssocResp\n",
 -		       sdata->name);
 +		sdata_info(sdata, "no SuppRates element in AssocResp\n");
  		return false;
  	}
  
@@@ -2037,9 -2059,9 +2037,9 @@@
  	if (!err && !(ifmgd->flags & IEEE80211_STA_CONTROL_PORT))
  		err = sta_info_move_state(sta, IEEE80211_STA_AUTHORIZED);
  	if (err) {
 -		printk(KERN_DEBUG
 -		       "%s: failed to move station %pM to desired state\n",
 -		       sdata->name, sta->sta.addr);
 +		sdata_info(sdata,
 +			   "failed to move station %pM to desired state\n",
 +			   sta->sta.addr);
  		WARN_ON(__sta_info_destroy(sta));
  		mutex_unlock(&sdata->local->sta_mtx);
  		return false;
@@@ -2122,10 -2144,10 +2122,10 @@@ ieee80211_rx_mgmt_assoc_resp(struct iee
  	status_code = le16_to_cpu(mgmt->u.assoc_resp.status_code);
  	aid = le16_to_cpu(mgmt->u.assoc_resp.aid);
  
 -	printk(KERN_DEBUG "%s: RX %sssocResp from %pM (capab=0x%x "
 -	       "status=%d aid=%d)\n",
 -	       sdata->name, reassoc ? "Rea" : "A", mgmt->sa,
 -	       capab_info, status_code, (u16)(aid & ~(BIT(15) | BIT(14))));
 +	sdata_info(sdata,
 +		   "RX %sssocResp from %pM (capab=0x%x status=%d aid=%d)\n",
 +		   reassoc ? "Rea" : "A", mgmt->sa,
 +		   capab_info, status_code, (u16)(aid & ~(BIT(15) | BIT(14))));
  
  	pos = mgmt->u.assoc_resp.variable;
  	ieee802_11_parse_elems(pos, len - (pos - (u8 *) mgmt), &elems);
@@@ -2136,9 -2158,9 +2136,9 @@@
  		u32 tu, ms;
  		tu = get_unaligned_le32(elems.timeout_int + 1);
  		ms = tu * 1024 / 1000;
 -		printk(KERN_DEBUG "%s: %pM rejected association temporarily; "
 -		       "comeback duration %u TU (%u ms)\n",
 -		       sdata->name, mgmt->sa, tu, ms);
 +		sdata_info(sdata,
 +			   "%pM rejected association temporarily; comeback duration %u TU (%u ms)\n",
 +			   mgmt->sa, tu, ms);
  		assoc_data->timeout = jiffies + msecs_to_jiffies(ms);
  		if (ms > IEEE80211_ASSOC_TIMEOUT)
  			run_again(ifmgd, assoc_data->timeout);
@@@ -2148,19 -2170,17 +2148,17 @@@
  	*bss = assoc_data->bss;
  
  	if (status_code != WLAN_STATUS_SUCCESS) {
 -		printk(KERN_DEBUG "%s: %pM denied association (code=%d)\n",
 -		       sdata->name, mgmt->sa, status_code);
 +		sdata_info(sdata, "%pM denied association (code=%d)\n",
 +			   mgmt->sa, status_code);
  		ieee80211_destroy_assoc_data(sdata, false);
  	} else {
- 		sdata_info(sdata, "associated\n");
- 
  		if (!ieee80211_assoc_success(sdata, *bss, mgmt, len)) {
  			/* oops -- internal error -- send timeout for now */
- 			ieee80211_destroy_assoc_data(sdata, true);
- 			sta_info_destroy_addr(sdata, mgmt->bssid);
+ 			ieee80211_destroy_assoc_data(sdata, false);
  			cfg80211_put_bss(*bss);
  			return RX_MGMT_CFG80211_ASSOC_TIMEOUT;
  		}
 -		printk(KERN_DEBUG "%s: associated\n", sdata->name);
++		sdata_info(sdata, "associated\n");
  
  		/*
  		 * destroy assoc_data afterwards, as otherwise an idle
@@@ -2260,7 -2280,7 +2258,7 @@@ static void ieee80211_rx_mgmt_probe_res
  	if (ifmgd->auth_data && !ifmgd->auth_data->bss->proberesp_ies &&
  	    ether_addr_equal(mgmt->bssid, ifmgd->auth_data->bss->bssid)) {
  		/* got probe response, continue with auth */
 -		printk(KERN_DEBUG "%s: direct probe responded\n", sdata->name);
 +		sdata_info(sdata, "direct probe responded\n");
  		ifmgd->auth_data->tries = 0;
  		ifmgd->auth_data->timeout = jiffies;
  		run_again(ifmgd, ifmgd->auth_data->timeout);
@@@ -2396,8 -2416,10 +2394,8 @@@ static void ieee80211_rx_mgmt_beacon(st
  	}
  
  	if (ifmgd->flags & IEEE80211_STA_BEACON_POLL) {
 -#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
 -		net_dbg_ratelimited("%s: cancelling probereq poll due to a received beacon\n",
 -				    sdata->name);
 -#endif
 +		mlme_dbg_ratelimited(sdata,
 +				     "cancelling probereq poll due to a received beacon\n");
  		mutex_lock(&local->mtx);
  		ifmgd->flags &= ~IEEE80211_STA_BEACON_POLL;
  		ieee80211_run_deferred_scan(local);
@@@ -2620,8 -2642,8 +2618,8 @@@ static int ieee80211_probe_auth(struct 
  	auth_data->tries++;
  
  	if (auth_data->tries > IEEE80211_AUTH_MAX_TRIES) {
 -		printk(KERN_DEBUG "%s: authentication with %pM timed out\n",
 -		       sdata->name, auth_data->bss->bssid);
 +		sdata_info(sdata, "authentication with %pM timed out\n",
 +			   auth_data->bss->bssid);
  
  		/*
  		 * Most likely AP is not in the range so remove the
@@@ -2633,9 -2655,9 +2631,9 @@@
  	}
  
  	if (auth_data->bss->proberesp_ies) {
 -		printk(KERN_DEBUG "%s: send auth to %pM (try %d/%d)\n",
 -		       sdata->name, auth_data->bss->bssid, auth_data->tries,
 -		       IEEE80211_AUTH_MAX_TRIES);
 +		sdata_info(sdata, "send auth to %pM (try %d/%d)\n",
 +			   auth_data->bss->bssid, auth_data->tries,
 +			   IEEE80211_AUTH_MAX_TRIES);
  
  		auth_data->expected_transaction = 2;
  		ieee80211_send_auth(sdata, 1, auth_data->algorithm,
@@@ -2645,9 -2667,9 +2643,9 @@@
  	} else {
  		const u8 *ssidie;
  
 -		printk(KERN_DEBUG "%s: direct probe to %pM (try %d/%i)\n",
 -		       sdata->name, auth_data->bss->bssid, auth_data->tries,
 -		       IEEE80211_AUTH_MAX_TRIES);
 +		sdata_info(sdata, "direct probe to %pM (try %d/%i)\n",
 +			   auth_data->bss->bssid, auth_data->tries,
 +			   IEEE80211_AUTH_MAX_TRIES);
  
  		ssidie = ieee80211_bss_get_ie(auth_data->bss, WLAN_EID_SSID);
  		if (!ssidie)
@@@ -2675,8 -2697,8 +2673,8 @@@ static int ieee80211_do_assoc(struct ie
  
  	assoc_data->tries++;
  	if (assoc_data->tries > IEEE80211_ASSOC_MAX_TRIES) {
 -		printk(KERN_DEBUG "%s: association with %pM timed out\n",
 -		       sdata->name, assoc_data->bss->bssid);
 +		sdata_info(sdata, "association with %pM timed out\n",
 +			   assoc_data->bss->bssid);
  
  		/*
  		 * Most likely AP is not in the range so remove the
@@@ -2687,9 -2709,9 +2685,9 @@@
  		return -ETIMEDOUT;
  	}
  
 -	printk(KERN_DEBUG "%s: associate with %pM (try %d/%d)\n",
 -	       sdata->name, assoc_data->bss->bssid, assoc_data->tries,
 -	       IEEE80211_ASSOC_MAX_TRIES);
 +	sdata_info(sdata, "associate with %pM (try %d/%d)\n",
 +		   assoc_data->bss->bssid, assoc_data->tries,
 +		   IEEE80211_ASSOC_MAX_TRIES);
  	ieee80211_send_assoc(sdata);
  
  	assoc_data->timeout = jiffies + IEEE80211_ASSOC_TIMEOUT;
@@@ -2762,31 -2784,45 +2760,31 @@@ void ieee80211_sta_work(struct ieee8021
  			ieee80211_reset_ap_probe(sdata);
  		else if (ifmgd->nullfunc_failed) {
  			if (ifmgd->probe_send_count < max_tries) {
 -#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
 -				wiphy_debug(local->hw.wiphy,
 -					    "%s: No ack for nullfunc frame to"
 -					    " AP %pM, try %d/%i\n",
 -					    sdata->name, bssid,
 -					    ifmgd->probe_send_count, max_tries);
 -#endif
 +				mlme_dbg(sdata,
 +					 "No ack for nullfunc frame to AP %pM, try %d/%i\n",
 +					 bssid, ifmgd->probe_send_count,
 +					 max_tries);
  				ieee80211_mgd_probe_ap_send(sdata);
  			} else {
 -#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
 -				wiphy_debug(local->hw.wiphy,
 -					    "%s: No ack for nullfunc frame to"
 -					    " AP %pM, disconnecting.\n",
 -					    sdata->name, bssid);
 -#endif
 +				mlme_dbg(sdata,
 +					 "No ack for nullfunc frame to AP %pM, disconnecting.\n",
 +					 bssid);
  				ieee80211_sta_connection_lost(sdata, bssid,
  					WLAN_REASON_DISASSOC_DUE_TO_INACTIVITY);
  			}
  		} else if (time_is_after_jiffies(ifmgd->probe_timeout))
  			run_again(ifmgd, ifmgd->probe_timeout);
  		else if (local->hw.flags & IEEE80211_HW_REPORTS_TX_ACK_STATUS) {
 -#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
 -			wiphy_debug(local->hw.wiphy,
 -				    "%s: Failed to send nullfunc to AP %pM"
 -				    " after %dms, disconnecting.\n",
 -				    sdata->name,
 -				    bssid, probe_wait_ms);
 -#endif
 +			mlme_dbg(sdata,
 +				 "Failed to send nullfunc to AP %pM after %dms, disconnecting\n",
 +				 bssid, probe_wait_ms);
  			ieee80211_sta_connection_lost(sdata, bssid,
  				WLAN_REASON_DISASSOC_DUE_TO_INACTIVITY);
  		} else if (ifmgd->probe_send_count < max_tries) {
 -#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
 -			wiphy_debug(local->hw.wiphy,
 -				    "%s: No probe response from AP %pM"
 -				    " after %dms, try %d/%i\n",
 -				    sdata->name,
 -				    bssid, probe_wait_ms,
 -				    ifmgd->probe_send_count, max_tries);
 -#endif
 +			mlme_dbg(sdata,
 +				 "No probe response from AP %pM after %dms, try %d/%i\n",
 +				 bssid, probe_wait_ms,
 +				 ifmgd->probe_send_count, max_tries);
  			ieee80211_mgd_probe_ap_send(sdata);
  		} else {
  			/*
@@@ -2901,8 -2937,11 +2899,8 @@@ void ieee80211_sta_restart(struct ieee8
  		sdata->flags &= ~IEEE80211_SDATA_DISCONNECT_RESUME;
  		mutex_lock(&ifmgd->mtx);
  		if (ifmgd->associated) {
 -#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
 -			wiphy_debug(sdata->local->hw.wiphy,
 -				    "%s: driver requested disconnect after resume.\n",
 -				    sdata->name);
 -#endif
 +			mlme_dbg(sdata,
 +				 "driver requested disconnect after resume\n");
  			ieee80211_sta_connection_lost(sdata,
  				ifmgd->associated->bssid,
  				WLAN_REASON_UNSPECIFIED);
@@@ -2990,7 -3029,7 +2988,7 @@@ static int ieee80211_prep_connection(st
  	struct ieee80211_local *local = sdata->local;
  	struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
  	struct ieee80211_bss *bss = (void *)cbss->priv;
 -	struct sta_info *sta;
 +	struct sta_info *sta = NULL;
  	bool have_sta = false;
  	int err;
  	int ht_cfreq;
@@@ -3043,11 -3082,13 +3041,11 @@@
  			 * since we look at probe response/beacon data here
  			 * it should be OK.
  			 */
 -			printk(KERN_DEBUG
 -			       "%s: Wrong control channel: center-freq: %d"
 -			       " ht-cfreq: %d ht->primary_chan: %d"
 -			       " band: %d. Disabling HT.\n",
 -			       sdata->name, cbss->channel->center_freq,
 -			       ht_cfreq, ht_oper->primary_chan,
 -			       cbss->channel->band);
 +			sdata_info(sdata,
 +				   "Wrong control channel: center-freq: %d ht-cfreq: %d ht->primary_chan: %d band: %d - Disabling HT\n",
 +				   cbss->channel->center_freq,
 +				   ht_cfreq, ht_oper->primary_chan,
 +				   cbss->channel->band);
  			ht_oper = NULL;
  		}
  	}
@@@ -3071,8 -3112,9 +3069,8 @@@
  	if (!ieee80211_set_channel_type(local, sdata, channel_type)) {
  		/* can only fail due to HT40+/- mismatch */
  		channel_type = NL80211_CHAN_HT20;
 -		printk(KERN_DEBUG
 -		       "%s: disabling 40 MHz due to multi-vif mismatch\n",
 -		       sdata->name);
 +		sdata_info(sdata,
 +			   "disabling 40 MHz due to multi-vif mismatch\n");
  		ifmgd->flags |= IEEE80211_STA_DISABLE_40MHZ;
  		WARN_ON(!ieee80211_set_channel_type(local, sdata,
  						    channel_type));
@@@ -3081,7 -3123,7 +3079,7 @@@
  	local->oper_channel = cbss->channel;
  	ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_CHANNEL);
  
 -	if (!have_sta) {
 +	if (sta) {
  		u32 rates = 0, basic_rates = 0;
  		bool have_higher_than_11mbit;
  		int min_rate = INT_MAX, min_rate_index = -1;
@@@ -3101,8 -3143,9 +3099,8 @@@
  		 * we can connect -- with a warning.
  		 */
  		if (!basic_rates && min_rate_index >= 0) {
 -			printk(KERN_DEBUG
 -			       "%s: No basic rates, using min rate instead.\n",
 -			       sdata->name);
 +			sdata_info(sdata,
 +				   "No basic rates, using min rate instead\n");
  			basic_rates = BIT(min_rate_index);
  		}
  
@@@ -3128,9 -3171,9 +3126,9 @@@
  		err = sta_info_insert(sta);
  		sta = NULL;
  		if (err) {
 -			printk(KERN_DEBUG
 -			       "%s: failed to insert STA entry for the AP (error %d)\n",
 -			       sdata->name, err);
 +			sdata_info(sdata,
 +				   "failed to insert STA entry for the AP (error %d)\n",
 +				   err);
  			return err;
  		}
  	} else
@@@ -3208,7 -3251,8 +3206,7 @@@ int ieee80211_mgd_auth(struct ieee80211
  	if (ifmgd->associated)
  		ieee80211_set_disassoc(sdata, 0, 0, false, NULL);
  
 -	printk(KERN_DEBUG "%s: authenticate with %pM\n",
 -	       sdata->name, req->bss->bssid);
 +	sdata_info(sdata, "authenticate with %pM\n", req->bss->bssid);
  
  	err = ieee80211_prep_connection(sdata, req->bss, false);
  	if (err)
@@@ -3243,7 -3287,7 +3241,7 @@@ int ieee80211_mgd_assoc(struct ieee8021
  	struct ieee80211_bss *bss = (void *)req->bss->priv;
  	struct ieee80211_mgd_assoc_data *assoc_data;
  	struct ieee80211_supported_band *sband;
 -	const u8 *ssidie;
 +	const u8 *ssidie, *ht_ie;
  	int i, err;
  
  	ssidie = ieee80211_bss_get_ie(req->bss, WLAN_EID_SSID);
@@@ -3291,15 -3335,11 +3289,15 @@@
  	 * We can set this to true for non-11n hardware, that'll be checked
  	 * separately along with the peer capabilities.
  	 */
 -	for (i = 0; i < req->crypto.n_ciphers_pairwise; i++)
 +	for (i = 0; i < req->crypto.n_ciphers_pairwise; i++) {
  		if (req->crypto.ciphers_pairwise[i] == WLAN_CIPHER_SUITE_WEP40 ||
  		    req->crypto.ciphers_pairwise[i] == WLAN_CIPHER_SUITE_TKIP ||
 -		    req->crypto.ciphers_pairwise[i] == WLAN_CIPHER_SUITE_WEP104)
 +		    req->crypto.ciphers_pairwise[i] == WLAN_CIPHER_SUITE_WEP104) {
  			ifmgd->flags |= IEEE80211_STA_DISABLE_11N;
 +			netdev_info(sdata->dev,
 +				    "disabling HT due to WEP/TKIP use\n");
 +		}
 +	}
  
  	if (req->flags & ASSOC_REQ_DISABLE_HT)
  		ifmgd->flags |= IEEE80211_STA_DISABLE_11N;
@@@ -3307,11 -3347,8 +3305,11 @@@
  	/* Also disable HT if we don't support it or the AP doesn't use WMM */
  	sband = local->hw.wiphy->bands[req->bss->channel->band];
  	if (!sband->ht_cap.ht_supported ||
 -	    local->hw.queues < IEEE80211_NUM_ACS || !bss->wmm_used)
 +	    local->hw.queues < IEEE80211_NUM_ACS || !bss->wmm_used) {
  		ifmgd->flags |= IEEE80211_STA_DISABLE_11N;
 +		netdev_info(sdata->dev,
 +			    "disabling HT as WMM/QoS is not supported\n");
 +	}
  
  	memcpy(&ifmgd->ht_capa, &req->ht_capa, sizeof(ifmgd->ht_capa));
  	memcpy(&ifmgd->ht_capa_mask, &req->ht_capa_mask,
@@@ -3337,13 -3374,8 +3335,13 @@@
  			  (local->hw.queues >= IEEE80211_NUM_ACS);
  	assoc_data->supp_rates = bss->supp_rates;
  	assoc_data->supp_rates_len = bss->supp_rates_len;
 -	assoc_data->ht_operation_ie =
 -		ieee80211_bss_get_ie(req->bss, WLAN_EID_HT_OPERATION);
 +
 +	ht_ie = ieee80211_bss_get_ie(req->bss, WLAN_EID_HT_OPERATION);
 +	if (ht_ie && ht_ie[1] >= sizeof(struct ieee80211_ht_operation))
 +		assoc_data->ap_ht_param =
 +			((struct ieee80211_ht_operation *)(ht_ie + 2))->ht_param;
 +	else
 +		ifmgd->flags |= IEEE80211_STA_DISABLE_11N;
  
  	if (bss->wmm_used && bss->uapsd_supported &&
  	    (sdata->local->hw.flags & IEEE80211_HW_SUPPORTS_UAPSD)) {
@@@ -3390,8 -3422,8 +3388,8 @@@
  		 * Wait up to one beacon interval ...
  		 * should this be more if we miss one?
  		 */
 -		printk(KERN_DEBUG "%s: waiting for beacon from %pM\n",
 -		       sdata->name, ifmgd->bssid);
 +		sdata_info(sdata, "waiting for beacon from %pM\n",
 +			   ifmgd->bssid);
  		assoc_data->timeout = TU_TO_EXP_TIME(req->bss->beacon_interval);
  	} else {
  		assoc_data->have_beacon = true;
@@@ -3410,8 -3442,8 +3408,8 @@@
  				corrupt_type = "beacon";
  		} else if (bss->corrupt_data & IEEE80211_BSS_CORRUPT_PROBE_RESP)
  			corrupt_type = "probe response";
 -		printk(KERN_DEBUG "%s: associating with AP with corrupt %s\n",
 -		       sdata->name, corrupt_type);
 +		sdata_info(sdata, "associating with AP with corrupt %s\n",
 +			   corrupt_type);
  	}
  
  	err = 0;
@@@ -3440,9 -3472,9 +3438,9 @@@ int ieee80211_mgd_deauth(struct ieee802
  		return 0;
  	}
  
 -	printk(KERN_DEBUG
 -	       "%s: deauthenticating from %pM by local choice (reason=%d)\n",
 -	       sdata->name, req->bssid, req->reason_code);
 +	sdata_info(sdata,
 +		   "deauthenticating from %pM by local choice (reason=%d)\n",
 +		   req->bssid, req->reason_code);
  
  	if (ifmgd->associated &&
  	    ether_addr_equal(ifmgd->associated->bssid, req->bssid))
@@@ -3484,9 -3516,8 +3482,9 @@@ int ieee80211_mgd_disassoc(struct ieee8
  		return -ENOLINK;
  	}
  
 -	printk(KERN_DEBUG "%s: disassociating from %pM by local choice (reason=%d)\n",
 -	       sdata->name, req->bss->bssid, req->reason_code);
 +	sdata_info(sdata,
 +		   "disassociating from %pM by local choice (reason=%d)\n",
 +		   req->bss->bssid, req->reason_code);
  
  	memcpy(bssid, req->bss->bssid, ETH_ALEN);
  	ieee80211_set_disassoc(sdata, IEEE80211_STYPE_DISASSOC,
@@@ -3527,3 -3558,10 +3525,3 @@@ void ieee80211_cqm_rssi_notify(struct i
  	cfg80211_cqm_rssi_notify(sdata->dev, rssi_event, gfp);
  }
  EXPORT_SYMBOL(ieee80211_cqm_rssi_notify);
 -
 -unsigned char ieee80211_get_operstate(struct ieee80211_vif *vif)
 -{
 -	struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
 -	return sdata->dev->operstate;
 -}
 -EXPORT_SYMBOL(ieee80211_get_operstate);
diff --combined net/nfc/llcp/sock.c
index 2c0b317,e06d458..05ca5a6
--- a/net/nfc/llcp/sock.c
+++ b/net/nfc/llcp/sock.c
@@@ -111,7 -111,7 +111,7 @@@ static int llcp_sock_bind(struct socke
  	}
  
  	llcp_sock->dev = dev;
 -	llcp_sock->local = local;
 +	llcp_sock->local = nfc_llcp_local_get(local);
  	llcp_sock->nfc_protocol = llcp_addr.nfc_protocol;
  	llcp_sock->service_name_len = min_t(unsigned int,
  					    llcp_addr.service_name_len,
@@@ -124,7 -124,7 +124,7 @@@
  	if (llcp_sock->ssap == LLCP_MAX_SAP)
  		goto put_dev;
  
 -	local->sockets[llcp_sock->ssap] = llcp_sock;
 +	nfc_llcp_sock_link(&local->sockets, sk);
  
  	pr_debug("Socket bound to SAP %d\n", llcp_sock->ssap);
  
@@@ -292,7 -292,7 +292,7 @@@ static int llcp_sock_getname(struct soc
  
  	pr_debug("%p\n", sk);
  
- 	if (llcp_sock == NULL)
+ 	if (llcp_sock == NULL || llcp_sock->dev == NULL)
  		return -EBADFD;
  
  	addr->sa_family = AF_NFC;
@@@ -382,6 -382,15 +382,6 @@@ static int llcp_sock_release(struct soc
  		goto out;
  	}
  
 -	mutex_lock(&local->socket_lock);
 -
 -	if (llcp_sock == local->sockets[llcp_sock->ssap])
 -		local->sockets[llcp_sock->ssap] = NULL;
 -	else
 -		list_del_init(&llcp_sock->list);
 -
 -	mutex_unlock(&local->socket_lock);
 -
  	lock_sock(sk);
  
  	/* Send a DISC */
@@@ -406,12 -415,14 +406,12 @@@
  		}
  	}
  
 -	/* Freeing the SAP */
 -	if ((sk->sk_state == LLCP_CONNECTED
 -	     && llcp_sock->ssap > LLCP_LOCAL_SAP_OFFSET) ||
 -	    sk->sk_state == LLCP_BOUND || sk->sk_state == LLCP_LISTEN)
 -		nfc_llcp_put_ssap(llcp_sock->local, llcp_sock->ssap);
 +	nfc_llcp_put_ssap(llcp_sock->local, llcp_sock->ssap);
  
  	release_sock(sk);
  
 +	nfc_llcp_sock_unlink(&local->sockets, sk);
 +
  out:
  	sock_orphan(sk);
  	sock_put(sk);
@@@ -479,8 -490,7 +479,8 @@@ static int llcp_sock_connect(struct soc
  	}
  
  	llcp_sock->dev = dev;
 -	llcp_sock->local = local;
 +	llcp_sock->local = nfc_llcp_local_get(local);
 +	llcp_sock->miu = llcp_sock->local->remote_miu;
  	llcp_sock->ssap = nfc_llcp_get_local_ssap(local);
  	if (llcp_sock->ssap == LLCP_SAP_MAX) {
  		ret = -ENOMEM;
@@@ -498,26 -508,21 +498,26 @@@
  					  llcp_sock->service_name_len,
  					  GFP_KERNEL);
  
 -	local->sockets[llcp_sock->ssap] = llcp_sock;
 +	nfc_llcp_sock_link(&local->connecting_sockets, sk);
  
  	ret = nfc_llcp_send_connect(llcp_sock);
  	if (ret)
 -		goto put_dev;
 +		goto sock_unlink;
  
  	ret = sock_wait_state(sk, LLCP_CONNECTED,
  			      sock_sndtimeo(sk, flags & O_NONBLOCK));
  	if (ret)
 -		goto put_dev;
 +		goto sock_unlink;
  
  	release_sock(sk);
  
  	return 0;
  
 +sock_unlink:
 +	nfc_llcp_put_ssap(local, llcp_sock->ssap);
 +
 +	nfc_llcp_sock_unlink(&local->connecting_sockets, sk);
 +
  put_dev:
  	nfc_put_device(dev);
  
@@@ -682,14 -687,13 +682,14 @@@ struct sock *nfc_llcp_sock_alloc(struc
  
  	llcp_sock->ssap = 0;
  	llcp_sock->dsap = LLCP_SAP_SDP;
 +	llcp_sock->rw = LLCP_DEFAULT_RW;
 +	llcp_sock->miu = LLCP_DEFAULT_MIU;
  	llcp_sock->send_n = llcp_sock->send_ack_n = 0;
  	llcp_sock->recv_n = llcp_sock->recv_ack_n = 0;
  	llcp_sock->remote_ready = 1;
  	skb_queue_head_init(&llcp_sock->tx_queue);
  	skb_queue_head_init(&llcp_sock->tx_pending_queue);
  	skb_queue_head_init(&llcp_sock->tx_backlog_queue);
 -	INIT_LIST_HEAD(&llcp_sock->list);
  	INIT_LIST_HEAD(&llcp_sock->accept_queue);
  
  	if (sock != NULL)
@@@ -700,6 -704,8 +700,6 @@@
  
  void nfc_llcp_sock_free(struct nfc_llcp_sock *sock)
  {
 -	struct nfc_llcp_local *local = sock->local;
 -
  	kfree(sock->service_name);
  
  	skb_queue_purge(&sock->tx_queue);
@@@ -708,9 -714,12 +708,9 @@@
  
  	list_del_init(&sock->accept_queue);
  
 -	if (local != NULL && sock == local->sockets[sock->ssap])
 -		local->sockets[sock->ssap] = NULL;
 -	else
 -		list_del_init(&sock->list);
 -
  	sock->parent = NULL;
 +
 +	nfc_llcp_local_put(sock->local);
  }
  
  static int llcp_sock_create(struct net *net, struct socket *sock,

-- 
LinuxNextTracking

